Magellan Linux

Annotation of /trunk/kernel-alx/patches-3.8/0104-3.8.5-all-fixes.patch



Revision 2156
Tue Apr 16 06:34:34 2013 UTC by niro
File size: 126908 bytes
-linux-3.8.5
1 niro 2156 diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
2     index b8effa1..4ceb266 100644
3     --- a/arch/arm/boot/dts/tegra20.dtsi
4     +++ b/arch/arm/boot/dts/tegra20.dtsi
5     @@ -314,7 +314,7 @@
6    
7     spi@7000d800 {
8     compatible = "nvidia,tegra20-slink";
9     - reg = <0x7000d480 0x200>;
10     + reg = <0x7000d800 0x200>;
11     interrupts = <0 83 0x04>;
12     nvidia,dma-request-selector = <&apbdma 17>;
13     #address-cells = <1>;
14     diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
15     index 529fdb8..aeea838 100644
16     --- a/arch/arm/boot/dts/tegra30.dtsi
17     +++ b/arch/arm/boot/dts/tegra30.dtsi
18     @@ -309,7 +309,7 @@
19    
20     spi@7000d800 {
21     compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink";
22     - reg = <0x7000d480 0x200>;
23     + reg = <0x7000d800 0x200>;
24     interrupts = <0 83 0x04>;
25     nvidia,dma-request-selector = <&apbdma 17>;
26     #address-cells = <1>;
27     diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
28     index dda3904..5397da0 100644
29     --- a/arch/arm/mm/dma-mapping.c
30     +++ b/arch/arm/mm/dma-mapping.c
31     @@ -330,6 +330,7 @@ static int __init atomic_pool_init(void)
32     {
33     struct dma_pool *pool = &atomic_pool;
34     pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
35     + gfp_t gfp = GFP_KERNEL | GFP_DMA;
36     unsigned long nr_pages = pool->size >> PAGE_SHIFT;
37     unsigned long *bitmap;
38     struct page *page;
39     @@ -348,8 +349,8 @@ static int __init atomic_pool_init(void)
40     if (IS_ENABLED(CONFIG_CMA))
41     ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
42     else
43     - ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
44     - &page, NULL);
45     + ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
46     + NULL);
47     if (ptr) {
48     int i;
49    
50     diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
51     index 05928aa..906fea3 100644
52     --- a/arch/x86/lib/usercopy_64.c
53     +++ b/arch/x86/lib/usercopy_64.c
54     @@ -74,10 +74,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
55     char c;
56     unsigned zero_len;
57    
58     - for (; len; --len) {
59     + for (; len; --len, to++) {
60     if (__get_user_nocheck(c, from++, sizeof(char)))
61     break;
62     - if (__put_user_nocheck(c, to++, sizeof(char)))
63     + if (__put_user_nocheck(c, to, sizeof(char)))
64     break;
65     }
66    
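
The usercopy hunk above moves the destination increment out of the __put_user_nocheck() call so that, when a write fault ends the loop, "to" still points at the first byte that was never stored and the tail-zeroing that follows covers it. A minimal userspace sketch of the same pointer bookkeeping; the copy_tail() helper and its simulated fault are invented for illustration and are not kernel code:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for copy_user_handle_tail(): copy byte by byte,
 * stop at a simulated fault, and zero whatever was not written so no
 * stale destination bytes are left behind. */
static unsigned copy_tail(char *to, const char *from, unsigned len,
                          unsigned fault_at /* simulated faulting offset */)
{
        unsigned i;

        for (i = 0; len; --len, to++, i++) {
                if (i == fault_at)              /* pretend __put_user() faulted */
                        break;
                *to = from[i];
        }
        /* "to" still points at the byte that was never written, exactly
         * because the increment happens only after a successful store. */
        if (len)
                memset(to, 0, len);
        return len;                             /* bytes not copied, as in the kernel helper */
}

int main(void)
{
        char src[8] = "ABCDEFG";
        char dst[8];

        memset(dst, 'x', sizeof(dst));          /* stale data to be cleared */
        copy_tail(dst, src, 8, 3);              /* fault after 3 bytes */
        printf("%.8s\n", dst);                  /* prints "ABC", rest is zeroed */
        return 0;
}
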
67     diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
68     index 35da181..e9e486f 100644
69     --- a/drivers/acpi/glue.c
70     +++ b/drivers/acpi/glue.c
71     @@ -95,40 +95,31 @@ static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
72     return ret;
73     }
74    
75     -/* Get device's handler per its address under its parent */
76     -struct acpi_find_child {
77     - acpi_handle handle;
78     - u64 address;
79     -};
80     -
81     -static acpi_status
82     -do_acpi_find_child(acpi_handle handle, u32 lvl, void *context, void **rv)
83     +static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
84     + void *addr_p, void **ret_p)
85     {
86     + unsigned long long addr;
87     acpi_status status;
88     - struct acpi_device_info *info;
89     - struct acpi_find_child *find = context;
90     -
91     - status = acpi_get_object_info(handle, &info);
92     - if (ACPI_SUCCESS(status)) {
93     - if ((info->address == find->address)
94     - && (info->valid & ACPI_VALID_ADR))
95     - find->handle = handle;
96     - kfree(info);
97     +
98     + status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
99     + if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) {
100     + *ret_p = handle;
101     + return AE_CTRL_TERMINATE;
102     }
103     return AE_OK;
104     }
105    
106     acpi_handle acpi_get_child(acpi_handle parent, u64 address)
107     {
108     - struct acpi_find_child find = { NULL, address };
109     + void *ret = NULL;
110    
111     if (!parent)
112     return NULL;
113     - acpi_walk_namespace(ACPI_TYPE_DEVICE, parent,
114     - 1, do_acpi_find_child, NULL, &find, NULL);
115     - return find.handle;
116     -}
117    
118     + acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL,
119     + do_acpi_find_child, &address, &ret);
120     + return (acpi_handle)ret;
121     +}
122     EXPORT_SYMBOL(acpi_get_child);
123    
124     static int acpi_bind_one(struct device *dev, acpi_handle handle)
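
The rewritten do_acpi_find_child() above evaluates _ADR directly and returns AE_CTRL_TERMINATE to stop acpi_walk_namespace() at the first match, instead of accumulating results through a context structure. A generic userspace sketch of that callback-terminates-the-walk pattern follows; walk(), find_child() and the status values are invented analogues, not ACPICA interfaces:

#include <stdio.h>

enum status { WALK_OK, WALK_TERMINATE };  /* analogue of AE_OK / AE_CTRL_TERMINATE */

typedef enum status (*visit_fn)(unsigned addr, void *ctx, void **ret);

/* Visit every node; stop early as soon as the callback says TERMINATE. */
static void walk(const unsigned *addrs, int n, visit_fn fn, void *ctx, void **ret)
{
        for (int i = 0; i < n; i++)
                if (fn(addrs[i], ctx, ret) == WALK_TERMINATE)
                        return;
}

/* Match the address passed in ctx and report the node through *ret,
 * mirroring the rewritten do_acpi_find_child(). */
static enum status find_child(unsigned addr, void *ctx, void **ret)
{
        if (addr == *(unsigned *)ctx) {
                *ret = (void *)(unsigned long)addr;
                return WALK_TERMINATE;
        }
        return WALK_OK;
}

int main(void)
{
        unsigned children[] = { 0x1, 0x2, 0x3 };
        unsigned wanted = 0x2;
        void *found = NULL;

        walk(children, 3, find_child, &wanted, &found);
        printf("found child with _ADR %#lx\n", (unsigned long)found);
        return 0;
}
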
125     diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
126     index 9b00072..42c759a 100644
127     --- a/drivers/firmware/Kconfig
128     +++ b/drivers/firmware/Kconfig
129     @@ -53,6 +53,24 @@ config EFI_VARS
130     Subsequent efibootmgr releases may be found at:
131     <http://linux.dell.com/efibootmgr>
132    
133     +config EFI_VARS_PSTORE
134     + bool "Register efivars backend for pstore"
135     + depends on EFI_VARS && PSTORE
136     + default y
137     + help
138     + Say Y here to enable use efivars as a backend to pstore. This
139     + will allow writing console messages, crash dumps, or anything
140     + else supported by pstore to EFI variables.
141     +
142     +config EFI_VARS_PSTORE_DEFAULT_DISABLE
143     + bool "Disable using efivars as a pstore backend by default"
144     + depends on EFI_VARS_PSTORE
145     + default n
146     + help
147     + Saying Y here will disable the use of efivars as a storage
148     + backend for pstore by default. This setting can be overridden
149     + using the efivars module's pstore_disable parameter.
150     +
151     config EFI_PCDP
152     bool "Console device selection via EFI PCDP or HCDP table"
153     depends on ACPI && EFI && IA64
154     diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
155     index 2a2e145..e1d6253 100644
156     --- a/drivers/firmware/efivars.c
157     +++ b/drivers/firmware/efivars.c
158     @@ -103,6 +103,11 @@ MODULE_VERSION(EFIVARS_VERSION);
159     */
160     #define GUID_LEN 36
161    
162     +static bool efivars_pstore_disable =
163     + IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
164     +
165     +module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
166     +
167     /*
168     * The maximum size of VariableName + Data = 1024
169     * Therefore, it's reasonable to save that much
170     @@ -1301,9 +1306,7 @@ static const struct inode_operations efivarfs_dir_inode_operations = {
171     .create = efivarfs_create,
172     };
173    
174     -static struct pstore_info efi_pstore_info;
175     -
176     -#ifdef CONFIG_PSTORE
177     +#ifdef CONFIG_EFI_VARS_PSTORE
178    
179     static int efi_pstore_open(struct pstore_info *psi)
180     {
181     @@ -1500,38 +1503,6 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
182    
183     return 0;
184     }
185     -#else
186     -static int efi_pstore_open(struct pstore_info *psi)
187     -{
188     - return 0;
189     -}
190     -
191     -static int efi_pstore_close(struct pstore_info *psi)
192     -{
193     - return 0;
194     -}
195     -
196     -static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, int *count,
197     - struct timespec *timespec,
198     - char **buf, struct pstore_info *psi)
199     -{
200     - return -1;
201     -}
202     -
203     -static int efi_pstore_write(enum pstore_type_id type,
204     - enum kmsg_dump_reason reason, u64 *id,
205     - unsigned int part, int count, size_t size,
206     - struct pstore_info *psi)
207     -{
208     - return 0;
209     -}
210     -
211     -static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
212     - struct timespec time, struct pstore_info *psi)
213     -{
214     - return 0;
215     -}
216     -#endif
217    
218     static struct pstore_info efi_pstore_info = {
219     .owner = THIS_MODULE,
220     @@ -1543,6 +1514,24 @@ static struct pstore_info efi_pstore_info = {
221     .erase = efi_pstore_erase,
222     };
223    
224     +static void efivar_pstore_register(struct efivars *efivars)
225     +{
226     + efivars->efi_pstore_info = efi_pstore_info;
227     + efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
228     + if (efivars->efi_pstore_info.buf) {
229     + efivars->efi_pstore_info.bufsize = 1024;
230     + efivars->efi_pstore_info.data = efivars;
231     + spin_lock_init(&efivars->efi_pstore_info.buf_lock);
232     + pstore_register(&efivars->efi_pstore_info);
233     + }
234     +}
235     +#else
236     +static void efivar_pstore_register(struct efivars *efivars)
237     +{
238     + return;
239     +}
240     +#endif
241     +
242     static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
243     struct bin_attribute *bin_attr,
244     char *buf, loff_t pos, size_t count)
245     @@ -1942,15 +1931,8 @@ int register_efivars(struct efivars *efivars,
246     if (error)
247     unregister_efivars(efivars);
248    
249     - efivars->efi_pstore_info = efi_pstore_info;
250     -
251     - efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
252     - if (efivars->efi_pstore_info.buf) {
253     - efivars->efi_pstore_info.bufsize = 1024;
254     - efivars->efi_pstore_info.data = efivars;
255     - spin_lock_init(&efivars->efi_pstore_info.buf_lock);
256     - pstore_register(&efivars->efi_pstore_info);
257     - }
258     + if (!efivars_pstore_disable)
259     + efivar_pstore_register(efivars);
260    
261     register_filesystem(&efivarfs_type);
262    
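
The new efivars_pstore_disable flag above takes its default from IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE), so the Kconfig option added earlier in this patch chooses the built-in default while the pstore_disable module parameter can still override it at runtime. A reduced sketch of how the IS_ENABLED() preprocessor trick turns a config symbol that is either defined to 1 or absent into a 0/1 constant; this is a simplified illustration, not the full <linux/kconfig.h> implementation (which also handles =m):

#include <stdio.h>

/* Simplified IS_ENABLED(): expands to 1 if the symbol is #defined to 1,
 * and to 0 if the symbol is not defined at all. */
#define ARG_PLACEHOLDER_1       0,
#define take_second(ignored, val, ...)  val
#define is_enabled3(arg_or_junk)        take_second(arg_or_junk 1, 0)
#define is_enabled2(val)                is_enabled3(ARG_PLACEHOLDER_##val)
#define IS_ENABLED(option)              is_enabled2(option)

#define CONFIG_PSTORE_DEFAULT_DISABLE 1 /* comment this out to flip the default */

static int pstore_disable = IS_ENABLED(CONFIG_PSTORE_DEFAULT_DISABLE);

int main(void)
{
        printf("pstore_disable defaults to %d\n", pstore_disable);
        return 0;
}
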
263     diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
264     index aaded22..dfd9ed3 100644
265     --- a/drivers/gpu/drm/drm_edid.c
266     +++ b/drivers/gpu/drm/drm_edid.c
267     @@ -894,7 +894,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
268     unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
269     unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
270     unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
271     - unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
272     + unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
273     unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
274    
275     /* ignore tiny modes */
276     @@ -975,6 +975,7 @@ set_size:
277     }
278    
279     mode->type = DRM_MODE_TYPE_DRIVER;
280     + mode->vrefresh = drm_mode_vrefresh(mode);
281     drm_mode_set_name(mode);
282    
283     return mode;
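
Two of the drm_edid hunks above are plain bit and arithmetic fixes: the upper two bits of the vertical sync offset sit in bits 3:2 of the packed hsync_vsync_offset_pulse_width_hi byte and must be shifted left, not right, to land above the low nibble, and the refresh rate is now derived from the parsed timings. A small standalone sketch of both calculations; the field names are taken from the hunk, everything else is detached from the DRM structures, and vrefresh() is only an approximation in the spirit of drm_mode_vrefresh():

#include <stdio.h>

/* Unpack the 6-bit vertical sync offset from an EDID detailed timing
 * descriptor: the low 4 bits come from the high nibble of
 * vsync_offset_pulse_width_lo, the top 2 bits from bits 3:2 of the
 * shared "hi" byte, which therefore need a left shift by 2. */
static unsigned vsync_offset(unsigned char hi, unsigned char vsync_lo)
{
        return ((hi & 0xc) << 2) | (vsync_lo >> 4);
}

/* Approximate vertical refresh in Hz from pixel clock (kHz) and totals. */
static int vrefresh(unsigned clock_khz, unsigned htotal, unsigned vtotal)
{
        return (clock_khz * 1000 + htotal * vtotal / 2) / (htotal * vtotal);
}

int main(void)
{
        /* hi = 0x08 -> bits 3:2 = 0b10, vsync_lo = 0x30 -> low bits = 3,
         * so the offset is (0b10 << 4) | 3 = 35 lines. */
        printf("vsync offset = %u\n", vsync_offset(0x08, 0x30));

        /* 1920x1080@60: 148500 kHz / (2200 * 1125) = 60 Hz */
        printf("vrefresh     = %d Hz\n", vrefresh(148500, 2200, 1125));
        return 0;
}
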
284     diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
285     index 8a7c48b..261efc8e 100644
286     --- a/drivers/gpu/drm/i915/i915_debugfs.c
287     +++ b/drivers/gpu/drm/i915/i915_debugfs.c
288     @@ -103,7 +103,7 @@ static const char *cache_level_str(int type)
289     static void
290     describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
291     {
292     - seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
293     + seq_printf(m, "%pK: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
294     &obj->base,
295     get_pin_flag(obj),
296     get_tiling_flag(obj),
297     diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
298     index 26d08bb..7adf5a7 100644
299     --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
300     +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
301     @@ -706,15 +706,20 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
302     int count)
303     {
304     int i;
305     + int relocs_total = 0;
306     + int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
307    
308     for (i = 0; i < count; i++) {
309     char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
310     int length; /* limited by fault_in_pages_readable() */
311    
312     - /* First check for malicious input causing overflow */
313     - if (exec[i].relocation_count >
314     - INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
315     + /* First check for malicious input causing overflow in
316     + * the worst case where we need to allocate the entire
317     + * relocation tree as a single array.
318     + */
319     + if (exec[i].relocation_count > relocs_max - relocs_total)
320     return -EINVAL;
321     + relocs_total += exec[i].relocation_count;
322    
323     length = exec[i].relocation_count *
324     sizeof(struct drm_i915_gem_relocation_entry);
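
The execbuffer hunk above replaces the per-entry overflow check with a running total, so that many buffers with individually small relocation counts can no longer overflow the combined allocation. A standalone sketch of that headroom-before-add pattern; the reloc_entry layout is a made-up stand-in for drm_i915_gem_relocation_entry:

#include <limits.h>
#include <stdio.h>

struct reloc_entry { unsigned long long offset, delta; };       /* stand-in */

/* Reject the list if the *total* relocation count would overflow the
 * size of a single combined array, not just each individual count. */
static int validate(const unsigned counts[], int n)
{
        int relocs_total = 0;
        int relocs_max = INT_MAX / sizeof(struct reloc_entry);

        for (int i = 0; i < n; i++) {
                if (counts[i] > relocs_max - relocs_total)
                        return -1;              /* would overflow: -EINVAL */
                relocs_total += counts[i];
        }
        return 0;
}

int main(void)
{
        unsigned ok[]  = { 1000, 2000, 3000 };
        unsigned bad[] = { INT_MAX / 16, INT_MAX / 16, INT_MAX / 16 };

        printf("ok  -> %d\n", validate(ok, 3)); /* 0 */
        printf("bad -> %d\n", validate(bad, 3));/* -1 */
        return 0;
}
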
325     diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
326     index fb3715b..2e1c10a 100644
327     --- a/drivers/gpu/drm/i915/intel_dp.c
328     +++ b/drivers/gpu/drm/i915/intel_dp.c
329     @@ -1850,7 +1850,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
330     for (i = 0; i < intel_dp->lane_count; i++)
331     if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
332     break;
333     - if (i == intel_dp->lane_count && voltage_tries == 5) {
334     + if (i == intel_dp->lane_count) {
335     ++loop_tries;
336     if (loop_tries == 5) {
337     DRM_DEBUG_KMS("too many full retries, give up\n");
338     diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
339     index a3730e0..bee8cb6 100644
340     --- a/drivers/gpu/drm/i915/intel_panel.c
341     +++ b/drivers/gpu/drm/i915/intel_panel.c
342     @@ -321,9 +321,6 @@ void intel_panel_enable_backlight(struct drm_device *dev,
343     if (dev_priv->backlight_level == 0)
344     dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
345    
346     - dev_priv->backlight_enabled = true;
347     - intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
348     -
349     if (INTEL_INFO(dev)->gen >= 4) {
350     uint32_t reg, tmp;
351    
352     @@ -359,12 +356,12 @@ void intel_panel_enable_backlight(struct drm_device *dev,
353     }
354    
355     set_level:
356     - /* Check the current backlight level and try to set again if it's zero.
357     - * On some machines, BLC_PWM_CPU_CTL is cleared to zero automatically
358     - * when BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1 are written.
359     + /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
360     + * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
361     + * registers are set.
362     */
363     - if (!intel_panel_get_backlight(dev))
364     - intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
365     + dev_priv->backlight_enabled = true;
366     + intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
367     }
368    
369     static void intel_panel_init_backlight(struct drm_device *dev)
370     diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
371     index d3d99a2..4f50c40 100644
372     --- a/drivers/gpu/drm/mgag200/mgag200_mode.c
373     +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
374     @@ -382,19 +382,19 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
375     m = n = p = 0;
376     vcomax = 800000;
377     vcomin = 400000;
378     - pllreffreq = 3333;
379     + pllreffreq = 33333;
380    
381     delta = 0xffffffff;
382     permitteddelta = clock * 5 / 1000;
383    
384     - for (testp = 16; testp > 0; testp--) {
385     + for (testp = 16; testp > 0; testp >>= 1) {
386     if (clock * testp > vcomax)
387     continue;
388     if (clock * testp < vcomin)
389     continue;
390    
391     for (testm = 1; testm < 33; testm++) {
392     - for (testn = 1; testn < 257; testn++) {
393     + for (testn = 17; testn < 257; testn++) {
394     computed = (pllreffreq * testn) /
395     (testm * testp);
396     if (computed > clock)
397     @@ -404,11 +404,11 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
398     if (tmpdelta < delta) {
399     delta = tmpdelta;
400     n = testn - 1;
401     - m = (testm - 1) | ((n >> 1) & 0x80);
402     + m = (testm - 1);
403     p = testp - 1;
404     }
405     if ((clock * testp) >= 600000)
406     - p |= 80;
407     + p |= 0x80;
408     }
409     }
410     }
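
The G200EH PLL hunk above fixes a missing digit in the PLL reference frequency (3333 instead of 33333), steps the post-divider through power-of-two values, starts testn at 17, and replaces the decimal literal in p |= 80 with the intended bit mask 0x80. A miniature, self-contained version of the same brute-force divider search; the target clock and VCO range below are illustrative, not the G200EH limits:

#include <stdio.h>

/* Pick n, m and a power-of-two post-divider p so that ref * n / (m * p)
 * lands as close as possible to the target clock, while the VCO output
 * (target * p) stays inside its allowed range.  Values are in kHz. */
int main(void)
{
        const unsigned ref = 33333, target = 100000;
        const unsigned vcomin = 400000, vcomax = 800000;
        unsigned best_n = 0, best_m = 0, best_p = 0, delta = ~0u;

        for (unsigned p = 16; p > 0; p >>= 1) {         /* 16, 8, 4, 2, 1 */
                if (target * p < vcomin || target * p > vcomax)
                        continue;
                for (unsigned m = 1; m < 33; m++) {
                        for (unsigned n = 17; n < 257; n++) {
                                unsigned computed = ref * n / (m * p);
                                unsigned d = computed > target ?
                                        computed - target : target - computed;
                                if (d < delta) {
                                        delta = d;
                                        best_n = n;
                                        best_m = m;
                                        best_p = p;
                                }
                        }
                }
        }
        printf("n=%u m=%u p=%u -> %u kHz (target %u)\n",
               best_n, best_m, best_p,
               ref * best_n / (best_m * best_p), target);
        return 0;
}
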
411     diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
412     index 835992d..b64e55d 100644
413     --- a/drivers/gpu/drm/radeon/ni.c
414     +++ b/drivers/gpu/drm/radeon/ni.c
415     @@ -466,13 +466,19 @@ static void cayman_gpu_init(struct radeon_device *rdev)
416     (rdev->pdev->device == 0x9907) ||
417     (rdev->pdev->device == 0x9908) ||
418     (rdev->pdev->device == 0x9909) ||
419     + (rdev->pdev->device == 0x990B) ||
420     + (rdev->pdev->device == 0x990C) ||
421     + (rdev->pdev->device == 0x990F) ||
422     (rdev->pdev->device == 0x9910) ||
423     - (rdev->pdev->device == 0x9917)) {
424     + (rdev->pdev->device == 0x9917) ||
425     + (rdev->pdev->device == 0x9999)) {
426     rdev->config.cayman.max_simds_per_se = 6;
427     rdev->config.cayman.max_backends_per_se = 2;
428     } else if ((rdev->pdev->device == 0x9903) ||
429     (rdev->pdev->device == 0x9904) ||
430     (rdev->pdev->device == 0x990A) ||
431     + (rdev->pdev->device == 0x990D) ||
432     + (rdev->pdev->device == 0x990E) ||
433     (rdev->pdev->device == 0x9913) ||
434     (rdev->pdev->device == 0x9918)) {
435     rdev->config.cayman.max_simds_per_se = 4;
436     @@ -481,6 +487,9 @@ static void cayman_gpu_init(struct radeon_device *rdev)
437     (rdev->pdev->device == 0x9990) ||
438     (rdev->pdev->device == 0x9991) ||
439     (rdev->pdev->device == 0x9994) ||
440     + (rdev->pdev->device == 0x9995) ||
441     + (rdev->pdev->device == 0x9996) ||
442     + (rdev->pdev->device == 0x999A) ||
443     (rdev->pdev->device == 0x99A0)) {
444     rdev->config.cayman.max_simds_per_se = 3;
445     rdev->config.cayman.max_backends_per_se = 1;
446     @@ -614,11 +623,22 @@ static void cayman_gpu_init(struct radeon_device *rdev)
447     WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
448     WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
449    
450     - tmp = gb_addr_config & NUM_PIPES_MASK;
451     - tmp = r6xx_remap_render_backend(rdev, tmp,
452     - rdev->config.cayman.max_backends_per_se *
453     - rdev->config.cayman.max_shader_engines,
454     - CAYMAN_MAX_BACKENDS, disabled_rb_mask);
455     + if ((rdev->config.cayman.max_backends_per_se == 1) &&
456     + (rdev->flags & RADEON_IS_IGP)) {
457     + if ((disabled_rb_mask & 3) == 1) {
458     + /* RB0 disabled, RB1 enabled */
459     + tmp = 0x11111111;
460     + } else {
461     + /* RB1 disabled, RB0 enabled */
462     + tmp = 0x00000000;
463     + }
464     + } else {
465     + tmp = gb_addr_config & NUM_PIPES_MASK;
466     + tmp = r6xx_remap_render_backend(rdev, tmp,
467     + rdev->config.cayman.max_backends_per_se *
468     + rdev->config.cayman.max_shader_engines,
469     + CAYMAN_MAX_BACKENDS, disabled_rb_mask);
470     + }
471     WREG32(GB_BACKEND_MAP, tmp);
472    
473     cgts_tcc_disable = 0xffff0000;
474     @@ -1662,6 +1682,7 @@ int cayman_resume(struct radeon_device *rdev)
475     int cayman_suspend(struct radeon_device *rdev)
476     {
477     r600_audio_fini(rdev);
478     + radeon_vm_manager_fini(rdev);
479     cayman_cp_enable(rdev, false);
480     cayman_dma_stop(rdev);
481     evergreen_irq_suspend(rdev);
482     diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
483     index bedda9c..a2f0c24 100644
484     --- a/drivers/gpu/drm/radeon/radeon_benchmark.c
485     +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
486     @@ -135,13 +135,15 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
487     sdomain, ddomain, "dma");
488     }
489    
490     - time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
491     - RADEON_BENCHMARK_COPY_BLIT, n);
492     - if (time < 0)
493     - goto out_cleanup;
494     - if (time > 0)
495     - radeon_benchmark_log_results(n, size, time,
496     - sdomain, ddomain, "blit");
497     + if (rdev->asic->copy.blit) {
498     + time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
499     + RADEON_BENCHMARK_COPY_BLIT, n);
500     + if (time < 0)
501     + goto out_cleanup;
502     + if (time > 0)
503     + radeon_benchmark_log_results(n, size, time,
504     + sdomain, ddomain, "blit");
505     + }
506    
507     out_cleanup:
508     if (sobj) {
509     diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
510     index ae8b482..dd00721 100644
511     --- a/drivers/gpu/drm/radeon/si.c
512     +++ b/drivers/gpu/drm/radeon/si.c
513     @@ -4232,6 +4232,7 @@ int si_resume(struct radeon_device *rdev)
514    
515     int si_suspend(struct radeon_device *rdev)
516     {
517     + radeon_vm_manager_fini(rdev);
518     si_cp_enable(rdev, false);
519     cayman_dma_stop(rdev);
520     si_irq_suspend(rdev);
521     diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
522     index 7b38877..9e622b7 100644
523     --- a/drivers/i2c/busses/i2c-tegra.c
524     +++ b/drivers/i2c/busses/i2c-tegra.c
525     @@ -392,7 +392,11 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
526     u32 val;
527     int err = 0;
528    
529     - tegra_i2c_clock_enable(i2c_dev);
530     + err = tegra_i2c_clock_enable(i2c_dev);
531     + if (err < 0) {
532     + dev_err(i2c_dev->dev, "Clock enable failed %d\n", err);
533     + return err;
534     + }
535    
536     tegra_periph_reset_assert(i2c_dev->div_clk);
537     udelay(2);
538     @@ -599,7 +603,12 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
539     if (i2c_dev->is_suspended)
540     return -EBUSY;
541    
542     - tegra_i2c_clock_enable(i2c_dev);
543     + ret = tegra_i2c_clock_enable(i2c_dev);
544     + if (ret < 0) {
545     + dev_err(i2c_dev->dev, "Clock enable failed %d\n", ret);
546     + return ret;
547     + }
548     +
549     for (i = 0; i < num; i++) {
550     enum msg_end_type end_type = MSG_END_STOP;
551     if (i < (num - 1)) {
552     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
553     index 67b0c1d..1ef880d 100644
554     --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
555     +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
556     @@ -758,9 +758,13 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
557     if (++priv->tx_outstanding == ipoib_sendq_size) {
558     ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
559     tx->qp->qp_num);
560     - if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
561     - ipoib_warn(priv, "request notify on send CQ failed\n");
562     netif_stop_queue(dev);
563     + rc = ib_req_notify_cq(priv->send_cq,
564     + IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
565     + if (rc < 0)
566     + ipoib_warn(priv, "request notify on send CQ failed\n");
567     + else if (rc)
568     + ipoib_send_comp_handler(priv->send_cq, dev);
569     }
570     }
571     }
572     diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
573     index 651ca79..64eff90 100644
574     --- a/drivers/md/dm-bufio.c
575     +++ b/drivers/md/dm-bufio.c
576     @@ -1026,6 +1026,8 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
577     {
578     struct blk_plug plug;
579    
580     + BUG_ON(dm_bufio_in_request());
581     +
582     blk_start_plug(&plug);
583     dm_bufio_lock(c);
584    
585     diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
586     index 7a66d73..2d3a2af 100644
587     --- a/drivers/md/dm-thin.c
588     +++ b/drivers/md/dm-thin.c
589     @@ -2479,7 +2479,7 @@ static struct target_type pool_target = {
590     .name = "thin-pool",
591     .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
592     DM_TARGET_IMMUTABLE,
593     - .version = {1, 6, 1},
594     + .version = {1, 7, 0},
595     .module = THIS_MODULE,
596     .ctr = pool_ctr,
597     .dtr = pool_dtr,
598     @@ -2766,7 +2766,7 @@ static int thin_iterate_devices(struct dm_target *ti,
599    
600     static struct target_type thin_target = {
601     .name = "thin",
602     - .version = {1, 7, 1},
603     + .version = {1, 8, 0},
604     .module = THIS_MODULE,
605     .ctr = thin_ctr,
606     .dtr = thin_dtr,
607     diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
608     index 6ad5383..a746f1d 100644
609     --- a/drivers/md/dm-verity.c
610     +++ b/drivers/md/dm-verity.c
611     @@ -93,6 +93,13 @@ struct dm_verity_io {
612     */
613     };
614    
615     +struct dm_verity_prefetch_work {
616     + struct work_struct work;
617     + struct dm_verity *v;
618     + sector_t block;
619     + unsigned n_blocks;
620     +};
621     +
622     static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io)
623     {
624     return (struct shash_desc *)(io + 1);
625     @@ -424,15 +431,18 @@ static void verity_end_io(struct bio *bio, int error)
626     * The root buffer is not prefetched, it is assumed that it will be cached
627     * all the time.
628     */
629     -static void verity_prefetch_io(struct dm_verity *v, struct dm_verity_io *io)
630     +static void verity_prefetch_io(struct work_struct *work)
631     {
632     + struct dm_verity_prefetch_work *pw =
633     + container_of(work, struct dm_verity_prefetch_work, work);
634     + struct dm_verity *v = pw->v;
635     int i;
636    
637     for (i = v->levels - 2; i >= 0; i--) {
638     sector_t hash_block_start;
639     sector_t hash_block_end;
640     - verity_hash_at_level(v, io->block, i, &hash_block_start, NULL);
641     - verity_hash_at_level(v, io->block + io->n_blocks - 1, i, &hash_block_end, NULL);
642     + verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
643     + verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
644     if (!i) {
645     unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);
646    
647     @@ -452,6 +462,25 @@ no_prefetch_cluster:
648     dm_bufio_prefetch(v->bufio, hash_block_start,
649     hash_block_end - hash_block_start + 1);
650     }
651     +
652     + kfree(pw);
653     +}
654     +
655     +static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
656     +{
657     + struct dm_verity_prefetch_work *pw;
658     +
659     + pw = kmalloc(sizeof(struct dm_verity_prefetch_work),
660     + GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
661     +
662     + if (!pw)
663     + return;
664     +
665     + INIT_WORK(&pw->work, verity_prefetch_io);
666     + pw->v = v;
667     + pw->block = io->block;
668     + pw->n_blocks = io->n_blocks;
669     + queue_work(v->verify_wq, &pw->work);
670     }
671    
672     /*
673     @@ -498,7 +527,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
674     memcpy(io->io_vec, bio_iovec(bio),
675     io->io_vec_size * sizeof(struct bio_vec));
676    
677     - verity_prefetch_io(v, io);
678     + verity_submit_prefetch(v, io);
679    
680     generic_make_request(bio);
681    
682     @@ -858,7 +887,7 @@ bad:
683    
684     static struct target_type verity_target = {
685     .name = "verity",
686     - .version = {1, 1, 1},
687     + .version = {1, 2, 0},
688     .module = THIS_MODULE,
689     .ctr = verity_ctr,
690     .dtr = verity_dtr,
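
The dm-verity change above stops issuing hash-block prefetches directly on the map path and instead packages the block range into a small heap-allocated dm_verity_prefetch_work item that the verify workqueue runs and frees later. A userspace analogue of that hand-off, using a detached POSIX thread in place of the kernel workqueue; it sketches only the copy-the-parameters, queue, free-in-the-worker idea, and the names are invented for the example:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct prefetch_work {                  /* analogue of dm_verity_prefetch_work */
        unsigned long long block;
        unsigned n_blocks;
};

/* Runs asynchronously; owns and frees its work item. */
static void *prefetch_io(void *arg)
{
        struct prefetch_work *pw = arg;

        printf("prefetching %u block(s) starting at %llu\n",
               pw->n_blocks, pw->block);
        free(pw);
        return NULL;
}

/* Called on the submission path: it only copies the parameters and hands
 * them to a worker.  Allocation failure is not fatal; prefetching is
 * best-effort, just as in the hunk above. */
static void submit_prefetch(unsigned long long block, unsigned n_blocks)
{
        struct prefetch_work *pw = malloc(sizeof(*pw));
        pthread_t t;

        if (!pw)
                return;
        pw->block = block;
        pw->n_blocks = n_blocks;
        if (pthread_create(&t, NULL, prefetch_io, pw) != 0)
                free(pw);
        else
                pthread_detach(t);
}

int main(void)
{
        submit_prefetch(4096, 8);
        pthread_exit(NULL);             /* let the detached worker finish */
}
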
691     diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
692     index c4f2813..b88757c 100644
693     --- a/drivers/md/persistent-data/dm-btree-remove.c
694     +++ b/drivers/md/persistent-data/dm-btree-remove.c
695     @@ -139,15 +139,8 @@ struct child {
696     struct btree_node *n;
697     };
698    
699     -static struct dm_btree_value_type le64_type = {
700     - .context = NULL,
701     - .size = sizeof(__le64),
702     - .inc = NULL,
703     - .dec = NULL,
704     - .equal = NULL
705     -};
706     -
707     -static int init_child(struct dm_btree_info *info, struct btree_node *parent,
708     +static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
709     + struct btree_node *parent,
710     unsigned index, struct child *result)
711     {
712     int r, inc;
713     @@ -164,7 +157,7 @@ static int init_child(struct dm_btree_info *info, struct btree_node *parent,
714     result->n = dm_block_data(result->block);
715    
716     if (inc)
717     - inc_children(info->tm, result->n, &le64_type);
718     + inc_children(info->tm, result->n, vt);
719    
720     *((__le64 *) value_ptr(parent, index)) =
721     cpu_to_le64(dm_block_location(result->block));
722     @@ -236,7 +229,7 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
723     }
724    
725     static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
726     - unsigned left_index)
727     + struct dm_btree_value_type *vt, unsigned left_index)
728     {
729     int r;
730     struct btree_node *parent;
731     @@ -244,11 +237,11 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
732    
733     parent = dm_block_data(shadow_current(s));
734    
735     - r = init_child(info, parent, left_index, &left);
736     + r = init_child(info, vt, parent, left_index, &left);
737     if (r)
738     return r;
739    
740     - r = init_child(info, parent, left_index + 1, &right);
741     + r = init_child(info, vt, parent, left_index + 1, &right);
742     if (r) {
743     exit_child(info, &left);
744     return r;
745     @@ -368,7 +361,7 @@ static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
746     }
747    
748     static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
749     - unsigned left_index)
750     + struct dm_btree_value_type *vt, unsigned left_index)
751     {
752     int r;
753     struct btree_node *parent = dm_block_data(shadow_current(s));
754     @@ -377,17 +370,17 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
755     /*
756     * FIXME: fill out an array?
757     */
758     - r = init_child(info, parent, left_index, &left);
759     + r = init_child(info, vt, parent, left_index, &left);
760     if (r)
761     return r;
762    
763     - r = init_child(info, parent, left_index + 1, &center);
764     + r = init_child(info, vt, parent, left_index + 1, &center);
765     if (r) {
766     exit_child(info, &left);
767     return r;
768     }
769    
770     - r = init_child(info, parent, left_index + 2, &right);
771     + r = init_child(info, vt, parent, left_index + 2, &right);
772     if (r) {
773     exit_child(info, &left);
774     exit_child(info, &center);
775     @@ -434,7 +427,8 @@ static int get_nr_entries(struct dm_transaction_manager *tm,
776     }
777    
778     static int rebalance_children(struct shadow_spine *s,
779     - struct dm_btree_info *info, uint64_t key)
780     + struct dm_btree_info *info,
781     + struct dm_btree_value_type *vt, uint64_t key)
782     {
783     int i, r, has_left_sibling, has_right_sibling;
784     uint32_t child_entries;
785     @@ -472,13 +466,13 @@ static int rebalance_children(struct shadow_spine *s,
786     has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);
787    
788     if (!has_left_sibling)
789     - r = rebalance2(s, info, i);
790     + r = rebalance2(s, info, vt, i);
791    
792     else if (!has_right_sibling)
793     - r = rebalance2(s, info, i - 1);
794     + r = rebalance2(s, info, vt, i - 1);
795    
796     else
797     - r = rebalance3(s, info, i - 1);
798     + r = rebalance3(s, info, vt, i - 1);
799    
800     return r;
801     }
802     @@ -529,7 +523,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
803     if (le32_to_cpu(n->header.flags) & LEAF_NODE)
804     return do_leaf(n, key, index);
805    
806     - r = rebalance_children(s, info, key);
807     + r = rebalance_children(s, info, vt, key);
808     if (r)
809     break;
810    
811     @@ -550,6 +544,14 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
812     return r;
813     }
814    
815     +static struct dm_btree_value_type le64_type = {
816     + .context = NULL,
817     + .size = sizeof(__le64),
818     + .inc = NULL,
819     + .dec = NULL,
820     + .equal = NULL
821     +};
822     +
823     int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
824     uint64_t *keys, dm_block_t *new_root)
825     {
826     diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
827     index 19d77a0..94ce78e 100644
828     --- a/drivers/md/raid5.c
829     +++ b/drivers/md/raid5.c
830     @@ -674,9 +674,11 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
831     bi->bi_next = NULL;
832     if (rrdev)
833     set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
834     - trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
835     - bi, disk_devt(conf->mddev->gendisk),
836     - sh->dev[i].sector);
837     +
838     + if (conf->mddev->gendisk)
839     + trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
840     + bi, disk_devt(conf->mddev->gendisk),
841     + sh->dev[i].sector);
842     generic_make_request(bi);
843     }
844     if (rrdev) {
845     @@ -704,9 +706,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
846     rbi->bi_io_vec[0].bv_offset = 0;
847     rbi->bi_size = STRIPE_SIZE;
848     rbi->bi_next = NULL;
849     - trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
850     - rbi, disk_devt(conf->mddev->gendisk),
851     - sh->dev[i].sector);
852     + if (conf->mddev->gendisk)
853     + trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
854     + rbi, disk_devt(conf->mddev->gendisk),
855     + sh->dev[i].sector);
856     generic_make_request(rbi);
857     }
858     if (!rdev && !rrdev) {
859     @@ -2319,17 +2322,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
860     int level = conf->level;
861    
862     if (rcw) {
863     - /* if we are not expanding this is a proper write request, and
864     - * there will be bios with new data to be drained into the
865     - * stripe cache
866     - */
867     - if (!expand) {
868     - sh->reconstruct_state = reconstruct_state_drain_run;
869     - set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
870     - } else
871     - sh->reconstruct_state = reconstruct_state_run;
872     -
873     - set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
874    
875     for (i = disks; i--; ) {
876     struct r5dev *dev = &sh->dev[i];
877     @@ -2342,6 +2334,21 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
878     s->locked++;
879     }
880     }
881     + /* if we are not expanding this is a proper write request, and
882     + * there will be bios with new data to be drained into the
883     + * stripe cache
884     + */
885     + if (!expand) {
886     + if (!s->locked)
887     + /* False alarm, nothing to do */
888     + return;
889     + sh->reconstruct_state = reconstruct_state_drain_run;
890     + set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
891     + } else
892     + sh->reconstruct_state = reconstruct_state_run;
893     +
894     + set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
895     +
896     if (s->locked + conf->max_degraded == disks)
897     if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
898     atomic_inc(&conf->pending_full_writes);
899     @@ -2350,11 +2357,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
900     BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
901     test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
902    
903     - sh->reconstruct_state = reconstruct_state_prexor_drain_run;
904     - set_bit(STRIPE_OP_PREXOR, &s->ops_request);
905     - set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
906     - set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
907     -
908     for (i = disks; i--; ) {
909     struct r5dev *dev = &sh->dev[i];
910     if (i == pd_idx)
911     @@ -2369,6 +2371,13 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
912     s->locked++;
913     }
914     }
915     + if (!s->locked)
916     + /* False alarm - nothing to do */
917     + return;
918     + sh->reconstruct_state = reconstruct_state_prexor_drain_run;
919     + set_bit(STRIPE_OP_PREXOR, &s->ops_request);
920     + set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
921     + set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
922     }
923    
924     /* keep the parity disk(s) locked while asynchronous operations
925     @@ -2603,6 +2612,8 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
926     int i;
927    
928     clear_bit(STRIPE_SYNCING, &sh->state);
929     + if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
930     + wake_up(&conf->wait_for_overlap);
931     s->syncing = 0;
932     s->replacing = 0;
933     /* There is nothing more to do for sync/check/repair.
934     @@ -2776,6 +2787,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
935     {
936     int i;
937     struct r5dev *dev;
938     + int discard_pending = 0;
939    
940     for (i = disks; i--; )
941     if (sh->dev[i].written) {
942     @@ -2804,9 +2816,23 @@ static void handle_stripe_clean_event(struct r5conf *conf,
943     STRIPE_SECTORS,
944     !test_bit(STRIPE_DEGRADED, &sh->state),
945     0);
946     - }
947     - } else if (test_bit(R5_Discard, &sh->dev[i].flags))
948     - clear_bit(R5_Discard, &sh->dev[i].flags);
949     + } else if (test_bit(R5_Discard, &dev->flags))
950     + discard_pending = 1;
951     + }
952     + if (!discard_pending &&
953     + test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
954     + clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
955     + clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
956     + if (sh->qd_idx >= 0) {
957     + clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
958     + clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags);
959     + }
960     + /* now that discard is done we can proceed with any sync */
961     + clear_bit(STRIPE_DISCARD, &sh->state);
962     + if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
963     + set_bit(STRIPE_HANDLE, &sh->state);
964     +
965     + }
966    
967     if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
968     if (atomic_dec_and_test(&conf->pending_full_writes))
969     @@ -2865,8 +2891,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
970     set_bit(STRIPE_HANDLE, &sh->state);
971     if (rmw < rcw && rmw > 0) {
972     /* prefer read-modify-write, but need to get some data */
973     - blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d",
974     - (unsigned long long)sh->sector, rmw);
975     + if (conf->mddev->queue)
976     + blk_add_trace_msg(conf->mddev->queue,
977     + "raid5 rmw %llu %d",
978     + (unsigned long long)sh->sector, rmw);
979     for (i = disks; i--; ) {
980     struct r5dev *dev = &sh->dev[i];
981     if ((dev->towrite || i == sh->pd_idx) &&
982     @@ -2916,7 +2944,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
983     }
984     }
985     }
986     - if (rcw)
987     + if (rcw && conf->mddev->queue)
988     blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
989     (unsigned long long)sh->sector,
990     rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
991     @@ -3456,9 +3484,15 @@ static void handle_stripe(struct stripe_head *sh)
992     return;
993     }
994    
995     - if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
996     - set_bit(STRIPE_SYNCING, &sh->state);
997     - clear_bit(STRIPE_INSYNC, &sh->state);
998     + if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
999     + spin_lock(&sh->stripe_lock);
1000     + /* Cannot process 'sync' concurrently with 'discard' */
1001     + if (!test_bit(STRIPE_DISCARD, &sh->state) &&
1002     + test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
1003     + set_bit(STRIPE_SYNCING, &sh->state);
1004     + clear_bit(STRIPE_INSYNC, &sh->state);
1005     + }
1006     + spin_unlock(&sh->stripe_lock);
1007     }
1008     clear_bit(STRIPE_DELAYED, &sh->state);
1009    
1010     @@ -3618,6 +3652,8 @@ static void handle_stripe(struct stripe_head *sh)
1011     test_bit(STRIPE_INSYNC, &sh->state)) {
1012     md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
1013     clear_bit(STRIPE_SYNCING, &sh->state);
1014     + if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
1015     + wake_up(&conf->wait_for_overlap);
1016     }
1017    
1018     /* If the failed drives are just a ReadError, then we might need
1019     @@ -4023,9 +4059,10 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
1020     atomic_inc(&conf->active_aligned_reads);
1021     spin_unlock_irq(&conf->device_lock);
1022    
1023     - trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
1024     - align_bi, disk_devt(mddev->gendisk),
1025     - raid_bio->bi_sector);
1026     + if (mddev->gendisk)
1027     + trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
1028     + align_bi, disk_devt(mddev->gendisk),
1029     + raid_bio->bi_sector);
1030     generic_make_request(align_bi);
1031     return 1;
1032     } else {
1033     @@ -4119,7 +4156,8 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
1034     }
1035     spin_unlock_irq(&conf->device_lock);
1036     }
1037     - trace_block_unplug(mddev->queue, cnt, !from_schedule);
1038     + if (mddev->queue)
1039     + trace_block_unplug(mddev->queue, cnt, !from_schedule);
1040     kfree(cb);
1041     }
1042    
1043     @@ -4182,6 +4220,13 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
1044     sh = get_active_stripe(conf, logical_sector, 0, 0, 0);
1045     prepare_to_wait(&conf->wait_for_overlap, &w,
1046     TASK_UNINTERRUPTIBLE);
1047     + set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
1048     + if (test_bit(STRIPE_SYNCING, &sh->state)) {
1049     + release_stripe(sh);
1050     + schedule();
1051     + goto again;
1052     + }
1053     + clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
1054     spin_lock_irq(&sh->stripe_lock);
1055     for (d = 0; d < conf->raid_disks; d++) {
1056     if (d == sh->pd_idx || d == sh->qd_idx)
1057     @@ -4194,6 +4239,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
1058     goto again;
1059     }
1060     }
1061     + set_bit(STRIPE_DISCARD, &sh->state);
1062     finish_wait(&conf->wait_for_overlap, &w);
1063     for (d = 0; d < conf->raid_disks; d++) {
1064     if (d == sh->pd_idx || d == sh->qd_idx)
1065     diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
1066     index 18b2c4a..050a334 100644
1067     --- a/drivers/md/raid5.h
1068     +++ b/drivers/md/raid5.h
1069     @@ -323,6 +323,7 @@ enum {
1070     STRIPE_COMPUTE_RUN,
1071     STRIPE_OPS_REQ_PENDING,
1072     STRIPE_ON_UNPLUG_LIST,
1073     + STRIPE_DISCARD,
1074     };
1075    
1076     /*
1077     diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1078     index a079da17..272f81a 100644
1079     --- a/drivers/net/bonding/bond_main.c
1080     +++ b/drivers/net/bonding/bond_main.c
1081     @@ -1728,6 +1728,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1082    
1083     bond_compute_features(bond);
1084    
1085     + bond_update_speed_duplex(new_slave);
1086     +
1087     read_lock(&bond->lock);
1088    
1089     new_slave->last_arp_rx = jiffies -
1090     @@ -1780,8 +1782,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1091     new_slave->link == BOND_LINK_DOWN ? "DOWN" :
1092     (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
1093    
1094     - bond_update_speed_duplex(new_slave);
1095     -
1096     if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
1097     /* if there is a primary slave, remember it */
1098     if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
1099     @@ -2463,8 +2463,6 @@ static void bond_miimon_commit(struct bonding *bond)
1100     bond_set_backup_slave(slave);
1101     }
1102    
1103     - bond_update_speed_duplex(slave);
1104     -
1105     pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
1106     bond->dev->name, slave->dev->name,
1107     slave->speed, slave->duplex ? "full" : "half");
1108     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1109     index a5edac8..3a73bb9 100644
1110     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1111     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1112     @@ -2523,6 +2523,7 @@ load_error2:
1113     bp->port.pmf = 0;
1114     load_error1:
1115     bnx2x_napi_disable(bp);
1116     + bnx2x_del_all_napi(bp);
1117     /* clear pf_load status, as it was already set */
1118     bnx2x_clear_pf_load(bp);
1119     load_error0:
1120     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
1121     index b4d7b26..937f5b5 100644
1122     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
1123     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
1124     @@ -456,8 +456,9 @@ struct bnx2x_fw_port_stats_old {
1125    
1126     #define UPDATE_QSTAT(s, t) \
1127     do { \
1128     - qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi); \
1129     qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \
1130     + qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \
1131     + + ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \
1132     } while (0)
1133    
1134     #define UPDATE_QSTAT_OLD(f) \
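
The UPDATE_QSTAT fix above sums the low 32-bit word first and adds a carry into the high word whenever the low-word addition wraps (new low < old low). A standalone sketch of that split 64-bit accumulation; the stat64 struct is a plain stand-in for the driver's qstats layout:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

struct stat64 { uint32_t hi, lo; };     /* 64-bit counter kept as two words */

/* Add a 32-bit hi/lo delta to an accumulated counter, propagating the
 * carry out of the low word, which is the point of the UPDATE_QSTAT fix. */
static void update_qstat(struct stat64 *acc, const struct stat64 *old,
                         uint32_t d_hi, uint32_t d_lo)
{
        acc->lo = old->lo + d_lo;
        acc->hi = old->hi + d_hi + (acc->lo < old->lo ? 1 : 0);
}

static uint64_t as_u64(const struct stat64 *s)
{
        return ((uint64_t)s->hi << 32) | s->lo;
}

int main(void)
{
        struct stat64 old = { .hi = 0, .lo = 0xfffffff0 }, acc;

        update_qstat(&acc, &old, 0, 0x20);      /* low word wraps around */
        printf("%#" PRIx64 "\n", as_u64(&acc)); /* 0x100000010 */
        return 0;
}
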
1135     diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
1136     index f0b38fa..3ec98f2 100644
1137     --- a/drivers/net/ethernet/broadcom/tg3.c
1138     +++ b/drivers/net/ethernet/broadcom/tg3.c
1139     @@ -4093,6 +4093,14 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
1140     tp->link_config.active_speed = tp->link_config.speed;
1141     tp->link_config.active_duplex = tp->link_config.duplex;
1142    
1143     + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
1144     + /* With autoneg disabled, 5715 only links up when the
1145     + * advertisement register has the configured speed
1146     + * enabled.
1147     + */
1148     + tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
1149     + }
1150     +
1151     bmcr = 0;
1152     switch (tp->link_config.speed) {
1153     default:
1154     diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
1155     index bf57b3c..0bc0099 100644
1156     --- a/drivers/net/ethernet/sfc/efx.c
1157     +++ b/drivers/net/ethernet/sfc/efx.c
1158     @@ -779,6 +779,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
1159     tx_queue->txd.entries);
1160     }
1161    
1162     + efx_device_detach_sync(efx);
1163     efx_stop_all(efx);
1164     efx_stop_interrupts(efx, true);
1165    
1166     @@ -832,6 +833,7 @@ out:
1167    
1168     efx_start_interrupts(efx, true);
1169     efx_start_all(efx);
1170     + netif_device_attach(efx->net_dev);
1171     return rc;
1172    
1173     rollback:
1174     @@ -1641,8 +1643,12 @@ static void efx_stop_all(struct efx_nic *efx)
1175     /* Flush efx_mac_work(), refill_workqueue, monitor_work */
1176     efx_flush_all(efx);
1177    
1178     - /* Stop the kernel transmit interface late, so the watchdog
1179     - * timer isn't ticking over the flush */
1180     + /* Stop the kernel transmit interface. This is only valid if
1181     + * the device is stopped or detached; otherwise the watchdog
1182     + * may fire immediately.
1183     + */
1184     + WARN_ON(netif_running(efx->net_dev) &&
1185     + netif_device_present(efx->net_dev));
1186     netif_tx_disable(efx->net_dev);
1187    
1188     efx_stop_datapath(efx);
1189     @@ -1963,16 +1969,18 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1190     if (new_mtu > EFX_MAX_MTU)
1191     return -EINVAL;
1192    
1193     - efx_stop_all(efx);
1194     -
1195     netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
1196    
1197     + efx_device_detach_sync(efx);
1198     + efx_stop_all(efx);
1199     +
1200     mutex_lock(&efx->mac_lock);
1201     net_dev->mtu = new_mtu;
1202     efx->type->reconfigure_mac(efx);
1203     mutex_unlock(&efx->mac_lock);
1204    
1205     efx_start_all(efx);
1206     + netif_device_attach(efx->net_dev);
1207     return 0;
1208     }
1209    
1210     diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
1211     index 50247df..d2f790d 100644
1212     --- a/drivers/net/ethernet/sfc/efx.h
1213     +++ b/drivers/net/ethernet/sfc/efx.h
1214     @@ -171,9 +171,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
1215     * TX scheduler is stopped when we're done and before
1216     * netif_device_present() becomes false.
1217     */
1218     - netif_tx_lock(dev);
1219     + netif_tx_lock_bh(dev);
1220     netif_device_detach(dev);
1221     - netif_tx_unlock(dev);
1222     + netif_tx_unlock_bh(dev);
1223     }
1224    
1225     #endif /* EFX_EFX_H */
1226     diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
1227     index 2d756c1..0a90abd 100644
1228     --- a/drivers/net/ethernet/sfc/net_driver.h
1229     +++ b/drivers/net/ethernet/sfc/net_driver.h
1230     @@ -210,6 +210,7 @@ struct efx_tx_queue {
1231     * Will be %NULL if the buffer slot is currently free.
1232     * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE.
1233     * Will be %NULL if the buffer slot is currently free.
1234     + * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
1235     * @len: Buffer length, in bytes.
1236     * @flags: Flags for buffer and packet state.
1237     */
1238     @@ -219,7 +220,8 @@ struct efx_rx_buffer {
1239     struct sk_buff *skb;
1240     struct page *page;
1241     } u;
1242     - unsigned int len;
1243     + u16 page_offset;
1244     + u16 len;
1245     u16 flags;
1246     };
1247     #define EFX_RX_BUF_PAGE 0x0001
1248     diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
1249     index 0ad790c..eaa8e87 100644
1250     --- a/drivers/net/ethernet/sfc/nic.c
1251     +++ b/drivers/net/ethernet/sfc/nic.c
1252     @@ -376,7 +376,8 @@ efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
1253     return false;
1254    
1255     tx_queue->empty_read_count = 0;
1256     - return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
1257     + return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
1258     + && tx_queue->write_count - write_count == 1;
1259     }
1260    
1261     /* For each entry inserted into the software descriptor ring, create a
1262     diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
1263     index 0767043f..3f93624 100644
1264     --- a/drivers/net/ethernet/sfc/ptp.c
1265     +++ b/drivers/net/ethernet/sfc/ptp.c
1266     @@ -1439,7 +1439,7 @@ static int efx_phc_settime(struct ptp_clock_info *ptp,
1267    
1268     delta = timespec_sub(*e_ts, time_now);
1269    
1270     - efx_phc_adjtime(ptp, timespec_to_ns(&delta));
1271     + rc = efx_phc_adjtime(ptp, timespec_to_ns(&delta));
1272     if (rc != 0)
1273     return rc;
1274    
1275     diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
1276     index d780a0d..bb579a6 100644
1277     --- a/drivers/net/ethernet/sfc/rx.c
1278     +++ b/drivers/net/ethernet/sfc/rx.c
1279     @@ -90,11 +90,7 @@ static unsigned int rx_refill_threshold;
1280     static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
1281     struct efx_rx_buffer *buf)
1282     {
1283     - /* Offset is always within one page, so we don't need to consider
1284     - * the page order.
1285     - */
1286     - return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
1287     - efx->type->rx_buffer_hash_size;
1288     + return buf->page_offset + efx->type->rx_buffer_hash_size;
1289     }
1290     static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
1291     {
1292     @@ -187,6 +183,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
1293     struct efx_nic *efx = rx_queue->efx;
1294     struct efx_rx_buffer *rx_buf;
1295     struct page *page;
1296     + unsigned int page_offset;
1297     struct efx_rx_page_state *state;
1298     dma_addr_t dma_addr;
1299     unsigned index, count;
1300     @@ -211,12 +208,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
1301     state->dma_addr = dma_addr;
1302    
1303     dma_addr += sizeof(struct efx_rx_page_state);
1304     + page_offset = sizeof(struct efx_rx_page_state);
1305    
1306     split:
1307     index = rx_queue->added_count & rx_queue->ptr_mask;
1308     rx_buf = efx_rx_buffer(rx_queue, index);
1309     rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
1310     rx_buf->u.page = page;
1311     + rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
1312     rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
1313     rx_buf->flags = EFX_RX_BUF_PAGE;
1314     ++rx_queue->added_count;
1315     @@ -227,6 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
1316     /* Use the second half of the page */
1317     get_page(page);
1318     dma_addr += (PAGE_SIZE >> 1);
1319     + page_offset += (PAGE_SIZE >> 1);
1320     ++count;
1321     goto split;
1322     }
1323     @@ -236,7 +236,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
1324     }
1325    
1326     static void efx_unmap_rx_buffer(struct efx_nic *efx,
1327     - struct efx_rx_buffer *rx_buf)
1328     + struct efx_rx_buffer *rx_buf,
1329     + unsigned int used_len)
1330     {
1331     if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
1332     struct efx_rx_page_state *state;
1333     @@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
1334     state->dma_addr,
1335     efx_rx_buf_size(efx),
1336     DMA_FROM_DEVICE);
1337     + } else if (used_len) {
1338     + dma_sync_single_for_cpu(&efx->pci_dev->dev,
1339     + rx_buf->dma_addr, used_len,
1340     + DMA_FROM_DEVICE);
1341     }
1342     } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
1343     dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
1344     @@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
1345     static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
1346     struct efx_rx_buffer *rx_buf)
1347     {
1348     - efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
1349     + efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
1350     efx_free_rx_buffer(rx_queue->efx, rx_buf);
1351     }
1352    
1353     @@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
1354     goto out;
1355     }
1356    
1357     - /* Release card resources - assumes all RX buffers consumed in-order
1358     - * per RX queue
1359     + /* Release and/or sync DMA mapping - assumes all RX buffers
1360     + * consumed in-order per RX queue
1361     */
1362     - efx_unmap_rx_buffer(efx, rx_buf);
1363     + efx_unmap_rx_buffer(efx, rx_buf, len);
1364    
1365     /* Prefetch nice and early so data will (hopefully) be in cache by
1366     * the time we look at it.
1367     diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
1368     index 6989ebe..21a942c 100644
1369     --- a/drivers/net/netconsole.c
1370     +++ b/drivers/net/netconsole.c
1371     @@ -630,6 +630,7 @@ static int netconsole_netdev_event(struct notifier_block *this,
1372     goto done;
1373    
1374     spin_lock_irqsave(&target_list_lock, flags);
1375     +restart:
1376     list_for_each_entry(nt, &target_list, list) {
1377     netconsole_target_get(nt);
1378     if (nt->np.dev == dev) {
1379     @@ -642,15 +643,17 @@ static int netconsole_netdev_event(struct notifier_block *this,
1380     case NETDEV_UNREGISTER:
1381     /*
1382     * rtnl_lock already held
1383     + * we might sleep in __netpoll_cleanup()
1384     */
1385     - if (nt->np.dev) {
1386     - __netpoll_cleanup(&nt->np);
1387     - dev_put(nt->np.dev);
1388     - nt->np.dev = NULL;
1389     - }
1390     + spin_unlock_irqrestore(&target_list_lock, flags);
1391     + __netpoll_cleanup(&nt->np);
1392     + spin_lock_irqsave(&target_list_lock, flags);
1393     + dev_put(nt->np.dev);
1394     + nt->np.dev = NULL;
1395     nt->enabled = 0;
1396     stopped = true;
1397     - break;
1398     + netconsole_target_put(nt);
1399     + goto restart;
1400     }
1401     }
1402     netconsole_target_put(nt);
1403     diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
1404     index 248d2dc..16c8429 100644
1405     --- a/drivers/net/usb/cdc_mbim.c
1406     +++ b/drivers/net/usb/cdc_mbim.c
1407     @@ -68,18 +68,9 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
1408     struct cdc_ncm_ctx *ctx;
1409     struct usb_driver *subdriver = ERR_PTR(-ENODEV);
1410     int ret = -ENODEV;
1411     - u8 data_altsetting = CDC_NCM_DATA_ALTSETTING_NCM;
1412     + u8 data_altsetting = cdc_ncm_select_altsetting(dev, intf);
1413     struct cdc_mbim_state *info = (void *)&dev->data;
1414    
1415     - /* see if interface supports MBIM alternate setting */
1416     - if (intf->num_altsetting == 2) {
1417     - if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
1418     - usb_set_interface(dev->udev,
1419     - intf->cur_altsetting->desc.bInterfaceNumber,
1420     - CDC_NCM_COMM_ALTSETTING_MBIM);
1421     - data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM;
1422     - }
1423     -
1424     /* Probably NCM, defer for cdc_ncm_bind */
1425     if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
1426     goto err;
1427     diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
1428     index 6d25439..70fb846 100644
1429     --- a/drivers/net/usb/cdc_ncm.c
1430     +++ b/drivers/net/usb/cdc_ncm.c
1431     @@ -55,6 +55,14 @@
1432    
1433     #define DRIVER_VERSION "14-Mar-2012"
1434    
1435     +#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM)
1436     +static bool prefer_mbim = true;
1437     +#else
1438     +static bool prefer_mbim;
1439     +#endif
1440     +module_param(prefer_mbim, bool, S_IRUGO | S_IWUSR);
1441     +MODULE_PARM_DESC(prefer_mbim, "Prefer MBIM setting on dual NCM/MBIM functions");
1442     +
1443     static void cdc_ncm_txpath_bh(unsigned long param);
1444     static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
1445     static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
1446     @@ -550,9 +558,12 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
1447     }
1448     EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
1449    
1450     -static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
1451     +/* Select the MBIM altsetting iff it is preferred and available,
1452     + * returning the number of the corresponding data interface altsetting
1453     + */
1454     +u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf)
1455     {
1456     - int ret;
1457     + struct usb_host_interface *alt;
1458    
1459     /* The MBIM spec defines a NCM compatible default altsetting,
1460     * which we may have matched:
1461     @@ -568,23 +579,27 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
1462     * endpoint descriptors, shall be constructed according to
1463     * the rules given in section 6 (USB Device Model) of this
1464     * specification."
1465     - *
1466     - * Do not bind to such interfaces, allowing cdc_mbim to handle
1467     - * them
1468     */
1469     -#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM)
1470     - if ((intf->num_altsetting == 2) &&
1471     - !usb_set_interface(dev->udev,
1472     - intf->cur_altsetting->desc.bInterfaceNumber,
1473     - CDC_NCM_COMM_ALTSETTING_MBIM)) {
1474     - if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
1475     - return -ENODEV;
1476     - else
1477     - usb_set_interface(dev->udev,
1478     - intf->cur_altsetting->desc.bInterfaceNumber,
1479     - CDC_NCM_COMM_ALTSETTING_NCM);
1480     + if (prefer_mbim && intf->num_altsetting == 2) {
1481     + alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM);
1482     + if (alt && cdc_ncm_comm_intf_is_mbim(alt) &&
1483     + !usb_set_interface(dev->udev,
1484     + intf->cur_altsetting->desc.bInterfaceNumber,
1485     + CDC_NCM_COMM_ALTSETTING_MBIM))
1486     + return CDC_NCM_DATA_ALTSETTING_MBIM;
1487     }
1488     -#endif
1489     + return CDC_NCM_DATA_ALTSETTING_NCM;
1490     +}
1491     +EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
1492     +
1493     +static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
1494     +{
1495     + int ret;
1496     +
1497     + /* MBIM backwards compatible function? */
1498     + cdc_ncm_select_altsetting(dev, intf);
1499     + if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
1500     + return -ENODEV;
1501    
1502     /* NCM data altsetting is always 1 */
1503     ret = cdc_ncm_bind_common(dev, intf, 1);
1504     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1505     index 19d9035..b69ca0f 100644
1506     --- a/drivers/net/usb/qmi_wwan.c
1507     +++ b/drivers/net/usb/qmi_wwan.c
1508     @@ -139,16 +139,9 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
1509    
1510     BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
1511    
1512     - /* control and data is shared? */
1513     - if (intf->cur_altsetting->desc.bNumEndpoints == 3) {
1514     - info->control = intf;
1515     - info->data = intf;
1516     - goto shared;
1517     - }
1518     -
1519     - /* else require a single interrupt status endpoint on control intf */
1520     - if (intf->cur_altsetting->desc.bNumEndpoints != 1)
1521     - goto err;
1522     + /* set up initial state */
1523     + info->control = intf;
1524     + info->data = intf;
1525    
1526     /* and a number of CDC descriptors */
1527     while (len > 3) {
1528     @@ -207,25 +200,14 @@ next_desc:
1529     buf += h->bLength;
1530     }
1531    
1532     - /* did we find all the required ones? */
1533     - if (!(found & (1 << USB_CDC_HEADER_TYPE)) ||
1534     - !(found & (1 << USB_CDC_UNION_TYPE))) {
1535     - dev_err(&intf->dev, "CDC functional descriptors missing\n");
1536     - goto err;
1537     - }
1538     -
1539     - /* verify CDC Union */
1540     - if (desc->bInterfaceNumber != cdc_union->bMasterInterface0) {
1541     - dev_err(&intf->dev, "bogus CDC Union: master=%u\n", cdc_union->bMasterInterface0);
1542     - goto err;
1543     - }
1544     -
1545     - /* need to save these for unbind */
1546     - info->control = intf;
1547     - info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
1548     - if (!info->data) {
1549     - dev_err(&intf->dev, "bogus CDC Union: slave=%u\n", cdc_union->bSlaveInterface0);
1550     - goto err;
1551     + /* Use separate control and data interfaces if we found a CDC Union */
1552     + if (cdc_union) {
1553     + info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
1554     + if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 || !info->data) {
1555     + dev_err(&intf->dev, "bogus CDC Union: master=%u, slave=%u\n",
1556     + cdc_union->bMasterInterface0, cdc_union->bSlaveInterface0);
1557     + goto err;
1558     + }
1559     }
1560    
1561     /* errors aren't fatal - we can live with the dynamic address */
1562     @@ -235,11 +217,12 @@ next_desc:
1563     }
1564    
1565     /* claim data interface and set it up */
1566     - status = usb_driver_claim_interface(driver, info->data, dev);
1567     - if (status < 0)
1568     - goto err;
1569     + if (info->control != info->data) {
1570     + status = usb_driver_claim_interface(driver, info->data, dev);
1571     + if (status < 0)
1572     + goto err;
1573     + }
1574    
1575     -shared:
1576     status = qmi_wwan_register_subdriver(dev);
1577     if (status < 0 && info->control != info->data) {
1578     usb_set_intfdata(info->data, NULL);
1579     diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
1580     index 88664ae..3473876 100644
1581     --- a/drivers/net/wireless/mwifiex/join.c
1582     +++ b/drivers/net/wireless/mwifiex/join.c
1583     @@ -1092,10 +1092,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
1584     adhoc_join->bss_descriptor.bssid,
1585     adhoc_join->bss_descriptor.ssid);
1586    
1587     - for (i = 0; bss_desc->supported_rates[i] &&
1588     - i < MWIFIEX_SUPPORTED_RATES;
1589     - i++)
1590     - ;
1591     + for (i = 0; i < MWIFIEX_SUPPORTED_RATES &&
1592     + bss_desc->supported_rates[i]; i++)
1593     + ;
1594     rates_size = i;
1595    
1596     /* Copy Data Rates from the Rates recorded in scan response */
1597     diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
1598     index 44f8b3f..cdbfc30 100644
1599     --- a/drivers/net/wireless/rt2x00/rt2x00dev.c
1600     +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
1601     @@ -1209,7 +1209,9 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1602     rt2x00dev->hw->wiphy->interface_modes |=
1603     BIT(NL80211_IFTYPE_ADHOC) |
1604     BIT(NL80211_IFTYPE_AP) |
1605     +#ifdef CONFIG_MAC80211_MESH
1606     BIT(NL80211_IFTYPE_MESH_POINT) |
1607     +#endif
1608     BIT(NL80211_IFTYPE_WDS);
1609    
1610     rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
1611     diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
1612     index b1ccff4..c08d0f4 100644
1613     --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
1614     +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
1615     @@ -1377,74 +1377,57 @@ void rtl92cu_card_disable(struct ieee80211_hw *hw)
1616    
1617     void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
1618     {
1619     - /* dummy routine needed for callback from rtl_op_configure_filter() */
1620     -}
1621     -
1622     -/*========================================================================== */
1623     -
1624     -static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw,
1625     - enum nl80211_iftype type)
1626     -{
1627     struct rtl_priv *rtlpriv = rtl_priv(hw);
1628     - u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
1629     struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1630     - struct rtl_phy *rtlphy = &(rtlpriv->phy);
1631     - u8 filterout_non_associated_bssid = false;
1632     + u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
1633    
1634     - switch (type) {
1635     - case NL80211_IFTYPE_ADHOC:
1636     - case NL80211_IFTYPE_STATION:
1637     - filterout_non_associated_bssid = true;
1638     - break;
1639     - case NL80211_IFTYPE_UNSPECIFIED:
1640     - case NL80211_IFTYPE_AP:
1641     - default:
1642     - break;
1643     - }
1644     - if (filterout_non_associated_bssid) {
1645     + if (rtlpriv->psc.rfpwr_state != ERFON)
1646     + return;
1647     +
1648     + if (check_bssid) {
1649     + u8 tmp;
1650     if (IS_NORMAL_CHIP(rtlhal->version)) {
1651     - switch (rtlphy->current_io_type) {
1652     - case IO_CMD_RESUME_DM_BY_SCAN:
1653     - reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1654     - rtlpriv->cfg->ops->set_hw_reg(hw,
1655     - HW_VAR_RCR, (u8 *)(&reg_rcr));
1656     - /* enable update TSF */
1657     - _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
1658     - break;
1659     - case IO_CMD_PAUSE_DM_BY_SCAN:
1660     - reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1661     - rtlpriv->cfg->ops->set_hw_reg(hw,
1662     - HW_VAR_RCR, (u8 *)(&reg_rcr));
1663     - /* disable update TSF */
1664     - _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
1665     - break;
1666     - }
1667     + reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1668     + tmp = BIT(4);
1669     } else {
1670     - reg_rcr |= (RCR_CBSSID);
1671     - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1672     - (u8 *)(&reg_rcr));
1673     - _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5)));
1674     + reg_rcr |= RCR_CBSSID;
1675     + tmp = BIT(4) | BIT(5);
1676     }
1677     - } else if (filterout_non_associated_bssid == false) {
1678     + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1679     + (u8 *) (&reg_rcr));
1680     + _rtl92cu_set_bcn_ctrl_reg(hw, 0, tmp);
1681     + } else {
1682     + u8 tmp;
1683     if (IS_NORMAL_CHIP(rtlhal->version)) {
1684     - reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
1685     - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1686     - (u8 *)(&reg_rcr));
1687     - _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
1688     + reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1689     + tmp = BIT(4);
1690     } else {
1691     - reg_rcr &= (~RCR_CBSSID);
1692     - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1693     - (u8 *)(&reg_rcr));
1694     - _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0);
1695     + reg_rcr &= ~RCR_CBSSID;
1696     + tmp = BIT(4) | BIT(5);
1697     }
1698     + reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
1699     + rtlpriv->cfg->ops->set_hw_reg(hw,
1700     + HW_VAR_RCR, (u8 *) (&reg_rcr));
1701     + _rtl92cu_set_bcn_ctrl_reg(hw, tmp, 0);
1702     }
1703     }
1704    
1705     +/*========================================================================== */
1706     +
1707     int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
1708     {
1709     + struct rtl_priv *rtlpriv = rtl_priv(hw);
1710     +
1711     if (_rtl92cu_set_media_status(hw, type))
1712     return -EOPNOTSUPP;
1713     - _rtl92cu_set_check_bssid(hw, type);
1714     +
1715     + if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
1716     + if (type != NL80211_IFTYPE_AP)
1717     + rtl92cu_set_check_bssid(hw, true);
1718     + } else {
1719     + rtl92cu_set_check_bssid(hw, false);
1720     + }
1721     +
1722     return 0;
1723     }
1724    
1725     @@ -2058,8 +2041,6 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
1726     (shortgi_rate << 4) | (shortgi_rate);
1727     }
1728     rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
1729     - RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
1730     - rtl_read_dword(rtlpriv, REG_ARFR0));
1731     }
1732    
1733     void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
1734     diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
1735     index b6469e2..d38b267 100644
1736     --- a/drivers/rtc/rtc-at91rm9200.c
1737     +++ b/drivers/rtc/rtc-at91rm9200.c
1738     @@ -44,6 +44,7 @@ static DECLARE_COMPLETION(at91_rtc_updated);
1739     static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
1740     static void __iomem *at91_rtc_regs;
1741     static int irq;
1742     +static u32 at91_rtc_imr;
1743    
1744     /*
1745     * Decode time/date into rtc_time structure
1746     @@ -108,9 +109,11 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
1747     cr = at91_rtc_read(AT91_RTC_CR);
1748     at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
1749    
1750     + at91_rtc_imr |= AT91_RTC_ACKUPD;
1751     at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD);
1752     wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */
1753     at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
1754     + at91_rtc_imr &= ~AT91_RTC_ACKUPD;
1755    
1756     at91_rtc_write(AT91_RTC_TIMR,
1757     bin2bcd(tm->tm_sec) << 0
1758     @@ -142,7 +145,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
1759     tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
1760     tm->tm_year = at91_alarm_year - 1900;
1761    
1762     - alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
1763     + alrm->enabled = (at91_rtc_imr & AT91_RTC_ALARM)
1764     ? 1 : 0;
1765    
1766     pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
1767     @@ -168,6 +171,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
1768     tm.tm_sec = alrm->time.tm_sec;
1769    
1770     at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
1771     + at91_rtc_imr &= ~AT91_RTC_ALARM;
1772     at91_rtc_write(AT91_RTC_TIMALR,
1773     bin2bcd(tm.tm_sec) << 0
1774     | bin2bcd(tm.tm_min) << 8
1775     @@ -180,6 +184,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
1776    
1777     if (alrm->enabled) {
1778     at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
1779     + at91_rtc_imr |= AT91_RTC_ALARM;
1780     at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
1781     }
1782    
1783     @@ -196,9 +201,12 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
1784    
1785     if (enabled) {
1786     at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
1787     + at91_rtc_imr |= AT91_RTC_ALARM;
1788     at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
1789     - } else
1790     + } else {
1791     at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
1792     + at91_rtc_imr &= ~AT91_RTC_ALARM;
1793     + }
1794    
1795     return 0;
1796     }
1797     @@ -207,12 +215,10 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
1798     */
1799     static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
1800     {
1801     - unsigned long imr = at91_rtc_read(AT91_RTC_IMR);
1802     -
1803     seq_printf(seq, "update_IRQ\t: %s\n",
1804     - (imr & AT91_RTC_ACKUPD) ? "yes" : "no");
1805     + (at91_rtc_imr & AT91_RTC_ACKUPD) ? "yes" : "no");
1806     seq_printf(seq, "periodic_IRQ\t: %s\n",
1807     - (imr & AT91_RTC_SECEV) ? "yes" : "no");
1808     + (at91_rtc_imr & AT91_RTC_SECEV) ? "yes" : "no");
1809    
1810     return 0;
1811     }
1812     @@ -227,7 +233,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
1813     unsigned int rtsr;
1814     unsigned long events = 0;
1815    
1816     - rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR);
1817     + rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_imr;
1818     if (rtsr) { /* this interrupt is shared! Is it ours? */
1819     if (rtsr & AT91_RTC_ALARM)
1820     events |= (RTC_AF | RTC_IRQF);
1821     @@ -291,6 +297,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
1822     at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
1823     AT91_RTC_SECEV | AT91_RTC_TIMEV |
1824     AT91_RTC_CALEV);
1825     + at91_rtc_imr = 0;
1826    
1827     ret = request_irq(irq, at91_rtc_interrupt,
1828     IRQF_SHARED,
1829     @@ -330,6 +337,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
1830     at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
1831     AT91_RTC_SECEV | AT91_RTC_TIMEV |
1832     AT91_RTC_CALEV);
1833     + at91_rtc_imr = 0;
1834     free_irq(irq, pdev);
1835    
1836     rtc_device_unregister(rtc);
1837     @@ -342,31 +350,35 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
1838    
1839     /* AT91RM9200 RTC Power management control */
1840    
1841     -static u32 at91_rtc_imr;
1842     +static u32 at91_rtc_bkpimr;
1843     +
1844    
1845     static int at91_rtc_suspend(struct device *dev)
1846     {
1847     /* this IRQ is shared with DBGU and other hardware which isn't
1848     * necessarily doing PM like we are...
1849     */
1850     - at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR)
1851     - & (AT91_RTC_ALARM|AT91_RTC_SECEV);
1852     - if (at91_rtc_imr) {
1853     - if (device_may_wakeup(dev))
1854     + at91_rtc_bkpimr = at91_rtc_imr & (AT91_RTC_ALARM|AT91_RTC_SECEV);
1855     + if (at91_rtc_bkpimr) {
1856     + if (device_may_wakeup(dev)) {
1857     enable_irq_wake(irq);
1858     - else
1859     - at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr);
1860     - }
1861     + } else {
1862     + at91_rtc_write(AT91_RTC_IDR, at91_rtc_bkpimr);
1863     + at91_rtc_imr &= ~at91_rtc_bkpimr;
1864     + }
1865     +}
1866     return 0;
1867     }
1868    
1869     static int at91_rtc_resume(struct device *dev)
1870     {
1871     - if (at91_rtc_imr) {
1872     - if (device_may_wakeup(dev))
1873     + if (at91_rtc_bkpimr) {
1874     + if (device_may_wakeup(dev)) {
1875     disable_irq_wake(irq);
1876     - else
1877     - at91_rtc_write(AT91_RTC_IER, at91_rtc_imr);
1878     + } else {
1879     + at91_rtc_imr |= at91_rtc_bkpimr;
1880     + at91_rtc_write(AT91_RTC_IER, at91_rtc_bkpimr);
1881     + }
1882     }
1883     return 0;
1884     }
1885     diff --git a/drivers/rtc/rtc-at91rm9200.h b/drivers/rtc/rtc-at91rm9200.h
1886     index da1945e..5f940b6 100644
1887     --- a/drivers/rtc/rtc-at91rm9200.h
1888     +++ b/drivers/rtc/rtc-at91rm9200.h
1889     @@ -64,7 +64,6 @@
1890     #define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */
1891     #define AT91_RTC_IER 0x20 /* Interrupt Enable Register */
1892     #define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */
1893     -#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */
1894    
1895     #define AT91_RTC_VER 0x2c /* Valid Entry Register */
1896     #define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */
1897     diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
1898     index db0cf7c..a0fc7b9 100644
1899     --- a/drivers/target/iscsi/iscsi_target_auth.c
1900     +++ b/drivers/target/iscsi/iscsi_target_auth.c
1901     @@ -166,6 +166,7 @@ static int chap_server_compute_md5(
1902     {
1903     char *endptr;
1904     unsigned long id;
1905     + unsigned char id_as_uchar;
1906     unsigned char digest[MD5_SIGNATURE_SIZE];
1907     unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
1908     unsigned char identifier[10], *challenge = NULL;
1909     @@ -355,7 +356,9 @@ static int chap_server_compute_md5(
1910     goto out;
1911     }
1912    
1913     - sg_init_one(&sg, &id, 1);
1914     + /* To handle both endiannesses */
1915     + id_as_uchar = id;
1916     + sg_init_one(&sg, &id_as_uchar, 1);
1917     ret = crypto_hash_update(&desc, &sg, 1);
1918     if (ret < 0) {
1919     pr_err("crypto_hash_update() failed for id\n");
1920     diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
1921     index bc02b01..37ffc5b 100644
1922     --- a/drivers/target/target_core_file.h
1923     +++ b/drivers/target/target_core_file.h
1924     @@ -7,7 +7,7 @@
1925     #define FD_DEVICE_QUEUE_DEPTH 32
1926     #define FD_MAX_DEVICE_QUEUE_DEPTH 128
1927     #define FD_BLOCKSIZE 512
1928     -#define FD_MAX_SECTORS 1024
1929     +#define FD_MAX_SECTORS 2048
1930    
1931     #define RRF_EMULATE_CDB 0x01
1932     #define RRF_GOT_LBA 0x02
1933     diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
1934     index 220da3f..94b0ad7 100644
1935     --- a/drivers/tty/serial/sunsu.c
1936     +++ b/drivers/tty/serial/sunsu.c
1937     @@ -974,6 +974,7 @@ static struct uart_ops sunsu_pops = {
1938     #define UART_NR 4
1939    
1940     static struct uart_sunsu_port sunsu_ports[UART_NR];
1941     +static int nr_inst; /* Number of already registered ports */
1942    
1943     #ifdef CONFIG_SERIO
1944    
1945     @@ -1343,13 +1344,8 @@ static int __init sunsu_console_setup(struct console *co, char *options)
1946     printk("Console: ttyS%d (SU)\n",
1947     (sunsu_reg.minor - 64) + co->index);
1948    
1949     - /*
1950     - * Check whether an invalid uart number has been specified, and
1951     - * if so, search for the first available port that does have
1952     - * console support.
1953     - */
1954     - if (co->index >= UART_NR)
1955     - co->index = 0;
1956     + if (co->index > nr_inst)
1957     + return -ENODEV;
1958     port = &sunsu_ports[co->index].port;
1959    
1960     /*
1961     @@ -1414,7 +1410,6 @@ static enum su_type su_get_type(struct device_node *dp)
1962    
1963     static int su_probe(struct platform_device *op)
1964     {
1965     - static int inst;
1966     struct device_node *dp = op->dev.of_node;
1967     struct uart_sunsu_port *up;
1968     struct resource *rp;
1969     @@ -1424,16 +1419,16 @@ static int su_probe(struct platform_device *op)
1970    
1971     type = su_get_type(dp);
1972     if (type == SU_PORT_PORT) {
1973     - if (inst >= UART_NR)
1974     + if (nr_inst >= UART_NR)
1975     return -EINVAL;
1976     - up = &sunsu_ports[inst];
1977     + up = &sunsu_ports[nr_inst];
1978     } else {
1979     up = kzalloc(sizeof(*up), GFP_KERNEL);
1980     if (!up)
1981     return -ENOMEM;
1982     }
1983    
1984     - up->port.line = inst;
1985     + up->port.line = nr_inst;
1986    
1987     spin_lock_init(&up->port.lock);
1988    
1989     @@ -1467,6 +1462,8 @@ static int su_probe(struct platform_device *op)
1990     }
1991     dev_set_drvdata(&op->dev, up);
1992    
1993     + nr_inst++;
1994     +
1995     return 0;
1996     }
1997    
1998     @@ -1494,7 +1491,7 @@ static int su_probe(struct platform_device *op)
1999    
2000     dev_set_drvdata(&op->dev, up);
2001    
2002     - inst++;
2003     + nr_inst++;
2004    
2005     return 0;
2006    
2007     diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
2008     index 2d92cce..35d2cf1 100644
2009     --- a/drivers/usb/class/cdc-acm.c
2010     +++ b/drivers/usb/class/cdc-acm.c
2011     @@ -600,7 +600,6 @@ static void acm_port_destruct(struct tty_port *port)
2012    
2013     dev_dbg(&acm->control->dev, "%s\n", __func__);
2014    
2015     - tty_unregister_device(acm_tty_driver, acm->minor);
2016     acm_release_minor(acm);
2017     usb_put_intf(acm->control);
2018     kfree(acm->country_codes);
2019     @@ -1418,6 +1417,8 @@ static void acm_disconnect(struct usb_interface *intf)
2020    
2021     stop_data_traffic(acm);
2022    
2023     + tty_unregister_device(acm_tty_driver, acm->minor);
2024     +
2025     usb_free_urb(acm->ctrlurb);
2026     for (i = 0; i < ACM_NW; i++)
2027     usb_free_urb(acm->wb[i].urb);
2028     diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
2029     index 622b4a4..2b487d4 100644
2030     --- a/drivers/usb/core/hcd-pci.c
2031     +++ b/drivers/usb/core/hcd-pci.c
2032     @@ -173,6 +173,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
2033     struct hc_driver *driver;
2034     struct usb_hcd *hcd;
2035     int retval;
2036     + int hcd_irq = 0;
2037    
2038     if (usb_disabled())
2039     return -ENODEV;
2040     @@ -187,15 +188,19 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
2041     return -ENODEV;
2042     dev->current_state = PCI_D0;
2043    
2044     - /* The xHCI driver supports MSI and MSI-X,
2045     - * so don't fail if the BIOS doesn't provide a legacy IRQ.
2046     + /*
2047     + * The xHCI driver has its own irq management
2048     + * make sure irq setup is not touched for xhci in generic hcd code
2049     */
2050     - if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) {
2051     - dev_err(&dev->dev,
2052     - "Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
2053     - pci_name(dev));
2054     - retval = -ENODEV;
2055     - goto disable_pci;
2056     + if ((driver->flags & HCD_MASK) != HCD_USB3) {
2057     + if (!dev->irq) {
2058     + dev_err(&dev->dev,
2059     + "Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
2060     + pci_name(dev));
2061     + retval = -ENODEV;
2062     + goto disable_pci;
2063     + }
2064     + hcd_irq = dev->irq;
2065     }
2066    
2067     hcd = usb_create_hcd(driver, &dev->dev, pci_name(dev));
2068     @@ -245,7 +250,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
2069    
2070     pci_set_master(dev);
2071    
2072     - retval = usb_add_hcd(hcd, dev->irq, IRQF_SHARED);
2073     + retval = usb_add_hcd(hcd, hcd_irq, IRQF_SHARED);
2074     if (retval != 0)
2075     goto unmap_registers;
2076     set_hs_companion(dev, hcd);
2077     diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c
2078     index 3953dd4..3b343b2 100644
2079     --- a/drivers/usb/gadget/g_ffs.c
2080     +++ b/drivers/usb/gadget/g_ffs.c
2081     @@ -357,7 +357,7 @@ static int gfs_bind(struct usb_composite_dev *cdev)
2082     goto error;
2083     gfs_dev_desc.iProduct = gfs_strings[USB_GADGET_PRODUCT_IDX].id;
2084    
2085     - for (i = func_num; --i; ) {
2086     + for (i = func_num; i--; ) {
2087     ret = functionfs_bind(ffs_tab[i].ffs_data, cdev);
2088     if (unlikely(ret < 0)) {
2089     while (++i < func_num)
2090     @@ -413,7 +413,7 @@ static int gfs_unbind(struct usb_composite_dev *cdev)
2091     gether_cleanup();
2092     gfs_ether_setup = false;
2093    
2094     - for (i = func_num; --i; )
2095     + for (i = func_num; i--; )
2096     if (ffs_tab[i].ffs_data)
2097     functionfs_unbind(ffs_tab[i].ffs_data);
2098    
2099     diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
2100     index b416a3f..416a6dc 100644
2101     --- a/drivers/usb/host/ehci-hcd.c
2102     +++ b/drivers/usb/host/ehci-hcd.c
2103     @@ -302,6 +302,7 @@ static void ehci_quiesce (struct ehci_hcd *ehci)
2104    
2105     static void end_unlink_async(struct ehci_hcd *ehci);
2106     static void unlink_empty_async(struct ehci_hcd *ehci);
2107     +static void unlink_empty_async_suspended(struct ehci_hcd *ehci);
2108     static void ehci_work(struct ehci_hcd *ehci);
2109     static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
2110     static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
2111     @@ -748,11 +749,9 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
2112     /* guard against (alleged) silicon errata */
2113     if (cmd & CMD_IAAD)
2114     ehci_dbg(ehci, "IAA with IAAD still set?\n");
2115     - if (ehci->async_iaa) {
2116     + if (ehci->async_iaa)
2117     COUNT(ehci->stats.iaa);
2118     - end_unlink_async(ehci);
2119     - } else
2120     - ehci_dbg(ehci, "IAA with nothing unlinked?\n");
2121     + end_unlink_async(ehci);
2122     }
2123    
2124     /* remote wakeup [4.3.1] */
2125     diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
2126     index 4d3b294..7d06e77 100644
2127     --- a/drivers/usb/host/ehci-hub.c
2128     +++ b/drivers/usb/host/ehci-hub.c
2129     @@ -328,7 +328,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
2130     ehci->rh_state = EHCI_RH_SUSPENDED;
2131    
2132     end_unlink_async(ehci);
2133     - unlink_empty_async(ehci);
2134     + unlink_empty_async_suspended(ehci);
2135     ehci_handle_intr_unlinks(ehci);
2136     end_free_itds(ehci);
2137    
2138     diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
2139     index eda2cf4..23d1369 100644
2140     --- a/drivers/usb/host/ehci-q.c
2141     +++ b/drivers/usb/host/ehci-q.c
2142     @@ -1178,7 +1178,7 @@ static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
2143     struct ehci_qh *prev;
2144    
2145     /* Add to the end of the list of QHs waiting for the next IAAD */
2146     - qh->qh_state = QH_STATE_UNLINK;
2147     + qh->qh_state = QH_STATE_UNLINK_WAIT;
2148     if (ehci->async_unlink)
2149     ehci->async_unlink_last->unlink_next = qh;
2150     else
2151     @@ -1221,9 +1221,19 @@ static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested)
2152    
2153     /* Do only the first waiting QH (nVidia bug?) */
2154     qh = ehci->async_unlink;
2155     - ehci->async_iaa = qh;
2156     - ehci->async_unlink = qh->unlink_next;
2157     - qh->unlink_next = NULL;
2158     +
2159     + /*
2160     + * Intel (?) bug: The HC can write back the overlay region
2161     + * even after the IAA interrupt occurs. In self-defense,
2162     + * always go through two IAA cycles for each QH.
2163     + */
2164     + if (qh->qh_state == QH_STATE_UNLINK_WAIT) {
2165     + qh->qh_state = QH_STATE_UNLINK;
2166     + } else {
2167     + ehci->async_iaa = qh;
2168     + ehci->async_unlink = qh->unlink_next;
2169     + qh->unlink_next = NULL;
2170     + }
2171    
2172     /* Make sure the unlinks are all visible to the hardware */
2173     wmb();
2174     @@ -1306,6 +1316,19 @@ static void unlink_empty_async(struct ehci_hcd *ehci)
2175     }
2176     }
2177    
2178     +/* The root hub is suspended; unlink all the async QHs */
2179     +static void unlink_empty_async_suspended(struct ehci_hcd *ehci)
2180     +{
2181     + struct ehci_qh *qh;
2182     +
2183     + while (ehci->async->qh_next.qh) {
2184     + qh = ehci->async->qh_next.qh;
2185     + WARN_ON(!list_empty(&qh->qtd_list));
2186     + single_unlink_async(ehci, qh);
2187     + }
2188     + start_iaa_cycle(ehci, false);
2189     +}
2190     +
2191     /* makes sure the async qh will become idle */
2192     /* caller must own ehci->lock */
2193    
2194     diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
2195     index 20dbdcb..c3fa130 100644
2196     --- a/drivers/usb/host/ehci-timer.c
2197     +++ b/drivers/usb/host/ehci-timer.c
2198     @@ -304,7 +304,7 @@ static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
2199     * (a) SMP races against real IAA firing and retriggering, and
2200     * (b) clean HC shutdown, when IAA watchdog was pending.
2201     */
2202     - if (ehci->async_iaa) {
2203     + if (1) {
2204     u32 cmd, status;
2205    
2206     /* If we get here, IAA is *REALLY* late. It's barely
2207     diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
2208     index f1f01a8..849470b 100644
2209     --- a/drivers/usb/host/xhci.c
2210     +++ b/drivers/usb/host/xhci.c
2211     @@ -350,7 +350,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
2212     * generate interrupts. Don't even try to enable MSI.
2213     */
2214     if (xhci->quirks & XHCI_BROKEN_MSI)
2215     - return 0;
2216     + goto legacy_irq;
2217    
2218     /* unregister the legacy interrupt */
2219     if (hcd->irq)
2220     @@ -371,6 +371,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
2221     return -EINVAL;
2222     }
2223    
2224     + legacy_irq:
2225     /* fall back to legacy interrupt*/
2226     ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
2227     hcd->irq_descr, hcd);
2228     diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
2229     index f791bd0..2c510e4 100644
2230     --- a/drivers/usb/host/xhci.h
2231     +++ b/drivers/usb/host/xhci.h
2232     @@ -206,8 +206,8 @@ struct xhci_op_regs {
2233     /* bits 12:31 are reserved (and should be preserved on writes). */
2234    
2235     /* IMAN - Interrupt Management Register */
2236     -#define IMAN_IP (1 << 1)
2237     -#define IMAN_IE (1 << 0)
2238     +#define IMAN_IE (1 << 1)
2239     +#define IMAN_IP (1 << 0)
2240    
2241     /* USBSTS - USB status - status bitmasks */
2242     /* HC not running - set to 1 when run/stop bit is cleared. */
2243     diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
2244     index 7c71769d..41613a2 100644
2245     --- a/drivers/usb/musb/da8xx.c
2246     +++ b/drivers/usb/musb/da8xx.c
2247     @@ -327,7 +327,7 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
2248     u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
2249     int err;
2250    
2251     - err = musb->int_usb & USB_INTR_VBUSERROR;
2252     + err = musb->int_usb & MUSB_INTR_VBUSERROR;
2253     if (err) {
2254     /*
2255     * The Mentor core doesn't debounce VBUS as needed
2256     diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
2257     index 203358d..34e702b 100644
2258     --- a/drivers/usb/serial/garmin_gps.c
2259     +++ b/drivers/usb/serial/garmin_gps.c
2260     @@ -959,10 +959,7 @@ static void garmin_close(struct usb_serial_port *port)
2261     if (!serial)
2262     return;
2263    
2264     - mutex_lock(&port->serial->disc_mutex);
2265     -
2266     - if (!port->serial->disconnected)
2267     - garmin_clear(garmin_data_p);
2268     + garmin_clear(garmin_data_p);
2269    
2270     /* shutdown our urbs */
2271     usb_kill_urb(port->read_urb);
2272     @@ -971,8 +968,6 @@ static void garmin_close(struct usb_serial_port *port)
2273     /* keep reset state so we know that we must start a new session */
2274     if (garmin_data_p->state != STATE_RESET)
2275     garmin_data_p->state = STATE_DISCONNECTED;
2276     -
2277     - mutex_unlock(&port->serial->disc_mutex);
2278     }
2279    
2280    
2281     diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
2282     index 82afc4d..aa6462f 100644
2283     --- a/drivers/usb/serial/io_ti.c
2284     +++ b/drivers/usb/serial/io_ti.c
2285     @@ -2702,6 +2702,7 @@ static struct usb_serial_driver edgeport_2port_device = {
2286     .set_termios = edge_set_termios,
2287     .tiocmget = edge_tiocmget,
2288     .tiocmset = edge_tiocmset,
2289     + .get_icount = edge_get_icount,
2290     .write = edge_write,
2291     .write_room = edge_write_room,
2292     .chars_in_buffer = edge_chars_in_buffer,
2293     diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
2294     index 15af799..3dc3ad2 100644
2295     --- a/drivers/usb/serial/usb-serial.c
2296     +++ b/drivers/usb/serial/usb-serial.c
2297     @@ -151,6 +151,7 @@ static void destroy_serial(struct kref *kref)
2298     }
2299     }
2300    
2301     + usb_put_intf(serial->interface);
2302     usb_put_dev(serial->dev);
2303     kfree(serial);
2304     }
2305     @@ -614,7 +615,7 @@ static struct usb_serial *create_serial(struct usb_device *dev,
2306     }
2307     serial->dev = usb_get_dev(dev);
2308     serial->type = driver;
2309     - serial->interface = interface;
2310     + serial->interface = usb_get_intf(interface);
2311     kref_init(&serial->kref);
2312     mutex_init(&serial->disc_mutex);
2313     serial->minor = SERIAL_TTY_NO_MINOR;
2314     diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
2315     index d305a5a..b75e90b 100644
2316     --- a/drivers/usb/storage/unusual_devs.h
2317     +++ b/drivers/usb/storage/unusual_devs.h
2318     @@ -488,6 +488,13 @@ UNUSUAL_DEV( 0x04e8, 0x5122, 0x0000, 0x9999,
2319     USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2320     US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG),
2321    
2322     +/* Added by Dmitry Artamonow <mad_soft@inbox.ru> */
2323     +UNUSUAL_DEV( 0x04e8, 0x5136, 0x0000, 0x9999,
2324     + "Samsung",
2325     + "YP-Z3",
2326     + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2327     + US_FL_MAX_SECTORS_64),
2328     +
2329     /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
2330     * Device uses standards-violating 32-byte Bulk Command Block Wrappers and
2331     * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011.
2332     diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
2333     index 959b1cd..ec6fb3f 100644
2334     --- a/drivers/vhost/net.c
2335     +++ b/drivers/vhost/net.c
2336     @@ -339,7 +339,8 @@ static void handle_tx(struct vhost_net *net)
2337     msg.msg_controllen = 0;
2338     ubufs = NULL;
2339     } else {
2340     - struct ubuf_info *ubuf = &vq->ubuf_info[head];
2341     + struct ubuf_info *ubuf;
2342     + ubuf = vq->ubuf_info + vq->upend_idx;
2343    
2344     vq->heads[vq->upend_idx].len =
2345     VHOST_DMA_IN_PROGRESS;
2346     diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
2347     index 3f2519d..e06cd5d 100644
2348     --- a/drivers/video/ep93xx-fb.c
2349     +++ b/drivers/video/ep93xx-fb.c
2350     @@ -23,6 +23,7 @@
2351     #include <linux/slab.h>
2352     #include <linux/clk.h>
2353     #include <linux/fb.h>
2354     +#include <linux/io.h>
2355    
2356     #include <linux/platform_data/video-ep93xx.h>
2357    
2358     diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
2359     index e3b8f75..0e9d8c4 100644
2360     --- a/drivers/watchdog/sp5100_tco.c
2361     +++ b/drivers/watchdog/sp5100_tco.c
2362     @@ -40,13 +40,12 @@
2363     #include "sp5100_tco.h"
2364    
2365     /* Module and version information */
2366     -#define TCO_VERSION "0.03"
2367     +#define TCO_VERSION "0.05"
2368     #define TCO_MODULE_NAME "SP5100 TCO timer"
2369     #define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION
2370    
2371     /* internal variables */
2372     static u32 tcobase_phys;
2373     -static u32 resbase_phys;
2374     static u32 tco_wdt_fired;
2375     static void __iomem *tcobase;
2376     static unsigned int pm_iobase;
2377     @@ -54,10 +53,6 @@ static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */
2378     static unsigned long timer_alive;
2379     static char tco_expect_close;
2380     static struct pci_dev *sp5100_tco_pci;
2381     -static struct resource wdt_res = {
2382     - .name = "Watchdog Timer",
2383     - .flags = IORESOURCE_MEM,
2384     -};
2385    
2386     /* the watchdog platform device */
2387     static struct platform_device *sp5100_tco_platform_device;
2388     @@ -75,12 +70,6 @@ module_param(nowayout, bool, 0);
2389     MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started."
2390     " (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
2391    
2392     -static unsigned int force_addr;
2393     -module_param(force_addr, uint, 0);
2394     -MODULE_PARM_DESC(force_addr, "Force the use of specified MMIO address."
2395     - " ONLY USE THIS PARAMETER IF YOU REALLY KNOW"
2396     - " WHAT YOU ARE DOING (default=none)");
2397     -
2398     /*
2399     * Some TCO specific functions
2400     */
2401     @@ -176,39 +165,6 @@ static void tco_timer_enable(void)
2402     }
2403     }
2404    
2405     -static void tco_timer_disable(void)
2406     -{
2407     - int val;
2408     -
2409     - if (sp5100_tco_pci->revision >= 0x40) {
2410     - /* For SB800 or later */
2411     - /* Enable watchdog decode bit and Disable watchdog timer */
2412     - outb(SB800_PM_WATCHDOG_CONTROL, SB800_IO_PM_INDEX_REG);
2413     - val = inb(SB800_IO_PM_DATA_REG);
2414     - val |= SB800_PCI_WATCHDOG_DECODE_EN;
2415     - val |= SB800_PM_WATCHDOG_DISABLE;
2416     - outb(val, SB800_IO_PM_DATA_REG);
2417     - } else {
2418     - /* For SP5100 or SB7x0 */
2419     - /* Enable watchdog decode bit */
2420     - pci_read_config_dword(sp5100_tco_pci,
2421     - SP5100_PCI_WATCHDOG_MISC_REG,
2422     - &val);
2423     -
2424     - val |= SP5100_PCI_WATCHDOG_DECODE_EN;
2425     -
2426     - pci_write_config_dword(sp5100_tco_pci,
2427     - SP5100_PCI_WATCHDOG_MISC_REG,
2428     - val);
2429     -
2430     - /* Disable Watchdog timer */
2431     - outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG);
2432     - val = inb(SP5100_IO_PM_DATA_REG);
2433     - val |= SP5100_PM_WATCHDOG_DISABLE;
2434     - outb(val, SP5100_IO_PM_DATA_REG);
2435     - }
2436     -}
2437     -
2438     /*
2439     * /dev/watchdog handling
2440     */
2441     @@ -361,7 +317,7 @@ static unsigned char sp5100_tco_setupdevice(void)
2442     {
2443     struct pci_dev *dev = NULL;
2444     const char *dev_name = NULL;
2445     - u32 val, tmp_val;
2446     + u32 val;
2447     u32 index_reg, data_reg, base_addr;
2448    
2449     /* Match the PCI device */
2450     @@ -459,63 +415,8 @@ static unsigned char sp5100_tco_setupdevice(void)
2451     } else
2452     pr_debug("SBResource_MMIO is disabled(0x%04x)\n", val);
2453    
2454     - /*
2455     - * Lastly re-programming the watchdog timer MMIO address,
2456     - * This method is a last resort...
2457     - *
2458     - * Before re-programming, to ensure that the watchdog timer
2459     - * is disabled, disable the watchdog timer.
2460     - */
2461     - tco_timer_disable();
2462     -
2463     - if (force_addr) {
2464     - /*
2465     - * Force the use of watchdog timer MMIO address, and aligned to
2466     - * 8byte boundary.
2467     - */
2468     - force_addr &= ~0x7;
2469     - val = force_addr;
2470     -
2471     - pr_info("Force the use of 0x%04x as MMIO address\n", val);
2472     - } else {
2473     - /*
2474     - * Get empty slot into the resource tree for watchdog timer.
2475     - */
2476     - if (allocate_resource(&iomem_resource,
2477     - &wdt_res,
2478     - SP5100_WDT_MEM_MAP_SIZE,
2479     - 0xf0000000,
2480     - 0xfffffff8,
2481     - 0x8,
2482     - NULL,
2483     - NULL)) {
2484     - pr_err("MMIO allocation failed\n");
2485     - goto unreg_region;
2486     - }
2487     -
2488     - val = resbase_phys = wdt_res.start;
2489     - pr_debug("Got 0x%04x from resource tree\n", val);
2490     - }
2491     -
2492     - /* Restore to the low three bits */
2493     - outb(base_addr+0, index_reg);
2494     - tmp_val = val | (inb(data_reg) & 0x7);
2495     -
2496     - /* Re-programming the watchdog timer base address */
2497     - outb(base_addr+0, index_reg);
2498     - outb((tmp_val >> 0) & 0xff, data_reg);
2499     - outb(base_addr+1, index_reg);
2500     - outb((tmp_val >> 8) & 0xff, data_reg);
2501     - outb(base_addr+2, index_reg);
2502     - outb((tmp_val >> 16) & 0xff, data_reg);
2503     - outb(base_addr+3, index_reg);
2504     - outb((tmp_val >> 24) & 0xff, data_reg);
2505     -
2506     - if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE,
2507     - dev_name)) {
2508     - pr_err("MMIO address 0x%04x already in use\n", val);
2509     - goto unreg_resource;
2510     - }
2511     + pr_notice("failed to find MMIO address, giving up.\n");
2512     + goto unreg_region;
2513    
2514     setup_wdt:
2515     tcobase_phys = val;
2516     @@ -555,9 +456,6 @@ setup_wdt:
2517    
2518     unreg_mem_region:
2519     release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
2520     -unreg_resource:
2521     - if (resbase_phys)
2522     - release_resource(&wdt_res);
2523     unreg_region:
2524     release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
2525     exit:
2526     @@ -567,7 +465,6 @@ exit:
2527     static int sp5100_tco_init(struct platform_device *dev)
2528     {
2529     int ret;
2530     - char addr_str[16];
2531    
2532     /*
2533     * Check whether or not the hardware watchdog is there. If found, then
2534     @@ -599,23 +496,14 @@ static int sp5100_tco_init(struct platform_device *dev)
2535     clear_bit(0, &timer_alive);
2536    
2537     /* Show module parameters */
2538     - if (force_addr == tcobase_phys)
2539     - /* The force_addr is vaild */
2540     - sprintf(addr_str, "0x%04x", force_addr);
2541     - else
2542     - strcpy(addr_str, "none");
2543     -
2544     - pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d, "
2545     - "force_addr=%s)\n",
2546     - tcobase, heartbeat, nowayout, addr_str);
2547     + pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n",
2548     + tcobase, heartbeat, nowayout);
2549    
2550     return 0;
2551    
2552     exit:
2553     iounmap(tcobase);
2554     release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
2555     - if (resbase_phys)
2556     - release_resource(&wdt_res);
2557     release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
2558     return ret;
2559     }
2560     @@ -630,8 +518,6 @@ static void sp5100_tco_cleanup(void)
2561     misc_deregister(&sp5100_tco_miscdev);
2562     iounmap(tcobase);
2563     release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
2564     - if (resbase_phys)
2565     - release_resource(&wdt_res);
2566     release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
2567     }
2568    
2569     diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
2570     index 71594a0..2b28c00 100644
2571     --- a/drivers/watchdog/sp5100_tco.h
2572     +++ b/drivers/watchdog/sp5100_tco.h
2573     @@ -57,7 +57,7 @@
2574     #define SB800_PM_WATCHDOG_DISABLE (1 << 2)
2575     #define SB800_PM_WATCHDOG_SECOND_RES (3 << 0)
2576     #define SB800_ACPI_MMIO_DECODE_EN (1 << 0)
2577     -#define SB800_ACPI_MMIO_SEL (1 << 2)
2578     +#define SB800_ACPI_MMIO_SEL (1 << 1)
2579    
2580    
2581     #define SB800_PM_WDT_MMIO_OFFSET 0xB00
2582     diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
2583     index cfd1ce3..1d36db1 100644
2584     --- a/fs/cifs/asn1.c
2585     +++ b/fs/cifs/asn1.c
2586     @@ -614,53 +614,10 @@ decode_negTokenInit(unsigned char *security_blob, int length,
2587     }
2588     }
2589    
2590     - /* mechlistMIC */
2591     - if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
2592     - /* Check if we have reached the end of the blob, but with
2593     - no mechListMic (e.g. NTLMSSP instead of KRB5) */
2594     - if (ctx.error == ASN1_ERR_DEC_EMPTY)
2595     - goto decode_negtoken_exit;
2596     - cFYI(1, "Error decoding last part negTokenInit exit3");
2597     - return 0;
2598     - } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
2599     - /* tag = 3 indicating mechListMIC */
2600     - cFYI(1, "Exit 4 cls = %d con = %d tag = %d end = %p (%d)",
2601     - cls, con, tag, end, *end);
2602     - return 0;
2603     - }
2604     -
2605     - /* sequence */
2606     - if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
2607     - cFYI(1, "Error decoding last part negTokenInit exit5");
2608     - return 0;
2609     - } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
2610     - || (tag != ASN1_SEQ)) {
2611     - cFYI(1, "cls = %d con = %d tag = %d end = %p (%d)",
2612     - cls, con, tag, end, *end);
2613     - }
2614     -
2615     - /* sequence of */
2616     - if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
2617     - cFYI(1, "Error decoding last part negTokenInit exit 7");
2618     - return 0;
2619     - } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
2620     - cFYI(1, "Exit 8 cls = %d con = %d tag = %d end = %p (%d)",
2621     - cls, con, tag, end, *end);
2622     - return 0;
2623     - }
2624     -
2625     - /* general string */
2626     - if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
2627     - cFYI(1, "Error decoding last part negTokenInit exit9");
2628     - return 0;
2629     - } else if ((cls != ASN1_UNI) || (con != ASN1_PRI)
2630     - || (tag != ASN1_GENSTR)) {
2631     - cFYI(1, "Exit10 cls = %d con = %d tag = %d end = %p (%d)",
2632     - cls, con, tag, end, *end);
2633     - return 0;
2634     - }
2635     - cFYI(1, "Need to call asn1_octets_decode() function for %s",
2636     - ctx.pointer); /* is this UTF-8 or ASCII? */
2637     -decode_negtoken_exit:
2638     + /*
2639     + * We currently ignore anything at the end of the SPNEGO blob after
2640     + * the mechTypes have been parsed, since none of that info is
2641     + * used at the moment.
2642     + */
2643     return 1;
2644     }
2645     diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
2646     index e328339..b9db388 100644
2647     --- a/fs/cifs/cifsfs.c
2648     +++ b/fs/cifs/cifsfs.c
2649     @@ -91,6 +91,30 @@ struct workqueue_struct *cifsiod_wq;
2650     __u8 cifs_client_guid[SMB2_CLIENT_GUID_SIZE];
2651     #endif
2652    
2653     +/*
2654     + * Bumps refcount for cifs super block.
2655     + * Note that it should be only called if a referece to VFS super block is
2656     + * already held, e.g. in open-type syscalls context. Otherwise it can race with
2657     + * atomic_dec_and_test in deactivate_locked_super.
2658     + */
2659     +void
2660     +cifs_sb_active(struct super_block *sb)
2661     +{
2662     + struct cifs_sb_info *server = CIFS_SB(sb);
2663     +
2664     + if (atomic_inc_return(&server->active) == 1)
2665     + atomic_inc(&sb->s_active);
2666     +}
2667     +
2668     +void
2669     +cifs_sb_deactive(struct super_block *sb)
2670     +{
2671     + struct cifs_sb_info *server = CIFS_SB(sb);
2672     +
2673     + if (atomic_dec_and_test(&server->active))
2674     + deactivate_super(sb);
2675     +}
2676     +
2677     static int
2678     cifs_read_super(struct super_block *sb)
2679     {
2680     diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
2681     index 7163419..0e32c34 100644
2682     --- a/fs/cifs/cifsfs.h
2683     +++ b/fs/cifs/cifsfs.h
2684     @@ -41,6 +41,10 @@ extern struct file_system_type cifs_fs_type;
2685     extern const struct address_space_operations cifs_addr_ops;
2686     extern const struct address_space_operations cifs_addr_ops_smallbuf;
2687    
2688     +/* Functions related to super block operations */
2689     +extern void cifs_sb_active(struct super_block *sb);
2690     +extern void cifs_sb_deactive(struct super_block *sb);
2691     +
2692     /* Functions related to inodes */
2693     extern const struct inode_operations cifs_dir_inode_ops;
2694     extern struct inode *cifs_root_iget(struct super_block *);
2695     diff --git a/fs/cifs/file.c b/fs/cifs/file.c
2696     index 8ea6ca5..1d93ee8 100644
2697     --- a/fs/cifs/file.c
2698     +++ b/fs/cifs/file.c
2699     @@ -294,6 +294,8 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
2700     INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
2701     mutex_init(&cfile->fh_mutex);
2702    
2703     + cifs_sb_active(inode->i_sb);
2704     +
2705     /*
2706     * If the server returned a read oplock and we have mandatory brlocks,
2707     * set oplock level to None.
2708     @@ -343,7 +345,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
2709     struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
2710     struct TCP_Server_Info *server = tcon->ses->server;
2711     struct cifsInodeInfo *cifsi = CIFS_I(inode);
2712     - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2713     + struct super_block *sb = inode->i_sb;
2714     + struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
2715     struct cifsLockInfo *li, *tmp;
2716     struct cifs_fid fid;
2717     struct cifs_pending_open open;
2718     @@ -408,6 +411,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
2719    
2720     cifs_put_tlink(cifs_file->tlink);
2721     dput(cifs_file->dentry);
2722     + cifs_sb_deactive(sb);
2723     kfree(cifs_file);
2724     }
2725    
2726     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
2727     index 8462eb3..bbcd6a0 100644
2728     --- a/fs/ext4/ext4.h
2729     +++ b/fs/ext4/ext4.h
2730     @@ -338,9 +338,9 @@ struct ext4_group_desc
2731     */
2732    
2733     struct flex_groups {
2734     - atomic_t free_inodes;
2735     - atomic_t free_clusters;
2736     - atomic_t used_dirs;
2737     + atomic64_t free_clusters;
2738     + atomic_t free_inodes;
2739     + atomic_t used_dirs;
2740     };
2741    
2742     #define EXT4_BG_INODE_UNINIT 0x0001 /* Inode table/bitmap not in use */
2743     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
2744     index d42a8c4..4d315a0 100644
2745     --- a/fs/ext4/extents.c
2746     +++ b/fs/ext4/extents.c
2747     @@ -3089,6 +3089,7 @@ static int ext4_split_extent(handle_t *handle,
2748     int err = 0;
2749     int uninitialized;
2750     int split_flag1, flags1;
2751     + int allocated = map->m_len;
2752    
2753     depth = ext_depth(inode);
2754     ex = path[depth].p_ext;
2755     @@ -3108,6 +3109,8 @@ static int ext4_split_extent(handle_t *handle,
2756     map->m_lblk + map->m_len, split_flag1, flags1);
2757     if (err)
2758     goto out;
2759     + } else {
2760     + allocated = ee_len - (map->m_lblk - ee_block);
2761     }
2762    
2763     ext4_ext_drop_refs(path);
2764     @@ -3130,7 +3133,7 @@ static int ext4_split_extent(handle_t *handle,
2765    
2766     ext4_ext_show_leaf(inode, path);
2767     out:
2768     - return err ? err : map->m_len;
2769     + return err ? err : allocated;
2770     }
2771    
2772     /*
2773     @@ -3275,7 +3278,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2774    
2775     if (EXT4_EXT_MAY_ZEROOUT & split_flag)
2776     max_zeroout = sbi->s_extent_max_zeroout_kb >>
2777     - inode->i_sb->s_blocksize_bits;
2778     + (inode->i_sb->s_blocksize_bits - 10);
2779    
2780     /* If extent is less than s_max_zeroout_kb, zeroout directly */
2781     if (max_zeroout && (ee_len <= max_zeroout)) {
2782     @@ -3720,6 +3723,7 @@ out:
2783     allocated - map->m_len);
2784     allocated = map->m_len;
2785     }
2786     + map->m_len = allocated;
2787    
2788     /*
2789     * If we have done fallocate with the offset that is already
2790     diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
2791     index 3f32c80..ec2909e 100644
2792     --- a/fs/ext4/ialloc.c
2793     +++ b/fs/ext4/ialloc.c
2794     @@ -324,8 +324,8 @@ error_return:
2795     }
2796    
2797     struct orlov_stats {
2798     + __u64 free_clusters;
2799     __u32 free_inodes;
2800     - __u32 free_clusters;
2801     __u32 used_dirs;
2802     };
2803    
2804     @@ -342,7 +342,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
2805    
2806     if (flex_size > 1) {
2807     stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
2808     - stats->free_clusters = atomic_read(&flex_group[g].free_clusters);
2809     + stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
2810     stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
2811     return;
2812     }
2813     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
2814     index 39f1fa7..22c5c67 100644
2815     --- a/fs/ext4/inode.c
2816     +++ b/fs/ext4/inode.c
2817     @@ -211,7 +211,8 @@ void ext4_evict_inode(struct inode *inode)
2818     * don't use page cache.
2819     */
2820     if (ext4_should_journal_data(inode) &&
2821     - (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
2822     + (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
2823     + inode->i_ino != EXT4_JOURNAL_INO) {
2824     journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
2825     tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
2826    
2827     diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
2828     index 28bbf9b..82f8c2d 100644
2829     --- a/fs/ext4/mballoc.c
2830     +++ b/fs/ext4/mballoc.c
2831     @@ -2829,8 +2829,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2832     if (sbi->s_log_groups_per_flex) {
2833     ext4_group_t flex_group = ext4_flex_group(sbi,
2834     ac->ac_b_ex.fe_group);
2835     - atomic_sub(ac->ac_b_ex.fe_len,
2836     - &sbi->s_flex_groups[flex_group].free_clusters);
2837     + atomic64_sub(ac->ac_b_ex.fe_len,
2838     + &sbi->s_flex_groups[flex_group].free_clusters);
2839     }
2840    
2841     err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2842     @@ -4691,8 +4691,8 @@ do_more:
2843    
2844     if (sbi->s_log_groups_per_flex) {
2845     ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
2846     - atomic_add(count_clusters,
2847     - &sbi->s_flex_groups[flex_group].free_clusters);
2848     + atomic64_add(count_clusters,
2849     + &sbi->s_flex_groups[flex_group].free_clusters);
2850     }
2851    
2852     ext4_mb_unload_buddy(&e4b);
2853     @@ -4836,8 +4836,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
2854    
2855     if (sbi->s_log_groups_per_flex) {
2856     ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
2857     - atomic_add(EXT4_NUM_B2C(sbi, blocks_freed),
2858     - &sbi->s_flex_groups[flex_group].free_clusters);
2859     + atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
2860     + &sbi->s_flex_groups[flex_group].free_clusters);
2861     }
2862    
2863     ext4_mb_unload_buddy(&e4b);
2864     diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
2865     index 1aab70d..9eace36 100644
2866     --- a/fs/ext4/resize.c
2867     +++ b/fs/ext4/resize.c
2868     @@ -1360,8 +1360,8 @@ static void ext4_update_super(struct super_block *sb,
2869     sbi->s_log_groups_per_flex) {
2870     ext4_group_t flex_group;
2871     flex_group = ext4_flex_group(sbi, group_data[0].group);
2872     - atomic_add(EXT4_NUM_B2C(sbi, free_blocks),
2873     - &sbi->s_flex_groups[flex_group].free_clusters);
2874     + atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
2875     + &sbi->s_flex_groups[flex_group].free_clusters);
2876     atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
2877     &sbi->s_flex_groups[flex_group].free_inodes);
2878     }
2879     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2880     index 5fa223d..24c767d 100644
2881     --- a/fs/ext4/super.c
2882     +++ b/fs/ext4/super.c
2883     @@ -1979,8 +1979,8 @@ static int ext4_fill_flex_info(struct super_block *sb)
2884     flex_group = ext4_flex_group(sbi, i);
2885     atomic_add(ext4_free_inodes_count(sb, gdp),
2886     &sbi->s_flex_groups[flex_group].free_inodes);
2887     - atomic_add(ext4_free_group_clusters(sb, gdp),
2888     - &sbi->s_flex_groups[flex_group].free_clusters);
2889     + atomic64_add(ext4_free_group_clusters(sb, gdp),
2890     + &sbi->s_flex_groups[flex_group].free_clusters);
2891     atomic_add(ext4_used_dirs_count(sb, gdp),
2892     &sbi->s_flex_groups[flex_group].used_dirs);
2893     }
2894     diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
2895     index df9f297..73b9253 100644
2896     --- a/fs/jbd2/transaction.c
2897     +++ b/fs/jbd2/transaction.c
2898     @@ -1047,9 +1047,12 @@ out:
2899     void jbd2_journal_set_triggers(struct buffer_head *bh,
2900     struct jbd2_buffer_trigger_type *type)
2901     {
2902     - struct journal_head *jh = bh2jh(bh);
2903     + struct journal_head *jh = jbd2_journal_grab_journal_head(bh);
2904    
2905     + if (WARN_ON(!jh))
2906     + return;
2907     jh->b_triggers = type;
2908     + jbd2_journal_put_journal_head(jh);
2909     }
2910    
2911     void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
2912     @@ -1101,17 +1104,18 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
2913     {
2914     transaction_t *transaction = handle->h_transaction;
2915     journal_t *journal = transaction->t_journal;
2916     - struct journal_head *jh = bh2jh(bh);
2917     + struct journal_head *jh;
2918     int ret = 0;
2919    
2920     - jbd_debug(5, "journal_head %p\n", jh);
2921     - JBUFFER_TRACE(jh, "entry");
2922     if (is_handle_aborted(handle))
2923     goto out;
2924     - if (!buffer_jbd(bh)) {
2925     + jh = jbd2_journal_grab_journal_head(bh);
2926     + if (!jh) {
2927     ret = -EUCLEAN;
2928     goto out;
2929     }
2930     + jbd_debug(5, "journal_head %p\n", jh);
2931     + JBUFFER_TRACE(jh, "entry");
2932    
2933     jbd_lock_bh_state(bh);
2934    
2935     @@ -1202,6 +1206,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
2936     spin_unlock(&journal->j_list_lock);
2937     out_unlock_bh:
2938     jbd_unlock_bh_state(bh);
2939     + jbd2_journal_put_journal_head(jh);
2940     out:
2941     JBUFFER_TRACE(jh, "exit");
2942     WARN_ON(ret); /* All errors are bugs, so dump the stack */
2943     diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
2944     index d586117..69c6413 100644
2945     --- a/fs/nfsd/vfs.c
2946     +++ b/fs/nfsd/vfs.c
2947     @@ -1013,6 +1013,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
2948     int host_err;
2949     int stable = *stablep;
2950     int use_wgather;
2951     + loff_t pos = offset;
2952    
2953     dentry = file->f_path.dentry;
2954     inode = dentry->d_inode;
2955     @@ -1025,7 +1026,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
2956    
2957     /* Write the data. */
2958     oldfs = get_fs(); set_fs(KERNEL_DS);
2959     - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
2960     + host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
2961     set_fs(oldfs);
2962     if (host_err < 0)
2963     goto out_nfserr;
2964     diff --git a/fs/proc/generic.c b/fs/proc/generic.c
2965     index 76ddae8..b796da2 100644
2966     --- a/fs/proc/generic.c
2967     +++ b/fs/proc/generic.c
2968     @@ -412,8 +412,7 @@ static const struct dentry_operations proc_dentry_operations =
2969     struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
2970     struct dentry *dentry)
2971     {
2972     - struct inode *inode = NULL;
2973     - int error = -ENOENT;
2974     + struct inode *inode;
2975    
2976     spin_lock(&proc_subdir_lock);
2977     for (de = de->subdir; de ; de = de->next) {
2978     @@ -422,22 +421,16 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
2979     if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
2980     pde_get(de);
2981     spin_unlock(&proc_subdir_lock);
2982     - error = -ENOMEM;
2983     inode = proc_get_inode(dir->i_sb, de);
2984     - goto out_unlock;
2985     + if (!inode)
2986     + return ERR_PTR(-ENOMEM);
2987     + d_set_d_op(dentry, &proc_dentry_operations);
2988     + d_add(dentry, inode);
2989     + return NULL;
2990     }
2991     }
2992     spin_unlock(&proc_subdir_lock);
2993     -out_unlock:
2994     -
2995     - if (inode) {
2996     - d_set_d_op(dentry, &proc_dentry_operations);
2997     - d_add(dentry, inode);
2998     - return NULL;
2999     - }
3000     - if (de)
3001     - pde_put(de);
3002     - return ERR_PTR(error);
3003     + return ERR_PTR(-ENOENT);
3004     }
3005    
3006     struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
3007     diff --git a/fs/proc/inode.c b/fs/proc/inode.c
3008     index 439ae688..0ac1e1b 100644
3009     --- a/fs/proc/inode.c
3010     +++ b/fs/proc/inode.c
3011     @@ -445,12 +445,10 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
3012    
3013     struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
3014     {
3015     - struct inode * inode;
3016     + struct inode *inode = new_inode_pseudo(sb);
3017    
3018     - inode = iget_locked(sb, de->low_ino);
3019     - if (!inode)
3020     - return NULL;
3021     - if (inode->i_state & I_NEW) {
3022     + if (inode) {
3023     + inode->i_ino = de->low_ino;
3024     inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3025     PROC_I(inode)->pde = de;
3026    
3027     @@ -478,11 +476,10 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
3028     inode->i_fop = de->proc_fops;
3029     }
3030     }
3031     - unlock_new_inode(inode);
3032     } else
3033     pde_put(de);
3034     return inode;
3035     -}
3036     +}
3037    
3038     int proc_fill_super(struct super_block *s)
3039     {
3040     @@ -499,6 +496,5 @@ int proc_fill_super(struct super_block *s)
3041     return 0;
3042    
3043     printk("proc_read_super: get root inode failed\n");
3044     - pde_put(&proc_root);
3045     return -ENOMEM;
3046     }
3047     diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
3048     index 5f02722..8d1c9d4 100644
3049     --- a/fs/udf/udf_sb.h
3050     +++ b/fs/udf/udf_sb.h
3051     @@ -82,7 +82,7 @@ struct udf_virtual_data {
3052     struct udf_bitmap {
3053     __u32 s_extLength;
3054     __u32 s_extPosition;
3055     - __u16 s_nr_groups;
3056     + int s_nr_groups;
3057     struct buffer_head **s_block_bitmap;
3058     };
3059    
3060     diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
3061     index c5c35e6..1cdbfe9 100644
3062     --- a/include/drm/drm_pciids.h
3063     +++ b/include/drm/drm_pciids.h
3064     @@ -568,7 +568,11 @@
3065     {0x1002, 0x9908, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3066     {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3067     {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3068     - {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3069     + {0x1002, 0x990B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3070     + {0x1002, 0x990C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3071     + {0x1002, 0x990D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3072     + {0x1002, 0x990E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3073     + {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3074     {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3075     {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3076     {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3077     @@ -579,6 +583,13 @@
3078     {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3079     {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3080     {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3081     + {0x1002, 0x9995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3082     + {0x1002, 0x9996, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3083     + {0x1002, 0x9997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3084     + {0x1002, 0x9998, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3085     + {0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3086     + {0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3087     + {0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3088     {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3089     {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3090     {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3091     diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
3092     index 320e976..98399e2 100644
3093     --- a/include/linux/skbuff.h
3094     +++ b/include/linux/skbuff.h
3095     @@ -491,7 +491,7 @@ struct sk_buff {
3096     union {
3097     __u32 mark;
3098     __u32 dropcount;
3099     - __u32 avail_size;
3100     + __u32 reserved_tailroom;
3101     };
3102    
3103     sk_buff_data_t inner_transport_header;
3104     @@ -1269,11 +1269,13 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
3105     * do not lose pfmemalloc information as the pages would not be
3106     * allocated using __GFP_MEMALLOC.
3107     */
3108     - if (page->pfmemalloc && !page->mapping)
3109     - skb->pfmemalloc = true;
3110     frag->page.p = page;
3111     frag->page_offset = off;
3112     skb_frag_size_set(frag, size);
3113     +
3114     + page = compound_head(page);
3115     + if (page->pfmemalloc && !page->mapping)
3116     + skb->pfmemalloc = true;
3117     }
3118    
3119     /**
3120     @@ -1428,7 +1430,10 @@ static inline int skb_tailroom(const struct sk_buff *skb)
3121     */
3122     static inline int skb_availroom(const struct sk_buff *skb)
3123     {
3124     - return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
3125     + if (skb_is_nonlinear(skb))
3126     + return 0;
3127     +
3128     + return skb->end - skb->tail - skb->reserved_tailroom;
3129     }
3130    
3131     /**
3132     diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
3133     index 3b8f9d4..cc25b70 100644
3134     --- a/include/linux/usb/cdc_ncm.h
3135     +++ b/include/linux/usb/cdc_ncm.h
3136     @@ -127,6 +127,7 @@ struct cdc_ncm_ctx {
3137     u16 connected;
3138     };
3139    
3140     +extern u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf);
3141     extern int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
3142     extern void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
3143     extern struct sk_buff *cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign);
3144     diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
3145     index 32786a0..2ab2e43 100644
3146     --- a/include/net/inet_frag.h
3147     +++ b/include/net/inet_frag.h
3148     @@ -35,6 +35,13 @@ struct inet_frag_queue {
3149    
3150     #define INETFRAGS_HASHSZ 64
3151    
3152     +/* averaged:
3153     + * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
3154     + * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
3155     + * struct frag_queue))
3156     + */
3157     +#define INETFRAGS_MAXDEPTH 128
3158     +
3159     struct inet_frags {
3160     struct hlist_head hash[INETFRAGS_HASHSZ];
3161     rwlock_t lock;
3162     @@ -65,6 +72,8 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
3163     struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
3164     struct inet_frags *f, void *key, unsigned int hash)
3165     __releases(&f->lock);
3166     +void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
3167     + const char *prefix);
3168    
3169     static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
3170     {
3171     diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
3172     index 9497be1..e49db91 100644
3173     --- a/include/net/ip_fib.h
3174     +++ b/include/net/ip_fib.h
3175     @@ -152,18 +152,16 @@ struct fib_result_nl {
3176     };
3177    
3178     #ifdef CONFIG_IP_ROUTE_MULTIPATH
3179     -
3180     #define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel])
3181     -
3182     -#define FIB_TABLE_HASHSZ 2
3183     -
3184     #else /* CONFIG_IP_ROUTE_MULTIPATH */
3185     -
3186     #define FIB_RES_NH(res) ((res).fi->fib_nh[0])
3187     +#endif /* CONFIG_IP_ROUTE_MULTIPATH */
3188    
3189     +#ifdef CONFIG_IP_MULTIPLE_TABLES
3190     #define FIB_TABLE_HASHSZ 256
3191     -
3192     -#endif /* CONFIG_IP_ROUTE_MULTIPATH */
3193     +#else
3194     +#define FIB_TABLE_HASHSZ 2
3195     +#endif
3196    
3197     extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
3198    
3199     diff --git a/ipc/mqueue.c b/ipc/mqueue.c
3200     index 71a3ca1..6ebfbf5 100644
3201     --- a/ipc/mqueue.c
3202     +++ b/ipc/mqueue.c
3203     @@ -840,7 +840,8 @@ out_putfd:
3204     fd = error;
3205     }
3206     mutex_unlock(&root->d_inode->i_mutex);
3207     - mnt_drop_write(mnt);
3208     + if (!ro)
3209     + mnt_drop_write(mnt);
3210     out_putname:
3211     putname(name);
3212     return fd;
3213     diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
3214     index f113755..a13987a 100644
3215     --- a/kernel/time/tick-broadcast.c
3216     +++ b/kernel/time/tick-broadcast.c
3217     @@ -66,7 +66,8 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
3218     */
3219     int tick_check_broadcast_device(struct clock_event_device *dev)
3220     {
3221     - if ((tick_broadcast_device.evtdev &&
3222     + if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
3223     + (tick_broadcast_device.evtdev &&
3224     tick_broadcast_device.evtdev->rating >= dev->rating) ||
3225     (dev->features & CLOCK_EVT_FEAT_C3STOP))
3226     return 0;
3227     diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
3228     index 43defd1..b27052c 100644
3229     --- a/kernel/trace/ftrace.c
3230     +++ b/kernel/trace/ftrace.c
3231     @@ -3082,8 +3082,8 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3232     continue;
3233     }
3234    
3235     - hlist_del(&entry->node);
3236     - call_rcu(&entry->rcu, ftrace_free_entry_rcu);
3237     + hlist_del_rcu(&entry->node);
3238     + call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
3239     }
3240     }
3241     __disable_ftrace_function_probe();
3242     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
3243     index 3c13e46..2ffbc24 100644
3244     --- a/kernel/trace/trace.c
3245     +++ b/kernel/trace/trace.c
3246     @@ -703,7 +703,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
3247     void
3248     update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
3249     {
3250     - struct ring_buffer *buf = tr->buffer;
3251     + struct ring_buffer *buf;
3252    
3253     if (trace_stop_count)
3254     return;
3255     @@ -715,6 +715,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
3256     }
3257     arch_spin_lock(&ftrace_max_lock);
3258    
3259     + buf = tr->buffer;
3260     tr->buffer = max_tr.buffer;
3261     max_tr.buffer = buf;
3262    
3263     @@ -2849,8 +2850,12 @@ static void set_tracer_flags(unsigned int mask, int enabled)
3264     if (mask == TRACE_ITER_RECORD_CMD)
3265     trace_event_enable_cmd_record(enabled);
3266    
3267     - if (mask == TRACE_ITER_OVERWRITE)
3268     + if (mask == TRACE_ITER_OVERWRITE) {
3269     ring_buffer_change_overwrite(global_trace.buffer, enabled);
3270     +#ifdef CONFIG_TRACER_MAX_TRACE
3271     + ring_buffer_change_overwrite(max_tr.buffer, enabled);
3272     +#endif
3273     + }
3274    
3275     if (mask == TRACE_ITER_PRINTK)
3276     trace_printk_start_stop_comm(enabled);
3277     @@ -2870,6 +2875,8 @@ static int trace_set_options(char *option)
3278     cmp += 2;
3279     }
3280    
3281     + mutex_lock(&trace_types_lock);
3282     +
3283     for (i = 0; trace_options[i]; i++) {
3284     if (strcmp(cmp, trace_options[i]) == 0) {
3285     set_tracer_flags(1 << i, !neg);
3286     @@ -2878,11 +2885,10 @@ static int trace_set_options(char *option)
3287     }
3288    
3289     /* If no option could be set, test the specific tracer options */
3290     - if (!trace_options[i]) {
3291     - mutex_lock(&trace_types_lock);
3292     + if (!trace_options[i])
3293     ret = set_tracer_option(current_trace, cmp, neg);
3294     - mutex_unlock(&trace_types_lock);
3295     - }
3296     +
3297     + mutex_unlock(&trace_types_lock);
3298    
3299     return ret;
3300     }
3301     @@ -4640,7 +4646,10 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
3302    
3303     if (val != 0 && val != 1)
3304     return -EINVAL;
3305     +
3306     + mutex_lock(&trace_types_lock);
3307     set_tracer_flags(1 << index, val);
3308     + mutex_unlock(&trace_types_lock);
3309    
3310     *ppos += cnt;
3311    
3312     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
3313     index 546db81..d7cec92 100644
3314     --- a/mm/hugetlb.c
3315     +++ b/mm/hugetlb.c
3316     @@ -2127,8 +2127,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
3317     /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
3318     unsigned long hugetlb_total_pages(void)
3319     {
3320     - struct hstate *h = &default_hstate;
3321     - return h->nr_huge_pages * pages_per_huge_page(h);
3322     + struct hstate *h;
3323     + unsigned long nr_total_pages = 0;
3324     +
3325     + for_each_hstate(h)
3326     + nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3327     + return nr_total_pages;
3328     }
3329    
3330     static int hugetlb_acct_memory(struct hstate *h, long delta)
3331     diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
3332     index 7d02ebd..1ee94d0 100644
3333     --- a/net/batman-adv/bat_iv_ogm.c
3334     +++ b/net/batman-adv/bat_iv_ogm.c
3335     @@ -1298,7 +1298,8 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
3336     batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
3337    
3338     /* unpack the aggregated packets and process them one by one */
3339     - do {
3340     + while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
3341     + batadv_ogm_packet->tt_num_changes)) {
3342     tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN;
3343    
3344     batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff,
3345     @@ -1309,8 +1310,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
3346    
3347     packet_pos = packet_buff + buff_pos;
3348     batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
3349     - } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
3350     - batadv_ogm_packet->tt_num_changes));
3351     + }
3352    
3353     kfree_skb(skb);
3354     return NET_RX_SUCCESS;
3355     diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
3356     index 5dc66ab..580e176 100644
3357     --- a/net/bridge/br_netlink.c
3358     +++ b/net/bridge/br_netlink.c
3359     @@ -28,6 +28,7 @@ static inline size_t br_port_info_size(void)
3360     + nla_total_size(1) /* IFLA_BRPORT_MODE */
3361     + nla_total_size(1) /* IFLA_BRPORT_GUARD */
3362     + nla_total_size(1) /* IFLA_BRPORT_PROTECT */
3363     + + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */
3364     + 0;
3365     }
3366    
3367     diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
3368     index 798f920..6212ec9 100644
3369     --- a/net/core/rtnetlink.c
3370     +++ b/net/core/rtnetlink.c
3371     @@ -2539,7 +2539,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
3372     struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
3373    
3374     while (RTA_OK(attr, attrlen)) {
3375     - unsigned int flavor = attr->rta_type;
3376     + unsigned int flavor = attr->rta_type & NLA_TYPE_MASK;
3377     if (flavor) {
3378     if (flavor > rta_max[sz_idx])
3379     return -EINVAL;
3380     diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
3381     index 4750d2b..03f5af7 100644
3382     --- a/net/ipv4/inet_fragment.c
3383     +++ b/net/ipv4/inet_fragment.c
3384     @@ -21,6 +21,7 @@
3385     #include <linux/rtnetlink.h>
3386     #include <linux/slab.h>
3387    
3388     +#include <net/sock.h>
3389     #include <net/inet_frag.h>
3390    
3391     static void inet_frag_secret_rebuild(unsigned long dummy)
3392     @@ -276,6 +277,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
3393     {
3394     struct inet_frag_queue *q;
3395     struct hlist_node *n;
3396     + int depth = 0;
3397    
3398     hlist_for_each_entry(q, n, &f->hash[hash], list) {
3399     if (q->net == nf && f->match(q, key)) {
3400     @@ -283,9 +285,25 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
3401     read_unlock(&f->lock);
3402     return q;
3403     }
3404     + depth++;
3405     }
3406     read_unlock(&f->lock);
3407    
3408     - return inet_frag_create(nf, f, key);
3409     + if (depth <= INETFRAGS_MAXDEPTH)
3410     + return inet_frag_create(nf, f, key);
3411     + else
3412     + return ERR_PTR(-ENOBUFS);
3413     }
3414     EXPORT_SYMBOL(inet_frag_find);
3415     +
3416     +void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
3417     + const char *prefix)
3418     +{
3419     + static const char msg[] = "inet_frag_find: Fragment hash bucket"
3420     + " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
3421     + ". Dropping fragment.\n";
3422     +
3423     + if (PTR_ERR(q) == -ENOBUFS)
3424     + LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
3425     +}
3426     +EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
3427     diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
3428     index eb9d63a..a8fc332 100644
3429     --- a/net/ipv4/ip_fragment.c
3430     +++ b/net/ipv4/ip_fragment.c
3431     @@ -299,14 +299,11 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
3432     hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
3433    
3434     q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
3435     - if (q == NULL)
3436     - goto out_nomem;
3437     -
3438     + if (IS_ERR_OR_NULL(q)) {
3439     + inet_frag_maybe_warn_overflow(q, pr_fmt());
3440     + return NULL;
3441     + }
3442     return container_of(q, struct ipq, q);
3443     -
3444     -out_nomem:
3445     - LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n"));
3446     - return NULL;
3447     }
3448    
3449     /* Is the fragment too far ahead to be part of ipq? */
3450     diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
3451     index e81b1ca..a85062b 100644
3452     --- a/net/ipv4/ip_gre.c
3453     +++ b/net/ipv4/ip_gre.c
3454     @@ -761,10 +761,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
3455    
3456     if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
3457     gre_hlen = 0;
3458     - if (skb->protocol == htons(ETH_P_IP))
3459     - tiph = (const struct iphdr *)skb->data;
3460     - else
3461     - tiph = &tunnel->parms.iph;
3462     + tiph = (const struct iphdr *)skb->data;
3463     } else {
3464     gre_hlen = tunnel->hlen;
3465     tiph = &tunnel->parms.iph;
3466     diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
3467     index f6289bf..9100fe0 100644
3468     --- a/net/ipv4/ip_options.c
3469     +++ b/net/ipv4/ip_options.c
3470     @@ -370,7 +370,6 @@ int ip_options_compile(struct net *net,
3471     }
3472     switch (optptr[3]&0xF) {
3473     case IPOPT_TS_TSONLY:
3474     - opt->ts = optptr - iph;
3475     if (skb)
3476     timeptr = &optptr[optptr[2]-1];
3477     opt->ts_needtime = 1;
3478     @@ -381,7 +380,6 @@ int ip_options_compile(struct net *net,
3479     pp_ptr = optptr + 2;
3480     goto error;
3481     }
3482     - opt->ts = optptr - iph;
3483     if (rt) {
3484     spec_dst_fill(&spec_dst, skb);
3485     memcpy(&optptr[optptr[2]-1], &spec_dst, 4);
3486     @@ -396,7 +394,6 @@ int ip_options_compile(struct net *net,
3487     pp_ptr = optptr + 2;
3488     goto error;
3489     }
3490     - opt->ts = optptr - iph;
3491     {
3492     __be32 addr;
3493     memcpy(&addr, &optptr[optptr[2]-1], 4);
3494     @@ -429,12 +426,12 @@ int ip_options_compile(struct net *net,
3495     pp_ptr = optptr + 3;
3496     goto error;
3497     }
3498     - opt->ts = optptr - iph;
3499     if (skb) {
3500     optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4);
3501     opt->is_changed = 1;
3502     }
3503     }
3504     + opt->ts = optptr - iph;
3505     break;
3506     case IPOPT_RA:
3507     if (optlen < 4) {
3508     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
3509     index 2aa69c8..45b63ca 100644
3510     --- a/net/ipv4/tcp.c
3511     +++ b/net/ipv4/tcp.c
3512     @@ -773,7 +773,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
3513     * Make sure that we have exactly size bytes
3514     * available to the caller, no more, no less.
3515     */
3516     - skb->avail_size = size;
3517     + skb->reserved_tailroom = skb->end - skb->tail - size;
3518     return skb;
3519     }
3520     __kfree_skb(skb);
3521     diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
3522     index eadb693..d9130a9 100644
3523     --- a/net/ipv4/tcp_ipv4.c
3524     +++ b/net/ipv4/tcp_ipv4.c
3525     @@ -274,13 +274,6 @@ static void tcp_v4_mtu_reduced(struct sock *sk)
3526     struct inet_sock *inet = inet_sk(sk);
3527     u32 mtu = tcp_sk(sk)->mtu_info;
3528    
3529     - /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
3530     - * send out by Linux are always <576bytes so they should go through
3531     - * unfragmented).
3532     - */
3533     - if (sk->sk_state == TCP_LISTEN)
3534     - return;
3535     -
3536     dst = inet_csk_update_pmtu(sk, mtu);
3537     if (!dst)
3538     return;
3539     @@ -408,6 +401,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
3540     goto out;
3541    
3542     if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
3543     + /* We are not interested in TCP_LISTEN and open_requests
3544     + * (SYN-ACKs send out by Linux are always <576bytes so
3545     + * they should go through unfragmented).
3546     + */
3547     + if (sk->sk_state == TCP_LISTEN)
3548     + goto out;
3549     +
3550     tp->mtu_info = info;
3551     if (!sock_owned_by_user(sk)) {
3552     tcp_v4_mtu_reduced(sk);
3553     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
3554     index ff031a5..c36c6c2 100644
3555     --- a/net/ipv4/tcp_output.c
3556     +++ b/net/ipv4/tcp_output.c
3557     @@ -1298,7 +1298,6 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
3558     eat = min_t(int, len, skb_headlen(skb));
3559     if (eat) {
3560     __skb_pull(skb, eat);
3561     - skb->avail_size -= eat;
3562     len -= eat;
3563     if (!len)
3564     return;
3565     diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
3566     index 3dacecc..2f3a018 100644
3567     --- a/net/ipv6/netfilter/nf_conntrack_reasm.c
3568     +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
3569     @@ -14,6 +14,8 @@
3570     * 2 of the License, or (at your option) any later version.
3571     */
3572    
3573     +#define pr_fmt(fmt) "IPv6-nf: " fmt
3574     +
3575     #include <linux/errno.h>
3576     #include <linux/types.h>
3577     #include <linux/string.h>
3578     @@ -180,13 +182,11 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
3579    
3580     q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
3581     local_bh_enable();
3582     - if (q == NULL)
3583     - goto oom;
3584     -
3585     + if (IS_ERR_OR_NULL(q)) {
3586     + inet_frag_maybe_warn_overflow(q, pr_fmt());
3587     + return NULL;
3588     + }
3589     return container_of(q, struct frag_queue, q);
3590     -
3591     -oom:
3592     - return NULL;
3593     }
3594    
3595    
3596     diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
3597     index e5253ec..d9ba8a2 100644
3598     --- a/net/ipv6/reassembly.c
3599     +++ b/net/ipv6/reassembly.c
3600     @@ -26,6 +26,9 @@
3601     * YOSHIFUJI,H. @USAGI Always remove fragment header to
3602     * calculate ICV correctly.
3603     */
3604     +
3605     +#define pr_fmt(fmt) "IPv6: " fmt
3606     +
3607     #include <linux/errno.h>
3608     #include <linux/types.h>
3609     #include <linux/string.h>
3610     @@ -197,9 +200,10 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6
3611     hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
3612    
3613     q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
3614     - if (q == NULL)
3615     + if (IS_ERR_OR_NULL(q)) {
3616     + inet_frag_maybe_warn_overflow(q, pr_fmt());
3617     return NULL;
3618     -
3619     + }
3620     return container_of(q, struct frag_queue, q);
3621     }
3622    
3623     diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
3624     index 4f435371..8d19346 100644
3625     --- a/net/ipv6/tcp_ipv6.c
3626     +++ b/net/ipv6/tcp_ipv6.c
3627     @@ -389,6 +389,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
3628     }
3629    
3630     if (type == ICMPV6_PKT_TOOBIG) {
3631     + /* We are not interested in TCP_LISTEN and open_requests
3632     + * (SYN-ACKs send out by Linux are always <576bytes so
3633     + * they should go through unfragmented).
3634     + */
3635     + if (sk->sk_state == TCP_LISTEN)
3636     + goto out;
3637     +
3638     tp->mtu_info = ntohl(info);
3639     if (!sock_owned_by_user(sk))
3640     tcp_v6_mtu_reduced(sk);
3641     diff --git a/net/sctp/associola.c b/net/sctp/associola.c
3642     index b45ed1f..67c6823 100644
3643     --- a/net/sctp/associola.c
3644     +++ b/net/sctp/associola.c
3645     @@ -1080,7 +1080,7 @@ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
3646     transports) {
3647    
3648     if (transport == active)
3649     - break;
3650     + continue;
3651     list_for_each_entry(chunk, &transport->transmitted,
3652     transmitted_list) {
3653     if (key == chunk->subh.data_hdr->tsn) {
3654     diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
3655     index 5131fcf..de1a013 100644
3656     --- a/net/sctp/sm_statefuns.c
3657     +++ b/net/sctp/sm_statefuns.c
3658     @@ -2082,7 +2082,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
3659     }
3660    
3661     /* Delete the tempory new association. */
3662     - sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
3663     + sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));
3664     sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
3665    
3666     /* Restore association pointer to provide SCTP command interpeter
3667     diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
3668     index 713e9af..e46b6a3 100644
3669     --- a/sound/pci/hda/hda_codec.c
3670     +++ b/sound/pci/hda/hda_codec.c
3671     @@ -2967,7 +2967,7 @@ static unsigned int convert_to_spdif_status(unsigned short val)
3672     if (val & AC_DIG1_PROFESSIONAL)
3673     sbits |= IEC958_AES0_PROFESSIONAL;
3674     if (sbits & IEC958_AES0_PROFESSIONAL) {
3675     - if (sbits & AC_DIG1_EMPHASIS)
3676     + if (val & AC_DIG1_EMPHASIS)
3677     sbits |= IEC958_AES0_PRO_EMPHASIS_5015;
3678     } else {
3679     if (val & AC_DIG1_EMPHASIS)
3680     diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
3681     index 009b77a..09fae16 100644
3682     --- a/sound/pci/hda/patch_conexant.c
3683     +++ b/sound/pci/hda/patch_conexant.c
3684     @@ -1175,7 +1175,7 @@ static int patch_cxt5045(struct hda_codec *codec)
3685     }
3686    
3687     if (spec->beep_amp)
3688     - snd_hda_attach_beep_device(codec, spec->beep_amp);
3689     + snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
3690    
3691     return 0;
3692     }
3693     @@ -1954,7 +1954,7 @@ static int patch_cxt5051(struct hda_codec *codec)
3694     }
3695    
3696     if (spec->beep_amp)
3697     - snd_hda_attach_beep_device(codec, spec->beep_amp);
3698     + snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
3699    
3700     return 0;
3701     }
3702     @@ -3136,7 +3136,7 @@ static int patch_cxt5066(struct hda_codec *codec)
3703     }
3704    
3705     if (spec->beep_amp)
3706     - snd_hda_attach_beep_device(codec, spec->beep_amp);
3707     + snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
3708    
3709     return 0;
3710     }
3711     @@ -4576,7 +4576,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
3712     spec->capture_stream = &cx_auto_pcm_analog_capture;
3713     codec->patch_ops = cx_auto_patch_ops;
3714     if (spec->beep_amp)
3715     - snd_hda_attach_beep_device(codec, spec->beep_amp);
3716     + snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
3717    
3718     /* Some laptops with Conexant chips show stalls in S3 resume,
3719     * which falls into the single-cmd mode.
3720     diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
3721     index e90daf8..64a564d 100644
3722     --- a/sound/usb/mixer.c
3723     +++ b/sound/usb/mixer.c
3724     @@ -715,8 +715,9 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_
3725     case UAC2_CLOCK_SELECTOR: {
3726     struct uac_selector_unit_descriptor *d = p1;
3727     /* call recursively to retrieve the channel info */
3728     - if (check_input_term(state, d->baSourceID[0], term) < 0)
3729     - return -ENODEV;
3730     + err = check_input_term(state, d->baSourceID[0], term);
3731     + if (err < 0)
3732     + return err;
3733     term->type = d->bDescriptorSubtype << 16; /* virtual type */
3734     term->id = id;
3735     term->name = uac_selector_unit_iSelector(d);
3736     @@ -725,7 +726,8 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_
3737     case UAC1_PROCESSING_UNIT:
3738     case UAC1_EXTENSION_UNIT:
3739     /* UAC2_PROCESSING_UNIT_V2 */
3740     - /* UAC2_EFFECT_UNIT */ {
3741     + /* UAC2_EFFECT_UNIT */
3742     + case UAC2_EXTENSION_UNIT_V2: {
3743     struct uac_processing_unit_descriptor *d = p1;
3744    
3745     if (state->mixer->protocol == UAC_VERSION_2 &&
3746     @@ -1355,8 +1357,9 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
3747     return err;
3748    
3749     /* determine the input source type and name */
3750     - if (check_input_term(state, hdr->bSourceID, &iterm) < 0)
3751     - return -EINVAL;
3752     + err = check_input_term(state, hdr->bSourceID, &iterm);
3753     + if (err < 0)
3754     + return err;
3755    
3756     master_bits = snd_usb_combine_bytes(bmaControls, csize);
3757     /* master configuration quirks */
3758     @@ -2051,6 +2054,8 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
3759     return parse_audio_extension_unit(state, unitid, p1);
3760     else /* UAC_VERSION_2 */
3761     return parse_audio_processing_unit(state, unitid, p1);
3762     + case UAC2_EXTENSION_UNIT_V2:
3763     + return parse_audio_extension_unit(state, unitid, p1);
3764     default:
3765     snd_printk(KERN_ERR "usbaudio: unit %u: unexpected type 0x%02x\n", unitid, p1[2]);
3766     return -EINVAL;
3767     @@ -2117,7 +2122,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
3768     state.oterm.type = le16_to_cpu(desc->wTerminalType);
3769     state.oterm.name = desc->iTerminal;
3770     err = parse_audio_unit(&state, desc->bSourceID);
3771     - if (err < 0)
3772     + if (err < 0 && err != -EINVAL)
3773     return err;
3774     } else { /* UAC_VERSION_2 */
3775     struct uac2_output_terminal_descriptor *desc = p;
3776     @@ -2129,12 +2134,12 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
3777     state.oterm.type = le16_to_cpu(desc->wTerminalType);
3778     state.oterm.name = desc->iTerminal;
3779     err = parse_audio_unit(&state, desc->bSourceID);
3780     - if (err < 0)
3781     + if (err < 0 && err != -EINVAL)
3782     return err;
3783    
3784     /* for UAC2, use the same approach to also add the clock selectors */
3785     err = parse_audio_unit(&state, desc->bCSourceID);
3786     - if (err < 0)
3787     + if (err < 0 && err != -EINVAL)
3788     return err;
3789     }
3790     }