Magellan Linux

Contents of /trunk/kernel-alx/patches-4.14/0177-4.14.78-all-fixes.patch

Parent Directory | Revision Log


Revision 3238 - (show annotations) (download)
Fri Nov 9 12:14:58 2018 UTC (5 years, 5 months ago) by niro
File size: 67338 byte(s)
-added up to patches-4.14.79
1 diff --git a/Makefile b/Makefile
2 index 16d1a18496fb..89574ee68d6b 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 14
9 -SUBLEVEL = 77
10 +SUBLEVEL = 78
11 EXTRAVERSION =
12 NAME = Petit Gorille
13
14 diff --git a/arch/arc/Makefile b/arch/arc/Makefile
15 index 6c1b20dd76ad..7c6c97782022 100644
16 --- a/arch/arc/Makefile
17 +++ b/arch/arc/Makefile
18 @@ -6,34 +6,12 @@
19 # published by the Free Software Foundation.
20 #
21
22 -ifeq ($(CROSS_COMPILE),)
23 -ifndef CONFIG_CPU_BIG_ENDIAN
24 -CROSS_COMPILE := arc-linux-
25 -else
26 -CROSS_COMPILE := arceb-linux-
27 -endif
28 -endif
29 -
30 KBUILD_DEFCONFIG := nsim_700_defconfig
31
32 cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
33 cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
34 cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs
35
36 -is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
37 -
38 -ifdef CONFIG_ISA_ARCOMPACT
39 -ifeq ($(is_700), 0)
40 - $(error Toolchain not configured for ARCompact builds)
41 -endif
42 -endif
43 -
44 -ifdef CONFIG_ISA_ARCV2
45 -ifeq ($(is_700), 1)
46 - $(error Toolchain not configured for ARCv2 builds)
47 -endif
48 -endif
49 -
50 ifdef CONFIG_ARC_CURR_IN_REG
51 # For a global register defintion, make sure it gets passed to every file
52 # We had a customer reported bug where some code built in kernel was NOT using
53 @@ -87,7 +65,7 @@ ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
54 # --build-id w/o "-marclinux". Default arc-elf32-ld is OK
55 ldflags-$(upto_gcc44) += -marclinux
56
57 -LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
58 +LIBGCC = $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
59
60 # Modules with short calls might break for calls into builtin-kernel
61 KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode
62 diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
63 index 2c895e8d07f7..812535f40124 100644
64 --- a/arch/powerpc/include/asm/code-patching.h
65 +++ b/arch/powerpc/include/asm/code-patching.h
66 @@ -31,6 +31,7 @@ unsigned int create_cond_branch(const unsigned int *addr,
67 unsigned long target, int flags);
68 int patch_branch(unsigned int *addr, unsigned long target, int flags);
69 int patch_instruction(unsigned int *addr, unsigned int instr);
70 +int raw_patch_instruction(unsigned int *addr, unsigned int instr);
71
72 int instr_is_relative_branch(unsigned int instr);
73 int instr_is_relative_link_branch(unsigned int instr);
74 diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
75 index 1da12f521cb7..b735b727ed2b 100644
76 --- a/arch/powerpc/kernel/tm.S
77 +++ b/arch/powerpc/kernel/tm.S
78 @@ -167,13 +167,27 @@ _GLOBAL(tm_reclaim)
79 std r1, PACATMSCRATCH(r13)
80 ld r1, PACAR1(r13)
81
82 - /* Store the PPR in r11 and reset to decent value */
83 std r11, GPR11(r1) /* Temporary stash */
84
85 + /*
86 + * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
87 + * clobbered by an exception once we turn on MSR_RI below.
88 + */
89 + ld r11, PACATMSCRATCH(r13)
90 + std r11, GPR1(r1)
91 +
92 + /*
93 + * Store r13 away so we can free up the scratch SPR for the SLB fault
94 + * handler (needed once we start accessing the thread_struct).
95 + */
96 + GET_SCRATCH0(r11)
97 + std r11, GPR13(r1)
98 +
99 /* Reset MSR RI so we can take SLB faults again */
100 li r11, MSR_RI
101 mtmsrd r11, 1
102
103 + /* Store the PPR in r11 and reset to decent value */
104 mfspr r11, SPRN_PPR
105 HMT_MEDIUM
106
107 @@ -198,11 +212,11 @@ _GLOBAL(tm_reclaim)
108 SAVE_GPR(8, r7) /* user r8 */
109 SAVE_GPR(9, r7) /* user r9 */
110 SAVE_GPR(10, r7) /* user r10 */
111 - ld r3, PACATMSCRATCH(r13) /* user r1 */
112 + ld r3, GPR1(r1) /* user r1 */
113 ld r4, GPR7(r1) /* user r7 */
114 ld r5, GPR11(r1) /* user r11 */
115 ld r6, GPR12(r1) /* user r12 */
116 - GET_SCRATCH0(8) /* user r13 */
117 + ld r8, GPR13(r1) /* user r13 */
118 std r3, GPR1(r7)
119 std r4, GPR7(r7)
120 std r5, GPR11(r7)
121 diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
122 index 882c750dc519..130405158afa 100644
123 --- a/arch/powerpc/lib/code-patching.c
124 +++ b/arch/powerpc/lib/code-patching.c
125 @@ -39,7 +39,7 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
126 return 0;
127 }
128
129 -static int raw_patch_instruction(unsigned int *addr, unsigned int instr)
130 +int raw_patch_instruction(unsigned int *addr, unsigned int instr)
131 {
132 return __patch_instruction(addr, instr, addr);
133 }
134 @@ -156,7 +156,7 @@ static int do_patch_instruction(unsigned int *addr, unsigned int instr)
135 * when text_poke_area is not ready, but we still need
136 * to allow patching. We just do the plain old patching
137 */
138 - if (!this_cpu_read(*PTRRELOC(&text_poke_area)))
139 + if (!this_cpu_read(text_poke_area))
140 return raw_patch_instruction(addr, instr);
141
142 local_irq_save(flags);
143 diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
144 index 762a899e85a4..e1bcdc32a851 100644
145 --- a/arch/powerpc/lib/feature-fixups.c
146 +++ b/arch/powerpc/lib/feature-fixups.c
147 @@ -63,7 +63,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
148 }
149 }
150
151 - patch_instruction(dest, instr);
152 + raw_patch_instruction(dest, instr);
153
154 return 0;
155 }
156 @@ -92,7 +92,7 @@ static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
157 }
158
159 for (; dest < end; dest++)
160 - patch_instruction(dest, PPC_INST_NOP);
161 + raw_patch_instruction(dest, PPC_INST_NOP);
162
163 return 0;
164 }
165 @@ -292,7 +292,7 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
166
167 for (; start < end; start++) {
168 dest = (void *)start + *start;
169 - patch_instruction(dest, PPC_INST_LWSYNC);
170 + raw_patch_instruction(dest, PPC_INST_LWSYNC);
171 }
172 }
173
174 @@ -310,7 +310,7 @@ static void do_final_fixups(void)
175 length = (__end_interrupts - _stext) / sizeof(int);
176
177 while (length--) {
178 - patch_instruction(dest, *src);
179 + raw_patch_instruction(dest, *src);
180 src++;
181 dest++;
182 }
183 diff --git a/drivers/clocksource/timer-fttmr010.c b/drivers/clocksource/timer-fttmr010.c
184 index cdf23b628688..cdfe1c82f3f0 100644
185 --- a/drivers/clocksource/timer-fttmr010.c
186 +++ b/drivers/clocksource/timer-fttmr010.c
187 @@ -130,13 +130,17 @@ static int fttmr010_timer_set_next_event(unsigned long cycles,
188 cr &= ~fttmr010->t1_enable_val;
189 writel(cr, fttmr010->base + TIMER_CR);
190
191 - /* Setup the match register forward/backward in time */
192 - cr = readl(fttmr010->base + TIMER1_COUNT);
193 - if (fttmr010->count_down)
194 - cr -= cycles;
195 - else
196 - cr += cycles;
197 - writel(cr, fttmr010->base + TIMER1_MATCH1);
198 + if (fttmr010->count_down) {
199 + /*
200 + * ASPEED Timer Controller will load TIMER1_LOAD register
201 + * into TIMER1_COUNT register when the timer is re-enabled.
202 + */
203 + writel(cycles, fttmr010->base + TIMER1_LOAD);
204 + } else {
205 + /* Setup the match register forward in time */
206 + cr = readl(fttmr010->base + TIMER1_COUNT);
207 + writel(cr + cycles, fttmr010->base + TIMER1_MATCH1);
208 + }
209
210 /* Start */
211 cr = readl(fttmr010->base + TIMER_CR);
212 diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
213 index 880a861ab3c8..713214d085e0 100644
214 --- a/drivers/clocksource/timer-ti-32k.c
215 +++ b/drivers/clocksource/timer-ti-32k.c
216 @@ -98,6 +98,9 @@ static int __init ti_32k_timer_init(struct device_node *np)
217 return -ENXIO;
218 }
219
220 + if (!of_machine_is_compatible("ti,am43"))
221 + ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
222 +
223 ti_32k_timer.counter = ti_32k_timer.base;
224
225 /*
226 diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
227 index 1a57cc28955e..ff3348ee9595 100644
228 --- a/drivers/gpu/drm/arm/malidp_drv.c
229 +++ b/drivers/gpu/drm/arm/malidp_drv.c
230 @@ -617,6 +617,7 @@ static int malidp_bind(struct device *dev)
231 drm->irq_enabled = true;
232
233 ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
234 + drm_crtc_vblank_reset(&malidp->crtc);
235 if (ret < 0) {
236 DRM_ERROR("failed to initialise vblank\n");
237 goto vblank_fail;
238 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
239 index 562220ec9d41..c75f4ccbcdef 100644
240 --- a/drivers/gpu/drm/i915/i915_drv.c
241 +++ b/drivers/gpu/drm/i915/i915_drv.c
242 @@ -878,7 +878,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
243
244 spin_lock_init(&dev_priv->mm.object_stat_lock);
245 mutex_init(&dev_priv->sb_lock);
246 - mutex_init(&dev_priv->modeset_restore_lock);
247 mutex_init(&dev_priv->av_mutex);
248 mutex_init(&dev_priv->wm.wm_mutex);
249 mutex_init(&dev_priv->pps_mutex);
250 @@ -1505,11 +1504,6 @@ static int i915_drm_suspend(struct drm_device *dev)
251 pci_power_t opregion_target_state;
252 int error;
253
254 - /* ignore lid events during suspend */
255 - mutex_lock(&dev_priv->modeset_restore_lock);
256 - dev_priv->modeset_restore = MODESET_SUSPENDED;
257 - mutex_unlock(&dev_priv->modeset_restore_lock);
258 -
259 disable_rpm_wakeref_asserts(dev_priv);
260
261 /* We do a lot of poking in a lot of registers, make sure they work
262 @@ -1718,10 +1712,6 @@ static int i915_drm_resume(struct drm_device *dev)
263
264 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
265
266 - mutex_lock(&dev_priv->modeset_restore_lock);
267 - dev_priv->modeset_restore = MODESET_DONE;
268 - mutex_unlock(&dev_priv->modeset_restore_lock);
269 -
270 intel_opregion_notify_adapter(dev_priv, PCI_D0);
271
272 intel_autoenable_gt_powersave(dev_priv);
273 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
274 index 51411894d2cd..41f51509c9e4 100644
275 --- a/drivers/gpu/drm/i915/i915_drv.h
276 +++ b/drivers/gpu/drm/i915/i915_drv.h
277 @@ -1183,6 +1183,7 @@ enum intel_sbi_destination {
278 #define QUIRK_BACKLIGHT_PRESENT (1<<3)
279 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
280 #define QUIRK_INCREASE_T12_DELAY (1<<6)
281 +#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
282
283 struct intel_fbdev;
284 struct intel_fbc_work;
285 @@ -1614,12 +1615,6 @@ struct i915_gpu_error {
286 unsigned long test_irq_rings;
287 };
288
289 -enum modeset_restore {
290 - MODESET_ON_LID_OPEN,
291 - MODESET_DONE,
292 - MODESET_SUSPENDED,
293 -};
294 -
295 #define DP_AUX_A 0x40
296 #define DP_AUX_B 0x10
297 #define DP_AUX_C 0x20
298 @@ -2296,8 +2291,6 @@ struct drm_i915_private {
299
300 unsigned long quirks;
301
302 - enum modeset_restore modeset_restore;
303 - struct mutex modeset_restore_lock;
304 struct drm_atomic_state *modeset_restore_state;
305 struct drm_modeset_acquire_ctx reset_ctx;
306
307 diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
308 index 3a4a581345c4..77085b9bcb30 100644
309 --- a/drivers/gpu/drm/i915/intel_ddi.c
310 +++ b/drivers/gpu/drm/i915/intel_ddi.c
311 @@ -1526,15 +1526,24 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
312 I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
313 }
314
315 -void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
316 - enum transcoder cpu_transcoder)
317 +void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
318 {
319 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
320 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
321 + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
322 i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
323 uint32_t val = I915_READ(reg);
324
325 val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
326 val |= TRANS_DDI_PORT_NONE;
327 I915_WRITE(reg, val);
328 +
329 + if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
330 + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
331 + DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
332 + /* Quirk time at 100ms for reliable operation */
333 + msleep(100);
334 + }
335 }
336
337 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
338 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
339 index cf648c526e12..2006ab44fbf9 100644
340 --- a/drivers/gpu/drm/i915/intel_display.c
341 +++ b/drivers/gpu/drm/i915/intel_display.c
342 @@ -5653,7 +5653,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
343 intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
344
345 if (!transcoder_is_dsi(cpu_transcoder))
346 - intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
347 + intel_ddi_disable_transcoder_func(old_crtc_state);
348
349 if (INTEL_GEN(dev_priv) >= 9)
350 skylake_scaler_disable(intel_crtc);
351 @@ -14286,6 +14286,18 @@ static void quirk_increase_t12_delay(struct drm_device *dev)
352 DRM_INFO("Applying T12 delay quirk\n");
353 }
354
355 +/*
356 + * GeminiLake NUC HDMI outputs require additional off time
357 + * this allows the onboard retimer to correctly sync to signal
358 + */
359 +static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
360 +{
361 + struct drm_i915_private *dev_priv = to_i915(dev);
362 +
363 + dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
364 + DRM_INFO("Applying Increase DDI Disabled quirk\n");
365 +}
366 +
367 struct intel_quirk {
368 int device;
369 int subsystem_vendor;
370 @@ -14372,6 +14384,13 @@ static struct intel_quirk intel_quirks[] = {
371
372 /* Toshiba Satellite P50-C-18C */
373 { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
374 +
375 + /* GeminiLake NUC */
376 + { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
377 + { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
378 + /* ASRock ITX*/
379 + { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
380 + { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
381 };
382
383 static void intel_init_quirks(struct drm_device *dev)
384 diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
385 index 589905aab185..3adb9c3b412e 100644
386 --- a/drivers/gpu/drm/i915/intel_drv.h
387 +++ b/drivers/gpu/drm/i915/intel_drv.h
388 @@ -1254,8 +1254,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
389 enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
390 bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
391 void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
392 -void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
393 - enum transcoder cpu_transcoder);
394 +void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
395 void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
396 void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
397 struct intel_encoder *
398 diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
399 index dae4e22a2c3f..fe67e458b003 100644
400 --- a/drivers/gpu/drm/i915/intel_lvds.c
401 +++ b/drivers/gpu/drm/i915/intel_lvds.c
402 @@ -44,8 +44,6 @@
403 /* Private structure for the integrated LVDS support */
404 struct intel_lvds_connector {
405 struct intel_connector base;
406 -
407 - struct notifier_block lid_notifier;
408 };
409
410 struct intel_lvds_pps {
411 @@ -440,26 +438,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
412 return true;
413 }
414
415 -/**
416 - * Detect the LVDS connection.
417 - *
418 - * Since LVDS doesn't have hotlug, we use the lid as a proxy. Open means
419 - * connected and closed means disconnected. We also send hotplug events as
420 - * needed, using lid status notification from the input layer.
421 - */
422 static enum drm_connector_status
423 intel_lvds_detect(struct drm_connector *connector, bool force)
424 {
425 - struct drm_i915_private *dev_priv = to_i915(connector->dev);
426 - enum drm_connector_status status;
427 -
428 - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
429 - connector->base.id, connector->name);
430 -
431 - status = intel_panel_detect(dev_priv);
432 - if (status != connector_status_unknown)
433 - return status;
434 -
435 return connector_status_connected;
436 }
437
438 @@ -484,117 +465,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
439 return 1;
440 }
441
442 -static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
443 -{
444 - DRM_INFO("Skipping forced modeset for %s\n", id->ident);
445 - return 1;
446 -}
447 -
448 -/* The GPU hangs up on these systems if modeset is performed on LID open */
449 -static const struct dmi_system_id intel_no_modeset_on_lid[] = {
450 - {
451 - .callback = intel_no_modeset_on_lid_dmi_callback,
452 - .ident = "Toshiba Tecra A11",
453 - .matches = {
454 - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
455 - DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
456 - },
457 - },
458 -
459 - { } /* terminating entry */
460 -};
461 -
462 -/*
463 - * Lid events. Note the use of 'modeset':
464 - * - we set it to MODESET_ON_LID_OPEN on lid close,
465 - * and set it to MODESET_DONE on open
466 - * - we use it as a "only once" bit (ie we ignore
467 - * duplicate events where it was already properly set)
468 - * - the suspend/resume paths will set it to
469 - * MODESET_SUSPENDED and ignore the lid open event,
470 - * because they restore the mode ("lid open").
471 - */
472 -static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
473 - void *unused)
474 -{
475 - struct intel_lvds_connector *lvds_connector =
476 - container_of(nb, struct intel_lvds_connector, lid_notifier);
477 - struct drm_connector *connector = &lvds_connector->base.base;
478 - struct drm_device *dev = connector->dev;
479 - struct drm_i915_private *dev_priv = to_i915(dev);
480 -
481 - if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
482 - return NOTIFY_OK;
483 -
484 - mutex_lock(&dev_priv->modeset_restore_lock);
485 - if (dev_priv->modeset_restore == MODESET_SUSPENDED)
486 - goto exit;
487 - /*
488 - * check and update the status of LVDS connector after receiving
489 - * the LID nofication event.
490 - */
491 - connector->status = connector->funcs->detect(connector, false);
492 -
493 - /* Don't force modeset on machines where it causes a GPU lockup */
494 - if (dmi_check_system(intel_no_modeset_on_lid))
495 - goto exit;
496 - if (!acpi_lid_open()) {
497 - /* do modeset on next lid open event */
498 - dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
499 - goto exit;
500 - }
501 -
502 - if (dev_priv->modeset_restore == MODESET_DONE)
503 - goto exit;
504 -
505 - /*
506 - * Some old platform's BIOS love to wreak havoc while the lid is closed.
507 - * We try to detect this here and undo any damage. The split for PCH
508 - * platforms is rather conservative and a bit arbitrary expect that on
509 - * those platforms VGA disabling requires actual legacy VGA I/O access,
510 - * and as part of the cleanup in the hw state restore we also redisable
511 - * the vga plane.
512 - */
513 - if (!HAS_PCH_SPLIT(dev_priv))
514 - intel_display_resume(dev);
515 -
516 - dev_priv->modeset_restore = MODESET_DONE;
517 -
518 -exit:
519 - mutex_unlock(&dev_priv->modeset_restore_lock);
520 - return NOTIFY_OK;
521 -}
522 -
523 -static int
524 -intel_lvds_connector_register(struct drm_connector *connector)
525 -{
526 - struct intel_lvds_connector *lvds = to_lvds_connector(connector);
527 - int ret;
528 -
529 - ret = intel_connector_register(connector);
530 - if (ret)
531 - return ret;
532 -
533 - lvds->lid_notifier.notifier_call = intel_lid_notify;
534 - if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
535 - DRM_DEBUG_KMS("lid notifier registration failed\n");
536 - lvds->lid_notifier.notifier_call = NULL;
537 - }
538 -
539 - return 0;
540 -}
541 -
542 -static void
543 -intel_lvds_connector_unregister(struct drm_connector *connector)
544 -{
545 - struct intel_lvds_connector *lvds = to_lvds_connector(connector);
546 -
547 - if (lvds->lid_notifier.notifier_call)
548 - acpi_lid_notifier_unregister(&lvds->lid_notifier);
549 -
550 - intel_connector_unregister(connector);
551 -}
552 -
553 /**
554 * intel_lvds_destroy - unregister and free LVDS structures
555 * @connector: connector to free
556 @@ -627,8 +497,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
557 .fill_modes = drm_helper_probe_single_connector_modes,
558 .atomic_get_property = intel_digital_connector_atomic_get_property,
559 .atomic_set_property = intel_digital_connector_atomic_set_property,
560 - .late_register = intel_lvds_connector_register,
561 - .early_unregister = intel_lvds_connector_unregister,
562 + .late_register = intel_connector_register,
563 + .early_unregister = intel_connector_unregister,
564 .destroy = intel_lvds_destroy,
565 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
566 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
567 @@ -1091,8 +961,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
568 * 2) check for VBT data
569 * 3) check to see if LVDS is already on
570 * if none of the above, no panel
571 - * 4) make sure lid is open
572 - * if closed, act like it's not there for now
573 */
574
575 /*
576 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
577 index 672b0be41d44..a306493e2e97 100644
578 --- a/drivers/hid/hid-core.c
579 +++ b/drivers/hid/hid-core.c
580 @@ -1964,6 +1964,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
581 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
582 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
583 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
584 + { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
585 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI) },
586 + { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI) },
587 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
588 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
589 #endif
590 diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
591 index c2a2ce8ee541..ef699477d94a 100644
592 --- a/drivers/hwtracing/intel_th/pci.c
593 +++ b/drivers/hwtracing/intel_th/pci.c
594 @@ -168,6 +168,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
595 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1),
596 .driver_data = (kernel_ulong_t)&intel_th_2x,
597 },
598 + {
599 + /* Ice Lake PCH */
600 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6),
601 + .driver_data = (kernel_ulong_t)&intel_th_2x,
602 + },
603 { 0 },
604 };
605
606 diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
607 index 15d764afec3b..7f044df1ea07 100644
608 --- a/drivers/i2c/busses/i2c-rcar.c
609 +++ b/drivers/i2c/busses/i2c-rcar.c
610 @@ -32,6 +32,7 @@
611 #include <linux/of_device.h>
612 #include <linux/platform_device.h>
613 #include <linux/pm_runtime.h>
614 +#include <linux/reset.h>
615 #include <linux/slab.h>
616
617 /* register offsets */
618 @@ -111,8 +112,9 @@
619 #define ID_ARBLOST (1 << 3)
620 #define ID_NACK (1 << 4)
621 /* persistent flags */
622 +#define ID_P_NO_RXDMA (1 << 30) /* HW forbids RXDMA sometimes */
623 #define ID_P_PM_BLOCKED (1 << 31)
624 -#define ID_P_MASK ID_P_PM_BLOCKED
625 +#define ID_P_MASK (ID_P_PM_BLOCKED | ID_P_NO_RXDMA)
626
627 enum rcar_i2c_type {
628 I2C_RCAR_GEN1,
629 @@ -140,6 +142,8 @@ struct rcar_i2c_priv {
630 struct dma_chan *dma_rx;
631 struct scatterlist sg;
632 enum dma_data_direction dma_direction;
633 +
634 + struct reset_control *rstc;
635 };
636
637 #define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
638 @@ -321,6 +325,11 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
639 dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
640 sg_dma_len(&priv->sg), priv->dma_direction);
641
642 + /* Gen3 can only do one RXDMA per transfer and we just completed it */
643 + if (priv->devtype == I2C_RCAR_GEN3 &&
644 + priv->dma_direction == DMA_FROM_DEVICE)
645 + priv->flags |= ID_P_NO_RXDMA;
646 +
647 priv->dma_direction = DMA_NONE;
648 }
649
650 @@ -358,8 +367,9 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
651 unsigned char *buf;
652 int len;
653
654 - /* Do not use DMA if it's not available or for messages < 8 bytes */
655 - if (IS_ERR(chan) || msg->len < 8)
656 + /* Do various checks to see if DMA is feasible at all */
657 + if (IS_ERR(chan) || msg->len < 8 ||
658 + (read && priv->flags & ID_P_NO_RXDMA))
659 return;
660
661 if (read) {
662 @@ -688,6 +698,25 @@ static void rcar_i2c_release_dma(struct rcar_i2c_priv *priv)
663 }
664 }
665
666 +/* I2C is a special case, we need to poll the status of a reset */
667 +static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
668 +{
669 + int i, ret;
670 +
671 + ret = reset_control_reset(priv->rstc);
672 + if (ret)
673 + return ret;
674 +
675 + for (i = 0; i < LOOP_TIMEOUT; i++) {
676 + ret = reset_control_status(priv->rstc);
677 + if (ret == 0)
678 + return 0;
679 + udelay(1);
680 + }
681 +
682 + return -ETIMEDOUT;
683 +}
684 +
685 static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
686 struct i2c_msg *msgs,
687 int num)
688 @@ -699,6 +728,16 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
689
690 pm_runtime_get_sync(dev);
691
692 + /* Gen3 needs a reset before allowing RXDMA once */
693 + if (priv->devtype == I2C_RCAR_GEN3) {
694 + priv->flags |= ID_P_NO_RXDMA;
695 + if (!IS_ERR(priv->rstc)) {
696 + ret = rcar_i2c_do_reset(priv);
697 + if (ret == 0)
698 + priv->flags &= ~ID_P_NO_RXDMA;
699 + }
700 + }
701 +
702 rcar_i2c_init(priv);
703
704 ret = rcar_i2c_bus_barrier(priv);
705 @@ -868,6 +907,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
706 if (ret < 0)
707 goto out_pm_put;
708
709 + if (priv->devtype == I2C_RCAR_GEN3) {
710 + priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
711 + if (!IS_ERR(priv->rstc)) {
712 + ret = reset_control_status(priv->rstc);
713 + if (ret < 0)
714 + priv->rstc = ERR_PTR(-ENOTSUPP);
715 + }
716 + }
717 +
718 /* Stay always active when multi-master to keep arbitration working */
719 if (of_property_read_bool(dev->of_node, "multi-master"))
720 priv->flags |= ID_P_PM_BLOCKED;
721 diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
722 index 33cf1734c4e5..f9faacce9250 100644
723 --- a/drivers/infiniband/hw/hfi1/chip.c
724 +++ b/drivers/infiniband/hw/hfi1/chip.c
725 @@ -6722,6 +6722,7 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
726 struct hfi1_devdata *dd = ppd->dd;
727 struct send_context *sc;
728 int i;
729 + int sc_flags;
730
731 if (flags & FREEZE_SELF)
732 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
733 @@ -6732,11 +6733,13 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
734 /* notify all SDMA engines that they are going into a freeze */
735 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
736
737 + sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
738 + SCF_LINK_DOWN : 0);
739 /* do halt pre-handling on all enabled send contexts */
740 for (i = 0; i < dd->num_send_contexts; i++) {
741 sc = dd->send_contexts[i].sc;
742 if (sc && (sc->flags & SCF_ENABLED))
743 - sc_stop(sc, SCF_FROZEN | SCF_HALTED);
744 + sc_stop(sc, sc_flags);
745 }
746
747 /* Send context are frozen. Notify user space */
748 @@ -10646,6 +10649,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
749 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
750
751 handle_linkup_change(dd, 1);
752 + pio_kernel_linkup(dd);
753 +
754 ppd->host_link_state = HLS_UP_INIT;
755 break;
756 case HLS_UP_ARMED:
757 diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
758 index 19a8e6052820..07bf282fd8aa 100644
759 --- a/drivers/infiniband/hw/hfi1/pio.c
760 +++ b/drivers/infiniband/hw/hfi1/pio.c
761 @@ -942,20 +942,18 @@ void sc_free(struct send_context *sc)
762 void sc_disable(struct send_context *sc)
763 {
764 u64 reg;
765 - unsigned long flags;
766 struct pio_buf *pbuf;
767
768 if (!sc)
769 return;
770
771 /* do all steps, even if already disabled */
772 - spin_lock_irqsave(&sc->alloc_lock, flags);
773 + spin_lock_irq(&sc->alloc_lock);
774 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
775 reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
776 sc->flags &= ~SCF_ENABLED;
777 sc_wait_for_packet_egress(sc, 1);
778 write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
779 - spin_unlock_irqrestore(&sc->alloc_lock, flags);
780
781 /*
782 * Flush any waiters. Once the context is disabled,
783 @@ -965,7 +963,7 @@ void sc_disable(struct send_context *sc)
784 * proceed with the flush.
785 */
786 udelay(1);
787 - spin_lock_irqsave(&sc->release_lock, flags);
788 + spin_lock(&sc->release_lock);
789 if (sc->sr) { /* this context has a shadow ring */
790 while (sc->sr_tail != sc->sr_head) {
791 pbuf = &sc->sr[sc->sr_tail].pbuf;
792 @@ -976,7 +974,8 @@ void sc_disable(struct send_context *sc)
793 sc->sr_tail = 0;
794 }
795 }
796 - spin_unlock_irqrestore(&sc->release_lock, flags);
797 + spin_unlock(&sc->release_lock);
798 + spin_unlock_irq(&sc->alloc_lock);
799 }
800
801 /* return SendEgressCtxtStatus.PacketOccupancy */
802 @@ -1199,11 +1198,39 @@ void pio_kernel_unfreeze(struct hfi1_devdata *dd)
803 sc = dd->send_contexts[i].sc;
804 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
805 continue;
806 + if (sc->flags & SCF_LINK_DOWN)
807 + continue;
808
809 sc_enable(sc); /* will clear the sc frozen flag */
810 }
811 }
812
813 +/**
814 + * pio_kernel_linkup() - Re-enable send contexts after linkup event
815 + * @dd: valid devive data
816 + *
817 + * When the link goes down, the freeze path is taken. However, a link down
818 + * event is different from a freeze because if the send context is re-enabled
819 + * whowever is sending data will start sending data again, which will hang
820 + * any QP that is sending data.
821 + *
822 + * The freeze path now looks at the type of event that occurs and takes this
823 + * path for link down event.
824 + */
825 +void pio_kernel_linkup(struct hfi1_devdata *dd)
826 +{
827 + struct send_context *sc;
828 + int i;
829 +
830 + for (i = 0; i < dd->num_send_contexts; i++) {
831 + sc = dd->send_contexts[i].sc;
832 + if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
833 + continue;
834 +
835 + sc_enable(sc); /* will clear the sc link down flag */
836 + }
837 +}
838 +
839 /*
840 * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
841 * Returns:
842 @@ -1403,11 +1430,10 @@ void sc_stop(struct send_context *sc, int flag)
843 {
844 unsigned long flags;
845
846 - /* mark the context */
847 - sc->flags |= flag;
848 -
849 /* stop buffer allocations */
850 spin_lock_irqsave(&sc->alloc_lock, flags);
851 + /* mark the context */
852 + sc->flags |= flag;
853 sc->flags &= ~SCF_ENABLED;
854 spin_unlock_irqrestore(&sc->alloc_lock, flags);
855 wake_up(&sc->halt_wait);
856 diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
857 index 99ca5edb0b43..c7c4e6e5d317 100644
858 --- a/drivers/infiniband/hw/hfi1/pio.h
859 +++ b/drivers/infiniband/hw/hfi1/pio.h
860 @@ -145,6 +145,7 @@ struct send_context {
861 #define SCF_IN_FREE 0x02
862 #define SCF_HALTED 0x04
863 #define SCF_FROZEN 0x08
864 +#define SCF_LINK_DOWN 0x10
865
866 struct send_context_info {
867 struct send_context *sc; /* allocated working context */
868 @@ -312,6 +313,7 @@ void set_pio_integrity(struct send_context *sc);
869 void pio_reset_all(struct hfi1_devdata *dd);
870 void pio_freeze(struct hfi1_devdata *dd);
871 void pio_kernel_unfreeze(struct hfi1_devdata *dd);
872 +void pio_kernel_linkup(struct hfi1_devdata *dd);
873
874 /* global PIO send control operations */
875 #define PSC_GLOBAL_ENABLE 0
876 diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c
877 index f1235831283d..fdeda0b0fbd6 100644
878 --- a/drivers/input/keyboard/atakbd.c
879 +++ b/drivers/input/keyboard/atakbd.c
880 @@ -79,8 +79,7 @@ MODULE_LICENSE("GPL");
881 */
882
883
884 -static unsigned char atakbd_keycode[0x72] = { /* American layout */
885 - [0] = KEY_GRAVE,
886 +static unsigned char atakbd_keycode[0x73] = { /* American layout */
887 [1] = KEY_ESC,
888 [2] = KEY_1,
889 [3] = KEY_2,
890 @@ -121,9 +120,9 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
891 [38] = KEY_L,
892 [39] = KEY_SEMICOLON,
893 [40] = KEY_APOSTROPHE,
894 - [41] = KEY_BACKSLASH, /* FIXME, '#' */
895 + [41] = KEY_GRAVE,
896 [42] = KEY_LEFTSHIFT,
897 - [43] = KEY_GRAVE, /* FIXME: '~' */
898 + [43] = KEY_BACKSLASH,
899 [44] = KEY_Z,
900 [45] = KEY_X,
901 [46] = KEY_C,
902 @@ -149,45 +148,34 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
903 [66] = KEY_F8,
904 [67] = KEY_F9,
905 [68] = KEY_F10,
906 - [69] = KEY_ESC,
907 - [70] = KEY_DELETE,
908 - [71] = KEY_KP7,
909 - [72] = KEY_KP8,
910 - [73] = KEY_KP9,
911 + [71] = KEY_HOME,
912 + [72] = KEY_UP,
913 [74] = KEY_KPMINUS,
914 - [75] = KEY_KP4,
915 - [76] = KEY_KP5,
916 - [77] = KEY_KP6,
917 + [75] = KEY_LEFT,
918 + [77] = KEY_RIGHT,
919 [78] = KEY_KPPLUS,
920 - [79] = KEY_KP1,
921 - [80] = KEY_KP2,
922 - [81] = KEY_KP3,
923 - [82] = KEY_KP0,
924 - [83] = KEY_KPDOT,
925 - [90] = KEY_KPLEFTPAREN,
926 - [91] = KEY_KPRIGHTPAREN,
927 - [92] = KEY_KPASTERISK, /* FIXME */
928 - [93] = KEY_KPASTERISK,
929 - [94] = KEY_KPPLUS,
930 - [95] = KEY_HELP,
931 + [80] = KEY_DOWN,
932 + [82] = KEY_INSERT,
933 + [83] = KEY_DELETE,
934 [96] = KEY_102ND,
935 - [97] = KEY_KPASTERISK, /* FIXME */
936 - [98] = KEY_KPSLASH,
937 + [97] = KEY_UNDO,
938 + [98] = KEY_HELP,
939 [99] = KEY_KPLEFTPAREN,
940 [100] = KEY_KPRIGHTPAREN,
941 [101] = KEY_KPSLASH,
942 [102] = KEY_KPASTERISK,
943 - [103] = KEY_UP,
944 - [104] = KEY_KPASTERISK, /* FIXME */
945 - [105] = KEY_LEFT,
946 - [106] = KEY_RIGHT,
947 - [107] = KEY_KPASTERISK, /* FIXME */
948 - [108] = KEY_DOWN,
949 - [109] = KEY_KPASTERISK, /* FIXME */
950 - [110] = KEY_KPASTERISK, /* FIXME */
951 - [111] = KEY_KPASTERISK, /* FIXME */
952 - [112] = KEY_KPASTERISK, /* FIXME */
953 - [113] = KEY_KPASTERISK /* FIXME */
954 + [103] = KEY_KP7,
955 + [104] = KEY_KP8,
956 + [105] = KEY_KP9,
957 + [106] = KEY_KP4,
958 + [107] = KEY_KP5,
959 + [108] = KEY_KP6,
960 + [109] = KEY_KP1,
961 + [110] = KEY_KP2,
962 + [111] = KEY_KP3,
963 + [112] = KEY_KP0,
964 + [113] = KEY_KPDOT,
965 + [114] = KEY_KPENTER,
966 };
967
968 static struct input_dev *atakbd_dev;
969 @@ -195,21 +183,15 @@ static struct input_dev *atakbd_dev;
970 static void atakbd_interrupt(unsigned char scancode, char down)
971 {
972
973 - if (scancode < 0x72) { /* scancodes < 0xf2 are keys */
974 + if (scancode < 0x73) { /* scancodes < 0xf3 are keys */
975
976 // report raw events here?
977
978 scancode = atakbd_keycode[scancode];
979
980 - if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */
981 - input_report_key(atakbd_dev, scancode, 1);
982 - input_report_key(atakbd_dev, scancode, 0);
983 - input_sync(atakbd_dev);
984 - } else {
985 - input_report_key(atakbd_dev, scancode, down);
986 - input_sync(atakbd_dev);
987 - }
988 - } else /* scancodes >= 0xf2 are mouse data, most likely */
989 + input_report_key(atakbd_dev, scancode, down);
990 + input_sync(atakbd_dev);
991 + } else /* scancodes >= 0xf3 are mouse data, most likely */
992 printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
993
994 return;
995 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
996 index 9137030423cd..efa6cd2500b9 100644
997 --- a/drivers/iommu/amd_iommu.c
998 +++ b/drivers/iommu/amd_iommu.c
999 @@ -253,7 +253,13 @@ static u16 get_alias(struct device *dev)
1000
1001 /* The callers make sure that get_device_id() does not fail here */
1002 devid = get_device_id(dev);
1003 +
1004 + /* For ACPI HID devices, we simply return the devid as such */
1005 + if (!dev_is_pci(dev))
1006 + return devid;
1007 +
1008 ivrs_alias = amd_iommu_alias_table[devid];
1009 +
1010 pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
1011
1012 if (ivrs_alias == pci_alias)
1013 diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
1014 index 666d319d3d1a..1f6c1eefe389 100644
1015 --- a/drivers/media/usb/dvb-usb-v2/af9035.c
1016 +++ b/drivers/media/usb/dvb-usb-v2/af9035.c
1017 @@ -402,8 +402,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
1018 if (msg[0].addr == state->af9033_i2c_addr[1])
1019 reg |= 0x100000;
1020
1021 - ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
1022 - msg[0].len - 3);
1023 + ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
1024 + &msg[0].buf[3],
1025 + msg[0].len - 3)
1026 + : -EOPNOTSUPP;
1027 } else {
1028 /* I2C write */
1029 u8 buf[MAX_XFER_SIZE];
1030 diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
1031 index 7feff2450ed6..d1da8f05ef85 100644
1032 --- a/drivers/net/ethernet/ibm/emac/core.c
1033 +++ b/drivers/net/ethernet/ibm/emac/core.c
1034 @@ -2671,12 +2671,17 @@ static int emac_init_phy(struct emac_instance *dev)
1035 if (of_phy_is_fixed_link(np)) {
1036 int res = emac_dt_mdio_probe(dev);
1037
1038 - if (!res) {
1039 - res = of_phy_register_fixed_link(np);
1040 - if (res)
1041 - mdiobus_unregister(dev->mii_bus);
1042 + if (res)
1043 + return res;
1044 +
1045 + res = of_phy_register_fixed_link(np);
1046 + dev->phy_dev = of_phy_find_device(np);
1047 + if (res || !dev->phy_dev) {
1048 + mdiobus_unregister(dev->mii_bus);
1049 + return res ? res : -EINVAL;
1050 }
1051 - return res;
1052 + emac_adjust_link(dev->ndev);
1053 + put_device(&dev->phy_dev->mdio.dev);
1054 }
1055 return 0;
1056 }
1057 diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
1058 index 6f57c052053e..050dc213e8db 100644
1059 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c
1060 +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
1061 @@ -240,7 +240,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
1062 struct mlx4_dev *dev = &priv->dev;
1063 struct mlx4_eq *eq = &priv->eq_table.eq[vec];
1064
1065 - if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
1066 + if (!cpumask_available(eq->affinity_mask) ||
1067 + cpumask_empty(eq->affinity_mask))
1068 return;
1069
1070 hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
1071 diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
1072 index 96a27b00c90e..897bd33c2c50 100644
1073 --- a/drivers/net/ethernet/renesas/ravb.h
1074 +++ b/drivers/net/ethernet/renesas/ravb.h
1075 @@ -431,6 +431,7 @@ enum EIS_BIT {
1076 EIS_CULF1 = 0x00000080,
1077 EIS_TFFF = 0x00000100,
1078 EIS_QFS = 0x00010000,
1079 + EIS_RESERVED = (GENMASK(31, 17) | GENMASK(15, 11)),
1080 };
1081
1082 /* RIC0 */
1083 @@ -475,6 +476,7 @@ enum RIS0_BIT {
1084 RIS0_FRF15 = 0x00008000,
1085 RIS0_FRF16 = 0x00010000,
1086 RIS0_FRF17 = 0x00020000,
1087 + RIS0_RESERVED = GENMASK(31, 18),
1088 };
1089
1090 /* RIC1 */
1091 @@ -531,6 +533,7 @@ enum RIS2_BIT {
1092 RIS2_QFF16 = 0x00010000,
1093 RIS2_QFF17 = 0x00020000,
1094 RIS2_RFFF = 0x80000000,
1095 + RIS2_RESERVED = GENMASK(30, 18),
1096 };
1097
1098 /* TIC */
1099 @@ -547,6 +550,7 @@ enum TIS_BIT {
1100 TIS_FTF1 = 0x00000002, /* Undocumented? */
1101 TIS_TFUF = 0x00000100,
1102 TIS_TFWF = 0x00000200,
1103 + TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4))
1104 };
1105
1106 /* ISS */
1107 @@ -620,6 +624,7 @@ enum GIC_BIT {
1108 enum GIS_BIT {
1109 GIS_PTCF = 0x00000001, /* Undocumented? */
1110 GIS_PTMF = 0x00000004,
1111 + GIS_RESERVED = GENMASK(15, 10),
1112 };
1113
1114 /* GIE (R-Car Gen3 only) */
1115 diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
1116 index e87a779bfcfe..ff3a293ffe36 100644
1117 --- a/drivers/net/ethernet/renesas/ravb_main.c
1118 +++ b/drivers/net/ethernet/renesas/ravb_main.c
1119 @@ -721,10 +721,11 @@ static void ravb_error_interrupt(struct net_device *ndev)
1120 u32 eis, ris2;
1121
1122 eis = ravb_read(ndev, EIS);
1123 - ravb_write(ndev, ~EIS_QFS, EIS);
1124 + ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
1125 if (eis & EIS_QFS) {
1126 ris2 = ravb_read(ndev, RIS2);
1127 - ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
1128 + ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
1129 + RIS2);
1130
1131 /* Receive Descriptor Empty int */
1132 if (ris2 & RIS2_QFF0)
1133 @@ -777,7 +778,7 @@ static bool ravb_timestamp_interrupt(struct net_device *ndev)
1134 u32 tis = ravb_read(ndev, TIS);
1135
1136 if (tis & TIS_TFUF) {
1137 - ravb_write(ndev, ~TIS_TFUF, TIS);
1138 + ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
1139 ravb_get_tx_tstamp(ndev);
1140 return true;
1141 }
1142 @@ -912,7 +913,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
1143 /* Processing RX Descriptor Ring */
1144 if (ris0 & mask) {
1145 /* Clear RX interrupt */
1146 - ravb_write(ndev, ~mask, RIS0);
1147 + ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
1148 if (ravb_rx(ndev, &quota, q))
1149 goto out;
1150 }
1151 @@ -920,7 +921,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
1152 if (tis & mask) {
1153 spin_lock_irqsave(&priv->lock, flags);
1154 /* Clear TX interrupt */
1155 - ravb_write(ndev, ~mask, TIS);
1156 + ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
1157 ravb_tx_free(ndev, q, true);
1158 netif_wake_subqueue(ndev, q);
1159 mmiowb();
1160 diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
1161 index eede70ec37f8..9e3222fd69f9 100644
1162 --- a/drivers/net/ethernet/renesas/ravb_ptp.c
1163 +++ b/drivers/net/ethernet/renesas/ravb_ptp.c
1164 @@ -319,7 +319,7 @@ void ravb_ptp_interrupt(struct net_device *ndev)
1165 }
1166 }
1167
1168 - ravb_write(ndev, ~gis, GIS);
1169 + ravb_write(ndev, ~(gis | GIS_RESERVED), GIS);
1170 }
1171
1172 void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
1173 diff --git a/drivers/pci/dwc/pcie-designware.c b/drivers/pci/dwc/pcie-designware.c
1174 index 88abdddee2ad..a06ad2c65174 100644
1175 --- a/drivers/pci/dwc/pcie-designware.c
1176 +++ b/drivers/pci/dwc/pcie-designware.c
1177 @@ -138,7 +138,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
1178 if (val & PCIE_ATU_ENABLE)
1179 return;
1180
1181 - usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
1182 + mdelay(LINK_WAIT_IATU);
1183 }
1184 dev_err(pci->dev, "outbound iATU is not being enabled\n");
1185 }
1186 @@ -181,7 +181,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
1187 if (val & PCIE_ATU_ENABLE)
1188 return;
1189
1190 - usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
1191 + mdelay(LINK_WAIT_IATU);
1192 }
1193 dev_err(pci->dev, "outbound iATU is not being enabled\n");
1194 }
1195 @@ -239,7 +239,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
1196 if (val & PCIE_ATU_ENABLE)
1197 return 0;
1198
1199 - usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
1200 + mdelay(LINK_WAIT_IATU);
1201 }
1202 dev_err(pci->dev, "inbound iATU is not being enabled\n");
1203
1204 @@ -285,7 +285,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
1205 if (val & PCIE_ATU_ENABLE)
1206 return 0;
1207
1208 - usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
1209 + mdelay(LINK_WAIT_IATU);
1210 }
1211 dev_err(pci->dev, "inbound iATU is not being enabled\n");
1212
1213 diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h
1214 index cb493bcae8b4..3551dd607b90 100644
1215 --- a/drivers/pci/dwc/pcie-designware.h
1216 +++ b/drivers/pci/dwc/pcie-designware.h
1217 @@ -28,8 +28,7 @@
1218
1219 /* Parameters for the waiting for iATU enabled routine */
1220 #define LINK_WAIT_MAX_IATU_RETRIES 5
1221 -#define LINK_WAIT_IATU_MIN 9000
1222 -#define LINK_WAIT_IATU_MAX 10000
1223 +#define LINK_WAIT_IATU 9
1224
1225 /* Synopsys-specific PCIe configuration registers */
1226 #define PCIE_PORT_LINK_CONTROL 0x710
1227 diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
1228 index 2799a6b08f73..25d2741cdf96 100644
1229 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
1230 +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
1231 @@ -3465,11 +3465,10 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
1232 vscsi->dds.window[LOCAL].liobn,
1233 vscsi->dds.window[REMOTE].liobn);
1234
1235 - strcpy(vscsi->eye, "VSCSI ");
1236 - strncat(vscsi->eye, vdev->name, MAX_EYE);
1237 + snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name);
1238
1239 vscsi->dds.unit_id = vdev->unit_address;
1240 - strncpy(vscsi->dds.partition_name, partition_name,
1241 + strscpy(vscsi->dds.partition_name, partition_name,
1242 sizeof(vscsi->dds.partition_name));
1243 vscsi->dds.partition_num = partition_number;
1244
1245 diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
1246 index f838bd73befa..35d54ee1c5c7 100644
1247 --- a/drivers/scsi/ipr.c
1248 +++ b/drivers/scsi/ipr.c
1249 @@ -3308,6 +3308,65 @@ static void ipr_release_dump(struct kref *kref)
1250 LEAVE;
1251 }
1252
1253 +static void ipr_add_remove_thread(struct work_struct *work)
1254 +{
1255 + unsigned long lock_flags;
1256 + struct ipr_resource_entry *res;
1257 + struct scsi_device *sdev;
1258 + struct ipr_ioa_cfg *ioa_cfg =
1259 + container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
1260 + u8 bus, target, lun;
1261 + int did_work;
1262 +
1263 + ENTER;
1264 + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1265 +
1266 +restart:
1267 + do {
1268 + did_work = 0;
1269 + if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1270 + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1271 + return;
1272 + }
1273 +
1274 + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1275 + if (res->del_from_ml && res->sdev) {
1276 + did_work = 1;
1277 + sdev = res->sdev;
1278 + if (!scsi_device_get(sdev)) {
1279 + if (!res->add_to_ml)
1280 + list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1281 + else
1282 + res->del_from_ml = 0;
1283 + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1284 + scsi_remove_device(sdev);
1285 + scsi_device_put(sdev);
1286 + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1287 + }
1288 + break;
1289 + }
1290 + }
1291 + } while (did_work);
1292 +
1293 + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1294 + if (res->add_to_ml) {
1295 + bus = res->bus;
1296 + target = res->target;
1297 + lun = res->lun;
1298 + res->add_to_ml = 0;
1299 + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1300 + scsi_add_device(ioa_cfg->host, bus, target, lun);
1301 + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1302 + goto restart;
1303 + }
1304 + }
1305 +
1306 + ioa_cfg->scan_done = 1;
1307 + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1308 + kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
1309 + LEAVE;
1310 +}
1311 +
1312 /**
1313 * ipr_worker_thread - Worker thread
1314 * @work: ioa config struct
1315 @@ -3322,13 +3381,9 @@ static void ipr_release_dump(struct kref *kref)
1316 static void ipr_worker_thread(struct work_struct *work)
1317 {
1318 unsigned long lock_flags;
1319 - struct ipr_resource_entry *res;
1320 - struct scsi_device *sdev;
1321 struct ipr_dump *dump;
1322 struct ipr_ioa_cfg *ioa_cfg =
1323 container_of(work, struct ipr_ioa_cfg, work_q);
1324 - u8 bus, target, lun;
1325 - int did_work;
1326
1327 ENTER;
1328 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1329 @@ -3366,49 +3421,9 @@ static void ipr_worker_thread(struct work_struct *work)
1330 return;
1331 }
1332
1333 -restart:
1334 - do {
1335 - did_work = 0;
1336 - if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1337 - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1338 - return;
1339 - }
1340 + schedule_work(&ioa_cfg->scsi_add_work_q);
1341
1342 - list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1343 - if (res->del_from_ml && res->sdev) {
1344 - did_work = 1;
1345 - sdev = res->sdev;
1346 - if (!scsi_device_get(sdev)) {
1347 - if (!res->add_to_ml)
1348 - list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1349 - else
1350 - res->del_from_ml = 0;
1351 - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1352 - scsi_remove_device(sdev);
1353 - scsi_device_put(sdev);
1354 - spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1355 - }
1356 - break;
1357 - }
1358 - }
1359 - } while (did_work);
1360 -
1361 - list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1362 - if (res->add_to_ml) {
1363 - bus = res->bus;
1364 - target = res->target;
1365 - lun = res->lun;
1366 - res->add_to_ml = 0;
1367 - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1368 - scsi_add_device(ioa_cfg->host, bus, target, lun);
1369 - spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1370 - goto restart;
1371 - }
1372 - }
1373 -
1374 - ioa_cfg->scan_done = 1;
1375 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1376 - kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
1377 LEAVE;
1378 }
1379
1380 @@ -9937,6 +9952,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
1381 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
1382 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
1383 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
1384 + INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
1385 init_waitqueue_head(&ioa_cfg->reset_wait_q);
1386 init_waitqueue_head(&ioa_cfg->msi_wait_q);
1387 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
1388 diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
1389 index c7f0e9e3cd7d..085e6c90f9e6 100644
1390 --- a/drivers/scsi/ipr.h
1391 +++ b/drivers/scsi/ipr.h
1392 @@ -1568,6 +1568,7 @@ struct ipr_ioa_cfg {
1393 u8 saved_mode_page_len;
1394
1395 struct work_struct work_q;
1396 + struct work_struct scsi_add_work_q;
1397 struct workqueue_struct *reset_work_q;
1398
1399 wait_queue_head_t reset_wait_q;
1400 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
1401 index 4a532318b211..6d3091ff9b92 100644
1402 --- a/drivers/scsi/sd.c
1403 +++ b/drivers/scsi/sd.c
1404 @@ -1285,7 +1285,8 @@ static int sd_init_command(struct scsi_cmnd *cmd)
1405 case REQ_OP_ZONE_RESET:
1406 return sd_zbc_setup_reset_cmnd(cmd);
1407 default:
1408 - BUG();
1409 + WARN_ON_ONCE(1);
1410 + return BLKPREP_KILL;
1411 }
1412 }
1413
1414 diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c
1415 index 63936091d524..4ba6e9c422c4 100644
1416 --- a/drivers/staging/ccree/ssi_buffer_mgr.c
1417 +++ b/drivers/staging/ccree/ssi_buffer_mgr.c
1418 @@ -492,7 +492,8 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
1419 DMA_TO_DEVICE);
1420 }
1421 /* Release pool */
1422 - if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
1423 + if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI &&
1424 + req_ctx->mlli_params.mlli_virt_addr) {
1425 dma_pool_free(req_ctx->mlli_params.curr_pool,
1426 req_ctx->mlli_params.mlli_virt_addr,
1427 req_ctx->mlli_params.mlli_dma_addr);
1428 diff --git a/fs/namespace.c b/fs/namespace.c
1429 index 3ee3ee5819bc..9dc146e7b5e0 100644
1430 --- a/fs/namespace.c
1431 +++ b/fs/namespace.c
1432 @@ -446,10 +446,10 @@ int mnt_want_write_file_path(struct file *file)
1433 {
1434 int ret;
1435
1436 - sb_start_write(file_inode(file)->i_sb);
1437 + sb_start_write(file->f_path.mnt->mnt_sb);
1438 ret = __mnt_want_write_file(file);
1439 if (ret)
1440 - sb_end_write(file_inode(file)->i_sb);
1441 + sb_end_write(file->f_path.mnt->mnt_sb);
1442 return ret;
1443 }
1444
1445 @@ -540,8 +540,7 @@ void __mnt_drop_write_file(struct file *file)
1446
1447 void mnt_drop_write_file_path(struct file *file)
1448 {
1449 - __mnt_drop_write_file(file);
1450 - sb_end_write(file_inode(file)->i_sb);
1451 + mnt_drop_write(file->f_path.mnt);
1452 }
1453
1454 void mnt_drop_write_file(struct file *file)
1455 diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
1456 index 87067d23a48b..bfa38da4c261 100644
1457 --- a/include/linux/huge_mm.h
1458 +++ b/include/linux/huge_mm.h
1459 @@ -42,7 +42,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1460 unsigned char *vec);
1461 extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1462 unsigned long new_addr, unsigned long old_end,
1463 - pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
1464 + pmd_t *old_pmd, pmd_t *new_pmd);
1465 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1466 unsigned long addr, pgprot_t newprot,
1467 int prot_numa);
1468 diff --git a/kernel/memremap.c b/kernel/memremap.c
1469 index 2b136d4988f7..790ddf3bce19 100644
1470 --- a/kernel/memremap.c
1471 +++ b/kernel/memremap.c
1472 @@ -355,10 +355,27 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
1473 struct dev_pagemap *pgmap;
1474 struct page_map *page_map;
1475 int error, nid, is_ram, i = 0;
1476 + struct dev_pagemap *conflict_pgmap;
1477
1478 align_start = res->start & ~(SECTION_SIZE - 1);
1479 align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
1480 - align_start;
1481 + align_end = align_start + align_size - 1;
1482 +
1483 + conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
1484 + if (conflict_pgmap) {
1485 + dev_WARN(dev, "Conflicting mapping in same section\n");
1486 + put_dev_pagemap(conflict_pgmap);
1487 + return ERR_PTR(-ENOMEM);
1488 + }
1489 +
1490 + conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
1491 + if (conflict_pgmap) {
1492 + dev_WARN(dev, "Conflicting mapping in same section\n");
1493 + put_dev_pagemap(conflict_pgmap);
1494 + return ERR_PTR(-ENOMEM);
1495 + }
1496 +
1497 is_ram = region_intersects(align_start, align_size,
1498 IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
1499
1500 @@ -396,7 +413,6 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
1501
1502 mutex_lock(&pgmap_lock);
1503 error = 0;
1504 - align_end = align_start + align_size - 1;
1505
1506 foreach_order_pgoff(res, order, pgoff) {
1507 struct dev_pagemap *dup;
1508 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
1509 index 39c1fedcfdb4..adacfe66cf3d 100644
1510 --- a/mm/huge_memory.c
1511 +++ b/mm/huge_memory.c
1512 @@ -1765,7 +1765,7 @@ static pmd_t move_soft_dirty_pmd(pmd_t pmd)
1513
1514 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1515 unsigned long new_addr, unsigned long old_end,
1516 - pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
1517 + pmd_t *old_pmd, pmd_t *new_pmd)
1518 {
1519 spinlock_t *old_ptl, *new_ptl;
1520 pmd_t pmd;
1521 @@ -1796,7 +1796,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1522 if (new_ptl != old_ptl)
1523 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
1524 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1525 - if (pmd_present(pmd) && pmd_dirty(pmd))
1526 + if (pmd_present(pmd))
1527 force_flush = true;
1528 VM_BUG_ON(!pmd_none(*new_pmd));
1529
1530 @@ -1807,12 +1807,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1531 }
1532 pmd = move_soft_dirty_pmd(pmd);
1533 set_pmd_at(mm, new_addr, new_pmd, pmd);
1534 - if (new_ptl != old_ptl)
1535 - spin_unlock(new_ptl);
1536 if (force_flush)
1537 flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
1538 - else
1539 - *need_flush = true;
1540 + if (new_ptl != old_ptl)
1541 + spin_unlock(new_ptl);
1542 spin_unlock(old_ptl);
1543 return true;
1544 }
1545 diff --git a/mm/mremap.c b/mm/mremap.c
1546 index 049470aa1e3e..88ceeb4ef817 100644
1547 --- a/mm/mremap.c
1548 +++ b/mm/mremap.c
1549 @@ -115,7 +115,7 @@ static pte_t move_soft_dirty_pte(pte_t pte)
1550 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
1551 unsigned long old_addr, unsigned long old_end,
1552 struct vm_area_struct *new_vma, pmd_t *new_pmd,
1553 - unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
1554 + unsigned long new_addr, bool need_rmap_locks)
1555 {
1556 struct mm_struct *mm = vma->vm_mm;
1557 pte_t *old_pte, *new_pte, pte;
1558 @@ -163,15 +163,17 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
1559
1560 pte = ptep_get_and_clear(mm, old_addr, old_pte);
1561 /*
1562 - * If we are remapping a dirty PTE, make sure
1563 + * If we are remapping a valid PTE, make sure
1564 * to flush TLB before we drop the PTL for the
1565 - * old PTE or we may race with page_mkclean().
1566 + * PTE.
1567 *
1568 - * This check has to be done after we removed the
1569 - * old PTE from page tables or another thread may
1570 - * dirty it after the check and before the removal.
1571 + * NOTE! Both old and new PTL matter: the old one
1572 + * for racing with page_mkclean(), the new one to
1573 + * make sure the physical page stays valid until
1574 + * the TLB entry for the old mapping has been
1575 + * flushed.
1576 */
1577 - if (pte_present(pte) && pte_dirty(pte))
1578 + if (pte_present(pte))
1579 force_flush = true;
1580 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
1581 pte = move_soft_dirty_pte(pte);
1582 @@ -179,13 +181,11 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
1583 }
1584
1585 arch_leave_lazy_mmu_mode();
1586 + if (force_flush)
1587 + flush_tlb_range(vma, old_end - len, old_end);
1588 if (new_ptl != old_ptl)
1589 spin_unlock(new_ptl);
1590 pte_unmap(new_pte - 1);
1591 - if (force_flush)
1592 - flush_tlb_range(vma, old_end - len, old_end);
1593 - else
1594 - *need_flush = true;
1595 pte_unmap_unlock(old_pte - 1, old_ptl);
1596 if (need_rmap_locks)
1597 drop_rmap_locks(vma);
1598 @@ -200,7 +200,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
1599 {
1600 unsigned long extent, next, old_end;
1601 pmd_t *old_pmd, *new_pmd;
1602 - bool need_flush = false;
1603 unsigned long mmun_start; /* For mmu_notifiers */
1604 unsigned long mmun_end; /* For mmu_notifiers */
1605
1606 @@ -231,8 +230,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
1607 if (need_rmap_locks)
1608 take_rmap_locks(vma);
1609 moved = move_huge_pmd(vma, old_addr, new_addr,
1610 - old_end, old_pmd, new_pmd,
1611 - &need_flush);
1612 + old_end, old_pmd, new_pmd);
1613 if (need_rmap_locks)
1614 drop_rmap_locks(vma);
1615 if (moved)
1616 @@ -250,10 +248,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
1617 if (extent > LATENCY_LIMIT)
1618 extent = LATENCY_LIMIT;
1619 move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
1620 - new_pmd, new_addr, need_rmap_locks, &need_flush);
1621 + new_pmd, new_addr, need_rmap_locks);
1622 }
1623 - if (need_flush)
1624 - flush_tlb_range(vma, old_end-len, old_addr);
1625
1626 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
1627
1628 diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
1629 index bd1064d98e16..e92dfedccc16 100644
1630 --- a/net/batman-adv/bat_v_elp.c
1631 +++ b/net/batman-adv/bat_v_elp.c
1632 @@ -227,7 +227,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
1633 * the packet to be exactly of that size to make the link
1634 * throughput estimation effective.
1635 */
1636 - skb_put(skb, probe_len - hard_iface->bat_v.elp_skb->len);
1637 + skb_put_zero(skb, probe_len - hard_iface->bat_v.elp_skb->len);
1638
1639 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1640 "Sending unicast (probe) ELP packet on interface %s to %pM\n",
1641 @@ -254,6 +254,7 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
1642 struct batadv_priv *bat_priv;
1643 struct sk_buff *skb;
1644 u32 elp_interval;
1645 + bool ret;
1646
1647 bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
1648 hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
1649 @@ -315,8 +316,11 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
1650 * may sleep and that is not allowed in an rcu protected
1651 * context. Therefore schedule a task for that.
1652 */
1653 - queue_work(batadv_event_workqueue,
1654 - &hardif_neigh->bat_v.metric_work);
1655 + ret = queue_work(batadv_event_workqueue,
1656 + &hardif_neigh->bat_v.metric_work);
1657 +
1658 + if (!ret)
1659 + batadv_hardif_neigh_put(hardif_neigh);
1660 }
1661 rcu_read_unlock();
1662
1663 diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
1664 index 422ee16b7854..c3c848f64fdd 100644
1665 --- a/net/batman-adv/bridge_loop_avoidance.c
1666 +++ b/net/batman-adv/bridge_loop_avoidance.c
1667 @@ -1772,6 +1772,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
1668 {
1669 struct batadv_bla_backbone_gw *backbone_gw;
1670 struct ethhdr *ethhdr;
1671 + bool ret;
1672
1673 ethhdr = eth_hdr(skb);
1674
1675 @@ -1795,8 +1796,13 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
1676 if (unlikely(!backbone_gw))
1677 return true;
1678
1679 - queue_work(batadv_event_workqueue, &backbone_gw->report_work);
1680 - /* backbone_gw is unreferenced in the report work function function */
1681 + ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
1682 +
1683 + /* backbone_gw is unreferenced in the report work function function
1684 + * if queue_work() call was successful
1685 + */
1686 + if (!ret)
1687 + batadv_backbone_gw_put(backbone_gw);
1688
1689 return true;
1690 }
1691 diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
1692 index 06276ae9f752..c6a7341f0527 100644
1693 --- a/net/batman-adv/gateway_client.c
1694 +++ b/net/batman-adv/gateway_client.c
1695 @@ -31,6 +31,7 @@
1696 #include <linux/kernel.h>
1697 #include <linux/kref.h>
1698 #include <linux/list.h>
1699 +#include <linux/lockdep.h>
1700 #include <linux/netdevice.h>
1701 #include <linux/netlink.h>
1702 #include <linux/rculist.h>
1703 @@ -325,6 +326,9 @@ out:
1704 * @bat_priv: the bat priv with all the soft interface information
1705 * @orig_node: originator announcing gateway capabilities
1706 * @gateway: announced bandwidth information
1707 + *
1708 + * Has to be called with the appropriate locks being acquired
1709 + * (gw.list_lock).
1710 */
1711 static void batadv_gw_node_add(struct batadv_priv *bat_priv,
1712 struct batadv_orig_node *orig_node,
1713 @@ -332,6 +336,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
1714 {
1715 struct batadv_gw_node *gw_node;
1716
1717 + lockdep_assert_held(&bat_priv->gw.list_lock);
1718 +
1719 if (gateway->bandwidth_down == 0)
1720 return;
1721
1722 @@ -346,10 +352,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
1723 gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
1724 gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
1725
1726 - spin_lock_bh(&bat_priv->gw.list_lock);
1727 kref_get(&gw_node->refcount);
1728 hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list);
1729 - spin_unlock_bh(&bat_priv->gw.list_lock);
1730
1731 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1732 "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
1733 @@ -405,11 +409,14 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
1734 {
1735 struct batadv_gw_node *gw_node, *curr_gw = NULL;
1736
1737 + spin_lock_bh(&bat_priv->gw.list_lock);
1738 gw_node = batadv_gw_node_get(bat_priv, orig_node);
1739 if (!gw_node) {
1740 batadv_gw_node_add(bat_priv, orig_node, gateway);
1741 + spin_unlock_bh(&bat_priv->gw.list_lock);
1742 goto out;
1743 }
1744 + spin_unlock_bh(&bat_priv->gw.list_lock);
1745
1746 if ((gw_node->bandwidth_down == ntohl(gateway->bandwidth_down)) &&
1747 (gw_node->bandwidth_up == ntohl(gateway->bandwidth_up)))
1748 diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
1749 index 3604d7899e2c..7a7dcac20566 100644
1750 --- a/net/batman-adv/network-coding.c
1751 +++ b/net/batman-adv/network-coding.c
1752 @@ -850,16 +850,27 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
1753 spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
1754 struct list_head *list;
1755
1756 + /* Select ingoing or outgoing coding node */
1757 + if (in_coding) {
1758 + lock = &orig_neigh_node->in_coding_list_lock;
1759 + list = &orig_neigh_node->in_coding_list;
1760 + } else {
1761 + lock = &orig_neigh_node->out_coding_list_lock;
1762 + list = &orig_neigh_node->out_coding_list;
1763 + }
1764 +
1765 + spin_lock_bh(lock);
1766 +
1767 /* Check if nc_node is already added */
1768 nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
1769
1770 /* Node found */
1771 if (nc_node)
1772 - return nc_node;
1773 + goto unlock;
1774
1775 nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
1776 if (!nc_node)
1777 - return NULL;
1778 + goto unlock;
1779
1780 /* Initialize nc_node */
1781 INIT_LIST_HEAD(&nc_node->list);
1782 @@ -868,22 +879,14 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
1783 kref_get(&orig_neigh_node->refcount);
1784 nc_node->orig_node = orig_neigh_node;
1785
1786 - /* Select ingoing or outgoing coding node */
1787 - if (in_coding) {
1788 - lock = &orig_neigh_node->in_coding_list_lock;
1789 - list = &orig_neigh_node->in_coding_list;
1790 - } else {
1791 - lock = &orig_neigh_node->out_coding_list_lock;
1792 - list = &orig_neigh_node->out_coding_list;
1793 - }
1794 -
1795 batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
1796 nc_node->addr, nc_node->orig_node->orig);
1797
1798 /* Add nc_node to orig_node */
1799 - spin_lock_bh(lock);
1800 kref_get(&nc_node->refcount);
1801 list_add_tail_rcu(&nc_node->list, list);
1802 +
1803 +unlock:
1804 spin_unlock_bh(lock);
1805
1806 return nc_node;
1807 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
1808 index aa2c49fa31ce..8cedb5db1ab3 100644
1809 --- a/net/batman-adv/soft-interface.c
1810 +++ b/net/batman-adv/soft-interface.c
1811 @@ -566,15 +566,20 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
1812 struct batadv_softif_vlan *vlan;
1813 int err;
1814
1815 + spin_lock_bh(&bat_priv->softif_vlan_list_lock);
1816 +
1817 vlan = batadv_softif_vlan_get(bat_priv, vid);
1818 if (vlan) {
1819 batadv_softif_vlan_put(vlan);
1820 + spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
1821 return -EEXIST;
1822 }
1823
1824 vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
1825 - if (!vlan)
1826 + if (!vlan) {
1827 + spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
1828 return -ENOMEM;
1829 + }
1830
1831 vlan->bat_priv = bat_priv;
1832 vlan->vid = vid;
1833 @@ -582,17 +587,23 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
1834
1835 atomic_set(&vlan->ap_isolation, 0);
1836
1837 + kref_get(&vlan->refcount);
1838 + hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
1839 + spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
1840 +
1841 + /* batadv_sysfs_add_vlan cannot be in the spinlock section due to the
1842 + * sleeping behavior of the sysfs functions and the fs_reclaim lock
1843 + */
1844 err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
1845 if (err) {
1846 - kfree(vlan);
1847 + /* ref for the function */
1848 + batadv_softif_vlan_put(vlan);
1849 +
1850 + /* ref for the list */
1851 + batadv_softif_vlan_put(vlan);
1852 return err;
1853 }
1854
1855 - spin_lock_bh(&bat_priv->softif_vlan_list_lock);
1856 - kref_get(&vlan->refcount);
1857 - hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
1858 - spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
1859 -
1860 /* add a new TT local entry. This one will be marked with the NOPURGE
1861 * flag
1862 */
1863 diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
1864 index 0ae8b30e4eaa..2ef9b136fc39 100644
1865 --- a/net/batman-adv/sysfs.c
1866 +++ b/net/batman-adv/sysfs.c
1867 @@ -186,7 +186,8 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
1868 \
1869 return __batadv_store_uint_attr(buff, count, _min, _max, \
1870 _post_func, attr, \
1871 - &bat_priv->_var, net_dev); \
1872 + &bat_priv->_var, net_dev, \
1873 + NULL); \
1874 }
1875
1876 #define BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \
1877 @@ -260,7 +261,9 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
1878 \
1879 length = __batadv_store_uint_attr(buff, count, _min, _max, \
1880 _post_func, attr, \
1881 - &hard_iface->_var, net_dev); \
1882 + &hard_iface->_var, \
1883 + hard_iface->soft_iface, \
1884 + net_dev); \
1885 \
1886 batadv_hardif_put(hard_iface); \
1887 return length; \
1888 @@ -354,10 +357,12 @@ __batadv_store_bool_attr(char *buff, size_t count,
1889
1890 static int batadv_store_uint_attr(const char *buff, size_t count,
1891 struct net_device *net_dev,
1892 + struct net_device *slave_dev,
1893 const char *attr_name,
1894 unsigned int min, unsigned int max,
1895 atomic_t *attr)
1896 {
1897 + char ifname[IFNAMSIZ + 3] = "";
1898 unsigned long uint_val;
1899 int ret;
1900
1901 @@ -383,8 +388,11 @@ static int batadv_store_uint_attr(const char *buff, size_t count,
1902 if (atomic_read(attr) == uint_val)
1903 return count;
1904
1905 - batadv_info(net_dev, "%s: Changing from: %i to: %lu\n",
1906 - attr_name, atomic_read(attr), uint_val);
1907 + if (slave_dev)
1908 + snprintf(ifname, sizeof(ifname), "%s: ", slave_dev->name);
1909 +
1910 + batadv_info(net_dev, "%s: %sChanging from: %i to: %lu\n",
1911 + attr_name, ifname, atomic_read(attr), uint_val);
1912
1913 atomic_set(attr, uint_val);
1914 return count;
1915 @@ -395,12 +403,13 @@ static ssize_t __batadv_store_uint_attr(const char *buff, size_t count,
1916 void (*post_func)(struct net_device *),
1917 const struct attribute *attr,
1918 atomic_t *attr_store,
1919 - struct net_device *net_dev)
1920 + struct net_device *net_dev,
1921 + struct net_device *slave_dev)
1922 {
1923 int ret;
1924
1925 - ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max,
1926 - attr_store);
1927 + ret = batadv_store_uint_attr(buff, count, net_dev, slave_dev,
1928 + attr->name, min, max, attr_store);
1929 if (post_func && ret)
1930 post_func(net_dev);
1931
1932 @@ -569,7 +578,7 @@ static ssize_t batadv_store_gw_sel_class(struct kobject *kobj,
1933 return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE,
1934 batadv_post_gw_reselect, attr,
1935 &bat_priv->gw.sel_class,
1936 - bat_priv->soft_iface);
1937 + bat_priv->soft_iface, NULL);
1938 }
1939
1940 static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
1941 @@ -1078,8 +1087,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
1942 if (old_tp_override == tp_override)
1943 goto out;
1944
1945 - batadv_info(net_dev, "%s: Changing from: %u.%u MBit to: %u.%u MBit\n",
1946 - "throughput_override",
1947 + batadv_info(hard_iface->soft_iface,
1948 + "%s: %s: Changing from: %u.%u MBit to: %u.%u MBit\n",
1949 + "throughput_override", net_dev->name,
1950 old_tp_override / 10, old_tp_override % 10,
1951 tp_override / 10, tp_override % 10);
1952
1953 diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
1954 index 0f4d4eece3e4..9da3455847ff 100644
1955 --- a/net/batman-adv/translation-table.c
1956 +++ b/net/batman-adv/translation-table.c
1957 @@ -1587,6 +1587,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
1958 {
1959 struct batadv_tt_orig_list_entry *orig_entry;
1960
1961 + spin_lock_bh(&tt_global->list_lock);
1962 +
1963 orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
1964 if (orig_entry) {
1965 /* refresh the ttvn: the current value could be a bogus one that
1966 @@ -1609,11 +1611,9 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
1967 orig_entry->flags = flags;
1968 kref_init(&orig_entry->refcount);
1969
1970 - spin_lock_bh(&tt_global->list_lock);
1971 kref_get(&orig_entry->refcount);
1972 hlist_add_head_rcu(&orig_entry->list,
1973 &tt_global->orig_list);
1974 - spin_unlock_bh(&tt_global->list_lock);
1975 atomic_inc(&tt_global->orig_list_count);
1976
1977 sync_flags:
1978 @@ -1621,6 +1621,8 @@ sync_flags:
1979 out:
1980 if (orig_entry)
1981 batadv_tt_orig_list_entry_put(orig_entry);
1982 +
1983 + spin_unlock_bh(&tt_global->list_lock);
1984 }
1985
1986 /**
1987 diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c
1988 index 1d9e267caec9..d6d6d95e48aa 100644
1989 --- a/net/batman-adv/tvlv.c
1990 +++ b/net/batman-adv/tvlv.c
1991 @@ -528,15 +528,20 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
1992 {
1993 struct batadv_tvlv_handler *tvlv_handler;
1994
1995 + spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
1996 +
1997 tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
1998 if (tvlv_handler) {
1999 + spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
2000 batadv_tvlv_handler_put(tvlv_handler);
2001 return;
2002 }
2003
2004 tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
2005 - if (!tvlv_handler)
2006 + if (!tvlv_handler) {
2007 + spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
2008 return;
2009 + }
2010
2011 tvlv_handler->ogm_handler = optr;
2012 tvlv_handler->unicast_handler = uptr;
2013 @@ -546,7 +551,6 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
2014 kref_init(&tvlv_handler->refcount);
2015 INIT_HLIST_NODE(&tvlv_handler->list);
2016
2017 - spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
2018 kref_get(&tvlv_handler->refcount);
2019 hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
2020 spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);