Magellan Linux

Contents of /trunk/kernel-alx-legacy/patches-4.9/0106-4.9.7-all-fixes.patch

Revision 3608
Fri Aug 14 07:34:29 2020 UTC by niro
File size: 71811 bytes
-added kernel-alx-legacy pkg
1 diff --git a/Makefile b/Makefile
2 index ef95231d1625..da704d903321 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 6
9 +SUBLEVEL = 7
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
14 index a36e8601114d..d5da2115d78a 100644
15 --- a/arch/arc/include/asm/delay.h
16 +++ b/arch/arc/include/asm/delay.h
17 @@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops)
18 " lp 1f \n"
19 " nop \n"
20 "1: \n"
21 - : : "r"(loops));
22 + :
23 + : "r"(loops)
24 + : "lp_count");
25 }
26
27 extern void __bad_udelay(void);
28 diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
29 index abd961f3e763..91ebe382147f 100644
30 --- a/arch/arc/kernel/unaligned.c
31 +++ b/arch/arc/kernel/unaligned.c
32 @@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
33 if (state.fault)
34 goto fault;
35
36 + /* clear any remanants of delay slot */
37 if (delay_mode(regs)) {
38 - regs->ret = regs->bta;
39 + regs->ret = regs->bta & ~1U;
40 regs->status32 &= ~STATUS_DE_MASK;
41 } else {
42 regs->ret += state.instr_len;
43 diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
44 index 3f9406d9b9d6..da87943328a5 100644
45 --- a/arch/parisc/include/asm/bitops.h
46 +++ b/arch/parisc/include/asm/bitops.h
47 @@ -6,7 +6,7 @@
48 #endif
49
50 #include <linux/compiler.h>
51 -#include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */
52 +#include <asm/types.h>
53 #include <asm/byteorder.h>
54 #include <asm/barrier.h>
55 #include <linux/atomic.h>
56 @@ -17,6 +17,12 @@
57 * to include/asm-i386/bitops.h or kerneldoc
58 */
59
60 +#if __BITS_PER_LONG == 64
61 +#define SHIFT_PER_LONG 6
62 +#else
63 +#define SHIFT_PER_LONG 5
64 +#endif
65 +
66 #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
67
68
69 diff --git a/arch/parisc/include/uapi/asm/bitsperlong.h b/arch/parisc/include/uapi/asm/bitsperlong.h
70 index e0a23c7bdd43..07fa7e50bdc0 100644
71 --- a/arch/parisc/include/uapi/asm/bitsperlong.h
72 +++ b/arch/parisc/include/uapi/asm/bitsperlong.h
73 @@ -3,10 +3,8 @@
74
75 #if defined(__LP64__)
76 #define __BITS_PER_LONG 64
77 -#define SHIFT_PER_LONG 6
78 #else
79 #define __BITS_PER_LONG 32
80 -#define SHIFT_PER_LONG 5
81 #endif
82
83 #include <asm-generic/bitsperlong.h>
84 diff --git a/arch/parisc/include/uapi/asm/swab.h b/arch/parisc/include/uapi/asm/swab.h
85 index e78403b129ef..928e1bbac98f 100644
86 --- a/arch/parisc/include/uapi/asm/swab.h
87 +++ b/arch/parisc/include/uapi/asm/swab.h
88 @@ -1,6 +1,7 @@
89 #ifndef _PARISC_SWAB_H
90 #define _PARISC_SWAB_H
91
92 +#include <asm/bitsperlong.h>
93 #include <linux/types.h>
94 #include <linux/compiler.h>
95
96 @@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
97 }
98 #define __arch_swab32 __arch_swab32
99
100 -#if BITS_PER_LONG > 32
101 +#if __BITS_PER_LONG > 32
102 /*
103 ** From "PA-RISC 2.0 Architecture", HP Professional Books.
104 ** See Appendix I page 8 , "Endian Byte Swapping".
105 @@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
106 return x;
107 }
108 #define __arch_swab64 __arch_swab64
109 -#endif /* BITS_PER_LONG > 32 */
110 +#endif /* __BITS_PER_LONG > 32 */
111
112 #endif /* _PARISC_SWAB_H */
113 diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
114 index 9336e824e2db..fc2974b929c3 100644
115 --- a/arch/s390/kernel/ptrace.c
116 +++ b/arch/s390/kernel/ptrace.c
117 @@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
118 if (target == current)
119 save_fpu_regs();
120
121 + if (MACHINE_HAS_VX)
122 + convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
123 + else
124 + memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
125 +
126 /* If setting FPC, must validate it first. */
127 if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
128 u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
129 @@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
130 if (target == current)
131 save_fpu_regs();
132
133 + for (i = 0; i < __NUM_VXRS_LOW; i++)
134 + vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
135 +
136 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
137 if (rc == 0)
138 for (i = 0; i < __NUM_VXRS_LOW; i++)
139 diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
140 index 7a1897c51c54..d56ef26d4681 100644
141 --- a/arch/s390/mm/pgtable.c
142 +++ b/arch/s390/mm/pgtable.c
143 @@ -202,7 +202,7 @@ static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
144 return pgste;
145 }
146
147 -static inline void ptep_xchg_commit(struct mm_struct *mm,
148 +static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
149 unsigned long addr, pte_t *ptep,
150 pgste_t pgste, pte_t old, pte_t new)
151 {
152 @@ -220,6 +220,7 @@ static inline void ptep_xchg_commit(struct mm_struct *mm,
153 } else {
154 *ptep = new;
155 }
156 + return old;
157 }
158
159 pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
160 @@ -231,7 +232,7 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
161 preempt_disable();
162 pgste = ptep_xchg_start(mm, addr, ptep);
163 old = ptep_flush_direct(mm, addr, ptep);
164 - ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
165 + old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
166 preempt_enable();
167 return old;
168 }
169 @@ -246,7 +247,7 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
170 preempt_disable();
171 pgste = ptep_xchg_start(mm, addr, ptep);
172 old = ptep_flush_lazy(mm, addr, ptep);
173 - ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
174 + old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
175 preempt_enable();
176 return old;
177 }
178 diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
179 index d89b7011667c..e279572824b1 100644
180 --- a/arch/tile/kernel/ptrace.c
181 +++ b/arch/tile/kernel/ptrace.c
182 @@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target,
183 const void *kbuf, const void __user *ubuf)
184 {
185 int ret;
186 - struct pt_regs regs;
187 + struct pt_regs regs = *task_pt_regs(target);
188
189 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
190 sizeof(regs));
191 diff --git a/arch/x86/platform/mellanox/mlx-platform.c b/arch/x86/platform/mellanox/mlx-platform.c
192 index 7dcfcca97399..c0355d789fce 100644
193 --- a/arch/x86/platform/mellanox/mlx-platform.c
194 +++ b/arch/x86/platform/mellanox/mlx-platform.c
195 @@ -233,7 +233,7 @@ static int __init mlxplat_init(void)
196 return 0;
197
198 fail_platform_mux_register:
199 - for (i--; i > 0 ; i--)
200 + while (--i >= 0)
201 platform_device_unregister(priv->pdev_mux[i]);
202 platform_device_unregister(priv->pdev_i2c);
203 fail_alloc:
204 diff --git a/drivers/base/memory.c b/drivers/base/memory.c
205 index 62c63c0c5c22..e7f86a8887d2 100644
206 --- a/drivers/base/memory.c
207 +++ b/drivers/base/memory.c
208 @@ -410,14 +410,14 @@ static ssize_t show_valid_zones(struct device *dev,
209 sprintf(buf, "%s", zone->name);
210
211 /* MMOP_ONLINE_KERNEL */
212 - zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
213 + zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
214 if (zone_shift) {
215 strcat(buf, " ");
216 strcat(buf, (zone + zone_shift)->name);
217 }
218
219 /* MMOP_ONLINE_MOVABLE */
220 - zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
221 + zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
222 if (zone_shift) {
223 strcat(buf, " ");
224 strcat(buf, (zone + zone_shift)->name);
225 diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
226 index 338766c64c99..a05bb3891119 100644
227 --- a/drivers/gpu/drm/drm_atomic_helper.c
228 +++ b/drivers/gpu/drm/drm_atomic_helper.c
229 @@ -3115,6 +3115,8 @@ void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
230
231 if (state->fb)
232 drm_framebuffer_reference(state->fb);
233 +
234 + state->fence = NULL;
235 }
236 EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
237
238 diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
239 index 53f07ac7c174..e14366de0e6e 100644
240 --- a/drivers/gpu/drm/drm_modes.c
241 +++ b/drivers/gpu/drm/drm_modes.c
242 @@ -1462,6 +1462,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
243 return NULL;
244
245 mode->type |= DRM_MODE_TYPE_USERDEF;
246 + /* fix up 1368x768: GFT/CVT can't express 1366 width due to alignment */
247 + if (cmd->xres == 1366 && mode->hdisplay == 1368) {
248 + mode->hdisplay = 1366;
249 + mode->hsync_start--;
250 + mode->hsync_end--;
251 + drm_mode_set_name(mode);
252 + }
253 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
254 return mode;
255 }
256 diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
257 index f6b64d7d3528..276474d13763 100644
258 --- a/drivers/gpu/drm/drm_probe_helper.c
259 +++ b/drivers/gpu/drm/drm_probe_helper.c
260 @@ -143,8 +143,18 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
261 }
262
263 if (dev->mode_config.delayed_event) {
264 + /*
265 + * FIXME:
266 + *
267 + * Use short (1s) delay to handle the initial delayed event.
268 + * This delay should not be needed, but Optimus/nouveau will
269 + * fail in a mysterious way if the delayed event is handled as
270 + * soon as possible like it is done in
271 + * drm_helper_probe_single_connector_modes() in case the poll
272 + * was enabled before.
273 + */
274 poll = true;
275 - delay = 0;
276 + delay = HZ;
277 }
278
279 if (poll)
280 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
281 index 18dfdd5c1b3b..670beebc32f6 100644
282 --- a/drivers/gpu/drm/i915/i915_drv.c
283 +++ b/drivers/gpu/drm/i915/i915_drv.c
284 @@ -2372,7 +2372,7 @@ static int intel_runtime_suspend(struct device *kdev)
285
286 assert_forcewakes_inactive(dev_priv);
287
288 - if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
289 + if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
290 intel_hpd_poll_init(dev_priv);
291
292 DRM_DEBUG_KMS("Device suspended\n");
293 diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
294 index 5b6f81c1dbca..7467355e4a18 100644
295 --- a/drivers/gpu/drm/i915/i915_gem_evict.c
296 +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
297 @@ -194,6 +194,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
298 }
299
300 /* Unbinding will emit any required flushes */
301 + ret = 0;
302 while (!list_empty(&eviction_list)) {
303 vma = list_first_entry(&eviction_list,
304 struct i915_vma,
305 diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
306 index dfbcf16b41df..4149a0fbe8bd 100644
307 --- a/drivers/gpu/drm/i915/intel_crt.c
308 +++ b/drivers/gpu/drm/i915/intel_crt.c
309 @@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
310 struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
311 struct edid *edid;
312 struct i2c_adapter *i2c;
313 + bool ret = false;
314
315 BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
316
317 @@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
318 */
319 if (!is_digital) {
320 DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
321 - return true;
322 + ret = true;
323 + } else {
324 + DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
325 }
326 -
327 - DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
328 } else {
329 DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
330 }
331
332 kfree(edid);
333
334 - return false;
335 + return ret;
336 }
337
338 static enum drm_connector_status
339 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
340 index 869b29fe9ec4..8079e5b380cb 100644
341 --- a/drivers/gpu/drm/i915/intel_display.c
342 +++ b/drivers/gpu/drm/i915/intel_display.c
343 @@ -2587,8 +2587,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
344 * We only keep the x/y offsets, so push all of the
345 * gtt offset into the x/y offsets.
346 */
347 - _intel_adjust_tile_offset(&x, &y, tile_size,
348 - tile_width, tile_height, pitch_tiles,
349 + _intel_adjust_tile_offset(&x, &y,
350 + tile_width, tile_height,
351 + tile_size, pitch_tiles,
352 gtt_offset_rotated * tile_size, 0);
353
354 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
355 @@ -2975,6 +2976,9 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
356 unsigned int rotation = plane_state->base.rotation;
357 int ret;
358
359 + if (!plane_state->base.visible)
360 + return 0;
361 +
362 /* Rotate src coordinates to match rotated GTT view */
363 if (intel_rotation_90_or_270(rotation))
364 drm_rect_rotate(&plane_state->base.src,
365 @@ -6865,6 +6869,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
366 }
367
368 state = drm_atomic_state_alloc(crtc->dev);
369 + if (!state) {
370 + DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
371 + crtc->base.id, crtc->name);
372 + return;
373 + }
374 +
375 state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
376
377 /* Everything's already locked, -EDEADLK can't happen. */
378 diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
379 index b7098f98bb67..9127e57f383c 100644
380 --- a/drivers/gpu/drm/i915/intel_fbdev.c
381 +++ b/drivers/gpu/drm/i915/intel_fbdev.c
382 @@ -745,6 +745,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev)
383 {
384 struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
385
386 + if (!ifbdev)
387 + return;
388 +
389 ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
390 }
391
392 diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
393 index 0adb879833ff..67db1577ee49 100644
394 --- a/drivers/gpu/drm/i915/intel_lrc.c
395 +++ b/drivers/gpu/drm/i915/intel_lrc.c
396 @@ -858,8 +858,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
397 * this batch updates GEN8_L3SQCREG4 with default value we need to
398 * set this bit here to retain the WA during flush.
399 */
400 - if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) ||
401 - IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
402 + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
403 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
404
405 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
406 diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
407 index ed9955dce156..8babfe0ce4e3 100644
408 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
409 +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
410 @@ -1153,14 +1153,6 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
411 WA_SET_BIT_MASKED(HDC_CHICKEN0,
412 HDC_FENCE_DEST_SLM_DISABLE);
413
414 - /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
415 - * involving this register should also be added to WA batch as required.
416 - */
417 - if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
418 - /* WaDisableLSQCROPERFforOCL:kbl */
419 - I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
420 - GEN8_LQSC_RO_PERF_DIS);
421 -
422 /* WaToEnableHwFixForPushConstHWBug:kbl */
423 if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
424 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
425 diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
426 index 00ea0002b539..e0c143b865f3 100644
427 --- a/drivers/gpu/drm/radeon/radeon_drv.c
428 +++ b/drivers/gpu/drm/radeon/radeon_drv.c
429 @@ -366,11 +366,10 @@ static void
430 radeon_pci_shutdown(struct pci_dev *pdev)
431 {
432 /* if we are running in a VM, make sure the device
433 - * torn down properly on reboot/shutdown.
434 - * unfortunately we can't detect certain
435 - * hypervisors so just do this all the time.
436 + * torn down properly on reboot/shutdown
437 */
438 - radeon_pci_remove(pdev);
439 + if (radeon_device_is_virtual())
440 + radeon_pci_remove(pdev);
441 }
442
443 static int radeon_pmops_suspend(struct device *dev)
444 diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
445 index 7f08d681a74b..d544ff9b0d46 100644
446 --- a/drivers/gpu/drm/vc4/vc4_crtc.c
447 +++ b/drivers/gpu/drm/vc4/vc4_crtc.c
448 @@ -832,7 +832,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
449
450 }
451
452 - __drm_atomic_helper_crtc_destroy_state(state);
453 + drm_atomic_helper_crtc_destroy_state(crtc, state);
454 }
455
456 static const struct drm_crtc_funcs vc4_crtc_funcs = {
457 diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
458 index 303f23c96220..18e37171e9c8 100644
459 --- a/drivers/gpu/drm/vc4/vc4_gem.c
460 +++ b/drivers/gpu/drm/vc4/vc4_gem.c
461 @@ -594,12 +594,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
462 args->shader_rec_count);
463 struct vc4_bo *bo;
464
465 - if (uniforms_offset < shader_rec_offset ||
466 + if (shader_rec_offset < args->bin_cl_size ||
467 + uniforms_offset < shader_rec_offset ||
468 exec_size < uniforms_offset ||
469 args->shader_rec_count >= (UINT_MAX /
470 sizeof(struct vc4_shader_state)) ||
471 temp_size < exec_size) {
472 DRM_ERROR("overflow in exec arguments\n");
473 + ret = -EINVAL;
474 goto fail;
475 }
476
477 diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
478 index 08886a309757..5cdd003605f5 100644
479 --- a/drivers/gpu/drm/vc4/vc4_render_cl.c
480 +++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
481 @@ -461,7 +461,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
482 }
483
484 ret = vc4_full_res_bounds_check(exec, *obj, surf);
485 - if (!ret)
486 + if (ret)
487 return ret;
488
489 return 0;
490 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
491 index 2a6fc47a1dfb..c25768c2dd3b 100644
492 --- a/drivers/infiniband/core/cma.c
493 +++ b/drivers/infiniband/core/cma.c
494 @@ -2768,7 +2768,8 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
495 if (!src_addr || !src_addr->sa_family) {
496 src_addr = (struct sockaddr *) &id->route.addr.src_addr;
497 src_addr->sa_family = dst_addr->sa_family;
498 - if (dst_addr->sa_family == AF_INET6) {
499 + if (IS_ENABLED(CONFIG_IPV6) &&
500 + dst_addr->sa_family == AF_INET6) {
501 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
502 struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
503 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
504 diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
505 index 84b4eff90395..c22fde6207d1 100644
506 --- a/drivers/infiniband/core/umem.c
507 +++ b/drivers/infiniband/core/umem.c
508 @@ -134,6 +134,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
509 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
510
511 if (access & IB_ACCESS_ON_DEMAND) {
512 + put_pid(umem->pid);
513 ret = ib_umem_odp_get(context, umem);
514 if (ret) {
515 kfree(umem);
516 @@ -149,6 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
517
518 page_list = (struct page **) __get_free_page(GFP_KERNEL);
519 if (!page_list) {
520 + put_pid(umem->pid);
521 kfree(umem);
522 return ERR_PTR(-ENOMEM);
523 }
524 diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
525 index b99dc9e0ffb2..b85a1a983e07 100644
526 --- a/drivers/infiniband/hw/cxgb4/device.c
527 +++ b/drivers/infiniband/hw/cxgb4/device.c
528 @@ -848,9 +848,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
529 }
530 }
531
532 + rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
533 + if (!rdev->free_workq) {
534 + err = -ENOMEM;
535 + goto err_free_status_page;
536 + }
537 +
538 rdev->status_page->db_off = 0;
539
540 return 0;
541 +err_free_status_page:
542 + free_page((unsigned long)rdev->status_page);
543 destroy_ocqp_pool:
544 c4iw_ocqp_pool_destroy(rdev);
545 destroy_rqtpool:
546 @@ -864,6 +872,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
547
548 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
549 {
550 + destroy_workqueue(rdev->free_workq);
551 kfree(rdev->wr_log);
552 free_page((unsigned long)rdev->status_page);
553 c4iw_pblpool_destroy(rdev);
554 diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
555 index 4788e1a46fde..7d540667dad2 100644
556 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
557 +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
558 @@ -45,6 +45,7 @@
559 #include <linux/kref.h>
560 #include <linux/timer.h>
561 #include <linux/io.h>
562 +#include <linux/workqueue.h>
563
564 #include <asm/byteorder.h>
565
566 @@ -107,6 +108,7 @@ struct c4iw_dev_ucontext {
567 struct list_head qpids;
568 struct list_head cqids;
569 struct mutex lock;
570 + struct kref kref;
571 };
572
573 enum c4iw_rdev_flags {
574 @@ -183,6 +185,7 @@ struct c4iw_rdev {
575 atomic_t wr_log_idx;
576 struct wr_log_entry *wr_log;
577 int wr_log_size;
578 + struct workqueue_struct *free_workq;
579 };
580
581 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
582 @@ -482,6 +485,8 @@ struct c4iw_qp {
583 int sq_sig_all;
584 struct completion rq_drained;
585 struct completion sq_drained;
586 + struct work_struct free_work;
587 + struct c4iw_ucontext *ucontext;
588 };
589
590 static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
591 @@ -495,6 +500,7 @@ struct c4iw_ucontext {
592 u32 key;
593 spinlock_t mmap_lock;
594 struct list_head mmaps;
595 + struct kref kref;
596 };
597
598 static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
599 @@ -502,6 +508,18 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
600 return container_of(c, struct c4iw_ucontext, ibucontext);
601 }
602
603 +void _c4iw_free_ucontext(struct kref *kref);
604 +
605 +static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
606 +{
607 + kref_put(&ucontext->kref, _c4iw_free_ucontext);
608 +}
609 +
610 +static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
611 +{
612 + kref_get(&ucontext->kref);
613 +}
614 +
615 struct c4iw_mm_entry {
616 struct list_head entry;
617 u64 addr;
618 diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
619 index 645e606a17c5..8278ba06f995 100644
620 --- a/drivers/infiniband/hw/cxgb4/provider.c
621 +++ b/drivers/infiniband/hw/cxgb4/provider.c
622 @@ -91,17 +91,28 @@ static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
623 return -ENOSYS;
624 }
625
626 -static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
627 +void _c4iw_free_ucontext(struct kref *kref)
628 {
629 - struct c4iw_dev *rhp = to_c4iw_dev(context->device);
630 - struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
631 + struct c4iw_ucontext *ucontext;
632 + struct c4iw_dev *rhp;
633 struct c4iw_mm_entry *mm, *tmp;
634
635 - PDBG("%s context %p\n", __func__, context);
636 + ucontext = container_of(kref, struct c4iw_ucontext, kref);
637 + rhp = to_c4iw_dev(ucontext->ibucontext.device);
638 +
639 + PDBG("%s ucontext %p\n", __func__, ucontext);
640 list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
641 kfree(mm);
642 c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
643 kfree(ucontext);
644 +}
645 +
646 +static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
647 +{
648 + struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
649 +
650 + PDBG("%s context %p\n", __func__, context);
651 + c4iw_put_ucontext(ucontext);
652 return 0;
653 }
654
655 @@ -125,6 +136,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
656 c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
657 INIT_LIST_HEAD(&context->mmaps);
658 spin_lock_init(&context->mmap_lock);
659 + kref_init(&context->kref);
660
661 if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
662 if (!warned++)
663 diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
664 index b7ac97b27c88..bb0fde6e2047 100644
665 --- a/drivers/infiniband/hw/cxgb4/qp.c
666 +++ b/drivers/infiniband/hw/cxgb4/qp.c
667 @@ -714,13 +714,32 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
668 return 0;
669 }
670
671 -static void _free_qp(struct kref *kref)
672 +static void free_qp_work(struct work_struct *work)
673 +{
674 + struct c4iw_ucontext *ucontext;
675 + struct c4iw_qp *qhp;
676 + struct c4iw_dev *rhp;
677 +
678 + qhp = container_of(work, struct c4iw_qp, free_work);
679 + ucontext = qhp->ucontext;
680 + rhp = qhp->rhp;
681 +
682 + PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
683 + destroy_qp(&rhp->rdev, &qhp->wq,
684 + ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
685 +
686 + if (ucontext)
687 + c4iw_put_ucontext(ucontext);
688 + kfree(qhp);
689 +}
690 +
691 +static void queue_qp_free(struct kref *kref)
692 {
693 struct c4iw_qp *qhp;
694
695 qhp = container_of(kref, struct c4iw_qp, kref);
696 PDBG("%s qhp %p\n", __func__, qhp);
697 - kfree(qhp);
698 + queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
699 }
700
701 void c4iw_qp_add_ref(struct ib_qp *qp)
702 @@ -732,7 +751,7 @@ void c4iw_qp_add_ref(struct ib_qp *qp)
703 void c4iw_qp_rem_ref(struct ib_qp *qp)
704 {
705 PDBG("%s ib_qp %p\n", __func__, qp);
706 - kref_put(&to_c4iw_qp(qp)->kref, _free_qp);
707 + kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
708 }
709
710 static void add_to_fc_list(struct list_head *head, struct list_head *entry)
711 @@ -1642,7 +1661,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
712 struct c4iw_dev *rhp;
713 struct c4iw_qp *qhp;
714 struct c4iw_qp_attributes attrs;
715 - struct c4iw_ucontext *ucontext;
716
717 qhp = to_c4iw_qp(ib_qp);
718 rhp = qhp->rhp;
719 @@ -1662,11 +1680,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
720 spin_unlock_irq(&rhp->lock);
721 free_ird(rhp, qhp->attr.max_ird);
722
723 - ucontext = ib_qp->uobject ?
724 - to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
725 - destroy_qp(&rhp->rdev, &qhp->wq,
726 - ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
727 -
728 c4iw_qp_rem_ref(ib_qp);
729
730 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
731 @@ -1767,6 +1780,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
732 mutex_init(&qhp->mutex);
733 init_waitqueue_head(&qhp->wait);
734 kref_init(&qhp->kref);
735 + INIT_WORK(&qhp->free_work, free_qp_work);
736
737 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
738 if (ret)
739 @@ -1853,6 +1867,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
740 ma_sync_key_mm->len = PAGE_SIZE;
741 insert_mmap(ucontext, ma_sync_key_mm);
742 }
743 +
744 + c4iw_get_ucontext(ucontext);
745 + qhp->ucontext = ucontext;
746 }
747 qhp->ibqp.qp_num = qhp->wq.sq.qid;
748 init_timer(&(qhp->timer));
749 diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
750 index ffff5a54cb34..f4f3942ebbd1 100644
751 --- a/drivers/infiniband/sw/rxe/rxe_net.c
752 +++ b/drivers/infiniband/sw/rxe/rxe_net.c
753 @@ -554,7 +554,7 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev)
754 }
755
756 spin_lock_bh(&dev_list_lock);
757 - list_add_tail(&rxe_dev_list, &rxe->list);
758 + list_add_tail(&rxe->list, &rxe_dev_list);
759 spin_unlock_bh(&dev_list_lock);
760 return rxe;
761 }
762 diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
763 index 486d576e55bc..44b2108253bd 100644
764 --- a/drivers/infiniband/sw/rxe/rxe_qp.c
765 +++ b/drivers/infiniband/sw/rxe/rxe_qp.c
766 @@ -813,8 +813,7 @@ void rxe_qp_destroy(struct rxe_qp *qp)
767 del_timer_sync(&qp->rnr_nak_timer);
768
769 rxe_cleanup_task(&qp->req.task);
770 - if (qp_type(qp) == IB_QPT_RC)
771 - rxe_cleanup_task(&qp->comp.task);
772 + rxe_cleanup_task(&qp->comp.task);
773
774 /* flush out any receive wr's or pending requests */
775 __rxe_do_task(&qp->req.task);
776 diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
777 index 64b3d11dcf1e..140f3f354cf3 100644
778 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c
779 +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
780 @@ -651,13 +651,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
781 SHOST_DIX_GUARD_CRC);
782 }
783
784 - /*
785 - * Limit the sg_tablesize and max_sectors based on the device
786 - * max fastreg page list length.
787 - */
788 - shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
789 - ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
790 -
791 if (iscsi_host_add(shost,
792 ib_conn->device->ib_device->dma_device)) {
793 mutex_unlock(&iser_conn->state_mutex);
794 diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
795 index d980fb458ad4..e7dcf14a76e2 100644
796 --- a/drivers/infiniband/ulp/srp/ib_srp.c
797 +++ b/drivers/infiniband/ulp/srp/ib_srp.c
798 @@ -366,6 +366,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
799 struct srp_fr_desc *d;
800 struct ib_mr *mr;
801 int i, ret = -EINVAL;
802 + enum ib_mr_type mr_type;
803
804 if (pool_size <= 0)
805 goto err;
806 @@ -379,9 +380,13 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
807 spin_lock_init(&pool->lock);
808 INIT_LIST_HEAD(&pool->free_list);
809
810 + if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
811 + mr_type = IB_MR_TYPE_SG_GAPS;
812 + else
813 + mr_type = IB_MR_TYPE_MEM_REG;
814 +
815 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
816 - mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
817 - max_page_list_len);
818 + mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
819 if (IS_ERR(mr)) {
820 ret = PTR_ERR(mr);
821 goto destroy_pool;
822 @@ -3678,6 +3683,12 @@ static int __init srp_init_module(void)
823 indirect_sg_entries = cmd_sg_entries;
824 }
825
826 + if (indirect_sg_entries > SG_MAX_SEGMENTS) {
827 + pr_warn("Clamping indirect_sg_entries to %u\n",
828 + SG_MAX_SEGMENTS);
829 + indirect_sg_entries = SG_MAX_SEGMENTS;
830 + }
831 +
832 srp_remove_wq = create_workqueue("srp_remove");
833 if (!srp_remove_wq) {
834 ret = -ENOMEM;
835 diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
836 index 1a1d99704fe6..296f1411fe84 100644
837 --- a/drivers/isdn/hardware/eicon/message.c
838 +++ b/drivers/isdn/hardware/eicon/message.c
839 @@ -11297,7 +11297,8 @@ static void mixer_notify_update(PLCI *plci, byte others)
840 ((CAPI_MSG *) msg)->header.ncci = 0;
841 ((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
842 ((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
843 - PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE);
844 + ((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff;
845 + ((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8;
846 ((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
847 w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
848 if (w != _QUEUE_FULL)
849 diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
850 index 2669b4bad910..5a27bffa02fb 100644
851 --- a/drivers/media/i2c/Kconfig
852 +++ b/drivers/media/i2c/Kconfig
853 @@ -655,6 +655,7 @@ config VIDEO_S5K6A3
854 config VIDEO_S5K4ECGX
855 tristate "Samsung S5K4ECGX sensor support"
856 depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
857 + select CRC32
858 ---help---
859 This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
860 camera sensor with an embedded SoC image signal processor.
861 diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
862 index 7268e706e216..59aa4dafb60b 100644
863 --- a/drivers/media/i2c/tvp5150.c
864 +++ b/drivers/media/i2c/tvp5150.c
865 @@ -288,8 +288,12 @@ static inline void tvp5150_selmux(struct v4l2_subdev *sd)
866 tvp5150_write(sd, TVP5150_OP_MODE_CTL, opmode);
867 tvp5150_write(sd, TVP5150_VD_IN_SRC_SEL_1, input);
868
869 - /* Svideo should enable YCrCb output and disable GPCL output
870 - * For Composite and TV, it should be the reverse
871 + /*
872 + * Setup the FID/GLCO/VLK/HVLK and INTREQ/GPCL/VBLK output signals. For
873 + * S-Video we output the vertical lock (VLK) signal on FID/GLCO/VLK/HVLK
874 + * and set INTREQ/GPCL/VBLK to logic 0. For composite we output the
875 + * field indicator (FID) signal on FID/GLCO/VLK/HVLK and set
876 + * INTREQ/GPCL/VBLK to logic 1.
877 */
878 val = tvp5150_read(sd, TVP5150_MISC_CTL);
879 if (val < 0) {
880 @@ -298,9 +302,9 @@ static inline void tvp5150_selmux(struct v4l2_subdev *sd)
881 }
882
883 if (decoder->input == TVP5150_SVIDEO)
884 - val = (val & ~0x40) | 0x10;
885 + val = (val & ~TVP5150_MISC_CTL_GPCL) | TVP5150_MISC_CTL_HVLK;
886 else
887 - val = (val & ~0x10) | 0x40;
888 + val = (val & ~TVP5150_MISC_CTL_HVLK) | TVP5150_MISC_CTL_GPCL;
889 tvp5150_write(sd, TVP5150_MISC_CTL, val);
890 };
891
892 @@ -452,7 +456,12 @@ static const struct i2c_reg_value tvp5150_init_enable[] = {
893 },{ /* Automatic offset and AGC enabled */
894 TVP5150_ANAL_CHL_CTL, 0x15
895 },{ /* Activate YCrCb output 0x9 or 0xd ? */
896 - TVP5150_MISC_CTL, 0x6f
897 + TVP5150_MISC_CTL, TVP5150_MISC_CTL_GPCL |
898 + TVP5150_MISC_CTL_INTREQ_OE |
899 + TVP5150_MISC_CTL_YCBCR_OE |
900 + TVP5150_MISC_CTL_SYNC_OE |
901 + TVP5150_MISC_CTL_VBLANK |
902 + TVP5150_MISC_CTL_CLOCK_OE,
903 },{ /* Activates video std autodetection for all standards */
904 TVP5150_AUTOSW_MSK, 0x0
905 },{ /* Default format: 0x47. For 4:2:2: 0x40 */
906 @@ -858,8 +867,6 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
907
908 f = &format->format;
909
910 - tvp5150_reset(sd, 0);
911 -
912 f->width = decoder->rect.width;
913 f->height = decoder->rect.height / 2;
914
915 @@ -1048,21 +1055,27 @@ static const struct media_entity_operations tvp5150_sd_media_ops = {
916 static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable)
917 {
918 struct tvp5150 *decoder = to_tvp5150(sd);
919 - /* Output format: 8-bit ITU-R BT.656 with embedded syncs */
920 - int val = 0x09;
921 -
922 - /* Output format: 8-bit 4:2:2 YUV with discrete sync */
923 - if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
924 - val = 0x0d;
925 + int val;
926
927 - /* Initializes TVP5150 to its default values */
928 - /* # set PCLK (27MHz) */
929 - tvp5150_write(sd, TVP5150_CONF_SHARED_PIN, 0x00);
930 + /* Enable or disable the video output signals. */
931 + val = tvp5150_read(sd, TVP5150_MISC_CTL);
932 + if (val < 0)
933 + return val;
934 +
935 + val &= ~(TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_SYNC_OE |
936 + TVP5150_MISC_CTL_CLOCK_OE);
937 +
938 + if (enable) {
939 + /*
940 + * Enable the YCbCr and clock outputs. In discrete sync mode
941 + * (non-BT.656) additionally enable the the sync outputs.
942 + */
943 + val |= TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_CLOCK_OE;
944 + if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
945 + val |= TVP5150_MISC_CTL_SYNC_OE;
946 + }
947
948 - if (enable)
949 - tvp5150_write(sd, TVP5150_MISC_CTL, val);
950 - else
951 - tvp5150_write(sd, TVP5150_MISC_CTL, 0x00);
952 + tvp5150_write(sd, TVP5150_MISC_CTL, val);
953
954 return 0;
955 }
956 @@ -1521,7 +1534,6 @@ static int tvp5150_probe(struct i2c_client *c,
957 res = core->hdl.error;
958 goto err;
959 }
960 - v4l2_ctrl_handler_setup(&core->hdl);
961
962 /* Default is no cropping */
963 core->rect.top = 0;
964 @@ -1532,6 +1544,8 @@ static int tvp5150_probe(struct i2c_client *c,
965 core->rect.left = 0;
966 core->rect.width = TVP5150_H_MAX;
967
968 + tvp5150_reset(sd, 0); /* Calls v4l2_ctrl_handler_setup() */
969 +
970 res = v4l2_async_register_subdev(sd);
971 if (res < 0)
972 goto err;
973 diff --git a/drivers/media/i2c/tvp5150_reg.h b/drivers/media/i2c/tvp5150_reg.h
974 index 25a994944918..30a48c28d05a 100644
975 --- a/drivers/media/i2c/tvp5150_reg.h
976 +++ b/drivers/media/i2c/tvp5150_reg.h
977 @@ -9,6 +9,15 @@
978 #define TVP5150_ANAL_CHL_CTL 0x01 /* Analog channel controls */
979 #define TVP5150_OP_MODE_CTL 0x02 /* Operation mode controls */
980 #define TVP5150_MISC_CTL 0x03 /* Miscellaneous controls */
981 +#define TVP5150_MISC_CTL_VBLK_GPCL BIT(7)
982 +#define TVP5150_MISC_CTL_GPCL BIT(6)
983 +#define TVP5150_MISC_CTL_INTREQ_OE BIT(5)
984 +#define TVP5150_MISC_CTL_HVLK BIT(4)
985 +#define TVP5150_MISC_CTL_YCBCR_OE BIT(3)
986 +#define TVP5150_MISC_CTL_SYNC_OE BIT(2)
987 +#define TVP5150_MISC_CTL_VBLANK BIT(1)
988 +#define TVP5150_MISC_CTL_CLOCK_OE BIT(0)
989 +
990 #define TVP5150_AUTOSW_MSK 0x04 /* Autoswitch mask: TVP5150A / TVP5150AM */
991
992 /* Reserved 05h */
993 diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
994 index 07fa08be9e99..d54ebe7e0215 100644
995 --- a/drivers/media/usb/dvb-usb/pctv452e.c
996 +++ b/drivers/media/usb/dvb-usb/pctv452e.c
997 @@ -97,14 +97,13 @@ struct pctv452e_state {
998 u8 c; /* transaction counter, wraps around... */
999 u8 initialized; /* set to 1 if 0x15 has been sent */
1000 u16 last_rc_key;
1001 -
1002 - unsigned char data[80];
1003 };
1004
1005 static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
1006 unsigned int write_len, unsigned int read_len)
1007 {
1008 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
1009 + u8 *buf;
1010 u8 id;
1011 unsigned int rlen;
1012 int ret;
1013 @@ -114,36 +113,39 @@ static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
1014 return -EIO;
1015 }
1016
1017 - mutex_lock(&state->ca_mutex);
1018 + buf = kmalloc(64, GFP_KERNEL);
1019 + if (!buf)
1020 + return -ENOMEM;
1021 +
1022 id = state->c++;
1023
1024 - state->data[0] = SYNC_BYTE_OUT;
1025 - state->data[1] = id;
1026 - state->data[2] = cmd;
1027 - state->data[3] = write_len;
1028 + buf[0] = SYNC_BYTE_OUT;
1029 + buf[1] = id;
1030 + buf[2] = cmd;
1031 + buf[3] = write_len;
1032
1033 - memcpy(state->data + 4, data, write_len);
1034 + memcpy(buf + 4, data, write_len);
1035
1036 rlen = (read_len > 0) ? 64 : 0;
1037 - ret = dvb_usb_generic_rw(d, state->data, 4 + write_len,
1038 - state->data, rlen, /* delay_ms */ 0);
1039 + ret = dvb_usb_generic_rw(d, buf, 4 + write_len,
1040 + buf, rlen, /* delay_ms */ 0);
1041 if (0 != ret)
1042 goto failed;
1043
1044 ret = -EIO;
1045 - if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
1046 + if (SYNC_BYTE_IN != buf[0] || id != buf[1])
1047 goto failed;
1048
1049 - memcpy(data, state->data + 4, read_len);
1050 + memcpy(data, buf + 4, read_len);
1051
1052 - mutex_unlock(&state->ca_mutex);
1053 + kfree(buf);
1054 return 0;
1055
1056 failed:
1057 err("CI error %d; %02X %02X %02X -> %*ph.",
1058 - ret, SYNC_BYTE_OUT, id, cmd, 3, state->data);
1059 + ret, SYNC_BYTE_OUT, id, cmd, 3, buf);
1060
1061 - mutex_unlock(&state->ca_mutex);
1062 + kfree(buf);
1063 return ret;
1064 }
1065
1066 @@ -410,53 +412,57 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
1067 u8 *rcv_buf, u8 rcv_len)
1068 {
1069 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
1070 + u8 *buf;
1071 u8 id;
1072 int ret;
1073
1074 - mutex_lock(&state->ca_mutex);
1075 + buf = kmalloc(64, GFP_KERNEL);
1076 + if (!buf)
1077 + return -ENOMEM;
1078 +
1079 id = state->c++;
1080
1081 ret = -EINVAL;
1082 if (snd_len > 64 - 7 || rcv_len > 64 - 7)
1083 goto failed;
1084
1085 - state->data[0] = SYNC_BYTE_OUT;
1086 - state->data[1] = id;
1087 - state->data[2] = PCTV_CMD_I2C;
1088 - state->data[3] = snd_len + 3;
1089 - state->data[4] = addr << 1;
1090 - state->data[5] = snd_len;
1091 - state->data[6] = rcv_len;
1092 + buf[0] = SYNC_BYTE_OUT;
1093 + buf[1] = id;
1094 + buf[2] = PCTV_CMD_I2C;
1095 + buf[3] = snd_len + 3;
1096 + buf[4] = addr << 1;
1097 + buf[5] = snd_len;
1098 + buf[6] = rcv_len;
1099
1100 - memcpy(state->data + 7, snd_buf, snd_len);
1101 + memcpy(buf + 7, snd_buf, snd_len);
1102
1103 - ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len,
1104 - state->data, /* rcv_len */ 64,
1105 + ret = dvb_usb_generic_rw(d, buf, 7 + snd_len,
1106 + buf, /* rcv_len */ 64,
1107 /* delay_ms */ 0);
1108 if (ret < 0)
1109 goto failed;
1110
1111 /* TT USB protocol error. */
1112 ret = -EIO;
1113 - if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
1114 + if (SYNC_BYTE_IN != buf[0] || id != buf[1])
1115 goto failed;
1116
1117 /* I2C device didn't respond as expected. */
1118 ret = -EREMOTEIO;
1119 - if (state->data[5] < snd_len || state->data[6] < rcv_len)
1120 + if (buf[5] < snd_len || buf[6] < rcv_len)
1121 goto failed;
1122
1123 - memcpy(rcv_buf, state->data + 7, rcv_len);
1124 - mutex_unlock(&state->ca_mutex);
1125 + memcpy(rcv_buf, buf + 7, rcv_len);
1126
1127 + kfree(buf);
1128 return rcv_len;
1129
1130 failed:
1131 err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph",
1132 ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len,
1133 - 7, state->data);
1134 + 7, buf);
1135
1136 - mutex_unlock(&state->ca_mutex);
1137 + kfree(buf);
1138 return ret;
1139 }
1140
1141 @@ -505,7 +511,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
1142 static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
1143 {
1144 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
1145 - u8 *rx;
1146 + u8 *b0, *rx;
1147 int ret;
1148
1149 info("%s: %d\n", __func__, i);
1150 @@ -516,11 +522,12 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
1151 if (state->initialized)
1152 return 0;
1153
1154 - rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL);
1155 - if (!rx)
1156 + b0 = kmalloc(5 + PCTV_ANSWER_LEN, GFP_KERNEL);
1157 + if (!b0)
1158 return -ENOMEM;
1159
1160 - mutex_lock(&state->ca_mutex);
1161 + rx = b0 + 5;
1162 +
1163 /* hmm where shoud this should go? */
1164 ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
1165 if (ret != 0)
1166 @@ -528,66 +535,70 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
1167 __func__, ret);
1168
1169 /* this is a one-time initialization, dont know where to put */
1170 - state->data[0] = 0xaa;
1171 - state->data[1] = state->c++;
1172 - state->data[2] = PCTV_CMD_RESET;
1173 - state->data[3] = 1;
1174 - state->data[4] = 0;
1175 + b0[0] = 0xaa;
1176 + b0[1] = state->c++;
1177 + b0[2] = PCTV_CMD_RESET;
1178 + b0[3] = 1;
1179 + b0[4] = 0;
1180 /* reset board */
1181 - ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
1182 + ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
1183 if (ret)
1184 goto ret;
1185
1186 - state->data[1] = state->c++;
1187 - state->data[4] = 1;
1188 + b0[1] = state->c++;
1189 + b0[4] = 1;
1190 /* reset board (again?) */
1191 - ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
1192 + ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
1193 if (ret)
1194 goto ret;
1195
1196 state->initialized = 1;
1197
1198 ret:
1199 - mutex_unlock(&state->ca_mutex);
1200 - kfree(rx);
1201 + kfree(b0);
1202 return ret;
1203 }
1204
1205 static int pctv452e_rc_query(struct dvb_usb_device *d)
1206 {
1207 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
1208 + u8 *b, *rx;
1209 int ret, i;
1210 u8 id;
1211
1212 - mutex_lock(&state->ca_mutex);
1213 + b = kmalloc(CMD_BUFFER_SIZE + PCTV_ANSWER_LEN, GFP_KERNEL);
1214 + if (!b)
1215 + return -ENOMEM;
1216 +
1217 + rx = b + CMD_BUFFER_SIZE;
1218 +
1219 id = state->c++;
1220
1221 /* prepare command header */
1222 - state->data[0] = SYNC_BYTE_OUT;
1223 - state->data[1] = id;
1224 - state->data[2] = PCTV_CMD_IR;
1225 - state->data[3] = 0;
1226 + b[0] = SYNC_BYTE_OUT;
1227 + b[1] = id;
1228 + b[2] = PCTV_CMD_IR;
1229 + b[3] = 0;
1230
1231 /* send ir request */
1232 - ret = dvb_usb_generic_rw(d, state->data, 4,
1233 - state->data, PCTV_ANSWER_LEN, 0);
1234 + ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0);
1235 if (ret != 0)
1236 goto ret;
1237
1238 if (debug > 3) {
1239 - info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data);
1240 - for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++)
1241 - info(" %02x", state->data[i + 3]);
1242 + info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx);
1243 + for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++)
1244 + info(" %02x", rx[i+3]);
1245
1246 info("\n");
1247 }
1248
1249 - if ((state->data[3] == 9) && (state->data[12] & 0x01)) {
1250 + if ((rx[3] == 9) && (rx[12] & 0x01)) {
1251 /* got a "press" event */
1252 - state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]);
1253 + state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]);
1254 if (debug > 2)
1255 info("%s: cmd=0x%02x sys=0x%02x\n",
1256 - __func__, state->data[6], state->data[7]);
1257 + __func__, rx[6], rx[7]);
1258
1259 rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0);
1260 } else if (state->last_rc_key) {
1261 @@ -595,7 +606,7 @@ static int pctv452e_rc_query(struct dvb_usb_device *d)
1262 state->last_rc_key = 0;
1263 }
1264 ret:
1265 - mutex_unlock(&state->ca_mutex);
1266 + kfree(b);
1267 return ret;
1268 }
1269
1270 diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
1271 index 7be393c96b1a..cf7c18947189 100644
1272 --- a/drivers/net/can/c_can/c_can_pci.c
1273 +++ b/drivers/net/can/c_can/c_can_pci.c
1274 @@ -161,6 +161,7 @@ static int c_can_pci_probe(struct pci_dev *pdev,
1275
1276 dev->irq = pdev->irq;
1277 priv->base = addr;
1278 + priv->device = &pdev->dev;
1279
1280 if (!c_can_pci_data->freq) {
1281 dev_err(&pdev->dev, "no clock frequency defined\n");
1282 diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
1283 index 680d1ff07a55..6749b1829469 100644
1284 --- a/drivers/net/can/ti_hecc.c
1285 +++ b/drivers/net/can/ti_hecc.c
1286 @@ -948,7 +948,12 @@ static int ti_hecc_probe(struct platform_device *pdev)
1287 netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
1288 HECC_DEF_NAPI_WEIGHT);
1289
1290 - clk_enable(priv->clk);
1291 + err = clk_prepare_enable(priv->clk);
1292 + if (err) {
1293 + dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
1294 + goto probe_exit_clk;
1295 + }
1296 +
1297 err = register_candev(ndev);
1298 if (err) {
1299 dev_err(&pdev->dev, "register_candev() failed\n");
1300 @@ -981,7 +986,7 @@ static int ti_hecc_remove(struct platform_device *pdev)
1301 struct ti_hecc_priv *priv = netdev_priv(ndev);
1302
1303 unregister_candev(ndev);
1304 - clk_disable(priv->clk);
1305 + clk_disable_unprepare(priv->clk);
1306 clk_put(priv->clk);
1307 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1308 iounmap(priv->base);
1309 @@ -1006,7 +1011,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
1310 hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
1311 priv->can.state = CAN_STATE_SLEEPING;
1312
1313 - clk_disable(priv->clk);
1314 + clk_disable_unprepare(priv->clk);
1315
1316 return 0;
1317 }
1318 @@ -1015,8 +1020,11 @@ static int ti_hecc_resume(struct platform_device *pdev)
1319 {
1320 struct net_device *dev = platform_get_drvdata(pdev);
1321 struct ti_hecc_priv *priv = netdev_priv(dev);
1322 + int err;
1323
1324 - clk_enable(priv->clk);
1325 + err = clk_prepare_enable(priv->clk);
1326 + if (err)
1327 + return err;
1328
1329 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
1330 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1331 diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
1332 index 71bbeb9321ba..079015385fd8 100644
1333 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c
1334 +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
1335 @@ -1092,6 +1092,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
1336 enum pin_config_param param = pinconf_to_config_param(*config);
1337 void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
1338 void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
1339 + void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
1340 unsigned long flags;
1341 u32 conf, pull, val, debounce;
1342 u16 arg = 0;
1343 @@ -1128,7 +1129,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
1344 return -EINVAL;
1345
1346 raw_spin_lock_irqsave(&vg->lock, flags);
1347 - debounce = readl(byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG));
1348 + debounce = readl(db_reg);
1349 raw_spin_unlock_irqrestore(&vg->lock, flags);
1350
1351 switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
1352 @@ -1176,6 +1177,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
1353 unsigned int param, arg;
1354 void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
1355 void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
1356 + void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
1357 unsigned long flags;
1358 u32 conf, val, debounce;
1359 int i, ret = 0;
1360 @@ -1238,36 +1240,40 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
1361
1362 break;
1363 case PIN_CONFIG_INPUT_DEBOUNCE:
1364 - debounce = readl(byt_gpio_reg(vg, offset,
1365 - BYT_DEBOUNCE_REG));
1366 - conf &= ~BYT_DEBOUNCE_PULSE_MASK;
1367 + debounce = readl(db_reg);
1368 + debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
1369
1370 switch (arg) {
1371 + case 0:
1372 + conf &= BYT_DEBOUNCE_EN;
1373 + break;
1374 case 375:
1375 - conf |= BYT_DEBOUNCE_PULSE_375US;
1376 + debounce |= BYT_DEBOUNCE_PULSE_375US;
1377 break;
1378 case 750:
1379 - conf |= BYT_DEBOUNCE_PULSE_750US;
1380 + debounce |= BYT_DEBOUNCE_PULSE_750US;
1381 break;
1382 case 1500:
1383 - conf |= BYT_DEBOUNCE_PULSE_1500US;
1384 + debounce |= BYT_DEBOUNCE_PULSE_1500US;
1385 break;
1386 case 3000:
1387 - conf |= BYT_DEBOUNCE_PULSE_3MS;
1388 + debounce |= BYT_DEBOUNCE_PULSE_3MS;
1389 break;
1390 case 6000:
1391 - conf |= BYT_DEBOUNCE_PULSE_6MS;
1392 + debounce |= BYT_DEBOUNCE_PULSE_6MS;
1393 break;
1394 case 12000:
1395 - conf |= BYT_DEBOUNCE_PULSE_12MS;
1396 + debounce |= BYT_DEBOUNCE_PULSE_12MS;
1397 break;
1398 case 24000:
1399 - conf |= BYT_DEBOUNCE_PULSE_24MS;
1400 + debounce |= BYT_DEBOUNCE_PULSE_24MS;
1401 break;
1402 default:
1403 ret = -EINVAL;
1404 }
1405
1406 + if (!ret)
1407 + writel(debounce, db_reg);
1408 break;
1409 default:
1410 ret = -ENOTSUPP;
1411 diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
1412 index 59cb7a6fc5be..901b356b09d7 100644
1413 --- a/drivers/pinctrl/intel/pinctrl-broxton.c
1414 +++ b/drivers/pinctrl/intel/pinctrl-broxton.c
1415 @@ -19,7 +19,7 @@
1416
1417 #define BXT_PAD_OWN 0x020
1418 #define BXT_HOSTSW_OWN 0x080
1419 -#define BXT_PADCFGLOCK 0x090
1420 +#define BXT_PADCFGLOCK 0x060
1421 #define BXT_GPI_IE 0x110
1422
1423 #define BXT_COMMUNITY(s, e) \
1424 diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
1425 index aa8bd9794683..96686336e3a3 100644
1426 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
1427 +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
1428 @@ -561,7 +561,7 @@ static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1429 0, 0, 0, 0};
1430 static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39,
1431 41, 42, 45};
1432 -static const int ether_rmii_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
1433 +static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1};
1434 static const unsigned i2c0_pins[] = {63, 64};
1435 static const int i2c0_muxvals[] = {0, 0};
1436 static const unsigned i2c1_pins[] = {65, 66};
1437 diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
1438 index 1fc0de870ff8..361770568ad0 100644
1439 --- a/drivers/platform/x86/intel_mid_powerbtn.c
1440 +++ b/drivers/platform/x86/intel_mid_powerbtn.c
1441 @@ -77,7 +77,7 @@ static int mfld_pb_probe(struct platform_device *pdev)
1442
1443 input_set_capability(input, EV_KEY, KEY_POWER);
1444
1445 - error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
1446 + error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_ONESHOT,
1447 DRIVER_NAME, input);
1448 if (error) {
1449 dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
1450 diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
1451 index f89245b8ba8e..68a113594808 100644
1452 --- a/drivers/video/fbdev/core/fbcmap.c
1453 +++ b/drivers/video/fbdev/core/fbcmap.c
1454 @@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap)
1455
1456 int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
1457 {
1458 - int tooff = 0, fromoff = 0;
1459 - int size;
1460 + unsigned int tooff = 0, fromoff = 0;
1461 + size_t size;
1462
1463 if (to->start > from->start)
1464 fromoff = to->start - from->start;
1465 else
1466 tooff = from->start - to->start;
1467 - size = to->len - tooff;
1468 - if (size > (int) (from->len - fromoff))
1469 - size = from->len - fromoff;
1470 - if (size <= 0)
1471 + if (fromoff >= from->len || tooff >= to->len)
1472 + return -EINVAL;
1473 +
1474 + size = min_t(size_t, to->len - tooff, from->len - fromoff);
1475 + if (size == 0)
1476 return -EINVAL;
1477 size *= sizeof(u16);
1478
1479 @@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
1480
1481 int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
1482 {
1483 - int tooff = 0, fromoff = 0;
1484 - int size;
1485 + unsigned int tooff = 0, fromoff = 0;
1486 + size_t size;
1487
1488 if (to->start > from->start)
1489 fromoff = to->start - from->start;
1490 else
1491 tooff = from->start - to->start;
1492 - size = to->len - tooff;
1493 - if (size > (int) (from->len - fromoff))
1494 - size = from->len - fromoff;
1495 - if (size <= 0)
1496 + if (fromoff >= from->len || tooff >= to->len)
1497 + return -EINVAL;
1498 +
1499 + size = min_t(size_t, to->len - tooff, from->len - fromoff);
1500 + if (size == 0)
1501 return -EINVAL;
1502 size *= sizeof(u16);
1503
1504 diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
1505 index 48bfea91dbca..50840984fbfa 100644
1506 --- a/drivers/virtio/virtio_mmio.c
1507 +++ b/drivers/virtio/virtio_mmio.c
1508 @@ -59,6 +59,7 @@
1509 #define pr_fmt(fmt) "virtio-mmio: " fmt
1510
1511 #include <linux/acpi.h>
1512 +#include <linux/dma-mapping.h>
1513 #include <linux/highmem.h>
1514 #include <linux/interrupt.h>
1515 #include <linux/io.h>
1516 @@ -497,6 +498,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
1517 struct virtio_mmio_device *vm_dev;
1518 struct resource *mem;
1519 unsigned long magic;
1520 + int rc;
1521
1522 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1523 if (!mem)
1524 @@ -545,9 +547,25 @@ static int virtio_mmio_probe(struct platform_device *pdev)
1525 }
1526 vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
1527
1528 - if (vm_dev->version == 1)
1529 + if (vm_dev->version == 1) {
1530 writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
1531
1532 + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
1533 + /*
1534 + * In the legacy case, ensure our coherently-allocated virtio
1535 + * ring will be at an address expressable as a 32-bit PFN.
1536 + */
1537 + if (!rc)
1538 + dma_set_coherent_mask(&pdev->dev,
1539 + DMA_BIT_MASK(32 + PAGE_SHIFT));
1540 + } else {
1541 + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1542 + }
1543 + if (rc)
1544 + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1545 + if (rc)
1546 + dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
1547 +
1548 platform_set_drvdata(pdev, vm_dev);
1549
1550 return register_virtio_device(&vm_dev->vdev);
1551 diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
1552 index 489bfc61cf30..f1360487a594 100644
1553 --- a/drivers/virtio/virtio_ring.c
1554 +++ b/drivers/virtio/virtio_ring.c
1555 @@ -159,6 +159,13 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
1556 if (xen_domain())
1557 return true;
1558
1559 + /*
1560 + * On ARM-based machines, the DMA ops will do the right thing,
1561 + * so always use them with legacy devices.
1562 + */
1563 + if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
1564 + return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
1565 +
1566 return false;
1567 }
1568
1569 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
1570 index 8e3a5a266917..be4da91d880f 100644
1571 --- a/fs/btrfs/inode.c
1572 +++ b/fs/btrfs/inode.c
1573 @@ -3819,10 +3819,7 @@ static int btrfs_read_locked_inode(struct inode *inode)
1574 break;
1575 case S_IFDIR:
1576 inode->i_fop = &btrfs_dir_file_operations;
1577 - if (root == root->fs_info->tree_root)
1578 - inode->i_op = &btrfs_dir_ro_inode_operations;
1579 - else
1580 - inode->i_op = &btrfs_dir_inode_operations;
1581 + inode->i_op = &btrfs_dir_inode_operations;
1582 break;
1583 case S_IFLNK:
1584 inode->i_op = &btrfs_symlink_inode_operations;
1585 @@ -5682,6 +5679,7 @@ static struct inode *new_simple_dir(struct super_block *s,
1586
1587 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
1588 inode->i_op = &btrfs_dir_ro_inode_operations;
1589 + inode->i_opflags &= ~IOP_XATTR;
1590 inode->i_fop = &simple_dir_operations;
1591 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
1592 inode->i_mtime = current_time(inode);
1593 @@ -10587,8 +10585,6 @@ static const struct inode_operations btrfs_dir_inode_operations = {
1594 static const struct inode_operations btrfs_dir_ro_inode_operations = {
1595 .lookup = btrfs_lookup,
1596 .permission = btrfs_permission,
1597 - .get_acl = btrfs_get_acl,
1598 - .set_acl = btrfs_set_acl,
1599 .update_time = btrfs_update_time,
1600 };
1601
1602 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
1603 index 241da19b7da4..78ff8b63d5f7 100644
1604 --- a/fs/nfs/nfs4proc.c
1605 +++ b/fs/nfs/nfs4proc.c
1606 @@ -2678,7 +2678,8 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
1607 sattr->ia_valid |= ATTR_MTIME;
1608
1609 /* Except for MODE, it seems harmless to set these twice. */
1610 - if ((attrset[1] & FATTR4_WORD1_MODE))
1611 + if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE &&
1612 + attrset[1] & FATTR4_WORD1_MODE)
1613 sattr->ia_valid &= ~ATTR_MODE;
1614
1615 if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
1616 @@ -8371,6 +8372,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
1617 goto out;
1618 }
1619
1620 + nfs4_sequence_free_slot(&lgp->res.seq_res);
1621 err = nfs4_handle_exception(server, nfs4err, exception);
1622 if (!status) {
1623 if (exception->retry)
1624 diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
1625 index 45e50ea90769..b669b123287b 100644
1626 --- a/fs/xfs/xfs_qm.c
1627 +++ b/fs/xfs/xfs_qm.c
1628 @@ -1177,7 +1177,8 @@ xfs_qm_dqusage_adjust(
1629 * the case in all other instances. It's OK that we do this because
1630 * quotacheck is done only at mount time.
1631 */
1632 - error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
1633 + error = xfs_iget(mp, NULL, ino, XFS_IGET_DONTCACHE, XFS_ILOCK_EXCL,
1634 + &ip);
1635 if (error) {
1636 *res = BULKSTAT_RV_NOTHING;
1637 return error;
1638 diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
1639 index 01033fadea47..c1784c0b4f35 100644
1640 --- a/include/linux/memory_hotplug.h
1641 +++ b/include/linux/memory_hotplug.h
1642 @@ -284,7 +284,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
1643 unsigned long map_offset);
1644 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
1645 unsigned long pnum);
1646 -extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
1647 - enum zone_type target);
1648 +extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
1649 + enum zone_type target, int *zone_shift);
1650
1651 #endif /* __LINUX_MEMORY_HOTPLUG_H */
1652 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
1653 index 0f088f3a2fed..f99c993dd500 100644
1654 --- a/include/linux/mmzone.h
1655 +++ b/include/linux/mmzone.h
1656 @@ -972,12 +972,16 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
1657 * @zonelist - The zonelist to search for a suitable zone
1658 * @highest_zoneidx - The zone index of the highest zone to return
1659 * @nodes - An optional nodemask to filter the zonelist with
1660 - * @zone - The first suitable zone found is returned via this parameter
1661 + * @return - Zoneref pointer for the first suitable zone found (see below)
1662 *
1663 * This function returns the first zone at or below a given zone index that is
1664 * within the allowed nodemask. The zoneref returned is a cursor that can be
1665 * used to iterate the zonelist with next_zones_zonelist by advancing it by
1666 * one before calling.
1667 + *
1668 + * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
1669 + * never NULL). This may happen either genuinely, or due to a concurrent
1670 + * nodemask update caused by a cpuset modification.
1671 */
1672 static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
1673 enum zone_type highest_zoneidx,
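
The rewritten kernel-doc records a contract that the mm/page_alloc.c hunks later in this patch depend on: first_zones_zonelist() always returns a valid zoneref, and an empty result is signalled by zoneref->zone being NULL. A hypothetical caller under that contract:

#include <linux/mmzone.h>

static struct zone *
pick_first_zone(struct zonelist *zonelist, enum zone_type highest_zoneidx,
                nodemask_t *nodes)
{
        struct zoneref *z;

        z = first_zones_zonelist(zonelist, highest_zoneidx, nodes);

        /* z itself is never NULL; emptiness means z->zone == NULL */
        return z->zone;
}
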
1674 diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
1675 index 9094faf0699d..039e76e91896 100644
1676 --- a/include/linux/nfs4.h
1677 +++ b/include/linux/nfs4.h
1678 @@ -282,7 +282,7 @@ enum nfsstat4 {
1679
1680 static inline bool seqid_mutating_err(u32 err)
1681 {
1682 - /* rfc 3530 section 8.1.5: */
1683 + /* See RFC 7530, section 9.1.7 */
1684 switch (err) {
1685 case NFS4ERR_STALE_CLIENTID:
1686 case NFS4ERR_STALE_STATEID:
1687 @@ -291,6 +291,7 @@ static inline bool seqid_mutating_err(u32 err)
1688 case NFS4ERR_BADXDR:
1689 case NFS4ERR_RESOURCE:
1690 case NFS4ERR_NOFILEHANDLE:
1691 + case NFS4ERR_MOVED:
1692 return false;
1693 };
1694 return true;
1695 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
1696 index 85cc819676e8..333ad11b3dd9 100644
1697 --- a/include/linux/sunrpc/clnt.h
1698 +++ b/include/linux/sunrpc/clnt.h
1699 @@ -216,5 +216,6 @@ void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
1700 void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
1701 bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
1702 const struct sockaddr *sap);
1703 +void rpc_cleanup_clids(void);
1704 #endif /* __KERNEL__ */
1705 #endif /* _LINUX_SUNRPC_CLNT_H */
1706 diff --git a/include/uapi/rdma/cxgb3-abi.h b/include/uapi/rdma/cxgb3-abi.h
1707 index 48a19bda071b..d24eee12128f 100644
1708 --- a/include/uapi/rdma/cxgb3-abi.h
1709 +++ b/include/uapi/rdma/cxgb3-abi.h
1710 @@ -30,7 +30,7 @@
1711 * SOFTWARE.
1712 */
1713 #ifndef CXGB3_ABI_USER_H
1714 -#define CXBG3_ABI_USER_H
1715 +#define CXGB3_ABI_USER_H
1716
1717 #include <linux/types.h>
1718
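The cxgb3-abi.h change is a one-letter fix with real effect: the tested macro (CXGB3_ABI_USER_H) and the defined macro (CXBG3_ABI_USER_H) did not match, so the #ifndef condition never became false and the header had no multiple-inclusion protection at all. For reference, the correct guard shape:

#ifndef CXGB3_ABI_USER_H
#define CXGB3_ABI_USER_H        /* must match the #ifndef exactly */

/* ... header body ... */

#endif /* CXGB3_ABI_USER_H */
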
1719 diff --git a/kernel/events/core.c b/kernel/events/core.c
1720 index 02c8421f8c01..e5a8839e7076 100644
1721 --- a/kernel/events/core.c
1722 +++ b/kernel/events/core.c
1723 @@ -9503,6 +9503,37 @@ static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
1724 return 0;
1725 }
1726
1727 +/*
1728 + * Variation on perf_event_ctx_lock_nested(), except we take two context
1729 + * mutexes.
1730 + */
1731 +static struct perf_event_context *
1732 +__perf_event_ctx_lock_double(struct perf_event *group_leader,
1733 + struct perf_event_context *ctx)
1734 +{
1735 + struct perf_event_context *gctx;
1736 +
1737 +again:
1738 + rcu_read_lock();
1739 + gctx = READ_ONCE(group_leader->ctx);
1740 + if (!atomic_inc_not_zero(&gctx->refcount)) {
1741 + rcu_read_unlock();
1742 + goto again;
1743 + }
1744 + rcu_read_unlock();
1745 +
1746 + mutex_lock_double(&gctx->mutex, &ctx->mutex);
1747 +
1748 + if (group_leader->ctx != gctx) {
1749 + mutex_unlock(&ctx->mutex);
1750 + mutex_unlock(&gctx->mutex);
1751 + put_ctx(gctx);
1752 + goto again;
1753 + }
1754 +
1755 + return gctx;
1756 +}
1757 +
1758 /**
1759 * sys_perf_event_open - open a performance event, associate it to a task/cpu
1760 *
1761 @@ -9746,12 +9777,31 @@ SYSCALL_DEFINE5(perf_event_open,
1762 }
1763
1764 if (move_group) {
1765 - gctx = group_leader->ctx;
1766 - mutex_lock_double(&gctx->mutex, &ctx->mutex);
1767 + gctx = __perf_event_ctx_lock_double(group_leader, ctx);
1768 +
1769 if (gctx->task == TASK_TOMBSTONE) {
1770 err = -ESRCH;
1771 goto err_locked;
1772 }
1773 +
1774 + /*
1775 + * Check if we raced against another sys_perf_event_open() call
1776 + * moving the software group underneath us.
1777 + */
1778 + if (!(group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
1779 + /*
1780 + * If someone moved the group out from under us, check
1781 + * whether this new event wound up on the same ctx; if so,
1782 + * it's the regular !move_group case, otherwise fail.
1783 + */
1784 + if (gctx != ctx) {
1785 + err = -EINVAL;
1786 + goto err_locked;
1787 + } else {
1788 + perf_event_ctx_unlock(group_leader, gctx);
1789 + move_group = 0;
1790 + }
1791 + }
1792 } else {
1793 mutex_lock(&ctx->mutex);
1794 }
1795 @@ -9853,7 +9903,7 @@ SYSCALL_DEFINE5(perf_event_open,
1796 perf_unpin_context(ctx);
1797
1798 if (move_group)
1799 - mutex_unlock(&gctx->mutex);
1800 + perf_event_ctx_unlock(group_leader, gctx);
1801 mutex_unlock(&ctx->mutex);
1802
1803 if (task) {
1804 @@ -9879,7 +9929,7 @@ SYSCALL_DEFINE5(perf_event_open,
1805
1806 err_locked:
1807 if (move_group)
1808 - mutex_unlock(&gctx->mutex);
1809 + perf_event_ctx_unlock(group_leader, gctx);
1810 mutex_unlock(&ctx->mutex);
1811 /* err_file: */
1812 fput(event_file);
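
__perf_event_ctx_lock_double() above is an instance of a common lockless-lookup idiom: sample a pointer under RCU, pin it with a conditional refcount, take the sleeping lock, then re-check that the pointer is still current and retry if it was republished in the meantime. A generic single-lock sketch of the loop; the struct names and put_ctx() are illustrative, not a specific kernel API:

static struct ctx *lock_and_pin(struct obj *obj)
{
        struct ctx *ctx;

again:
        rcu_read_lock();
        ctx = READ_ONCE(obj->ctx);
        if (!atomic_inc_not_zero(&ctx->refcount)) {
                /* raced with the final put; resample */
                rcu_read_unlock();
                goto again;
        }
        rcu_read_unlock();

        mutex_lock(&ctx->mutex);
        if (obj->ctx != ctx) {
                /* republished while we slept on the mutex */
                mutex_unlock(&ctx->mutex);
                put_ctx(ctx);
                goto again;
        }

        /* ctx is pinned, locked, and still current */
        return ctx;
}
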
1813 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
1814 index 706309f9ed84..c1095cdc0fe2 100644
1815 --- a/kernel/sysctl.c
1816 +++ b/kernel/sysctl.c
1817 @@ -2487,6 +2487,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
1818 break;
1819 if (neg)
1820 continue;
1821 + val = convmul * val / convdiv;
1822 if ((min && val < *min) || (max && val > *max))
1823 continue;
1824 *i = val;
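
The one-line sysctl fix is about ordering: __do_proc_doulongvec_minmax() stores values after scaling by convmul/convdiv, but previously compared the unscaled value against *min and *max, so limits expressed in converted units were checked against raw input. A standalone sketch of the corrected order; the names are illustrative:

#include <errno.h>

static int store_scaled_checked(unsigned long val,
                                unsigned long convmul, unsigned long convdiv,
                                const unsigned long *min,
                                const unsigned long *max,
                                unsigned long *out)
{
        val = convmul * val / convdiv;  /* convert first ... */
        if ((min && val < *min) || (max && val > *max))
                return -ERANGE;         /* ... then range-check */
        *out = val;
        return 0;
}
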
1825 diff --git a/kernel/ucount.c b/kernel/ucount.c
1826 index 9d20d5dd298a..4bbd38ec3788 100644
1827 --- a/kernel/ucount.c
1828 +++ b/kernel/ucount.c
1829 @@ -128,10 +128,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
1830 struct hlist_head *hashent = ucounts_hashentry(ns, uid);
1831 struct ucounts *ucounts, *new;
1832
1833 - spin_lock(&ucounts_lock);
1834 + spin_lock_irq(&ucounts_lock);
1835 ucounts = find_ucounts(ns, uid, hashent);
1836 if (!ucounts) {
1837 - spin_unlock(&ucounts_lock);
1838 + spin_unlock_irq(&ucounts_lock);
1839
1840 new = kzalloc(sizeof(*new), GFP_KERNEL);
1841 if (!new)
1842 @@ -141,7 +141,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
1843 new->uid = uid;
1844 atomic_set(&new->count, 0);
1845
1846 - spin_lock(&ucounts_lock);
1847 + spin_lock_irq(&ucounts_lock);
1848 ucounts = find_ucounts(ns, uid, hashent);
1849 if (ucounts) {
1850 kfree(new);
1851 @@ -152,16 +152,18 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
1852 }
1853 if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
1854 ucounts = NULL;
1855 - spin_unlock(&ucounts_lock);
1856 + spin_unlock_irq(&ucounts_lock);
1857 return ucounts;
1858 }
1859
1860 static void put_ucounts(struct ucounts *ucounts)
1861 {
1862 + unsigned long flags;
1863 +
1864 if (atomic_dec_and_test(&ucounts->count)) {
1865 - spin_lock(&ucounts_lock);
1866 + spin_lock_irqsave(&ucounts_lock, flags);
1867 hlist_del_init(&ucounts->node);
1868 - spin_unlock(&ucounts_lock);
1869 + spin_unlock_irqrestore(&ucounts_lock, flags);
1870
1871 kfree(ucounts);
1872 }
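
The ucounts hunks convert every taker of ucounts_lock to an IRQ-safe variant, since the count can now be dropped from contexts with interrupts disabled, where a plain spin_lock() against an interrupted holder would deadlock. The two idioms in isolation; a kernel-style sketch, not standalone:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* caller known to run with interrupts enabled */
static void from_process_context(void)
{
        spin_lock_irq(&example_lock);
        /* ... critical section ... */
        spin_unlock_irq(&example_lock);
}

/* prior interrupt state unknown: save and restore it */
static void from_any_context(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        /* ... critical section ... */
        spin_unlock_irqrestore(&example_lock, flags);
}
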
1873 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
1874 index 8ca40b70beae..917555cf6be0 100644
1875 --- a/mm/huge_memory.c
1876 +++ b/mm/huge_memory.c
1877 @@ -772,6 +772,12 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
1878
1879 assert_spin_locked(pmd_lockptr(mm, pmd));
1880
1881 + /*
1882 + * When we COW a devmap PMD entry, we split it into PTEs, so we should
1883 + * not be in this function with `flags & FOLL_COW` set.
1884 + */
1885 + WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
1886 +
1887 if (flags & FOLL_WRITE && !pmd_write(*pmd))
1888 return NULL;
1889
1890 @@ -1118,6 +1124,16 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd)
1891 return ret;
1892 }
1893
1894 +/*
1895 + * FOLL_FORCE can write even to unwritable PMDs, but only
1896 + * after we've gone through a COW cycle and they are dirty.
1897 + */
1898 +static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
1899 +{
1900 + return pmd_write(pmd) ||
1901 + ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
1902 +}
1903 +
1904 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1905 unsigned long addr,
1906 pmd_t *pmd,
1907 @@ -1128,7 +1144,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1908
1909 assert_spin_locked(pmd_lockptr(mm, pmd));
1910
1911 - if (flags & FOLL_WRITE && !pmd_write(*pmd))
1912 + if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
1913 goto out;
1914
1915 /* Avoid dumping huge zero page */
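
can_follow_write_pmd() is the transparent-hugepage twin of the PTE-level check introduced by the original Dirty-COW fix in mm/gup.c: a FOLL_FORCE write through an unwritable entry is honoured only after a COW cycle has marked the entry dirty. For comparison, the PTE-level helper has the same shape:

/* PTE-level counterpart, as in mm/gup.c after the equivalent fix */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
        return pte_write(pte) ||
                ((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
                 pte_dirty(pte));
}
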
1916 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
1917 index d536a9daa511..4c6ade54d833 100644
1918 --- a/mm/memcontrol.c
1919 +++ b/mm/memcontrol.c
1920 @@ -4360,9 +4360,9 @@ static int mem_cgroup_do_precharge(unsigned long count)
1921 return ret;
1922 }
1923
1924 - /* Try charges one by one with reclaim */
1925 + /* Try charges one by one with reclaim, but do not retry */
1926 while (count--) {
1927 - ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
1928 + ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
1929 if (ret)
1930 return ret;
1931 mc.precharge++;
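
The mem_cgroup_do_precharge() change is a one-character operator fix with opposite semantics: GFP_KERNEL & ~__GFP_NORETRY clears a bit that GFP_KERNEL never sets, a silent no-op, while GFP_KERNEL | __GFP_NORETRY actually adds the do-not-retry hint the new comment promises. In miniature, with illustrative flag values rather than the kernel's:

#define FLAG_BASE       0x00c0u         /* illustrative only */
#define FLAG_NORETRY    0x1000u         /* illustrative only */

unsigned int noop   = FLAG_BASE & ~FLAG_NORETRY; /* bit was never set: no-op */
unsigned int wanted = FLAG_BASE |  FLAG_NORETRY; /* bit is now set */
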
1932 diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
1933 index cad4b9125695..c3a8141ac788 100644
1934 --- a/mm/memory_hotplug.c
1935 +++ b/mm/memory_hotplug.c
1936 @@ -1033,36 +1033,39 @@ static void node_states_set_node(int node, struct memory_notify *arg)
1937 node_set_state(node, N_MEMORY);
1938 }
1939
1940 -int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
1941 - enum zone_type target)
1942 +bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
1943 + enum zone_type target, int *zone_shift)
1944 {
1945 struct zone *zone = page_zone(pfn_to_page(pfn));
1946 enum zone_type idx = zone_idx(zone);
1947 int i;
1948
1949 + *zone_shift = 0;
1950 +
1951 if (idx < target) {
1952 /* pages must be at end of current zone */
1953 if (pfn + nr_pages != zone_end_pfn(zone))
1954 - return 0;
1955 + return false;
1956
1957 /* no zones in use between current zone and target */
1958 for (i = idx + 1; i < target; i++)
1959 if (zone_is_initialized(zone - idx + i))
1960 - return 0;
1961 + return false;
1962 }
1963
1964 if (target < idx) {
1965 /* pages must be at beginning of current zone */
1966 if (pfn != zone->zone_start_pfn)
1967 - return 0;
1968 + return false;
1969
1970 /* no zones in use between current zone and target */
1971 for (i = target + 1; i < idx; i++)
1972 if (zone_is_initialized(zone - idx + i))
1973 - return 0;
1974 + return false;
1975 }
1976
1977 - return target - idx;
1978 + *zone_shift = target - idx;
1979 + return true;
1980 }
1981
1982 /* Must be protected by mem_hotplug_begin() */
1983 @@ -1089,10 +1092,13 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
1984 !can_online_high_movable(zone))
1985 return -EINVAL;
1986
1987 - if (online_type == MMOP_ONLINE_KERNEL)
1988 - zone_shift = zone_can_shift(pfn, nr_pages, ZONE_NORMAL);
1989 - else if (online_type == MMOP_ONLINE_MOVABLE)
1990 - zone_shift = zone_can_shift(pfn, nr_pages, ZONE_MOVABLE);
1991 + if (online_type == MMOP_ONLINE_KERNEL) {
1992 + if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
1993 + return -EINVAL;
1994 + } else if (online_type == MMOP_ONLINE_MOVABLE) {
1995 + if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift))
1996 + return -EINVAL;
1997 + }
1998
1999 zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);
2000 if (!zone)
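
The zone_can_shift() rework separates validity from magnitude: with the old int return, a legal shift of 0 (the range already sits in the target zone) was indistinguishable from failure, so an invalid online request could degenerate into a silent no-op shift instead of returning -EINVAL. A fragment mirroring the caller above, under the new contract:

int zone_shift;

if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift))
        return -EINVAL;         /* shift genuinely impossible */

/* zone_shift may legitimately be 0 here */
zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);
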
2001 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
2002 index 0b859af06b87..f75704717e47 100644
2003 --- a/mm/mempolicy.c
2004 +++ b/mm/mempolicy.c
2005 @@ -2023,8 +2023,8 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2006
2007 nmask = policy_nodemask(gfp, pol);
2008 zl = policy_zonelist(gfp, pol, node);
2009 - mpol_cond_put(pol);
2010 page = __alloc_pages_nodemask(gfp, order, zl, nmask);
2011 + mpol_cond_put(pol);
2012 out:
2013 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2014 goto retry_cpuset;
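
The mempolicy hunk is purely an ordering fix: policy_nodemask() and policy_zonelist() return pointers that are only valid while the policy reference is held, so dropping it with mpol_cond_put() before __alloc_pages_nodemask() consumes them risks a use-after-free. The safe ordering, annotated (a fragment of the function patched above):

nmask = policy_nodemask(gfp, pol);      /* borrowed from pol */
zl = policy_zonelist(gfp, pol, node);   /* borrowed from pol */
page = __alloc_pages_nodemask(gfp, order, zl, nmask);
mpol_cond_put(pol);                     /* drop the ref only after last use */
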
2015 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2016 index 34ada718ef47..f4a02e240fb6 100644
2017 --- a/mm/page_alloc.c
2018 +++ b/mm/page_alloc.c
2019 @@ -3502,12 +3502,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2020 struct page *page = NULL;
2021 unsigned int alloc_flags;
2022 unsigned long did_some_progress;
2023 - enum compact_priority compact_priority = DEF_COMPACT_PRIORITY;
2024 + enum compact_priority compact_priority;
2025 enum compact_result compact_result;
2026 - int compaction_retries = 0;
2027 - int no_progress_loops = 0;
2028 + int compaction_retries;
2029 + int no_progress_loops;
2030 unsigned long alloc_start = jiffies;
2031 unsigned int stall_timeout = 10 * HZ;
2032 + unsigned int cpuset_mems_cookie;
2033
2034 /*
2035 * In the slowpath, we sanity check order to avoid ever trying to
2036 @@ -3528,6 +3529,23 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2037 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
2038 gfp_mask &= ~__GFP_ATOMIC;
2039
2040 +retry_cpuset:
2041 + compaction_retries = 0;
2042 + no_progress_loops = 0;
2043 + compact_priority = DEF_COMPACT_PRIORITY;
2044 + cpuset_mems_cookie = read_mems_allowed_begin();
2045 + /*
2046 + * We need to recalculate the starting point for the zonelist iterator
2047 + * because we might have used a different nodemask in the fast path, or
2048 + * there was a cpuset modification and we are retrying - otherwise we
2049 + * could end up iterating over non-eligible zones endlessly.
2050 + */
2051 + ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
2052 + ac->high_zoneidx, ac->nodemask);
2053 + if (!ac->preferred_zoneref->zone)
2054 + goto nopage;
2055 +
2056 +
2057 /*
2058 * The fast path uses conservative alloc_flags to succeed only until
2059 * kswapd needs to be woken up, and to avoid the cost of setting up
2060 @@ -3687,6 +3705,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2061 &compaction_retries))
2062 goto retry;
2063
2064 + /*
2065 + * It's possible we raced with cpuset update so the OOM would be
2066 + * premature (see below the nopage: label for full explanation).
2067 + */
2068 + if (read_mems_allowed_retry(cpuset_mems_cookie))
2069 + goto retry_cpuset;
2070 +
2071 /* Reclaim has failed us, start killing things */
2072 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
2073 if (page)
2074 @@ -3699,6 +3724,16 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2075 }
2076
2077 nopage:
2078 + /*
2079 + * When updating a task's mems_allowed or mempolicy nodemask, it is
2080 + * possible to race with parallel threads in such a way that our
2081 + * allocation can fail while the mask is being updated. If we are about
2082 + * to fail, check if the cpuset changed during allocation and if so,
2083 + * retry.
2084 + */
2085 + if (read_mems_allowed_retry(cpuset_mems_cookie))
2086 + goto retry_cpuset;
2087 +
2088 warn_alloc(gfp_mask,
2089 "page allocation failure: order:%u", order);
2090 got_pg:
2091 @@ -3713,7 +3748,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2092 struct zonelist *zonelist, nodemask_t *nodemask)
2093 {
2094 struct page *page;
2095 - unsigned int cpuset_mems_cookie;
2096 unsigned int alloc_flags = ALLOC_WMARK_LOW;
2097 gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
2098 struct alloc_context ac = {
2099 @@ -3750,9 +3784,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2100 if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
2101 alloc_flags |= ALLOC_CMA;
2102
2103 -retry_cpuset:
2104 - cpuset_mems_cookie = read_mems_allowed_begin();
2105 -
2106 /* Dirty zone balancing only done in the fast path */
2107 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
2108
2109 @@ -3763,8 +3794,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2110 */
2111 ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
2112 ac.high_zoneidx, ac.nodemask);
2113 - if (!ac.preferred_zoneref) {
2114 + if (!ac.preferred_zoneref->zone) {
2115 page = NULL;
2116 + /*
2117 + * This might be due to a race with a cpuset_current_mems_allowed
2118 + * update, so make sure we retry with the original nodemask in
2119 + * the slow path.
2120 + */
2121 goto no_zone;
2122 }
2123
2124 @@ -3773,6 +3809,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2125 if (likely(page))
2126 goto out;
2127
2128 +no_zone:
2129 /*
2130 * Runtime PM, block IO and its error handling path can deadlock
2131 * because I/O on the device might not complete.
2132 @@ -3784,21 +3821,10 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2133 * Restore the original nodemask if it was potentially replaced with
2134 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
2135 */
2136 - if (cpusets_enabled())
2137 + if (unlikely(ac.nodemask != nodemask))
2138 ac.nodemask = nodemask;
2139 - page = __alloc_pages_slowpath(alloc_mask, order, &ac);
2140
2141 -no_zone:
2142 - /*
2143 - * When updating a task's mems_allowed, it is possible to race with
2144 - * parallel threads in such a way that an allocation can fail while
2145 - * the mask is being updated. If a page allocation is about to fail,
2146 - * check if the cpuset changed during allocation and if so, retry.
2147 - */
2148 - if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) {
2149 - alloc_mask = gfp_mask;
2150 - goto retry_cpuset;
2151 - }
2152 + page = __alloc_pages_slowpath(alloc_mask, order, &ac);
2153
2154 out:
2155 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
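
Structurally, the page_alloc changes move the cpuset retry loop from the fast-path wrapper into __alloc_pages_slowpath() itself, built on the seqcount-style pair read_mems_allowed_begin()/read_mems_allowed_retry(): snapshot a cookie, attempt the work, and redo it if mems_allowed changed underneath. The idiom in isolation; a kernel-style sketch where attempt_alloc() is a hypothetical stand-in for the real allocation steps:

#include <linux/cpuset.h>

static struct page *attempt_alloc(void);        /* hypothetical helper */

static struct page *alloc_with_cpuset_retry(void)
{
        struct page *page;
        unsigned int cookie;

retry:
        cookie = read_mems_allowed_begin();
        page = attempt_alloc();
        if (!page && read_mems_allowed_retry(cookie))
                goto retry;     /* mems_allowed changed mid-allocation */
        return page;
}
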
2156 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
2157 index 62a482790937..b2ae4f150ec6 100644
2158 --- a/net/sunrpc/clnt.c
2159 +++ b/net/sunrpc/clnt.c
2160 @@ -336,6 +336,11 @@ static int rpc_client_register(struct rpc_clnt *clnt,
2161
2162 static DEFINE_IDA(rpc_clids);
2163
2164 +void rpc_cleanup_clids(void)
2165 +{
2166 + ida_destroy(&rpc_clids);
2167 +}
2168 +
2169 static int rpc_alloc_clid(struct rpc_clnt *clnt)
2170 {
2171 int clid;
2172 diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
2173 index ee5d3d253102..3142f38d1104 100644
2174 --- a/net/sunrpc/sunrpc_syms.c
2175 +++ b/net/sunrpc/sunrpc_syms.c
2176 @@ -119,6 +119,7 @@ init_sunrpc(void)
2177 static void __exit
2178 cleanup_sunrpc(void)
2179 {
2180 + rpc_cleanup_clids();
2181 rpcauth_remove_module();
2182 cleanup_socket_xprt();
2183 svc_cleanup_xprt_sock();
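
Taken together, the two sunrpc hunks complete an IDA lifecycle: rpc_clids hands out client IDs for the module's lifetime, and without ida_destroy() on the exit path the IDA's internally cached storage leaks on module unload. The pattern in miniature; a kernel-style sketch with illustrative names:

#include <linux/idr.h>

static DEFINE_IDA(example_ids);

static int example_get_id(void)
{
        return ida_simple_get(&example_ids, 0, 0, GFP_KERNEL);
}

static void example_put_id(int id)
{
        ida_simple_remove(&example_ids, id);
}

static void example_exit(void)
{
        ida_destroy(&example_ids);      /* free cached internal storage */
}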