Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0138-5.4.39-all-fixes.patch

Parent Directory | Revision Log


Revision 3519 - (show annotations) (download)
Mon May 11 14:36:44 2020 UTC (4 years ago) by niro
File size: 61780 bytes
-linux-5.4.39
1 diff --git a/Makefile b/Makefile
2 index 989e7d649633..ff2b90ddc9bc 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 38
10 +SUBLEVEL = 39
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 diff --git a/arch/arm/boot/dts/imx6qdl-sr-som-ti.dtsi b/arch/arm/boot/dts/imx6qdl-sr-som-ti.dtsi
15 index 44a97ba93a95..352ac585ca6b 100644
16 --- a/arch/arm/boot/dts/imx6qdl-sr-som-ti.dtsi
17 +++ b/arch/arm/boot/dts/imx6qdl-sr-som-ti.dtsi
18 @@ -153,6 +153,7 @@
19 bus-width = <4>;
20 keep-power-in-suspend;
21 mmc-pwrseq = <&pwrseq_ti_wifi>;
22 + cap-power-off-card;
23 non-removable;
24 vmmc-supply = <&vcc_3v3>;
25 /* vqmmc-supply = <&nvcc_sd1>; - MMC layer doesn't like it! */
26 diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
27 index dd2514bb1511..3862cad2410c 100644
28 --- a/arch/arm64/kernel/vdso/Makefile
29 +++ b/arch/arm64/kernel/vdso/Makefile
30 @@ -32,7 +32,7 @@ UBSAN_SANITIZE := n
31 OBJECT_FILES_NON_STANDARD := y
32 KCOV_INSTRUMENT := n
33
34 -CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny
35 +CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -fasynchronous-unwind-tables
36
37 ifneq ($(c-gettimeofday-y),)
38 CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
39 diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
40 index 5e4a8860a9c0..ea9ecf3d70c2 100644
41 --- a/drivers/acpi/device_pm.c
42 +++ b/drivers/acpi/device_pm.c
43 @@ -273,13 +273,13 @@ int acpi_device_set_power(struct acpi_device *device, int state)
44 end:
45 if (result) {
46 dev_warn(&device->dev, "Failed to change power state to %s\n",
47 - acpi_power_state_string(state));
48 + acpi_power_state_string(target_state));
49 } else {
50 device->power.state = target_state;
51 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
52 "Device [%s] transitioned to %s\n",
53 device->pnp.bus_id,
54 - acpi_power_state_string(state)));
55 + acpi_power_state_string(target_state)));
56 }
57
58 return result;
59 diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
60 index 2912006b946b..4ce9c2b4544a 100644
61 --- a/drivers/crypto/caam/caamalg.c
62 +++ b/drivers/crypto/caam/caamalg.c
63 @@ -1810,7 +1810,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
64
65 if (ivsize || mapped_dst_nents > 1)
66 sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
67 - mapped_dst_nents);
68 + mapped_dst_nents - 1 + !!ivsize);
69
70 if (sec4_sg_bytes) {
71 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
72 diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
73 index 0fb0358f0073..adc88e1dc999 100644
74 --- a/drivers/dma-buf/dma-buf.c
75 +++ b/drivers/dma-buf/dma-buf.c
76 @@ -388,7 +388,8 @@ static long dma_buf_ioctl(struct file *file,
77
78 return ret;
79
80 - case DMA_BUF_SET_NAME:
81 + case DMA_BUF_SET_NAME_A:
82 + case DMA_BUF_SET_NAME_B:
83 return dma_buf_set_name(dmabuf, (const char __user *)arg);
84
85 default:
86 diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
87 index a2cadfa2e6d7..364dd34799d4 100644
88 --- a/drivers/dma/dmatest.c
89 +++ b/drivers/dma/dmatest.c
90 @@ -240,7 +240,7 @@ static bool is_threaded_test_run(struct dmatest_info *info)
91 struct dmatest_thread *thread;
92
93 list_for_each_entry(thread, &dtc->threads, node) {
94 - if (!thread->done)
95 + if (!thread->done && !thread->pending)
96 return true;
97 }
98 }
99 @@ -662,8 +662,8 @@ static int dmatest_func(void *data)
100 flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
101
102 ktime = ktime_get();
103 - while (!kthread_should_stop()
104 - && !(params->iterations && total_tests >= params->iterations)) {
105 + while (!(kthread_should_stop() ||
106 + (params->iterations && total_tests >= params->iterations))) {
107 struct dma_async_tx_descriptor *tx = NULL;
108 struct dmaengine_unmap_data *um;
109 dma_addr_t *dsts;
110 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
111 index 360c87ba4595..be61ae1430ed 100644
112 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
113 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
114 @@ -2698,7 +2698,8 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
115 const union dc_tiling_info *tiling_info,
116 const uint64_t info,
117 struct dc_plane_dcc_param *dcc,
118 - struct dc_plane_address *address)
119 + struct dc_plane_address *address,
120 + bool force_disable_dcc)
121 {
122 struct dc *dc = adev->dm.dc;
123 struct dc_dcc_surface_param input;
124 @@ -2710,6 +2711,9 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
125 memset(&input, 0, sizeof(input));
126 memset(&output, 0, sizeof(output));
127
128 + if (force_disable_dcc)
129 + return 0;
130 +
131 if (!offset)
132 return 0;
133
134 @@ -2759,7 +2763,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
135 union dc_tiling_info *tiling_info,
136 struct plane_size *plane_size,
137 struct dc_plane_dcc_param *dcc,
138 - struct dc_plane_address *address)
139 + struct dc_plane_address *address,
140 + bool force_disable_dcc)
141 {
142 const struct drm_framebuffer *fb = &afb->base;
143 int ret;
144 @@ -2869,7 +2874,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
145
146 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
147 plane_size, tiling_info,
148 - tiling_flags, dcc, address);
149 + tiling_flags, dcc, address,
150 + force_disable_dcc);
151 if (ret)
152 return ret;
153 }
154 @@ -2961,7 +2967,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
155 const struct drm_plane_state *plane_state,
156 const uint64_t tiling_flags,
157 struct dc_plane_info *plane_info,
158 - struct dc_plane_address *address)
159 + struct dc_plane_address *address,
160 + bool force_disable_dcc)
161 {
162 const struct drm_framebuffer *fb = plane_state->fb;
163 const struct amdgpu_framebuffer *afb =
164 @@ -3040,7 +3047,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
165 plane_info->rotation, tiling_flags,
166 &plane_info->tiling_info,
167 &plane_info->plane_size,
168 - &plane_info->dcc, address);
169 + &plane_info->dcc, address,
170 + force_disable_dcc);
171 if (ret)
172 return ret;
173
174 @@ -3063,6 +3071,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
175 struct dc_plane_info plane_info;
176 uint64_t tiling_flags;
177 int ret;
178 + bool force_disable_dcc = false;
179
180 ret = fill_dc_scaling_info(plane_state, &scaling_info);
181 if (ret)
182 @@ -3077,9 +3086,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
183 if (ret)
184 return ret;
185
186 + force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
187 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
188 &plane_info,
189 - &dc_plane_state->address);
190 + &dc_plane_state->address,
191 + force_disable_dcc);
192 if (ret)
193 return ret;
194
195 @@ -4481,6 +4492,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
196 uint64_t tiling_flags;
197 uint32_t domain;
198 int r;
199 + bool force_disable_dcc = false;
200
201 dm_plane_state_old = to_dm_plane_state(plane->state);
202 dm_plane_state_new = to_dm_plane_state(new_state);
203 @@ -4539,11 +4551,13 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
204 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
205 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
206
207 + force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
208 fill_plane_buffer_attributes(
209 adev, afb, plane_state->format, plane_state->rotation,
210 tiling_flags, &plane_state->tiling_info,
211 &plane_state->plane_size, &plane_state->dcc,
212 - &plane_state->address);
213 + &plane_state->address,
214 + force_disable_dcc);
215 }
216
217 return 0;
218 @@ -5767,7 +5781,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
219 fill_dc_plane_info_and_addr(
220 dm->adev, new_plane_state, tiling_flags,
221 &bundle->plane_infos[planes_count],
222 - &bundle->flip_addrs[planes_count].address);
223 + &bundle->flip_addrs[planes_count].address,
224 + false);
225 +
226 + DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
227 + new_plane_state->plane->index,
228 + bundle->plane_infos[planes_count].dcc.enable);
229
230 bundle->surface_updates[planes_count].plane_info =
231 &bundle->plane_infos[planes_count];
232 @@ -7138,7 +7157,8 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
233 ret = fill_dc_plane_info_and_addr(
234 dm->adev, new_plane_state, tiling_flags,
235 &plane_info,
236 - &flip_addr.address);
237 + &flip_addr.address,
238 + false);
239 if (ret)
240 goto cleanup;
241
242 diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
243 index 3f50b8865db4..ea2849338d6c 100644
244 --- a/drivers/gpu/drm/drm_edid.c
245 +++ b/drivers/gpu/drm/drm_edid.c
246 @@ -4743,7 +4743,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d
247 struct drm_display_mode *mode;
248 unsigned pixel_clock = (timings->pixel_clock[0] |
249 (timings->pixel_clock[1] << 8) |
250 - (timings->pixel_clock[2] << 16));
251 + (timings->pixel_clock[2] << 16)) + 1;
252 unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
253 unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
254 unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
255 diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
256 index ef09dc6bc635..d082c194cccc 100644
257 --- a/drivers/gpu/drm/qxl/qxl_cmd.c
258 +++ b/drivers/gpu/drm/qxl/qxl_cmd.c
259 @@ -480,9 +480,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
260 return ret;
261
262 ret = qxl_release_reserve_list(release, true);
263 - if (ret)
264 + if (ret) {
265 + qxl_release_free(qdev, release);
266 return ret;
267 -
268 + }
269 cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
270 cmd->type = QXL_SURFACE_CMD_CREATE;
271 cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
272 @@ -499,8 +500,8 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
273 /* no need to add a release to the fence for this surface bo,
274 since it is only released when we ask to destroy the surface
275 and it would never signal otherwise */
276 - qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
277 qxl_release_fence_buffer_objects(release);
278 + qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
279
280 surf->hw_surf_alloc = true;
281 spin_lock(&qdev->surf_id_idr_lock);
282 @@ -542,9 +543,8 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
283 cmd->surface_id = id;
284 qxl_release_unmap(qdev, release, &cmd->release_info);
285
286 - qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
287 -
288 qxl_release_fence_buffer_objects(release);
289 + qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
290
291 return 0;
292 }
293 diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
294 index 16d73b22f3f5..92d84280096e 100644
295 --- a/drivers/gpu/drm/qxl/qxl_display.c
296 +++ b/drivers/gpu/drm/qxl/qxl_display.c
297 @@ -523,8 +523,8 @@ static int qxl_primary_apply_cursor(struct drm_plane *plane)
298 cmd->u.set.visible = 1;
299 qxl_release_unmap(qdev, release, &cmd->release_info);
300
301 - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
302 qxl_release_fence_buffer_objects(release);
303 + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
304
305 return ret;
306
307 @@ -665,8 +665,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
308 cmd->u.position.y = plane->state->crtc_y + fb->hot_y;
309
310 qxl_release_unmap(qdev, release, &cmd->release_info);
311 - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
312 qxl_release_fence_buffer_objects(release);
313 + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
314
315 if (old_cursor_bo != NULL)
316 qxl_bo_unpin(old_cursor_bo);
317 @@ -713,8 +713,8 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane,
318 cmd->type = QXL_CURSOR_HIDE;
319 qxl_release_unmap(qdev, release, &cmd->release_info);
320
321 - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
322 qxl_release_fence_buffer_objects(release);
323 + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
324 }
325
326 static void qxl_update_dumb_head(struct qxl_device *qdev,
327 diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
328 index 5bebf1ea1c5d..3599db096973 100644
329 --- a/drivers/gpu/drm/qxl/qxl_draw.c
330 +++ b/drivers/gpu/drm/qxl/qxl_draw.c
331 @@ -209,9 +209,10 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
332 goto out_release_backoff;
333
334 rects = drawable_set_clipping(qdev, num_clips, clips_bo);
335 - if (!rects)
336 + if (!rects) {
337 + ret = -EINVAL;
338 goto out_release_backoff;
339 -
340 + }
341 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
342
343 drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
344 @@ -242,8 +243,8 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
345 }
346 qxl_bo_kunmap(clips_bo);
347
348 - qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
349 qxl_release_fence_buffer_objects(release);
350 + qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
351
352 out_release_backoff:
353 if (ret)
354 diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
355 index 8117a45b3610..72f3f1bbb40c 100644
356 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c
357 +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
358 @@ -261,11 +261,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
359 apply_surf_reloc(qdev, &reloc_info[i]);
360 }
361
362 + qxl_release_fence_buffer_objects(release);
363 ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
364 - if (ret)
365 - qxl_release_backoff_reserve_list(release);
366 - else
367 - qxl_release_fence_buffer_objects(release);
368
369 out_free_bos:
370 out_free_release:
371 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
372 index 40f6b73dae94..9cdd434bb340 100644
373 --- a/drivers/hv/vmbus_drv.c
374 +++ b/drivers/hv/vmbus_drv.c
375 @@ -978,6 +978,9 @@ static int vmbus_resume(struct device *child_device)
376
377 return drv->resume(dev);
378 }
379 +#else
380 +#define vmbus_suspend NULL
381 +#define vmbus_resume NULL
382 #endif /* CONFIG_PM_SLEEP */
383
384 /*
385 @@ -995,11 +998,22 @@ static void vmbus_device_release(struct device *device)
386 }
387
388 /*
389 - * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than
390 - * SET_SYSTEM_SLEEP_PM_OPS: see the comment before vmbus_bus_pm.
391 + * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
392 + *
393 + * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
394 + * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
395 + * is no way to wake up a Generation-2 VM.
396 + *
397 + * The other 4 ops are for hibernation.
398 */
399 +
400 static const struct dev_pm_ops vmbus_pm = {
401 - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_suspend, vmbus_resume)
402 + .suspend_noirq = NULL,
403 + .resume_noirq = NULL,
404 + .freeze_noirq = vmbus_suspend,
405 + .thaw_noirq = vmbus_resume,
406 + .poweroff_noirq = vmbus_suspend,
407 + .restore_noirq = vmbus_resume,
408 };
409
410 /* The one and only one */
411 @@ -2280,6 +2294,9 @@ static int vmbus_bus_resume(struct device *dev)
412
413 return 0;
414 }
415 +#else
416 +#define vmbus_bus_suspend NULL
417 +#define vmbus_bus_resume NULL
418 #endif /* CONFIG_PM_SLEEP */
419
420 static const struct acpi_device_id vmbus_acpi_device_ids[] = {
421 @@ -2290,16 +2307,24 @@ static const struct acpi_device_id vmbus_acpi_device_ids[] = {
422 MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
423
424 /*
425 - * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than
426 - * SET_SYSTEM_SLEEP_PM_OPS, otherwise NIC SR-IOV can not work, because the
427 - * "pci_dev_pm_ops" uses the "noirq" callbacks: in the resume path, the
428 - * pci "noirq" restore callback runs before "non-noirq" callbacks (see
429 + * Note: we must use the "no_irq" ops, otherwise hibernation can not work with
430 + * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
431 + * the resume path, the pci "noirq" restore op runs before "non-noirq" op (see
432 * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
433 * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
434 - * resume callback must also run via the "noirq" callbacks.
435 + * resume callback must also run via the "noirq" ops.
436 + *
437 + * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
438 + * earlier in this file before vmbus_pm.
439 */
440 +
441 static const struct dev_pm_ops vmbus_bus_pm = {
442 - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_bus_suspend, vmbus_bus_resume)
443 + .suspend_noirq = NULL,
444 + .resume_noirq = NULL,
445 + .freeze_noirq = vmbus_bus_suspend,
446 + .thaw_noirq = vmbus_bus_resume,
447 + .poweroff_noirq = vmbus_bus_suspend,
448 + .restore_noirq = vmbus_bus_resume
449 };
450
451 static struct acpi_driver vmbus_acpi_driver = {
452 diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c
453 index 5e4800d72e00..cd3fd5ee5f65 100644
454 --- a/drivers/i2c/busses/i2c-amd-mp2-pci.c
455 +++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c
456 @@ -349,12 +349,12 @@ static int amd_mp2_pci_probe(struct pci_dev *pci_dev,
457 if (!privdata)
458 return -ENOMEM;
459
460 + privdata->pci_dev = pci_dev;
461 rc = amd_mp2_pci_init(privdata, pci_dev);
462 if (rc)
463 return rc;
464
465 mutex_init(&privdata->c2p_lock);
466 - privdata->pci_dev = pci_dev;
467
468 pm_runtime_set_autosuspend_delay(&pci_dev->dev, 1000);
469 pm_runtime_use_autosuspend(&pci_dev->dev);
470 diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
471 index 7b098ff5f5dd..dad6e432de89 100644
472 --- a/drivers/i2c/busses/i2c-aspeed.c
473 +++ b/drivers/i2c/busses/i2c-aspeed.c
474 @@ -603,6 +603,7 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
475 /* Ack all interrupts except for Rx done */
476 writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE,
477 bus->base + ASPEED_I2C_INTR_STS_REG);
478 + readl(bus->base + ASPEED_I2C_INTR_STS_REG);
479 irq_remaining = irq_received;
480
481 #if IS_ENABLED(CONFIG_I2C_SLAVE)
482 @@ -645,9 +646,11 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
483 irq_received, irq_handled);
484
485 /* Ack Rx done */
486 - if (irq_received & ASPEED_I2CD_INTR_RX_DONE)
487 + if (irq_received & ASPEED_I2CD_INTR_RX_DONE) {
488 writel(ASPEED_I2CD_INTR_RX_DONE,
489 bus->base + ASPEED_I2C_INTR_STS_REG);
490 + readl(bus->base + ASPEED_I2C_INTR_STS_REG);
491 + }
492 spin_unlock(&bus->lock);
493 return irq_remaining ? IRQ_NONE : IRQ_HANDLED;
494 }
495 diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
496 index 9ffdffaf6141..03475f179973 100644
497 --- a/drivers/i2c/busses/i2c-bcm-iproc.c
498 +++ b/drivers/i2c/busses/i2c-bcm-iproc.c
499 @@ -359,6 +359,9 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
500 value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
501 i2c_slave_event(iproc_i2c->slave,
502 I2C_SLAVE_WRITE_RECEIVED, &value);
503 + if (rx_status == I2C_SLAVE_RX_END)
504 + i2c_slave_event(iproc_i2c->slave,
505 + I2C_SLAVE_STOP, &value);
506 }
507 } else if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
508 /* Master read other than start */
509 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
510 index 319e4b4ae639..09af96ec41dd 100644
511 --- a/drivers/infiniband/core/cm.c
512 +++ b/drivers/infiniband/core/cm.c
513 @@ -597,18 +597,6 @@ static int cm_init_av_by_path(struct sa_path_rec *path,
514 return 0;
515 }
516
517 -static int cm_alloc_id(struct cm_id_private *cm_id_priv)
518 -{
519 - int err;
520 - u32 id;
521 -
522 - err = xa_alloc_cyclic_irq(&cm.local_id_table, &id, cm_id_priv,
523 - xa_limit_32b, &cm.local_id_next, GFP_KERNEL);
524 -
525 - cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
526 - return err;
527 -}
528 -
529 static u32 cm_local_id(__be32 local_id)
530 {
531 return (__force u32) (local_id ^ cm.random_id_operand);
532 @@ -862,6 +850,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
533 void *context)
534 {
535 struct cm_id_private *cm_id_priv;
536 + u32 id;
537 int ret;
538
539 cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
540 @@ -873,9 +862,6 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
541 cm_id_priv->id.cm_handler = cm_handler;
542 cm_id_priv->id.context = context;
543 cm_id_priv->id.remote_cm_qpn = 1;
544 - ret = cm_alloc_id(cm_id_priv);
545 - if (ret)
546 - goto error;
547
548 spin_lock_init(&cm_id_priv->lock);
549 init_completion(&cm_id_priv->comp);
550 @@ -884,11 +870,20 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
551 INIT_LIST_HEAD(&cm_id_priv->altr_list);
552 atomic_set(&cm_id_priv->work_count, -1);
553 atomic_set(&cm_id_priv->refcount, 1);
554 +
555 + ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b,
556 + &cm.local_id_next, GFP_KERNEL);
557 + if (ret < 0)
558 + goto error;
559 + cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
560 + xa_store_irq(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
561 + cm_id_priv, GFP_KERNEL);
562 +
563 return &cm_id_priv->id;
564
565 error:
566 kfree(cm_id_priv);
567 - return ERR_PTR(-ENOMEM);
568 + return ERR_PTR(ret);
569 }
570 EXPORT_SYMBOL(ib_create_cm_id);
571
572 diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
573 index ccf4d069c25c..1c95fefa1f06 100644
574 --- a/drivers/infiniband/core/rdma_core.c
575 +++ b/drivers/infiniband/core/rdma_core.c
576 @@ -362,7 +362,7 @@ lookup_get_fd_uobject(const struct uverbs_api_object *obj,
577 * and the caller is expected to ensure that uverbs_close_fd is never
578 * done while a call top lookup is possible.
579 */
580 - if (f->f_op != fd_type->fops) {
581 + if (f->f_op != fd_type->fops || uobject->ufile != ufile) {
582 fput(f);
583 return ERR_PTR(-EBADF);
584 }
585 @@ -689,7 +689,6 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
586 enum rdma_lookup_mode mode)
587 {
588 assert_uverbs_usecnt(uobj, mode);
589 - uobj->uapi_object->type_class->lookup_put(uobj, mode);
590 /*
591 * In order to unlock an object, either decrease its usecnt for
592 * read access or zero it in case of exclusive access. See
593 @@ -706,6 +705,7 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
594 break;
595 }
596
597 + uobj->uapi_object->type_class->lookup_put(uobj, mode);
598 /* Pairs with the kref obtained by type->lookup_get */
599 uverbs_uobject_put(uobj);
600 }
601 diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
602 index 369a203332a2..61a1b0bdede0 100644
603 --- a/drivers/infiniband/hw/mlx4/main.c
604 +++ b/drivers/infiniband/hw/mlx4/main.c
605 @@ -1492,8 +1492,9 @@ static int __mlx4_ib_create_default_rules(
606 int i;
607
608 for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
609 + union ib_flow_spec ib_spec = {};
610 int ret;
611 - union ib_flow_spec ib_spec;
612 +
613 switch (pdefault_rules->rules_create_list[i]) {
614 case 0:
615 /* no rule */
616 diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
617 index 881decb1309a..96edc5c30204 100644
618 --- a/drivers/infiniband/hw/mlx5/qp.c
619 +++ b/drivers/infiniband/hw/mlx5/qp.c
620 @@ -5496,7 +5496,9 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
621 rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
622 rdma_ah_set_static_rate(ah_attr,
623 path->static_rate ? path->static_rate - 5 : 0);
624 - if (path->grh_mlid & (1 << 7)) {
625 +
626 + if (path->grh_mlid & (1 << 7) ||
627 + ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
628 u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);
629
630 rdma_ah_set_grh(ah_attr, NULL,
631 diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
632 index 0fee3c87776b..bd729aa1d510 100644
633 --- a/drivers/infiniband/sw/rdmavt/cq.c
634 +++ b/drivers/infiniband/sw/rdmavt/cq.c
635 @@ -248,8 +248,8 @@ int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
636 */
637 if (udata && udata->outlen >= sizeof(__u64)) {
638 cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc);
639 - if (!cq->ip) {
640 - err = -ENOMEM;
641 + if (IS_ERR(cq->ip)) {
642 + err = PTR_ERR(cq->ip);
643 goto bail_wc;
644 }
645
646 diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c
647 index 652f4a7efc1b..37853aa3bcf7 100644
648 --- a/drivers/infiniband/sw/rdmavt/mmap.c
649 +++ b/drivers/infiniband/sw/rdmavt/mmap.c
650 @@ -154,7 +154,7 @@ done:
651 * @udata: user data (must be valid!)
652 * @obj: opaque pointer to a cq, wq etc
653 *
654 - * Return: rvt_mmap struct on success
655 + * Return: rvt_mmap struct on success, ERR_PTR on failure
656 */
657 struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
658 struct ib_udata *udata, void *obj)
659 @@ -166,7 +166,7 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
660
661 ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
662 if (!ip)
663 - return ip;
664 + return ERR_PTR(-ENOMEM);
665
666 size = PAGE_ALIGN(size);
667
668 diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
669 index 799254a049ba..d35465389357 100644
670 --- a/drivers/infiniband/sw/rdmavt/qp.c
671 +++ b/drivers/infiniband/sw/rdmavt/qp.c
672 @@ -1244,8 +1244,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
673
674 qp->ip = rvt_create_mmap_info(rdi, s, udata,
675 qp->r_rq.wq);
676 - if (!qp->ip) {
677 - ret = ERR_PTR(-ENOMEM);
678 + if (IS_ERR(qp->ip)) {
679 + ret = ERR_CAST(qp->ip);
680 goto bail_qpn;
681 }
682
683 diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c
684 index 24fef021d51d..f547c115af03 100644
685 --- a/drivers/infiniband/sw/rdmavt/srq.c
686 +++ b/drivers/infiniband/sw/rdmavt/srq.c
687 @@ -111,8 +111,8 @@ int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
688 u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
689
690 srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
691 - if (!srq->ip) {
692 - ret = -ENOMEM;
693 + if (IS_ERR(srq->ip)) {
694 + ret = PTR_ERR(srq->ip);
695 goto bail_wq;
696 }
697
698 diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
699 index 5d97bba0ce6d..e7cd04eda04a 100644
700 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c
701 +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
702 @@ -920,20 +920,27 @@ static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
703 {
704 struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
705 struct siw_device *sdev = to_siw_dev(pd->device);
706 - struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
707 + struct siw_mem *mem;
708 int rv = 0;
709
710 siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey);
711
712 - if (unlikely(!mem || !base_mr)) {
713 + if (unlikely(!base_mr)) {
714 pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
715 return -EINVAL;
716 }
717 +
718 if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) {
719 pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey);
720 - rv = -EINVAL;
721 - goto out;
722 + return -EINVAL;
723 }
724 +
725 + mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
726 + if (unlikely(!mem)) {
727 + pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
728 + return -EINVAL;
729 + }
730 +
731 if (unlikely(mem->pd != pd)) {
732 pr_warn("siw: fastreg: PD mismatch\n");
733 rv = -EINVAL;
734 diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
735 index b5ae9f7c0510..ef14b00fa94b 100644
736 --- a/drivers/iommu/amd_iommu_init.c
737 +++ b/drivers/iommu/amd_iommu_init.c
738 @@ -2946,7 +2946,7 @@ static int __init parse_amd_iommu_intr(char *str)
739 {
740 for (; *str; ++str) {
741 if (strncmp(str, "legacy", 6) == 0) {
742 - amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
743 + amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
744 break;
745 }
746 if (strncmp(str, "vapic", 5) == 0) {
747 diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
748 index e0b3fa2bb7ab..280de92b332e 100644
749 --- a/drivers/iommu/qcom_iommu.c
750 +++ b/drivers/iommu/qcom_iommu.c
751 @@ -814,8 +814,11 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
752 qcom_iommu->dev = dev;
753
754 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
755 - if (res)
756 + if (res) {
757 qcom_iommu->local_base = devm_ioremap_resource(dev, res);
758 + if (IS_ERR(qcom_iommu->local_base))
759 + return PTR_ERR(qcom_iommu->local_base);
760 + }
761
762 qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
763 if (IS_ERR(qcom_iommu->iface_clk)) {
764 diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
765 index e0c32793c248..456d790c918c 100644
766 --- a/drivers/md/dm-mpath.c
767 +++ b/drivers/md/dm-mpath.c
768 @@ -576,10 +576,12 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
769
770 /* Do we need to select a new pgpath? */
771 pgpath = READ_ONCE(m->current_pgpath);
772 - queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
773 - if (!pgpath || !queue_io)
774 + if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
775 pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
776
777 + /* MPATHF_QUEUE_IO might have been cleared by choose_pgpath. */
778 + queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
779 +
780 if ((pgpath && queue_io) ||
781 (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
782 /* Queue for the daemon to resubmit */
783 diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
784 index 49147e634046..fb41b4f23c48 100644
785 --- a/drivers/md/dm-verity-fec.c
786 +++ b/drivers/md/dm-verity-fec.c
787 @@ -435,7 +435,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
788 fio->level++;
789
790 if (type == DM_VERITY_BLOCK_TYPE_METADATA)
791 - block += v->data_blocks;
792 + block = block - v->hash_start + v->data_blocks;
793
794 /*
795 * For RS(M, N), the continuous FEC data is divided into blocks of N
796 diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
797 index 5bf60d6be96a..0d6ca723257f 100644
798 --- a/drivers/md/dm-writecache.c
799 +++ b/drivers/md/dm-writecache.c
800 @@ -878,6 +878,24 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
801 return 0;
802 }
803
804 +static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
805 +{
806 + struct dm_io_region region;
807 + struct dm_io_request req;
808 +
809 + region.bdev = wc->ssd_dev->bdev;
810 + region.sector = wc->start_sector;
811 + region.count = n_sectors;
812 + req.bi_op = REQ_OP_READ;
813 + req.bi_op_flags = REQ_SYNC;
814 + req.mem.type = DM_IO_VMA;
815 + req.mem.ptr.vma = (char *)wc->memory_map;
816 + req.client = wc->dm_io;
817 + req.notify.fn = NULL;
818 +
819 + return dm_io(&req, 1, &region, NULL);
820 +}
821 +
822 static void writecache_resume(struct dm_target *ti)
823 {
824 struct dm_writecache *wc = ti->private;
825 @@ -888,8 +906,18 @@ static void writecache_resume(struct dm_target *ti)
826
827 wc_lock(wc);
828
829 - if (WC_MODE_PMEM(wc))
830 + if (WC_MODE_PMEM(wc)) {
831 persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
832 + } else {
833 + r = writecache_read_metadata(wc, wc->metadata_sectors);
834 + if (r) {
835 + size_t sb_entries_offset;
836 + writecache_error(wc, r, "unable to read metadata: %d", r);
837 + sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
838 + memset((char *)wc->memory_map + sb_entries_offset, -1,
839 + (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
840 + }
841 + }
842
843 wc->tree = RB_ROOT;
844 INIT_LIST_HEAD(&wc->lru);
845 @@ -1984,6 +2012,12 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
846 ti->error = "Invalid block size";
847 goto bad;
848 }
849 + if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
850 + wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
851 + r = -EINVAL;
852 + ti->error = "Block size is smaller than device logical block size";
853 + goto bad;
854 + }
855 wc->block_size_bits = __ffs(wc->block_size);
856
857 wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
858 @@ -2072,8 +2106,6 @@ invalid_optional:
859 goto bad;
860 }
861 } else {
862 - struct dm_io_region region;
863 - struct dm_io_request req;
864 size_t n_blocks, n_metadata_blocks;
865 uint64_t n_bitmap_bits;
866
867 @@ -2130,19 +2162,9 @@ invalid_optional:
868 goto bad;
869 }
870
871 - region.bdev = wc->ssd_dev->bdev;
872 - region.sector = wc->start_sector;
873 - region.count = wc->metadata_sectors;
874 - req.bi_op = REQ_OP_READ;
875 - req.bi_op_flags = REQ_SYNC;
876 - req.mem.type = DM_IO_VMA;
877 - req.mem.ptr.vma = (char *)wc->memory_map;
878 - req.client = wc->dm_io;
879 - req.notify.fn = NULL;
880 -
881 - r = dm_io(&req, 1, &region, NULL);
882 + r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
883 if (r) {
884 - ti->error = "Unable to read metadata";
885 + ti->error = "Unable to read first block of metadata";
886 goto bad;
887 }
888 }
889 diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
890 index 5047f7343ffc..c19f4c3f115a 100644
891 --- a/drivers/mmc/host/cqhci.c
892 +++ b/drivers/mmc/host/cqhci.c
893 @@ -5,6 +5,7 @@
894 #include <linux/delay.h>
895 #include <linux/highmem.h>
896 #include <linux/io.h>
897 +#include <linux/iopoll.h>
898 #include <linux/module.h>
899 #include <linux/dma-mapping.h>
900 #include <linux/slab.h>
901 @@ -343,12 +344,16 @@ static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
902 /* CQHCI is idle and should halt immediately, so set a small timeout */
903 #define CQHCI_OFF_TIMEOUT 100
904
905 +static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
906 +{
907 + return cqhci_readl(cq_host, CQHCI_CTL);
908 +}
909 +
910 static void cqhci_off(struct mmc_host *mmc)
911 {
912 struct cqhci_host *cq_host = mmc->cqe_private;
913 - ktime_t timeout;
914 - bool timed_out;
915 u32 reg;
916 + int err;
917
918 if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
919 return;
920 @@ -358,15 +363,9 @@ static void cqhci_off(struct mmc_host *mmc)
921
922 cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
923
924 - timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
925 - while (1) {
926 - timed_out = ktime_compare(ktime_get(), timeout) > 0;
927 - reg = cqhci_readl(cq_host, CQHCI_CTL);
928 - if ((reg & CQHCI_HALT) || timed_out)
929 - break;
930 - }
931 -
932 - if (timed_out)
933 + err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
934 + reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
935 + if (err < 0)
936 pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
937 else
938 pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
939 diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
940 index ba9a63db73da..999214e8cf2b 100644
941 --- a/drivers/mmc/host/meson-mx-sdio.c
942 +++ b/drivers/mmc/host/meson-mx-sdio.c
943 @@ -357,14 +357,6 @@ static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
944 meson_mx_mmc_start_cmd(mmc, mrq->cmd);
945 }
946
947 -static int meson_mx_mmc_card_busy(struct mmc_host *mmc)
948 -{
949 - struct meson_mx_mmc_host *host = mmc_priv(mmc);
950 - u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC);
951 -
952 - return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK);
953 -}
954 -
955 static void meson_mx_mmc_read_response(struct mmc_host *mmc,
956 struct mmc_command *cmd)
957 {
958 @@ -506,7 +498,6 @@ static void meson_mx_mmc_timeout(struct timer_list *t)
959 static struct mmc_host_ops meson_mx_mmc_ops = {
960 .request = meson_mx_mmc_request,
961 .set_ios = meson_mx_mmc_set_ios,
962 - .card_busy = meson_mx_mmc_card_busy,
963 .get_cd = mmc_gpio_get_cd,
964 .get_ro = mmc_gpio_get_ro,
965 };
966 @@ -570,7 +561,7 @@ static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
967 mmc->f_max = clk_round_rate(host->cfg_div_clk,
968 clk_get_rate(host->parent_clk));
969
970 - mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
971 + mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY;
972 mmc->ops = &meson_mx_mmc_ops;
973
974 ret = mmc_of_parse(mmc);
975 diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
976 index 3d0bb5e2e09b..0148f8e6bb37 100644
977 --- a/drivers/mmc/host/sdhci-msm.c
978 +++ b/drivers/mmc/host/sdhci-msm.c
979 @@ -1944,6 +1944,8 @@ static int sdhci_msm_probe(struct platform_device *pdev)
980 goto clk_disable;
981 }
982
983 + msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
984 +
985 pm_runtime_get_noresume(&pdev->dev);
986 pm_runtime_set_active(&pdev->dev);
987 pm_runtime_enable(&pdev->dev);
988 diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
989 index 5091e2c1c0e5..9b66e8b374ed 100644
990 --- a/drivers/mmc/host/sdhci-pci-core.c
991 +++ b/drivers/mmc/host/sdhci-pci-core.c
992 @@ -601,6 +601,9 @@ static int intel_select_drive_strength(struct mmc_card *card,
993 struct sdhci_pci_slot *slot = sdhci_priv(host);
994 struct intel_host *intel_host = sdhci_pci_priv(slot);
995
996 + if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv))
997 + return 0;
998 +
999 return intel_host->drv_strength;
1000 }
1001
1002 diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
1003 index 1dea1ba66f7b..4703cd540c7f 100644
1004 --- a/drivers/mmc/host/sdhci-xenon.c
1005 +++ b/drivers/mmc/host/sdhci-xenon.c
1006 @@ -235,6 +235,16 @@ static void xenon_voltage_switch(struct sdhci_host *host)
1007 {
1008 /* Wait for 5ms after set 1.8V signal enable bit */
1009 usleep_range(5000, 5500);
1010 +
1011 + /*
1012 + * For some reason the controller's Host Control2 register reports
1013 + * the bit representing 1.8V signaling as 0 when read after it was
1014 + * written as 1. Subsequent read reports 1.
1015 + *
1016 + * Since this may cause some issues, do an empty read of the Host
1017 + * Control2 register here to circumvent this.
1018 + */
1019 + sdhci_readw(host, SDHCI_HOST_CONTROL2);
1020 }
1021
1022 static const struct sdhci_ops sdhci_xenon_ops = {
1023 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
1024 index f97c48fd3eda..31b7dcd791c2 100644
1025 --- a/drivers/nvme/host/core.c
1026 +++ b/drivers/nvme/host/core.c
1027 @@ -3566,6 +3566,8 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1028
1029 return 0;
1030 out_put_disk:
1031 + /* prevent double queue cleanup */
1032 + ns->disk->queue = NULL;
1033 put_disk(ns->disk);
1034 out_unlink_ns:
1035 mutex_lock(&ctrl->subsys->lock);
1036 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
1037 index 06037e3c7854..03d272a09e26 100644
1038 --- a/drivers/scsi/qla2xxx/qla_os.c
1039 +++ b/drivers/scsi/qla2xxx/qla_os.c
1040 @@ -3700,6 +3700,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
1041 }
1042 qla2x00_wait_for_hba_ready(base_vha);
1043
1044 + /*
1045 + * if UNLOADING flag is already set, then continue unload,
1046 + * where it was set first.
1047 + */
1048 + if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
1049 + return;
1050 +
1051 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
1052 IS_QLA28XX(ha)) {
1053 if (ha->flags.fw_started)
1054 @@ -3718,15 +3725,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
1055
1056 qla2x00_wait_for_sess_deletion(base_vha);
1057
1058 - /*
1059 - * if UNLOAD flag is already set, then continue unload,
1060 - * where it was set first.
1061 - */
1062 - if (test_bit(UNLOADING, &base_vha->dpc_flags))
1063 - return;
1064 -
1065 - set_bit(UNLOADING, &base_vha->dpc_flags);
1066 -
1067 qla_nvme_delete(base_vha);
1068
1069 dma_free_coherent(&ha->pdev->dev,
1070 @@ -4859,6 +4857,9 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
1071 struct qla_work_evt *e;
1072 uint8_t bail;
1073
1074 + if (test_bit(UNLOADING, &vha->dpc_flags))
1075 + return NULL;
1076 +
1077 QLA_VHA_MARK_BUSY(vha, bail);
1078 if (bail)
1079 return NULL;
1080 @@ -6053,13 +6054,6 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
1081 struct pci_dev *pdev = ha->pdev;
1082 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1083
1084 - /*
1085 - * if UNLOAD flag is already set, then continue unload,
1086 - * where it was set first.
1087 - */
1088 - if (test_bit(UNLOADING, &base_vha->dpc_flags))
1089 - return;
1090 -
1091 ql_log(ql_log_warn, base_vha, 0x015b,
1092 "Disabling adapter.\n");
1093
1094 @@ -6070,9 +6064,14 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
1095 return;
1096 }
1097
1098 - qla2x00_wait_for_sess_deletion(base_vha);
1099 + /*
1100 + * if UNLOADING flag is already set, then continue unload,
1101 + * where it was set first.
1102 + */
1103 + if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
1104 + return;
1105
1106 - set_bit(UNLOADING, &base_vha->dpc_flags);
1107 + qla2x00_wait_for_sess_deletion(base_vha);
1108
1109 qla2x00_delete_all_vps(ha, base_vha);
1110
1111 diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
1112 index 51ffd5c002de..1c181d31f4c8 100644
1113 --- a/drivers/target/target_core_iblock.c
1114 +++ b/drivers/target/target_core_iblock.c
1115 @@ -432,7 +432,7 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
1116 target_to_linux_sector(dev, cmd->t_task_lba),
1117 target_to_linux_sector(dev,
1118 sbc_get_write_same_sectors(cmd)),
1119 - GFP_KERNEL, false);
1120 + GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
1121 if (ret)
1122 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1123
1124 diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
1125 index d864277ea16f..6cc47af1f06d 100644
1126 --- a/drivers/vfio/vfio_iommu_type1.c
1127 +++ b/drivers/vfio/vfio_iommu_type1.c
1128 @@ -380,8 +380,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
1129 vma = find_vma_intersection(mm, vaddr, vaddr + 1);
1130
1131 if (vma && vma->vm_flags & VM_PFNMAP) {
1132 - *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1133 - if (is_invalid_reserved_pfn(*pfn))
1134 + if (!follow_pfn(vma, vaddr, pfn) &&
1135 + is_invalid_reserved_pfn(*pfn))
1136 ret = 0;
1137 }
1138
1139 @@ -593,7 +593,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
1140 continue;
1141 }
1142
1143 - remote_vaddr = dma->vaddr + iova - dma->iova;
1144 + remote_vaddr = dma->vaddr + (iova - dma->iova);
1145 ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
1146 do_accounting);
1147 if (ret)
1148 diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
1149 index 95330f40f998..2fead6c3c687 100644
1150 --- a/fs/btrfs/block-group.c
1151 +++ b/fs/btrfs/block-group.c
1152 @@ -910,7 +910,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
1153 path = btrfs_alloc_path();
1154 if (!path) {
1155 ret = -ENOMEM;
1156 - goto out;
1157 + goto out_put_group;
1158 }
1159
1160 /*
1161 @@ -948,7 +948,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
1162 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
1163 if (ret) {
1164 btrfs_add_delayed_iput(inode);
1165 - goto out;
1166 + goto out_put_group;
1167 }
1168 clear_nlink(inode);
1169 /* One for the block groups ref */
1170 @@ -971,13 +971,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
1171
1172 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
1173 if (ret < 0)
1174 - goto out;
1175 + goto out_put_group;
1176 if (ret > 0)
1177 btrfs_release_path(path);
1178 if (ret == 0) {
1179 ret = btrfs_del_item(trans, tree_root, path);
1180 if (ret)
1181 - goto out;
1182 + goto out_put_group;
1183 btrfs_release_path(path);
1184 }
1185
1186 @@ -1094,9 +1094,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
1187
1188 ret = remove_block_group_free_space(trans, block_group);
1189 if (ret)
1190 - goto out;
1191 + goto out_put_group;
1192
1193 - btrfs_put_block_group(block_group);
1194 + /* Once for the block groups rbtree */
1195 btrfs_put_block_group(block_group);
1196
1197 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1198 @@ -1119,6 +1119,10 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
1199 /* once for the tree */
1200 free_extent_map(em);
1201 }
1202 +
1203 +out_put_group:
1204 + /* Once for the lookup reference */
1205 + btrfs_put_block_group(block_group);
1206 out:
1207 if (remove_rsv)
1208 btrfs_delayed_refs_rsv_release(fs_info, 1);
1209 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
1210 index 32a09ed540a2..e890f09e2073 100644
1211 --- a/fs/btrfs/relocation.c
1212 +++ b/fs/btrfs/relocation.c
1213 @@ -4605,6 +4605,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
1214 if (IS_ERR(fs_root)) {
1215 err = PTR_ERR(fs_root);
1216 list_add_tail(&reloc_root->root_list, &reloc_roots);
1217 + btrfs_end_transaction(trans);
1218 goto out_unset;
1219 }
1220
1221 diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
1222 index 98b6903e3938..cdca0f656594 100644
1223 --- a/fs/btrfs/transaction.c
1224 +++ b/fs/btrfs/transaction.c
1225 @@ -590,10 +590,19 @@ again:
1226 }
1227
1228 got_it:
1229 - btrfs_record_root_in_trans(h, root);
1230 -
1231 if (!current->journal_info)
1232 current->journal_info = h;
1233 +
1234 + /*
1235 + * btrfs_record_root_in_trans() needs to alloc new extents, and may
1236 + * call btrfs_join_transaction() while we're also starting a
1237 + * transaction.
1238 + *
1239 + * Thus it need to be called after current->journal_info initialized,
1240 + * or we can deadlock.
1241 + */
1242 + btrfs_record_root_in_trans(h, root);
1243 +
1244 return h;
1245
1246 join_fail:
1247 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
1248 index 6f2178618c22..7d464b049507 100644
1249 --- a/fs/btrfs/tree-log.c
1250 +++ b/fs/btrfs/tree-log.c
1251 @@ -4242,6 +4242,9 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
1252 const u64 ino = btrfs_ino(inode);
1253 struct btrfs_path *dst_path = NULL;
1254 bool dropped_extents = false;
1255 + u64 truncate_offset = i_size;
1256 + struct extent_buffer *leaf;
1257 + int slot;
1258 int ins_nr = 0;
1259 int start_slot;
1260 int ret;
1261 @@ -4256,9 +4259,43 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
1262 if (ret < 0)
1263 goto out;
1264
1265 + /*
1266 + * We must check if there is a prealloc extent that starts before the
1267 + * i_size and crosses the i_size boundary. This is to ensure later we
1268 + * truncate down to the end of that extent and not to the i_size, as
1269 + * otherwise we end up losing part of the prealloc extent after a log
1270 + * replay and with an implicit hole if there is another prealloc extent
1271 + * that starts at an offset beyond i_size.
1272 + */
1273 + ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
1274 + if (ret < 0)
1275 + goto out;
1276 +
1277 + if (ret == 0) {
1278 + struct btrfs_file_extent_item *ei;
1279 +
1280 + leaf = path->nodes[0];
1281 + slot = path->slots[0];
1282 + ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1283 +
1284 + if (btrfs_file_extent_type(leaf, ei) ==
1285 + BTRFS_FILE_EXTENT_PREALLOC) {
1286 + u64 extent_end;
1287 +
1288 + btrfs_item_key_to_cpu(leaf, &key, slot);
1289 + extent_end = key.offset +
1290 + btrfs_file_extent_num_bytes(leaf, ei);
1291 +
1292 + if (extent_end > i_size)
1293 + truncate_offset = extent_end;
1294 + }
1295 + } else {
1296 + ret = 0;
1297 + }
1298 +
1299 while (true) {
1300 - struct extent_buffer *leaf = path->nodes[0];
1301 - int slot = path->slots[0];
1302 + leaf = path->nodes[0];
1303 + slot = path->slots[0];
1304
1305 if (slot >= btrfs_header_nritems(leaf)) {
1306 if (ins_nr > 0) {
1307 @@ -4296,7 +4333,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
1308 ret = btrfs_truncate_inode_items(trans,
1309 root->log_root,
1310 &inode->vfs_inode,
1311 - i_size,
1312 + truncate_offset,
1313 BTRFS_EXTENT_DATA_KEY);
1314 } while (ret == -EAGAIN);
1315 if (ret)
1316 diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
1317 index c5c3fc6e6c60..26c94b32d6f4 100644
1318 --- a/fs/nfs/nfs3acl.c
1319 +++ b/fs/nfs/nfs3acl.c
1320 @@ -253,37 +253,45 @@ int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
1321
1322 int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type)
1323 {
1324 - struct posix_acl *alloc = NULL, *dfacl = NULL;
1325 + struct posix_acl *orig = acl, *dfacl = NULL, *alloc;
1326 int status;
1327
1328 if (S_ISDIR(inode->i_mode)) {
1329 switch(type) {
1330 case ACL_TYPE_ACCESS:
1331 - alloc = dfacl = get_acl(inode, ACL_TYPE_DEFAULT);
1332 + alloc = get_acl(inode, ACL_TYPE_DEFAULT);
1333 if (IS_ERR(alloc))
1334 goto fail;
1335 + dfacl = alloc;
1336 break;
1337
1338 case ACL_TYPE_DEFAULT:
1339 - dfacl = acl;
1340 - alloc = acl = get_acl(inode, ACL_TYPE_ACCESS);
1341 + alloc = get_acl(inode, ACL_TYPE_ACCESS);
1342 if (IS_ERR(alloc))
1343 goto fail;
1344 + dfacl = acl;
1345 + acl = alloc;
1346 break;
1347 }
1348 }
1349
1350 if (acl == NULL) {
1351 - alloc = acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
1352 + alloc = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
1353 if (IS_ERR(alloc))
1354 goto fail;
1355 + acl = alloc;
1356 }
1357 status = __nfs3_proc_setacls(inode, acl, dfacl);
1358 - posix_acl_release(alloc);
1359 +out:
1360 + if (acl != orig)
1361 + posix_acl_release(acl);
1362 + if (dfacl != orig)
1363 + posix_acl_release(dfacl);
1364 return status;
1365
1366 fail:
1367 - return PTR_ERR(alloc);
1368 + status = PTR_ERR(alloc);
1369 + goto out;
1370 }
1371
1372 const struct xattr_handler *nfs3_xattr_handlers[] = {
1373 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
1374 index 6b29703d2fe1..e257653f25ab 100644
1375 --- a/fs/nfs/nfs4proc.c
1376 +++ b/fs/nfs/nfs4proc.c
1377 @@ -7852,6 +7852,7 @@ static void
1378 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
1379 {
1380 struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp;
1381 + struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp;
1382 struct nfs_client *clp = args->client;
1383
1384 switch (task->tk_status) {
1385 @@ -7860,6 +7861,12 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
1386 nfs4_schedule_session_recovery(clp->cl_session,
1387 task->tk_status);
1388 }
1389 + if (args->dir == NFS4_CDFC4_FORE_OR_BOTH &&
1390 + res->dir != NFS4_CDFS4_BOTH) {
1391 + rpc_task_close_connection(task);
1392 + if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES)
1393 + rpc_restart_call(task);
1394 + }
1395 }
1396
1397 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
1398 @@ -7882,6 +7889,7 @@ int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
1399 struct nfs41_bind_conn_to_session_args args = {
1400 .client = clp,
1401 .dir = NFS4_CDFC4_FORE_OR_BOTH,
1402 + .retries = 0,
1403 };
1404 struct nfs41_bind_conn_to_session_res res;
1405 struct rpc_message msg = {
1406 diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
1407 index 8e4f1ace467c..1de77f1a600b 100644
1408 --- a/fs/ocfs2/dlmfs/dlmfs.c
1409 +++ b/fs/ocfs2/dlmfs/dlmfs.c
1410 @@ -275,7 +275,6 @@ static ssize_t dlmfs_file_write(struct file *filp,
1411 loff_t *ppos)
1412 {
1413 int bytes_left;
1414 - ssize_t writelen;
1415 char *lvb_buf;
1416 struct inode *inode = file_inode(filp);
1417
1418 @@ -285,32 +284,30 @@ static ssize_t dlmfs_file_write(struct file *filp,
1419 if (*ppos >= i_size_read(inode))
1420 return -ENOSPC;
1421
1422 + /* don't write past the lvb */
1423 + if (count > i_size_read(inode) - *ppos)
1424 + count = i_size_read(inode) - *ppos;
1425 +
1426 if (!count)
1427 return 0;
1428
1429 if (!access_ok(buf, count))
1430 return -EFAULT;
1431
1432 - /* don't write past the lvb */
1433 - if ((count + *ppos) > i_size_read(inode))
1434 - writelen = i_size_read(inode) - *ppos;
1435 - else
1436 - writelen = count - *ppos;
1437 -
1438 - lvb_buf = kmalloc(writelen, GFP_NOFS);
1439 + lvb_buf = kmalloc(count, GFP_NOFS);
1440 if (!lvb_buf)
1441 return -ENOMEM;
1442
1443 - bytes_left = copy_from_user(lvb_buf, buf, writelen);
1444 - writelen -= bytes_left;
1445 - if (writelen)
1446 - user_dlm_write_lvb(inode, lvb_buf, writelen);
1447 + bytes_left = copy_from_user(lvb_buf, buf, count);
1448 + count -= bytes_left;
1449 + if (count)
1450 + user_dlm_write_lvb(inode, lvb_buf, count);
1451
1452 kfree(lvb_buf);
1453
1454 - *ppos = *ppos + writelen;
1455 - mlog(0, "wrote %zd bytes\n", writelen);
1456 - return writelen;
1457 + *ppos = *ppos + count;
1458 + mlog(0, "wrote %zu bytes\n", count);
1459 + return count;
1460 }
1461
1462 static void dlmfs_init_once(void *foo)
1463 diff --git a/fs/super.c b/fs/super.c
1464 index cd352530eca9..a288cd60d2ae 100644
1465 --- a/fs/super.c
1466 +++ b/fs/super.c
1467 @@ -1302,8 +1302,8 @@ int get_tree_bdev(struct fs_context *fc,
1468 mutex_lock(&bdev->bd_fsfreeze_mutex);
1469 if (bdev->bd_fsfreeze_count > 0) {
1470 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1471 - blkdev_put(bdev, mode);
1472 warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
1473 + blkdev_put(bdev, mode);
1474 return -EBUSY;
1475 }
1476
1477 diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
1478 index 9b8324ec08f3..99d327d0bccb 100644
1479 --- a/include/linux/nfs_xdr.h
1480 +++ b/include/linux/nfs_xdr.h
1481 @@ -1307,11 +1307,13 @@ struct nfs41_impl_id {
1482 struct nfstime4 date;
1483 };
1484
1485 +#define MAX_BIND_CONN_TO_SESSION_RETRIES 3
1486 struct nfs41_bind_conn_to_session_args {
1487 struct nfs_client *client;
1488 struct nfs4_sessionid sessionid;
1489 u32 dir;
1490 bool use_conn_in_rdma_mode;
1491 + int retries;
1492 };
1493
1494 struct nfs41_bind_conn_to_session_res {
1495 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
1496 index abc63bd1be2b..336802acc629 100644
1497 --- a/include/linux/sunrpc/clnt.h
1498 +++ b/include/linux/sunrpc/clnt.h
1499 @@ -237,5 +237,10 @@ static inline int rpc_reply_expected(struct rpc_task *task)
1500 (task->tk_msg.rpc_proc->p_decode != NULL);
1501 }
1502
1503 +static inline void rpc_task_close_connection(struct rpc_task *task)
1504 +{
1505 + if (task->tk_xprt)
1506 + xprt_force_disconnect(task->tk_xprt);
1507 +}
1508 #endif /* __KERNEL__ */
1509 #endif /* _LINUX_SUNRPC_CLNT_H */
1510 diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h
1511 index dbc7092e04b5..7f30393b92c3 100644
1512 --- a/include/uapi/linux/dma-buf.h
1513 +++ b/include/uapi/linux/dma-buf.h
1514 @@ -39,6 +39,12 @@ struct dma_buf_sync {
1515
1516 #define DMA_BUF_BASE 'b'
1517 #define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
1518 +
1519 +/* 32/64bitness of this uapi was botched in android, there's no difference
1520 + * between them in actual uapi, they're just different numbers.
1521 + */
1522 #define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *)
1523 +#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, u32)
1524 +#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, u64)
1525
1526 #endif
1527 diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
1528 index 3c0a5a8170b0..b6c5895ced36 100644
1529 --- a/kernel/power/hibernate.c
1530 +++ b/kernel/power/hibernate.c
1531 @@ -898,6 +898,13 @@ static int software_resume(void)
1532 error = freeze_processes();
1533 if (error)
1534 goto Close_Finish;
1535 +
1536 + error = freeze_kernel_threads();
1537 + if (error) {
1538 + thaw_processes();
1539 + goto Close_Finish;
1540 + }
1541 +
1542 error = load_image_and_restore();
1543 thaw_processes();
1544 Finish:
1545 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
1546 index 39410913a694..552e73d90fd2 100644
1547 --- a/security/selinux/hooks.c
1548 +++ b/security/selinux/hooks.c
1549 @@ -5521,40 +5521,60 @@ static int selinux_tun_dev_open(void *security)
1550
1551 static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
1552 {
1553 - int err = 0;
1554 - u32 perm;
1555 + int rc = 0;
1556 + unsigned int msg_len;
1557 + unsigned int data_len = skb->len;
1558 + unsigned char *data = skb->data;
1559 struct nlmsghdr *nlh;
1560 struct sk_security_struct *sksec = sk->sk_security;
1561 + u16 sclass = sksec->sclass;
1562 + u32 perm;
1563
1564 - if (skb->len < NLMSG_HDRLEN) {
1565 - err = -EINVAL;
1566 - goto out;
1567 - }
1568 - nlh = nlmsg_hdr(skb);
1569 + while (data_len >= nlmsg_total_size(0)) {
1570 + nlh = (struct nlmsghdr *)data;
1571 +
1572 + /* NOTE: the nlmsg_len field isn't reliably set by some netlink
1573 + * users which means we can't reject skb's with bogus
1574 + * length fields; our solution is to follow what
1575 + * netlink_rcv_skb() does and simply skip processing at
1576 + * messages with length fields that are clearly junk
1577 + */
1578 + if (nlh->nlmsg_len < NLMSG_HDRLEN || nlh->nlmsg_len > data_len)
1579 + return 0;
1580
1581 - err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm);
1582 - if (err) {
1583 - if (err == -EINVAL) {
1584 + rc = selinux_nlmsg_lookup(sclass, nlh->nlmsg_type, &perm);
1585 + if (rc == 0) {
1586 + rc = sock_has_perm(sk, perm);
1587 + if (rc)
1588 + return rc;
1589 + } else if (rc == -EINVAL) {
1590 + /* -EINVAL is a missing msg/perm mapping */
1591 pr_warn_ratelimited("SELinux: unrecognized netlink"
1592 - " message: protocol=%hu nlmsg_type=%hu sclass=%s"
1593 - " pig=%d comm=%s\n",
1594 - sk->sk_protocol, nlh->nlmsg_type,
1595 - secclass_map[sksec->sclass - 1].name,
1596 - task_pid_nr(current), current->comm);
1597 - if (!enforcing_enabled(&selinux_state) ||
1598 - security_get_allow_unknown(&selinux_state))
1599 - err = 0;
1600 + " message: protocol=%hu nlmsg_type=%hu sclass=%s"
1601 + " pid=%d comm=%s\n",
1602 + sk->sk_protocol, nlh->nlmsg_type,
1603 + secclass_map[sclass - 1].name,
1604 + task_pid_nr(current), current->comm);
1605 + if (enforcing_enabled(&selinux_state) &&
1606 + !security_get_allow_unknown(&selinux_state))
1607 + return rc;
1608 + rc = 0;
1609 + } else if (rc == -ENOENT) {
1610 + /* -ENOENT is a missing socket/class mapping, ignore */
1611 + rc = 0;
1612 + } else {
1613 + return rc;
1614 }
1615
1616 - /* Ignore */
1617 - if (err == -ENOENT)
1618 - err = 0;
1619 - goto out;
1620 + /* move to the next message after applying netlink padding */
1621 + msg_len = NLMSG_ALIGN(nlh->nlmsg_len);
1622 + if (msg_len >= data_len)
1623 + return 0;
1624 + data_len -= msg_len;
1625 + data += msg_len;
1626 }
1627
1628 - err = sock_has_perm(sk, perm);
1629 -out:
1630 - return err;
1631 + return rc;
1632 }
1633
1634 #ifdef CONFIG_NETFILTER
1635 diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
1636 index 8539047145de..da400da1fafe 100644
1637 --- a/sound/core/oss/pcm_plugin.c
1638 +++ b/sound/core/oss/pcm_plugin.c
1639 @@ -211,21 +211,23 @@ static snd_pcm_sframes_t plug_client_size(struct snd_pcm_substream *plug,
1640 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1641 plugin = snd_pcm_plug_last(plug);
1642 while (plugin && drv_frames > 0) {
1643 - if (check_size && drv_frames > plugin->buf_frames)
1644 - drv_frames = plugin->buf_frames;
1645 plugin_prev = plugin->prev;
1646 if (plugin->src_frames)
1647 drv_frames = plugin->src_frames(plugin, drv_frames);
1648 + if (check_size && plugin->buf_frames &&
1649 + drv_frames > plugin->buf_frames)
1650 + drv_frames = plugin->buf_frames;
1651 plugin = plugin_prev;
1652 }
1653 } else if (stream == SNDRV_PCM_STREAM_CAPTURE) {
1654 plugin = snd_pcm_plug_first(plug);
1655 while (plugin && drv_frames > 0) {
1656 plugin_next = plugin->next;
1657 + if (check_size && plugin->buf_frames &&
1658 + drv_frames > plugin->buf_frames)
1659 + drv_frames = plugin->buf_frames;
1660 if (plugin->dst_frames)
1661 drv_frames = plugin->dst_frames(plugin, drv_frames);
1662 - if (check_size && drv_frames > plugin->buf_frames)
1663 - drv_frames = plugin->buf_frames;
1664 plugin = plugin_next;
1665 }
1666 } else
1667 @@ -251,26 +253,28 @@ static snd_pcm_sframes_t plug_slave_size(struct snd_pcm_substream *plug,
1668 plugin = snd_pcm_plug_first(plug);
1669 while (plugin && frames > 0) {
1670 plugin_next = plugin->next;
1671 + if (check_size && plugin->buf_frames &&
1672 + frames > plugin->buf_frames)
1673 + frames = plugin->buf_frames;
1674 if (plugin->dst_frames) {
1675 frames = plugin->dst_frames(plugin, frames);
1676 if (frames < 0)
1677 return frames;
1678 }
1679 - if (check_size && frames > plugin->buf_frames)
1680 - frames = plugin->buf_frames;
1681 plugin = plugin_next;
1682 }
1683 } else if (stream == SNDRV_PCM_STREAM_CAPTURE) {
1684 plugin = snd_pcm_plug_last(plug);
1685 while (plugin) {
1686 - if (check_size && frames > plugin->buf_frames)
1687 - frames = plugin->buf_frames;
1688 plugin_prev = plugin->prev;
1689 if (plugin->src_frames) {
1690 frames = plugin->src_frames(plugin, frames);
1691 if (frames < 0)
1692 return frames;
1693 }
1694 + if (check_size && plugin->buf_frames &&
1695 + frames > plugin->buf_frames)
1696 + frames = plugin->buf_frames;
1697 plugin = plugin_prev;
1698 }
1699 } else
1700 diff --git a/sound/isa/opti9xx/miro.c b/sound/isa/opti9xx/miro.c
1701 index 0458934de1c7..9ca5c83de8a7 100644
1702 --- a/sound/isa/opti9xx/miro.c
1703 +++ b/sound/isa/opti9xx/miro.c
1704 @@ -867,10 +867,13 @@ static void snd_miro_write(struct snd_miro *chip, unsigned char reg,
1705 spin_unlock_irqrestore(&chip->lock, flags);
1706 }
1707
1708 +static inline void snd_miro_write_mask(struct snd_miro *chip,
1709 + unsigned char reg, unsigned char value, unsigned char mask)
1710 +{
1711 + unsigned char oldval = snd_miro_read(chip, reg);
1712
1713 -#define snd_miro_write_mask(chip, reg, value, mask) \
1714 - snd_miro_write(chip, reg, \
1715 - (snd_miro_read(chip, reg) & ~(mask)) | ((value) & (mask)))
1716 + snd_miro_write(chip, reg, (oldval & ~mask) | (value & mask));
1717 +}
1718
1719 /*
1720 * Proc Interface
1721 diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
1722 index fb36bb5d55df..fb87eedc8121 100644
1723 --- a/sound/isa/opti9xx/opti92x-ad1848.c
1724 +++ b/sound/isa/opti9xx/opti92x-ad1848.c
1725 @@ -317,10 +317,13 @@ static void snd_opti9xx_write(struct snd_opti9xx *chip, unsigned char reg,
1726 }
1727
1728
1729 -#define snd_opti9xx_write_mask(chip, reg, value, mask) \
1730 - snd_opti9xx_write(chip, reg, \
1731 - (snd_opti9xx_read(chip, reg) & ~(mask)) | ((value) & (mask)))
1732 +static inline void snd_opti9xx_write_mask(struct snd_opti9xx *chip,
1733 + unsigned char reg, unsigned char value, unsigned char mask)
1734 +{
1735 + unsigned char oldval = snd_opti9xx_read(chip, reg);
1736
1737 + snd_opti9xx_write(chip, reg, (oldval & ~mask) | (value & mask));
1738 +}
1739
1740 static int snd_opti9xx_configure(struct snd_opti9xx *chip,
1741 long port,
1742 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
1743 index f1febfc47ba6..663168ddce72 100644
1744 --- a/sound/pci/hda/patch_hdmi.c
1745 +++ b/sound/pci/hda/patch_hdmi.c
1746 @@ -1861,8 +1861,10 @@ static bool check_non_pcm_per_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
1747 /* Add sanity check to pass klockwork check.
1748 * This should never happen.
1749 */
1750 - if (WARN_ON(spdif == NULL))
1751 + if (WARN_ON(spdif == NULL)) {
1752 + mutex_unlock(&codec->spdif_mutex);
1753 return true;
1754 + }
1755 non_pcm = !!(spdif->status & IEC958_AES0_NONAUDIO);
1756 mutex_unlock(&codec->spdif_mutex);
1757 return non_pcm;
1758 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1759 index ee1b89f2bcd5..64270983ab7d 100644
1760 --- a/sound/pci/hda/patch_realtek.c
1761 +++ b/sound/pci/hda/patch_realtek.c
1762 @@ -7295,6 +7295,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
1763 SND_PCI_QUIRK(0x1558, 0x8560, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
1764 SND_PCI_QUIRK(0x1558, 0x8561, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
1765 SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
1766 + SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
1767 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
1768 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
1769 SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
1770 diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
1771 index 27bf61c177c0..5d9954a2d05e 100644
1772 --- a/sound/usb/line6/podhd.c
1773 +++ b/sound/usb/line6/podhd.c
1774 @@ -21,8 +21,7 @@
1775 enum {
1776 LINE6_PODHD300,
1777 LINE6_PODHD400,
1778 - LINE6_PODHD500_0,
1779 - LINE6_PODHD500_1,
1780 + LINE6_PODHD500,
1781 LINE6_PODX3,
1782 LINE6_PODX3LIVE,
1783 LINE6_PODHD500X,
1784 @@ -318,8 +317,7 @@ static const struct usb_device_id podhd_id_table[] = {
1785 /* TODO: no need to alloc data interfaces when only audio is used */
1786 { LINE6_DEVICE(0x5057), .driver_info = LINE6_PODHD300 },
1787 { LINE6_DEVICE(0x5058), .driver_info = LINE6_PODHD400 },
1788 - { LINE6_IF_NUM(0x414D, 0), .driver_info = LINE6_PODHD500_0 },
1789 - { LINE6_IF_NUM(0x414D, 1), .driver_info = LINE6_PODHD500_1 },
1790 + { LINE6_IF_NUM(0x414D, 0), .driver_info = LINE6_PODHD500 },
1791 { LINE6_IF_NUM(0x414A, 0), .driver_info = LINE6_PODX3 },
1792 { LINE6_IF_NUM(0x414B, 0), .driver_info = LINE6_PODX3LIVE },
1793 { LINE6_IF_NUM(0x4159, 0), .driver_info = LINE6_PODHD500X },
1794 @@ -352,23 +350,13 @@ static const struct line6_properties podhd_properties_table[] = {
1795 .ep_audio_r = 0x82,
1796 .ep_audio_w = 0x01,
1797 },
1798 - [LINE6_PODHD500_0] = {
1799 + [LINE6_PODHD500] = {
1800 .id = "PODHD500",
1801 .name = "POD HD500",
1802 - .capabilities = LINE6_CAP_PCM
1803 + .capabilities = LINE6_CAP_PCM | LINE6_CAP_CONTROL
1804 | LINE6_CAP_HWMON,
1805 .altsetting = 1,
1806 - .ep_ctrl_r = 0x81,
1807 - .ep_ctrl_w = 0x01,
1808 - .ep_audio_r = 0x86,
1809 - .ep_audio_w = 0x02,
1810 - },
1811 - [LINE6_PODHD500_1] = {
1812 - .id = "PODHD500",
1813 - .name = "POD HD500",
1814 - .capabilities = LINE6_CAP_PCM
1815 - | LINE6_CAP_HWMON,
1816 - .altsetting = 0,
1817 + .ctrl_if = 1,
1818 .ep_ctrl_r = 0x81,
1819 .ep_ctrl_w = 0x01,
1820 .ep_audio_r = 0x86,
1821 diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
1822 index 7448ab07bd36..5a81c444a18b 100644
1823 --- a/sound/usb/quirks.c
1824 +++ b/sound/usb/quirks.c
1825 @@ -1643,7 +1643,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
1826
1827 case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
1828 case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
1829 - case USB_ID(0x16b0, 0x06b2): /* NuPrime DAC-10 */
1830 + case USB_ID(0x16d0, 0x06b2): /* NuPrime DAC-10 */
1831 case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
1832 case USB_ID(0x16d0, 0x0733): /* Furutech ADL Stratos */
1833 case USB_ID(0x16d0, 0x09db): /* NuPrime Audio DAC-9 */