Magellan Linux

Contents of /trunk/kernel-magellan/patches-5.0/0104-5.0.5-all-fixes.patch



Revision 3331
Fri Apr 26 12:20:24 2019 UTC by niro
File size: 66224 bytes
-linux-5.0.5
1 diff --git a/Makefile b/Makefile
2 index 06fda21614bc..63152c5ca136 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 0
9 -SUBLEVEL = 4
10 +SUBLEVEL = 5
11 EXTRAVERSION =
12 NAME = Shy Crocodile
13
14 diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
15 index e77672539e8e..e4456e450f94 100644
16 --- a/arch/mips/include/asm/jump_label.h
17 +++ b/arch/mips/include/asm/jump_label.h
18 @@ -21,15 +21,15 @@
19 #endif
20
21 #ifdef CONFIG_CPU_MICROMIPS
22 -#define NOP_INSN "nop32"
23 +#define B_INSN "b32"
24 #else
25 -#define NOP_INSN "nop"
26 +#define B_INSN "b"
27 #endif
28
29 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
30 {
31 - asm_volatile_goto("1:\t" NOP_INSN "\n\t"
32 - "nop\n\t"
33 + asm_volatile_goto("1:\t" B_INSN " 2f\n\t"
34 + "2:\tnop\n\t"
35 ".pushsection __jump_table, \"aw\"\n\t"
36 WORD_INSN " 1b, %l[l_yes], %0\n\t"
37 ".popsection\n\t"
38 diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
39 index cb7e9ed7a453..33ee0d18fb0a 100644
40 --- a/arch/mips/kernel/vmlinux.lds.S
41 +++ b/arch/mips/kernel/vmlinux.lds.S
42 @@ -140,6 +140,13 @@ SECTIONS
43 PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
44 #endif
45
46 +#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
47 + .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
48 + *(.appended_dtb)
49 + KEEP(*(.appended_dtb))
50 + }
51 +#endif
52 +
53 #ifdef CONFIG_RELOCATABLE
54 . = ALIGN(4);
55
56 @@ -164,11 +171,6 @@ SECTIONS
57 __appended_dtb = .;
58 /* leave space for appended DTB */
59 . += 0x100000;
60 -#elif defined(CONFIG_MIPS_ELF_APPENDED_DTB)
61 - .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
62 - *(.appended_dtb)
63 - KEEP(*(.appended_dtb))
64 - }
65 #endif
66 /*
67 * Align to 64K in attempt to eliminate holes before the
68 diff --git a/arch/mips/loongson64/lemote-2f/irq.c b/arch/mips/loongson64/lemote-2f/irq.c
69 index 9e33e45aa17c..b213cecb8e3a 100644
70 --- a/arch/mips/loongson64/lemote-2f/irq.c
71 +++ b/arch/mips/loongson64/lemote-2f/irq.c
72 @@ -103,7 +103,7 @@ static struct irqaction ip6_irqaction = {
73 static struct irqaction cascade_irqaction = {
74 .handler = no_action,
75 .name = "cascade",
76 - .flags = IRQF_NO_THREAD,
77 + .flags = IRQF_NO_THREAD | IRQF_NO_SUSPEND,
78 };
79
80 void __init mach_init_irq(void)
81 diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
82 index 1afe90ade595..bbc06bd72b1f 100644
83 --- a/arch/powerpc/include/asm/vdso_datapage.h
84 +++ b/arch/powerpc/include/asm/vdso_datapage.h
85 @@ -82,10 +82,10 @@ struct vdso_data {
86 __u32 icache_block_size; /* L1 i-cache block size */
87 __u32 dcache_log_block_size; /* L1 d-cache log block size */
88 __u32 icache_log_block_size; /* L1 i-cache log block size */
89 - __s32 wtom_clock_sec; /* Wall to monotonic clock */
90 - __s32 wtom_clock_nsec;
91 - struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */
92 - __u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */
93 + __u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */
94 + __s32 wtom_clock_nsec; /* Wall to monotonic clock nsec */
95 + __s64 wtom_clock_sec; /* Wall to monotonic clock sec */
96 + struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */
97 __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
98 __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
99 };
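
The reordering above widens wtom_clock_sec to 64 bits and places it on a natural 8-byte boundary, which is what lets the vdso64 change further down switch from lwa (32-bit load, sign-extended) to ld (64-bit load). A small C sketch of the layout property being relied on; the struct here is a re-created fragment for illustration, not the kernel's definition:

    #include <stddef.h>
    #include <stdint.h>

    struct vdso_tail {
        uint32_t stamp_sec_fraction;
        int32_t  wtom_clock_nsec;
        int64_t  wtom_clock_sec;   /* was __s32; widened and realigned */
    };

    _Static_assert(offsetof(struct vdso_tail, wtom_clock_sec) % 8 == 0,
                   "wtom_clock_sec must be 8-byte aligned for a single ld");
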
100 diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
101 index 9b8631533e02..b33bafb8fcea 100644
102 --- a/arch/powerpc/kernel/security.c
103 +++ b/arch/powerpc/kernel/security.c
104 @@ -190,29 +190,22 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
105 bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
106 ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
107
108 - if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
109 - bool comma = false;
110 + if (bcs || ccd) {
111 seq_buf_printf(&s, "Mitigation: ");
112
113 - if (bcs) {
114 + if (bcs)
115 seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
116 - comma = true;
117 - }
118
119 - if (ccd) {
120 - if (comma)
121 - seq_buf_printf(&s, ", ");
122 - seq_buf_printf(&s, "Indirect branch cache disabled");
123 - comma = true;
124 - }
125 -
126 - if (comma)
127 + if (bcs && ccd)
128 seq_buf_printf(&s, ", ");
129
130 - seq_buf_printf(&s, "Software count cache flush");
131 + if (ccd)
132 + seq_buf_printf(&s, "Indirect branch cache disabled");
133 + } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
134 + seq_buf_printf(&s, "Mitigation: Software count cache flush");
135
136 if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
137 - seq_buf_printf(&s, "(hardware accelerated)");
138 + seq_buf_printf(&s, " (hardware accelerated)");
139 } else if (btb_flush_enabled) {
140 seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
141 } else {
142 diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
143 index a4ed9edfd5f0..1f324c28705b 100644
144 --- a/arch/powerpc/kernel/vdso64/gettimeofday.S
145 +++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
146 @@ -92,7 +92,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
147 * At this point, r4,r5 contain our sec/nsec values.
148 */
149
150 - lwa r6,WTOM_CLOCK_SEC(r3)
151 + ld r6,WTOM_CLOCK_SEC(r3)
152 lwa r9,WTOM_CLOCK_NSEC(r3)
153
154 /* We now have our result in r6,r9. We create a fake dependency
155 @@ -125,7 +125,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
156 bne cr6,75f
157
158 /* CLOCK_MONOTONIC_COARSE */
159 - lwa r6,WTOM_CLOCK_SEC(r3)
160 + ld r6,WTOM_CLOCK_SEC(r3)
161 lwa r9,WTOM_CLOCK_NSEC(r3)
162
163 /* check if counter has updated */
164 diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
165 index 1f86e1b0a5cd..499578f7e6d7 100644
166 --- a/arch/x86/include/asm/unwind.h
167 +++ b/arch/x86/include/asm/unwind.h
168 @@ -23,6 +23,12 @@ struct unwind_state {
169 #elif defined(CONFIG_UNWINDER_FRAME_POINTER)
170 bool got_irq;
171 unsigned long *bp, *orig_sp, ip;
172 + /*
173 + * If non-NULL: The current frame is incomplete and doesn't contain a
174 + * valid BP. When looking for the next frame, use this instead of the
175 + * non-existent saved BP.
176 + */
177 + unsigned long *next_bp;
178 struct pt_regs *regs;
179 #else
180 unsigned long *sp;
181 diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
182 index 3dc26f95d46e..9b9fd4826e7a 100644
183 --- a/arch/x86/kernel/unwind_frame.c
184 +++ b/arch/x86/kernel/unwind_frame.c
185 @@ -320,10 +320,14 @@ bool unwind_next_frame(struct unwind_state *state)
186 }
187
188 /* Get the next frame pointer: */
189 - if (state->regs)
190 + if (state->next_bp) {
191 + next_bp = state->next_bp;
192 + state->next_bp = NULL;
193 + } else if (state->regs) {
194 next_bp = (unsigned long *)state->regs->bp;
195 - else
196 + } else {
197 next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp);
198 + }
199
200 /* Move to the next frame if it's safe: */
201 if (!update_stack_state(state, next_bp))
202 @@ -398,6 +402,21 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
203
204 bp = get_frame_pointer(task, regs);
205
206 + /*
207 + * If we crash with IP==0, the last successfully executed instruction
208 + * was probably an indirect function call with a NULL function pointer.
209 + * That means that SP points into the middle of an incomplete frame:
210 + * *SP is a return pointer, and *(SP-sizeof(unsigned long)) is where we
211 + * would have written a frame pointer if we hadn't crashed.
212 + * Pretend that the frame is complete and that BP points to it, but save
213 + * the real BP so that we can use it when looking for the next frame.
214 + */
215 + if (regs && regs->ip == 0 &&
216 + (unsigned long *)kernel_stack_pointer(regs) >= first_frame) {
217 + state->next_bp = bp;
218 + bp = ((unsigned long *)kernel_stack_pointer(regs)) - 1;
219 + }
220 +
221 /* Initialize stack info and make sure the frame data is accessible: */
222 get_stack_info(bp, state->task, &state->stack_info,
223 &state->stack_mask);
224 @@ -410,7 +429,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
225 */
226 while (!unwind_done(state) &&
227 (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
228 - state->bp < first_frame))
229 + (state->next_bp == NULL && state->bp < first_frame)))
230 unwind_next_frame(state);
231 }
232 EXPORT_SYMBOL_GPL(__unwind_start);
233 diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
234 index 26038eacf74a..89be1be1790c 100644
235 --- a/arch/x86/kernel/unwind_orc.c
236 +++ b/arch/x86/kernel/unwind_orc.c
237 @@ -113,6 +113,20 @@ static struct orc_entry *orc_ftrace_find(unsigned long ip)
238 }
239 #endif
240
241 +/*
242 + * If we crash with IP==0, the last successfully executed instruction
243 + * was probably an indirect function call with a NULL function pointer,
244 + * and we don't have unwind information for NULL.
245 + * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
246 + * pointer into its parent and then continue normally from there.
247 + */
248 +static struct orc_entry null_orc_entry = {
249 + .sp_offset = sizeof(long),
250 + .sp_reg = ORC_REG_SP,
251 + .bp_reg = ORC_REG_UNDEFINED,
252 + .type = ORC_TYPE_CALL
253 +};
254 +
255 static struct orc_entry *orc_find(unsigned long ip)
256 {
257 static struct orc_entry *orc;
258 @@ -120,6 +134,9 @@ static struct orc_entry *orc_find(unsigned long ip)
259 if (!orc_init)
260 return NULL;
261
262 + if (ip == 0)
263 + return &null_orc_entry;
264 +
265 /* For non-init vmlinux addresses, use the fast lookup table: */
266 if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
267 unsigned int idx, start, stop;
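
Both unwinder changes above handle the same situation: after a call through a NULL function pointer the CPU faults with IP==0, for which no unwind data exists, so a synthetic "just after a call" entry is substituted. A hedged sketch of that lookup shape (the types and the empty table are illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    struct entry { int sp_offset; };       /* stand-in for struct orc_entry */

    /* At ip == 0 the only thing on the stack is the return address. */
    static const struct entry null_entry = { .sp_offset = sizeof(long) };

    static const struct entry *table_lookup(uintptr_t ip)
    {
        (void)ip;
        return NULL;                       /* no real unwind table here */
    }

    static const struct entry *find_entry(uintptr_t ip)
    {
        if (ip == 0)
            return &null_entry;            /* NULL call: unwind to caller */
        return table_lookup(ip);
    }

    int main(void)
    {
        printf("sp_offset at ip==0: %d\n", find_entry(0)->sp_offset);
        return 0;
    }
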
268 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
269 index cf5538942834..2faefdd6f420 100644
270 --- a/drivers/block/loop.c
271 +++ b/drivers/block/loop.c
272 @@ -656,7 +656,7 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
273 return -EBADF;
274
275 l = f->f_mapping->host->i_bdev->bd_disk->private_data;
276 - if (l->lo_state == Lo_unbound) {
277 + if (l->lo_state != Lo_bound) {
278 return -EINVAL;
279 }
280 f = l->lo_backing_file;
281 diff --git a/drivers/bluetooth/h4_recv.h b/drivers/bluetooth/h4_recv.h
282 index b432651f8236..307d82166f48 100644
283 --- a/drivers/bluetooth/h4_recv.h
284 +++ b/drivers/bluetooth/h4_recv.h
285 @@ -60,6 +60,10 @@ static inline struct sk_buff *h4_recv_buf(struct hci_dev *hdev,
286 const struct h4_recv_pkt *pkts,
287 int pkts_count)
288 {
289 + /* Check for error from previous call */
290 + if (IS_ERR(skb))
291 + skb = NULL;
292 +
293 while (count) {
294 int i, len;
295
296 diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
297 index fb97a3bf069b..5d97d77627c1 100644
298 --- a/drivers/bluetooth/hci_h4.c
299 +++ b/drivers/bluetooth/hci_h4.c
300 @@ -174,6 +174,10 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
301 struct hci_uart *hu = hci_get_drvdata(hdev);
302 u8 alignment = hu->alignment ? hu->alignment : 1;
303
304 + /* Check for error from previous call */
305 + if (IS_ERR(skb))
306 + skb = NULL;
307 +
308 while (count) {
309 int i, len;
310
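
These two hunks guard against the same pattern: h4_recv_buf() returns either its buffer pointer or an ERR_PTR-encoded errno, and the caller feeds the previous return value back in on the next invocation. A userspace re-creation of the kernel's pointer-encoded error convention, for illustration:

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline int IS_ERR(const void *ptr)
    {
        /* errors live in the top MAX_ERRNO bytes of the address space */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
        void *skb = ERR_PTR(-12);          /* e.g. -ENOMEM last time */
        if (IS_ERR(skb))
            skb = NULL;                    /* reset, as the hunks do */
        printf("skb = %p\n", skb);
        return 0;
    }
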
311 diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
312 index fbf7b4df23ab..9562e72c1ae5 100644
313 --- a/drivers/bluetooth/hci_ldisc.c
314 +++ b/drivers/bluetooth/hci_ldisc.c
315 @@ -207,11 +207,11 @@ void hci_uart_init_work(struct work_struct *work)
316 err = hci_register_dev(hu->hdev);
317 if (err < 0) {
318 BT_ERR("Can't register HCI device");
319 + clear_bit(HCI_UART_PROTO_READY, &hu->flags);
320 + hu->proto->close(hu);
321 hdev = hu->hdev;
322 hu->hdev = NULL;
323 hci_free_dev(hdev);
324 - clear_bit(HCI_UART_PROTO_READY, &hu->flags);
325 - hu->proto->close(hu);
326 return;
327 }
328
329 @@ -616,6 +616,7 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
330 static int hci_uart_register_dev(struct hci_uart *hu)
331 {
332 struct hci_dev *hdev;
333 + int err;
334
335 BT_DBG("");
336
337 @@ -659,11 +660,22 @@ static int hci_uart_register_dev(struct hci_uart *hu)
338 else
339 hdev->dev_type = HCI_PRIMARY;
340
341 + /* Only call open() for the protocol after hdev is fully initialized as
342 + * open() (or a timer/workqueue it starts) may attempt to reference it.
343 + */
344 + err = hu->proto->open(hu);
345 + if (err) {
346 + hu->hdev = NULL;
347 + hci_free_dev(hdev);
348 + return err;
349 + }
350 +
351 if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
352 return 0;
353
354 if (hci_register_dev(hdev) < 0) {
355 BT_ERR("Can't register HCI device");
356 + hu->proto->close(hu);
357 hu->hdev = NULL;
358 hci_free_dev(hdev);
359 return -ENODEV;
360 @@ -683,20 +695,14 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id)
361 if (!p)
362 return -EPROTONOSUPPORT;
363
364 - err = p->open(hu);
365 - if (err)
366 - return err;
367 -
368 hu->proto = p;
369 - set_bit(HCI_UART_PROTO_READY, &hu->flags);
370
371 err = hci_uart_register_dev(hu);
372 if (err) {
373 - clear_bit(HCI_UART_PROTO_READY, &hu->flags);
374 - p->close(hu);
375 return err;
376 }
377
378 + set_bit(HCI_UART_PROTO_READY, &hu->flags);
379 return 0;
380 }
381
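
The hci_ldisc changes are an ordering fix: proto->open() now runs only after hdev is fully initialized, and every failure path releases resources in exact reverse order of acquisition. A compact sketch of that acquire/rollback shape (the names here are illustrative, not the driver's):

    #include <errno.h>
    #include <stdio.h>

    static int opened;

    static int proto_open(void)   { opened = 1; return 0; }
    static void proto_close(void) { opened = 0; }
    static int register_dev(void) { return -ENODEV; } /* force the error path */

    static int setup(void)
    {
        int err = proto_open();
        if (err)
            return err;

        err = register_dev();
        if (err) {
            proto_close();      /* undo in reverse order of setup */
            return err;
        }
        return 0;
    }

    int main(void)
    {
        printf("setup: %d, opened: %d\n", setup(), opened);
        return 0;
    }
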
382 diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
383 index 431892200a08..ead71bfac689 100644
384 --- a/drivers/clocksource/timer-riscv.c
385 +++ b/drivers/clocksource/timer-riscv.c
386 @@ -58,7 +58,7 @@ static u64 riscv_sched_clock(void)
387 static DEFINE_PER_CPU(struct clocksource, riscv_clocksource) = {
388 .name = "riscv_clocksource",
389 .rating = 300,
390 - .mask = CLOCKSOURCE_MASK(BITS_PER_LONG),
391 + .mask = CLOCKSOURCE_MASK(64),
392 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
393 .read = riscv_clocksource_rdtime,
394 };
395 @@ -103,8 +103,7 @@ static int __init riscv_timer_init_dt(struct device_node *n)
396 cs = per_cpu_ptr(&riscv_clocksource, cpuid);
397 clocksource_register_hz(cs, riscv_timebase);
398
399 - sched_clock_register(riscv_sched_clock,
400 - BITS_PER_LONG, riscv_timebase);
401 + sched_clock_register(riscv_sched_clock, 64, riscv_timebase);
402
403 error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
404 "clockevents/riscv/timer:starting",
405 diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
406 index bacdaef77b6c..278dd55ff476 100644
407 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
408 +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
409 @@ -738,7 +738,7 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
410 }
411
412 ring->vm_inv_eng = inv_eng - 1;
413 - change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub]));
414 + vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
415
416 dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
417 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
418 diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
419 index eb56ee893761..e747a7d16739 100644
420 --- a/drivers/gpu/drm/vkms/vkms_crtc.c
421 +++ b/drivers/gpu/drm/vkms/vkms_crtc.c
422 @@ -98,6 +98,7 @@ static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
423 vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
424 if (!vkms_state)
425 return;
426 + INIT_WORK(&vkms_state->crc_work, vkms_crc_work_handle);
427
428 crtc->state = &vkms_state->base;
429 crtc->state->crtc = crtc;
430 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
431 index b913a56f3426..2a9112515f46 100644
432 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
433 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
434 @@ -564,11 +564,9 @@ static int vmw_fb_set_par(struct fb_info *info)
435 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
436 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
437 };
438 - struct drm_display_mode *old_mode;
439 struct drm_display_mode *mode;
440 int ret;
441
442 - old_mode = par->set_mode;
443 mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
444 if (!mode) {
445 DRM_ERROR("Could not create new fb mode.\n");
446 @@ -579,11 +577,7 @@ static int vmw_fb_set_par(struct fb_info *info)
447 mode->vdisplay = var->yres;
448 vmw_guess_mode_timing(mode);
449
450 - if (old_mode && drm_mode_equal(old_mode, mode)) {
451 - drm_mode_destroy(vmw_priv->dev, mode);
452 - mode = old_mode;
453 - old_mode = NULL;
454 - } else if (!vmw_kms_validate_mode_vram(vmw_priv,
455 + if (!vmw_kms_validate_mode_vram(vmw_priv,
456 mode->hdisplay *
457 DIV_ROUND_UP(var->bits_per_pixel, 8),
458 mode->vdisplay)) {
459 @@ -620,8 +614,8 @@ static int vmw_fb_set_par(struct fb_info *info)
460 schedule_delayed_work(&par->local_work, 0);
461
462 out_unlock:
463 - if (old_mode)
464 - drm_mode_destroy(vmw_priv->dev, old_mode);
465 + if (par->set_mode)
466 + drm_mode_destroy(vmw_priv->dev, par->set_mode);
467 par->set_mode = mode;
468
469 mutex_unlock(&par->bo_mutex);
470 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
471 index b93c558dd86e..7da752ca1c34 100644
472 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
473 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
474 @@ -57,7 +57,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
475
476 id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
477 if (id < 0)
478 - return id;
479 + return (id != -ENOMEM ? 0 : id);
480
481 spin_lock(&gman->lock);
482
483 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
484 index 84f077b2b90a..81bded0d37d1 100644
485 --- a/drivers/infiniband/core/cma.c
486 +++ b/drivers/infiniband/core/cma.c
487 @@ -2966,13 +2966,22 @@ static void addr_handler(int status, struct sockaddr *src_addr,
488 {
489 struct rdma_id_private *id_priv = context;
490 struct rdma_cm_event event = {};
491 + struct sockaddr *addr;
492 + struct sockaddr_storage old_addr;
493
494 mutex_lock(&id_priv->handler_mutex);
495 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
496 RDMA_CM_ADDR_RESOLVED))
497 goto out;
498
499 - memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
500 + /*
501 + * Store the previous src address, so that if we fail to acquire
502 + * matching rdma device, old address can be restored back, which helps
503 + * to cancel the cma listen operation correctly.
504 + */
505 + addr = cma_src_addr(id_priv);
506 + memcpy(&old_addr, addr, rdma_addr_size(addr));
507 + memcpy(addr, src_addr, rdma_addr_size(src_addr));
508 if (!status && !id_priv->cma_dev) {
509 status = cma_acquire_dev_by_src_ip(id_priv);
510 if (status)
511 @@ -2983,6 +2992,8 @@ static void addr_handler(int status, struct sockaddr *src_addr,
512 }
513
514 if (status) {
515 + memcpy(addr, &old_addr,
516 + rdma_addr_size((struct sockaddr *)&old_addr));
517 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
518 RDMA_CM_ADDR_BOUND))
519 goto out;
520 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
521 index 2a7b78bb98b4..e628ef23418f 100644
522 --- a/drivers/iommu/amd_iommu.c
523 +++ b/drivers/iommu/amd_iommu.c
524 @@ -2605,7 +2605,12 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
525
526 /* Everything is mapped - write the right values into s->dma_address */
527 for_each_sg(sglist, s, nelems, i) {
528 - s->dma_address += address + s->offset;
529 + /*
530 + * Add in the remaining piece of the scatter-gather offset that
531 + * was masked out when we were determining the physical address
532 + * via (sg_phys(s) & PAGE_MASK) earlier.
533 + */
534 + s->dma_address += address + (s->offset & ~PAGE_MASK);
535 s->dma_length = s->length;
536 }
537
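
The map_sg() fix re-adds only the sub-page remainder of sg->offset: the page-aligned portion was already folded into the mapped address, and sg->offset may legitimately exceed PAGE_SIZE. Worked arithmetic as a sketch (the values are made up):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long iova   = 0x100000; /* page-aligned IOVA of the mapping */
        unsigned long offset = 5000;     /* sg->offset, bigger than one page */

        /* offset & ~PAGE_MASK == offset % PAGE_SIZE == 904 here */
        unsigned long dma = iova + (offset & ~PAGE_MASK);

        printf("dma = 0x%lx (sub-page remainder %lu)\n",
               dma, offset & ~PAGE_MASK);
        return 0;
    }
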
538 diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
539 index f8d3ba247523..2de8122e218f 100644
540 --- a/drivers/iommu/iova.c
541 +++ b/drivers/iommu/iova.c
542 @@ -207,8 +207,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
543 curr_iova = rb_entry(curr, struct iova, node);
544 } while (curr && new_pfn <= curr_iova->pfn_hi);
545
546 - if (limit_pfn < size || new_pfn < iovad->start_pfn)
547 + if (limit_pfn < size || new_pfn < iovad->start_pfn) {
548 + iovad->max32_alloc_size = size;
549 goto iova32_full;
550 + }
551
552 /* pfn_lo will point to size aligned address if size_aligned is set */
553 new->pfn_lo = new_pfn;
554 @@ -222,7 +224,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
555 return 0;
556
557 iova32_full:
558 - iovad->max32_alloc_size = size;
559 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
560 return -ENOMEM;
561 }
562 diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
563 index f867d41b0aa1..93e32a59640c 100644
564 --- a/drivers/irqchip/irq-gic-v3-its.c
565 +++ b/drivers/irqchip/irq-gic-v3-its.c
566 @@ -1482,7 +1482,7 @@ static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
567 ra = container_of(a, struct lpi_range, entry);
568 rb = container_of(b, struct lpi_range, entry);
569
570 - return rb->base_id - ra->base_id;
571 + return ra->base_id - rb->base_id;
572 }
573
574 static void merge_lpi_ranges(void)
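
The one-line comparator fix flips the sort to ascending order so that adjacent LPI ranges actually end up next to each other and can be merged. For list_sort() (and qsort alike), returning negative when a sorts before b gives ascending order; a sketch, using two comparisons instead of subtraction so large ids cannot overflow:

    #include <stdio.h>
    #include <stdlib.h>

    struct lpi_range { unsigned int base_id; };

    static int range_cmp(const void *a, const void *b)
    {
        const struct lpi_range *ra = a, *rb = b;

        return (ra->base_id > rb->base_id) - (ra->base_id < rb->base_id);
    }

    int main(void)
    {
        struct lpi_range r[] = { { 8192 }, { 100 }, { 4096 } };

        qsort(r, 3, sizeof(r[0]), range_cmp);
        for (int i = 0; i < 3; i++)
            printf("%u\n", r[i].base_id);   /* 100 4096 8192 */
        return 0;
    }
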
575 diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
576 index d45415cbe6e7..14cff91b7aea 100644
577 --- a/drivers/media/usb/uvc/uvc_ctrl.c
578 +++ b/drivers/media/usb/uvc/uvc_ctrl.c
579 @@ -1212,7 +1212,7 @@ static void uvc_ctrl_fill_event(struct uvc_video_chain *chain,
580
581 __uvc_query_v4l2_ctrl(chain, ctrl, mapping, &v4l2_ctrl);
582
583 - memset(ev->reserved, 0, sizeof(ev->reserved));
584 + memset(ev, 0, sizeof(*ev));
585 ev->type = V4L2_EVENT_CTRL;
586 ev->id = v4l2_ctrl.id;
587 ev->u.ctrl.value = value;
588 diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
589 index 5e3806feb5d7..8a82427c4d54 100644
590 --- a/drivers/media/v4l2-core/v4l2-ctrls.c
591 +++ b/drivers/media/v4l2-core/v4l2-ctrls.c
592 @@ -1387,7 +1387,7 @@ static u32 user_flags(const struct v4l2_ctrl *ctrl)
593
594 static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
595 {
596 - memset(ev->reserved, 0, sizeof(ev->reserved));
597 + memset(ev, 0, sizeof(*ev));
598 ev->type = V4L2_EVENT_CTRL;
599 ev->id = ctrl->id;
600 ev->u.ctrl.changes = changes;
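
Both memset changes close the same kernel-to-user information leak: zeroing only ev->reserved left the rest of the event struct, including compiler padding, holding stale stack bytes that were later copied to userspace. A sketch of the safe fill pattern (the struct is an illustrative stand-in for v4l2_event):

    #include <stdint.h>
    #include <string.h>

    struct event {
        uint32_t type;
        uint32_t id;
        uint8_t  payload[8];
        uint32_t reserved[2];
    };

    void fill_event(struct event *ev, uint32_t id)
    {
        memset(ev, 0, sizeof(*ev)); /* clear fields *and* padding */
        ev->type = 3;               /* e.g. V4L2_EVENT_CTRL */
        ev->id   = id;
    }
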
601 diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
602 index c712b7deb3a9..82a97866e0cf 100644
603 --- a/drivers/mmc/host/alcor.c
604 +++ b/drivers/mmc/host/alcor.c
605 @@ -1044,14 +1044,27 @@ static void alcor_init_mmc(struct alcor_sdmmc_host *host)
606 mmc->caps2 = MMC_CAP2_NO_SDIO;
607 mmc->ops = &alcor_sdc_ops;
608
609 - /* Hardware cannot do scatter lists */
610 + /* The hardware does DMA data transfer of 4096 bytes to/from a single
611 + * buffer address. Scatterlists are not supported, but upon DMA
612 + * completion (signalled via IRQ), the original vendor driver does
613 + * then immediately set up another DMA transfer of the next 4096
614 + * bytes.
615 + *
616 + * This means that we need to handle the I/O in 4096 byte chunks.
617 + * Lacking a way to limit the sglist entries to 4096 bytes, we instead
618 + * impose that only one segment is provided, with maximum size 4096,
619 + * which also happens to be the minimum size. This means that the
620 + * single-entry sglist handled by this driver can be handed directly
621 + * to the hardware, nice and simple.
622 + *
623 + * Unfortunately though, that means we only do 4096 bytes I/O per
624 + * MMC command. A future improvement would be to make the driver
625 + * accept sg lists and entries of any size, and simply iterate
626 + * through them 4096 bytes at a time.
627 + */
628 mmc->max_segs = AU6601_MAX_DMA_SEGMENTS;
629 mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE;
630 -
631 - mmc->max_blk_size = mmc->max_seg_size;
632 - mmc->max_blk_count = mmc->max_segs;
633 -
634 - mmc->max_req_size = mmc->max_seg_size * mmc->max_segs;
635 + mmc->max_req_size = mmc->max_seg_size;
636 }
637
638 static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
639 diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
640 index 4d17032d15ee..7b530e5a86da 100644
641 --- a/drivers/mmc/host/mxcmmc.c
642 +++ b/drivers/mmc/host/mxcmmc.c
643 @@ -292,11 +292,8 @@ static void mxcmci_swap_buffers(struct mmc_data *data)
644 struct scatterlist *sg;
645 int i;
646
647 - for_each_sg(data->sg, sg, data->sg_len, i) {
648 - void *buf = kmap_atomic(sg_page(sg) + sg->offset);
649 - buffer_swap32(buf, sg->length);
650 - kunmap_atomic(buf);
651 - }
652 + for_each_sg(data->sg, sg, data->sg_len, i)
653 + buffer_swap32(sg_virt(sg), sg->length);
654 }
655 #else
656 static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
657 @@ -613,7 +610,6 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
658 {
659 struct mmc_data *data = host->req->data;
660 struct scatterlist *sg;
661 - void *buf;
662 int stat, i;
663
664 host->data = data;
665 @@ -621,18 +617,14 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
666
667 if (data->flags & MMC_DATA_READ) {
668 for_each_sg(data->sg, sg, data->sg_len, i) {
669 - buf = kmap_atomic(sg_page(sg) + sg->offset);
670 - stat = mxcmci_pull(host, buf, sg->length);
671 - kunmap(buf);
672 + stat = mxcmci_pull(host, sg_virt(sg), sg->length);
673 if (stat)
674 return stat;
675 host->datasize += sg->length;
676 }
677 } else {
678 for_each_sg(data->sg, sg, data->sg_len, i) {
679 - buf = kmap_atomic(sg_page(sg) + sg->offset);
680 - stat = mxcmci_push(host, buf, sg->length);
681 - kunmap(buf);
682 + stat = mxcmci_push(host, sg_virt(sg), sg->length);
683 if (stat)
684 return stat;
685 host->datasize += sg->length;
686 diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
687 index 8779bbaa6b69..194a81888792 100644
688 --- a/drivers/mmc/host/pxamci.c
689 +++ b/drivers/mmc/host/pxamci.c
690 @@ -162,7 +162,7 @@ static void pxamci_dma_irq(void *param);
691 static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
692 {
693 struct dma_async_tx_descriptor *tx;
694 - enum dma_data_direction direction;
695 + enum dma_transfer_direction direction;
696 struct dma_slave_config config;
697 struct dma_chan *chan;
698 unsigned int nob = data->blocks;
699 diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
700 index 7e2a75c4f36f..d9be22b310e6 100644
701 --- a/drivers/mmc/host/renesas_sdhi_core.c
702 +++ b/drivers/mmc/host/renesas_sdhi_core.c
703 @@ -634,6 +634,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
704 struct renesas_sdhi *priv;
705 struct resource *res;
706 int irq, ret, i;
707 + u16 ver;
708
709 of_data = of_device_get_match_data(&pdev->dev);
710
711 @@ -766,12 +767,17 @@ int renesas_sdhi_probe(struct platform_device *pdev,
712 if (ret)
713 goto efree;
714
715 + ver = sd_ctrl_read16(host, CTL_VERSION);
716 + /* GEN2_SDR104 is first known SDHI to use 32bit block count */
717 + if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX)
718 + mmc_data->max_blk_count = U16_MAX;
719 +
720 ret = tmio_mmc_host_probe(host);
721 if (ret < 0)
722 goto edisclk;
723
724 /* One Gen2 SDHI incarnation does NOT have a CBSY bit */
725 - if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50)
726 + if (ver == SDHI_VER_GEN2_SDR50)
727 mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY;
728
729 /* Enable tuning iff we have an SCC and a supported mode */
730 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
731 index 370ca94b6775..c7c2920c05c4 100644
732 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
733 +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
734 @@ -40,6 +40,9 @@
735 #include "mlx5_core.h"
736 #include "lib/eq.h"
737
738 +static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
739 + struct mlx5_core_dct *dct);
740 +
741 static struct mlx5_core_rsc_common *
742 mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
743 {
744 @@ -227,13 +230,42 @@ static void destroy_resource_common(struct mlx5_core_dev *dev,
745 wait_for_completion(&qp->common.free);
746 }
747
748 +static int _mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
749 + struct mlx5_core_dct *dct, bool need_cleanup)
750 +{
751 + u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
752 + u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
753 + struct mlx5_core_qp *qp = &dct->mqp;
754 + int err;
755 +
756 + err = mlx5_core_drain_dct(dev, dct);
757 + if (err) {
758 + if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
759 + goto destroy;
760 + } else {
761 + mlx5_core_warn(
762 + dev, "failed drain DCT 0x%x with error 0x%x\n",
763 + qp->qpn, err);
764 + return err;
765 + }
766 + }
767 + wait_for_completion(&dct->drained);
768 +destroy:
769 + if (need_cleanup)
770 + destroy_resource_common(dev, &dct->mqp);
771 + MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
772 + MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
773 + MLX5_SET(destroy_dct_in, in, uid, qp->uid);
774 + err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
775 + (void *)&out, sizeof(out));
776 + return err;
777 +}
778 +
779 int mlx5_core_create_dct(struct mlx5_core_dev *dev,
780 struct mlx5_core_dct *dct,
781 u32 *in, int inlen)
782 {
783 u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
784 - u32 din[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
785 - u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
786 struct mlx5_core_qp *qp = &dct->mqp;
787 int err;
788
789 @@ -254,11 +286,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
790
791 return 0;
792 err_cmd:
793 - MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
794 - MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
795 - MLX5_SET(destroy_dct_in, din, uid, qp->uid);
796 - mlx5_cmd_exec(dev, (void *)&in, sizeof(din),
797 - (void *)&out, sizeof(dout));
798 + _mlx5_core_destroy_dct(dev, dct, false);
799 return err;
800 }
801 EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
802 @@ -323,29 +351,7 @@ static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
803 int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
804 struct mlx5_core_dct *dct)
805 {
806 - u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
807 - u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
808 - struct mlx5_core_qp *qp = &dct->mqp;
809 - int err;
810 -
811 - err = mlx5_core_drain_dct(dev, dct);
812 - if (err) {
813 - if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
814 - goto destroy;
815 - } else {
816 - mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err);
817 - return err;
818 - }
819 - }
820 - wait_for_completion(&dct->drained);
821 -destroy:
822 - destroy_resource_common(dev, &dct->mqp);
823 - MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
824 - MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
825 - MLX5_SET(destroy_dct_in, in, uid, qp->uid);
826 - err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
827 - (void *)&out, sizeof(out));
828 - return err;
829 + return _mlx5_core_destroy_dct(dev, dct, true);
830 }
831 EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
832
833 diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
834 index 1135e74646e2..8cec5230fe31 100644
835 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c
836 +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
837 @@ -96,6 +96,7 @@ static int client_reserve = 1;
838 static char partition_name[96] = "UNKNOWN";
839 static unsigned int partition_number = -1;
840 static LIST_HEAD(ibmvscsi_head);
841 +static DEFINE_SPINLOCK(ibmvscsi_driver_lock);
842
843 static struct scsi_transport_template *ibmvscsi_transport_template;
844
845 @@ -2270,7 +2271,9 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
846 }
847
848 dev_set_drvdata(&vdev->dev, hostdata);
849 + spin_lock(&ibmvscsi_driver_lock);
850 list_add_tail(&hostdata->host_list, &ibmvscsi_head);
851 + spin_unlock(&ibmvscsi_driver_lock);
852 return 0;
853
854 add_srp_port_failed:
855 @@ -2292,15 +2295,27 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
856 static int ibmvscsi_remove(struct vio_dev *vdev)
857 {
858 struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
859 - list_del(&hostdata->host_list);
860 - unmap_persist_bufs(hostdata);
861 + unsigned long flags;
862 +
863 + srp_remove_host(hostdata->host);
864 + scsi_remove_host(hostdata->host);
865 +
866 + purge_requests(hostdata, DID_ERROR);
867 +
868 + spin_lock_irqsave(hostdata->host->host_lock, flags);
869 release_event_pool(&hostdata->pool, hostdata);
870 + spin_unlock_irqrestore(hostdata->host->host_lock, flags);
871 +
872 ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
873 max_events);
874
875 kthread_stop(hostdata->work_thread);
876 - srp_remove_host(hostdata->host);
877 - scsi_remove_host(hostdata->host);
878 + unmap_persist_bufs(hostdata);
879 +
880 + spin_lock(&ibmvscsi_driver_lock);
881 + list_del(&hostdata->host_list);
882 + spin_unlock(&ibmvscsi_driver_lock);
883 +
884 scsi_host_put(hostdata->host);
885
886 return 0;
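
The ibmvscsi changes serialize every access to the global host list behind one lock and move the unlink to the end of teardown. The general pattern, sketched with pthreads standing in for the kernel spinlock:

    #include <pthread.h>

    struct host { struct host *next; };

    static struct host *host_list;
    static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

    void host_add(struct host *h)
    {
        pthread_mutex_lock(&host_lock);
        h->next = host_list;
        host_list = h;
        pthread_mutex_unlock(&host_lock);
    }

    void host_del(struct host *h)
    {
        pthread_mutex_lock(&host_lock);
        for (struct host **p = &host_list; *p; p = &(*p)->next) {
            if (*p == h) {
                *p = h->next;
                break;
            }
        }
        pthread_mutex_unlock(&host_lock);
    }
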
887 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
888 index f44e640229e7..7f8946844a5e 100644
889 --- a/drivers/scsi/qla2xxx/qla_init.c
890 +++ b/drivers/scsi/qla2xxx/qla_init.c
891 @@ -4968,6 +4968,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
892 (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
893 continue;
894
895 + /* Bypass if not same domain and area of adapter. */
896 + if (area && domain && ((area != vha->d_id.b.area) ||
897 + (domain != vha->d_id.b.domain)) &&
898 + (ha->current_topology == ISP_CFG_NL))
899 + continue;
900 +
901 +
902 /* Bypass invalid local loop ID. */
903 if (loop_id > LAST_LOCAL_LOOP_ID)
904 continue;
905 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
906 index a6828391d6b3..5a6e8e12701a 100644
907 --- a/drivers/scsi/scsi_lib.c
908 +++ b/drivers/scsi/scsi_lib.c
909 @@ -2598,8 +2598,10 @@ void scsi_device_resume(struct scsi_device *sdev)
910 * device deleted during suspend)
911 */
912 mutex_lock(&sdev->state_mutex);
913 - sdev->quiesced_by = NULL;
914 - blk_clear_pm_only(sdev->request_queue);
915 + if (sdev->quiesced_by) {
916 + sdev->quiesced_by = NULL;
917 + blk_clear_pm_only(sdev->request_queue);
918 + }
919 if (sdev->sdev_state == SDEV_QUIESCE)
920 scsi_device_set_state(sdev, SDEV_RUNNING);
921 mutex_unlock(&sdev->state_mutex);
922 diff --git a/fs/aio.c b/fs/aio.c
923 index 528d03680526..3d9669d011b9 100644
924 --- a/fs/aio.c
925 +++ b/fs/aio.c
926 @@ -167,9 +167,13 @@ struct kioctx {
927 unsigned id;
928 };
929
930 +/*
931 + * First field must be the file pointer in all the
932 + * iocb unions! See also 'struct kiocb' in <linux/fs.h>
933 + */
934 struct fsync_iocb {
935 - struct work_struct work;
936 struct file *file;
937 + struct work_struct work;
938 bool datasync;
939 };
940
941 @@ -183,8 +187,15 @@ struct poll_iocb {
942 struct work_struct work;
943 };
944
945 +/*
946 + * NOTE! Each of the iocb union members has the file pointer
947 + * as the first entry in their struct definition. So you can
948 + * access the file pointer through any of the sub-structs,
949 + * or directly as just 'ki_filp' in this struct.
950 + */
951 struct aio_kiocb {
952 union {
953 + struct file *ki_filp;
954 struct kiocb rw;
955 struct fsync_iocb fsync;
956 struct poll_iocb poll;
957 @@ -1060,6 +1071,8 @@ static inline void iocb_put(struct aio_kiocb *iocb)
958 {
959 if (refcount_read(&iocb->ki_refcnt) == 0 ||
960 refcount_dec_and_test(&iocb->ki_refcnt)) {
961 + if (iocb->ki_filp)
962 + fput(iocb->ki_filp);
963 percpu_ref_put(&iocb->ki_ctx->reqs);
964 kmem_cache_free(kiocb_cachep, iocb);
965 }
966 @@ -1424,7 +1437,6 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
967 file_end_write(kiocb->ki_filp);
968 }
969
970 - fput(kiocb->ki_filp);
971 aio_complete(iocb, res, res2);
972 }
973
974 @@ -1432,9 +1444,6 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
975 {
976 int ret;
977
978 - req->ki_filp = fget(iocb->aio_fildes);
979 - if (unlikely(!req->ki_filp))
980 - return -EBADF;
981 req->ki_complete = aio_complete_rw;
982 req->private = NULL;
983 req->ki_pos = iocb->aio_offset;
984 @@ -1451,7 +1460,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
985 ret = ioprio_check_cap(iocb->aio_reqprio);
986 if (ret) {
987 pr_debug("aio ioprio check cap error: %d\n", ret);
988 - goto out_fput;
989 + return ret;
990 }
991
992 req->ki_ioprio = iocb->aio_reqprio;
993 @@ -1460,14 +1469,10 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
994
995 ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
996 if (unlikely(ret))
997 - goto out_fput;
998 + return ret;
999
1000 req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
1001 return 0;
1002 -
1003 -out_fput:
1004 - fput(req->ki_filp);
1005 - return ret;
1006 }
1007
1008 static int aio_setup_rw(int rw, const struct iocb *iocb, struct iovec **iovec,
1009 @@ -1521,24 +1526,19 @@ static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
1010 if (ret)
1011 return ret;
1012 file = req->ki_filp;
1013 -
1014 - ret = -EBADF;
1015 if (unlikely(!(file->f_mode & FMODE_READ)))
1016 - goto out_fput;
1017 + return -EBADF;
1018 ret = -EINVAL;
1019 if (unlikely(!file->f_op->read_iter))
1020 - goto out_fput;
1021 + return -EINVAL;
1022
1023 ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
1024 if (ret)
1025 - goto out_fput;
1026 + return ret;
1027 ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
1028 if (!ret)
1029 aio_rw_done(req, call_read_iter(file, req, &iter));
1030 kfree(iovec);
1031 -out_fput:
1032 - if (unlikely(ret))
1033 - fput(file);
1034 return ret;
1035 }
1036
1037 @@ -1555,16 +1555,14 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
1038 return ret;
1039 file = req->ki_filp;
1040
1041 - ret = -EBADF;
1042 if (unlikely(!(file->f_mode & FMODE_WRITE)))
1043 - goto out_fput;
1044 - ret = -EINVAL;
1045 + return -EBADF;
1046 if (unlikely(!file->f_op->write_iter))
1047 - goto out_fput;
1048 + return -EINVAL;
1049
1050 ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
1051 if (ret)
1052 - goto out_fput;
1053 + return ret;
1054 ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
1055 if (!ret) {
1056 /*
1057 @@ -1582,9 +1580,6 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
1058 aio_rw_done(req, call_write_iter(file, req, &iter));
1059 }
1060 kfree(iovec);
1061 -out_fput:
1062 - if (unlikely(ret))
1063 - fput(file);
1064 return ret;
1065 }
1066
1067 @@ -1594,7 +1589,6 @@ static void aio_fsync_work(struct work_struct *work)
1068 int ret;
1069
1070 ret = vfs_fsync(req->file, req->datasync);
1071 - fput(req->file);
1072 aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
1073 }
1074
1075 @@ -1605,13 +1599,8 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
1076 iocb->aio_rw_flags))
1077 return -EINVAL;
1078
1079 - req->file = fget(iocb->aio_fildes);
1080 - if (unlikely(!req->file))
1081 - return -EBADF;
1082 - if (unlikely(!req->file->f_op->fsync)) {
1083 - fput(req->file);
1084 + if (unlikely(!req->file->f_op->fsync))
1085 return -EINVAL;
1086 - }
1087
1088 req->datasync = datasync;
1089 INIT_WORK(&req->work, aio_fsync_work);
1090 @@ -1621,10 +1610,7 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
1091
1092 static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
1093 {
1094 - struct file *file = iocb->poll.file;
1095 -
1096 aio_complete(iocb, mangle_poll(mask), 0);
1097 - fput(file);
1098 }
1099
1100 static void aio_poll_complete_work(struct work_struct *work)
1101 @@ -1749,9 +1735,6 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1102
1103 INIT_WORK(&req->work, aio_poll_complete_work);
1104 req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
1105 - req->file = fget(iocb->aio_fildes);
1106 - if (unlikely(!req->file))
1107 - return -EBADF;
1108
1109 req->head = NULL;
1110 req->woken = false;
1111 @@ -1794,10 +1777,8 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1112 spin_unlock_irq(&ctx->ctx_lock);
1113
1114 out:
1115 - if (unlikely(apt.error)) {
1116 - fput(req->file);
1117 + if (unlikely(apt.error))
1118 return apt.error;
1119 - }
1120
1121 if (mask)
1122 aio_poll_complete(aiocb, mask);
1123 @@ -1835,6 +1816,11 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
1124 if (unlikely(!req))
1125 goto out_put_reqs_available;
1126
1127 + req->ki_filp = fget(iocb->aio_fildes);
1128 + ret = -EBADF;
1129 + if (unlikely(!req->ki_filp))
1130 + goto out_put_req;
1131 +
1132 if (iocb->aio_flags & IOCB_FLAG_RESFD) {
1133 /*
1134 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
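
The aio rework above hinges on one invariant: every iocb variant in the union begins with the struct file pointer, so a single fget() at submission and a single fput() in iocb_put() cover read, write, fsync and poll alike. A sketch that encodes the invariant as a compile-time check (struct shapes re-created for illustration):

    #include <stddef.h>

    struct file;                       /* opaque here */

    struct fsync_iocb { struct file *file; int datasync; };
    struct poll_iocb  { struct file *file; unsigned int events; };

    struct kiocb_sketch {
        union {
            struct file *ki_filp;      /* aliases the first member below */
            struct fsync_iocb fsync;
            struct poll_iocb  poll;
        };
    };

    _Static_assert(offsetof(struct fsync_iocb, file) == 0 &&
                   offsetof(struct poll_iocb, file) == 0,
                   "file pointer must come first in every iocb variant");
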
1135 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
1136 index 77b3aaa39b35..104905732fbe 100644
1137 --- a/fs/cifs/smb2pdu.c
1138 +++ b/fs/cifs/smb2pdu.c
1139 @@ -1605,9 +1605,16 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
1140 iov[1].iov_base = unc_path;
1141 iov[1].iov_len = unc_path_len;
1142
1143 - /* 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1 */
1144 + /*
1145 + * 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1
1146 + * unless it is guest or anonymous user. See MS-SMB2 3.2.5.3.1
1147 + * (Samba servers don't always set the flag so also check if null user)
1148 + */
1149 if ((ses->server->dialect == SMB311_PROT_ID) &&
1150 - !smb3_encryption_required(tcon))
1151 + !smb3_encryption_required(tcon) &&
1152 + !(ses->session_flags &
1153 + (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) &&
1154 + ((ses->user_name != NULL) || (ses->sectype == Kerberos)))
1155 req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
1156
1157 memset(&rqst, 0, sizeof(struct smb_rqst));
1158 diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
1159 index 15b6dd733780..df908ef79cce 100644
1160 --- a/fs/ext4/ext4_jbd2.h
1161 +++ b/fs/ext4/ext4_jbd2.h
1162 @@ -384,7 +384,7 @@ static inline void ext4_update_inode_fsync_trans(handle_t *handle,
1163 {
1164 struct ext4_inode_info *ei = EXT4_I(inode);
1165
1166 - if (ext4_handle_valid(handle)) {
1167 + if (ext4_handle_valid(handle) && !is_handle_aborted(handle)) {
1168 ei->i_sync_tid = handle->h_transaction->t_tid;
1169 if (datasync)
1170 ei->i_datasync_tid = handle->h_transaction->t_tid;
1171 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
1172 index 69d65d49837b..98ec11f69cd4 100644
1173 --- a/fs/ext4/file.c
1174 +++ b/fs/ext4/file.c
1175 @@ -125,7 +125,7 @@ ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
1176 struct super_block *sb = inode->i_sb;
1177 int blockmask = sb->s_blocksize - 1;
1178
1179 - if (pos >= i_size_read(inode))
1180 + if (pos >= ALIGN(i_size_read(inode), sb->s_blocksize))
1181 return 0;
1182
1183 if ((pos | iov_iter_alignment(from)) & blockmask)
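
The ext4_unaligned_aio() fix rounds i_size up to a block boundary before the comparison: a write positioned past i_size but still inside the partially written tail block must take the serialized unaligned path. A worked example of the boundary:

    #include <stdio.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((unsigned long long)(a) - 1))

    int main(void)
    {
        unsigned long long i_size = 5000, blocksize = 4096;
        unsigned long long limit = ALIGN_UP(i_size, blocksize); /* 8192 */

        for (unsigned long long pos = 4096; pos <= 8192; pos += 4096)
            printf("pos %5llu: %s\n", pos,
                   pos >= limit ? "clear of the EOF block"
                                : "overlaps the EOF block");
        return 0;
    }
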
1184 diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
1185 index bf7fa1507e81..9e96a0bd08d9 100644
1186 --- a/fs/ext4/indirect.c
1187 +++ b/fs/ext4/indirect.c
1188 @@ -1387,10 +1387,16 @@ end_range:
1189 partial->p + 1,
1190 partial2->p,
1191 (chain+n-1) - partial);
1192 - BUFFER_TRACE(partial->bh, "call brelse");
1193 - brelse(partial->bh);
1194 - BUFFER_TRACE(partial2->bh, "call brelse");
1195 - brelse(partial2->bh);
1196 + while (partial > chain) {
1197 + BUFFER_TRACE(partial->bh, "call brelse");
1198 + brelse(partial->bh);
1199 + partial--;
1200 + }
1201 + while (partial2 > chain2) {
1202 + BUFFER_TRACE(partial2->bh, "call brelse");
1203 + brelse(partial2->bh);
1204 + partial2--;
1205 + }
1206 return 0;
1207 }
1208
1207 diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
1208 index 9b79056d705d..e1b1d390b329 100644
1209 --- a/fs/f2fs/segment.c
1210 +++ b/fs/f2fs/segment.c
1211 @@ -215,7 +215,8 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page)
1212 }
1213
1214 static int __revoke_inmem_pages(struct inode *inode,
1215 - struct list_head *head, bool drop, bool recover)
1216 + struct list_head *head, bool drop, bool recover,
1217 + bool trylock)
1218 {
1219 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1220 struct inmem_pages *cur, *tmp;
1221 @@ -227,7 +228,16 @@ static int __revoke_inmem_pages(struct inode *inode,
1222 if (drop)
1223 trace_f2fs_commit_inmem_page(page, INMEM_DROP);
1224
1225 - lock_page(page);
1226 + if (trylock) {
1227 + /*
1228 + * to avoid deadlock in between page lock and
1229 + * inmem_lock.
1230 + */
1231 + if (!trylock_page(page))
1232 + continue;
1233 + } else {
1234 + lock_page(page);
1235 + }
1236
1237 f2fs_wait_on_page_writeback(page, DATA, true, true);
1238
1239 @@ -318,13 +328,19 @@ void f2fs_drop_inmem_pages(struct inode *inode)
1240 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1241 struct f2fs_inode_info *fi = F2FS_I(inode);
1242
1243 - mutex_lock(&fi->inmem_lock);
1244 - __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
1245 - spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
1246 - if (!list_empty(&fi->inmem_ilist))
1247 - list_del_init(&fi->inmem_ilist);
1248 - spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
1249 - mutex_unlock(&fi->inmem_lock);
1250 + while (!list_empty(&fi->inmem_pages)) {
1251 + mutex_lock(&fi->inmem_lock);
1252 + __revoke_inmem_pages(inode, &fi->inmem_pages,
1253 + true, false, true);
1254 +
1255 + if (list_empty(&fi->inmem_pages)) {
1256 + spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
1257 + if (!list_empty(&fi->inmem_ilist))
1258 + list_del_init(&fi->inmem_ilist);
1259 + spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
1260 + }
1261 + mutex_unlock(&fi->inmem_lock);
1262 + }
1263
1264 clear_inode_flag(inode, FI_ATOMIC_FILE);
1265 fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
1266 @@ -429,12 +445,15 @@ retry:
1267 * recovery or rewrite & commit last transaction. For other
1268 * error number, revoking was done by filesystem itself.
1269 */
1270 - err = __revoke_inmem_pages(inode, &revoke_list, false, true);
1271 + err = __revoke_inmem_pages(inode, &revoke_list,
1272 + false, true, false);
1273
1274 /* drop all uncommitted pages */
1275 - __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
1276 + __revoke_inmem_pages(inode, &fi->inmem_pages,
1277 + true, false, false);
1278 } else {
1279 - __revoke_inmem_pages(inode, &revoke_list, false, false);
1280 + __revoke_inmem_pages(inode, &revoke_list,
1281 + false, false, false);
1282 }
1283
1284 return err;
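
The trylock parameter added above breaks a lock-order inversion: f2fs_drop_inmem_pages() holds inmem_lock and used to block on the page lock, while other paths take the page lock first. The rewrite only trylocks the page and loops otherwise. The generic shape of that back-off, sketched with pthreads:

    #include <pthread.h>

    static pthread_mutex_t inmem_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t page_lock  = PTHREAD_MUTEX_INITIALIZER;

    void drop_pages(void)
    {
        for (;;) {
            pthread_mutex_lock(&inmem_lock);
            if (pthread_mutex_trylock(&page_lock) == 0) {
                /* ... revoke the page safely under both locks ... */
                pthread_mutex_unlock(&page_lock);
                pthread_mutex_unlock(&inmem_lock);
                break;
            }
            /* page lock held elsewhere (its owner may want inmem_lock):
             * drop ours and retry instead of deadlocking */
            pthread_mutex_unlock(&inmem_lock);
        }
    }
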
1285 diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
1286 index b647f0bd150c..94220ba85628 100644
1287 --- a/fs/udf/truncate.c
1288 +++ b/fs/udf/truncate.c
1289 @@ -260,6 +260,9 @@ void udf_truncate_extents(struct inode *inode)
1290 epos.block = eloc;
1291 epos.bh = udf_tread(sb,
1292 udf_get_lb_pblock(sb, &eloc, 0));
1293 + /* Error reading indirect block? */
1294 + if (!epos.bh)
1295 + return;
1296 if (elen)
1297 indirect_ext_len =
1298 (elen + sb->s_blocksize - 1) >>
1299 diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
1300 index a420c07904bc..337d5049ff93 100644
1301 --- a/include/linux/ceph/libceph.h
1302 +++ b/include/linux/ceph/libceph.h
1303 @@ -294,6 +294,8 @@ extern void ceph_destroy_client(struct ceph_client *client);
1304 extern int __ceph_open_session(struct ceph_client *client,
1305 unsigned long started);
1306 extern int ceph_open_session(struct ceph_client *client);
1307 +int ceph_wait_for_latest_osdmap(struct ceph_client *client,
1308 + unsigned long timeout);
1309
1310 /* pagevec.c */
1311 extern void ceph_release_page_vector(struct page **pages, int num_pages);
1312 diff --git a/include/linux/fs.h b/include/linux/fs.h
1313 index 29d8e2cfed0e..fd423fec8d83 100644
1314 --- a/include/linux/fs.h
1315 +++ b/include/linux/fs.h
1316 @@ -304,13 +304,19 @@ enum rw_hint {
1317
1318 struct kiocb {
1319 struct file *ki_filp;
1320 +
1321 + /* The 'ki_filp' pointer is shared in a union for aio */
1322 + randomized_struct_fields_start
1323 +
1324 loff_t ki_pos;
1325 void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
1326 void *private;
1327 int ki_flags;
1328 u16 ki_hint;
1329 u16 ki_ioprio; /* See linux/ioprio.h */
1330 -} __randomize_layout;
1331 +
1332 + randomized_struct_fields_end
1333 +};
1334
1335 static inline bool is_sync_kiocb(struct kiocb *kiocb)
1336 {
1337 diff --git a/kernel/futex.c b/kernel/futex.c
1338 index a0514e01c3eb..52668d44e07b 100644
1339 --- a/kernel/futex.c
1340 +++ b/kernel/futex.c
1341 @@ -3440,6 +3440,10 @@ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int p
1342 {
1343 u32 uval, uninitialized_var(nval), mval;
1344
1345 + /* Futex address must be 32bit aligned */
1346 + if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
1347 + return -1;
1348 +
1349 retry:
1350 if (get_user(uval, uaddr))
1351 return -1;
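
handle_futex_death() now rejects robust-list entries whose user address is misaligned, since a futex word is a naturally aligned 32-bit value and the kernel's atomic operations require that alignment. The check, restated in userspace:

    #include <stdint.h>
    #include <stdio.h>

    static int futex_addr_valid(const uint32_t *uaddr)
    {
        return ((uintptr_t)uaddr % sizeof(*uaddr)) == 0;
    }

    int main(void)
    {
        printf("%d %d\n",
               futex_addr_valid((uint32_t *)0x1000),  /* 1: aligned */
               futex_addr_valid((uint32_t *)0x1002)); /* 0: misaligned */
        return 0;
    }
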
1352 diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
1353 index 95932333a48b..e805fe3bf87f 100644
1354 --- a/kernel/locking/lockdep.c
1355 +++ b/kernel/locking/lockdep.c
1356 @@ -3535,6 +3535,9 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
1357 unsigned int depth;
1358 int i;
1359
1360 + if (unlikely(!debug_locks))
1361 + return 0;
1362 +
1363 depth = curr->lockdep_depth;
1364 /*
1365 * This function is about (re)setting the class of a held lock,
1366 diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
1367 index 1506e1632394..d4e2a166ae17 100644
1368 --- a/net/bluetooth/hci_sock.c
1369 +++ b/net/bluetooth/hci_sock.c
1370 @@ -831,8 +831,6 @@ static int hci_sock_release(struct socket *sock)
1371 if (!sk)
1372 return 0;
1373
1374 - hdev = hci_pi(sk)->hdev;
1375 -
1376 switch (hci_pi(sk)->channel) {
1377 case HCI_CHANNEL_MONITOR:
1378 atomic_dec(&monitor_promisc);
1379 @@ -854,6 +852,7 @@ static int hci_sock_release(struct socket *sock)
1380
1381 bt_sock_unlink(&hci_sk_list, sk);
1382
1383 + hdev = hci_pi(sk)->hdev;
1384 if (hdev) {
1385 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1386 /* When releasing a user channel exclusive access,
1387 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
1388 index 6693e209efe8..f77888ec93f1 100644
1389 --- a/net/bridge/netfilter/ebtables.c
1390 +++ b/net/bridge/netfilter/ebtables.c
1391 @@ -31,10 +31,6 @@
1392 /* needed for logical [in,out]-dev filtering */
1393 #include "../br_private.h"
1394
1395 -#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
1396 - "report to author: "format, ## args)
1397 -/* #define BUGPRINT(format, args...) */
1398 -
1399 /* Each cpu has its own set of counters, so there is no need for write_lock in
1400 * the softirq
1401 * For reading or updating the counters, the user context needs to
1402 @@ -466,8 +462,6 @@ static int ebt_verify_pointers(const struct ebt_replace *repl,
1403 /* we make userspace set this right,
1404 * so there is no misunderstanding
1405 */
1406 - BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
1407 - "in distinguisher\n");
1408 return -EINVAL;
1409 }
1410 if (i != NF_BR_NUMHOOKS)
1411 @@ -485,18 +479,14 @@ static int ebt_verify_pointers(const struct ebt_replace *repl,
1412 offset += e->next_offset;
1413 }
1414 }
1415 - if (offset != limit) {
1416 - BUGPRINT("entries_size too small\n");
1417 + if (offset != limit)
1418 return -EINVAL;
1419 - }
1420
1421 /* check if all valid hooks have a chain */
1422 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1423 if (!newinfo->hook_entry[i] &&
1424 - (valid_hooks & (1 << i))) {
1425 - BUGPRINT("Valid hook without chain\n");
1426 + (valid_hooks & (1 << i)))
1427 return -EINVAL;
1428 - }
1429 }
1430 return 0;
1431 }
1432 @@ -523,26 +513,20 @@ ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
1433 /* this checks if the previous chain has as many entries
1434 * as it said it has
1435 */
1436 - if (*n != *cnt) {
1437 - BUGPRINT("nentries does not equal the nr of entries "
1438 - "in the chain\n");
1439 + if (*n != *cnt)
1440 return -EINVAL;
1441 - }
1442 +
1443 if (((struct ebt_entries *)e)->policy != EBT_DROP &&
1444 ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
1445 /* only RETURN from udc */
1446 if (i != NF_BR_NUMHOOKS ||
1447 - ((struct ebt_entries *)e)->policy != EBT_RETURN) {
1448 - BUGPRINT("bad policy\n");
1449 + ((struct ebt_entries *)e)->policy != EBT_RETURN)
1450 return -EINVAL;
1451 - }
1452 }
1453 if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
1454 (*udc_cnt)++;
1455 - if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
1456 - BUGPRINT("counter_offset != totalcnt");
1457 + if (((struct ebt_entries *)e)->counter_offset != *totalcnt)
1458 return -EINVAL;
1459 - }
1460 *n = ((struct ebt_entries *)e)->nentries;
1461 *cnt = 0;
1462 return 0;
1463 @@ -550,15 +534,13 @@ ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
1464 /* a plain old entry, heh */
1465 if (sizeof(struct ebt_entry) > e->watchers_offset ||
1466 e->watchers_offset > e->target_offset ||
1467 - e->target_offset >= e->next_offset) {
1468 - BUGPRINT("entry offsets not in right order\n");
1469 + e->target_offset >= e->next_offset)
1470 return -EINVAL;
1471 - }
1472 +
1473 /* this is not checked anywhere else */
1474 - if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
1475 - BUGPRINT("target size too small\n");
1476 + if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target))
1477 return -EINVAL;
1478 - }
1479 +
1480 (*cnt)++;
1481 (*totalcnt)++;
1482 return 0;
1483 @@ -678,18 +660,15 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
1484 if (e->bitmask == 0)
1485 return 0;
1486
1487 - if (e->bitmask & ~EBT_F_MASK) {
1488 - BUGPRINT("Unknown flag for bitmask\n");
1489 + if (e->bitmask & ~EBT_F_MASK)
1490 return -EINVAL;
1491 - }
1492 - if (e->invflags & ~EBT_INV_MASK) {
1493 - BUGPRINT("Unknown flag for inv bitmask\n");
1494 +
1495 + if (e->invflags & ~EBT_INV_MASK)
1496 return -EINVAL;
1497 - }
1498 - if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3)) {
1499 - BUGPRINT("NOPROTO & 802_3 not allowed\n");
1500 +
1501 + if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3))
1502 return -EINVAL;
1503 - }
1504 +
1505 /* what hook do we belong to? */
1506 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1507 if (!newinfo->hook_entry[i])
1508 @@ -748,13 +727,11 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
1509 t->u.target = target;
1510 if (t->u.target == &ebt_standard_target) {
1511 if (gap < sizeof(struct ebt_standard_target)) {
1512 - BUGPRINT("Standard target size too big\n");
1513 ret = -EFAULT;
1514 goto cleanup_watchers;
1515 }
1516 if (((struct ebt_standard_target *)t)->verdict <
1517 -NUM_STANDARD_TARGETS) {
1518 - BUGPRINT("Invalid standard target\n");
1519 ret = -EFAULT;
1520 goto cleanup_watchers;
1521 }
1522 @@ -813,10 +790,9 @@ static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack
1523 if (strcmp(t->u.name, EBT_STANDARD_TARGET))
1524 goto letscontinue;
1525 if (e->target_offset + sizeof(struct ebt_standard_target) >
1526 - e->next_offset) {
1527 - BUGPRINT("Standard target size too big\n");
1528 + e->next_offset)
1529 return -1;
1530 - }
1531 +
1532 verdict = ((struct ebt_standard_target *)t)->verdict;
1533 if (verdict >= 0) { /* jump to another chain */
1534 struct ebt_entries *hlp2 =
1535 @@ -825,14 +801,12 @@ static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack
1536 if (hlp2 == cl_s[i].cs.chaininfo)
1537 break;
1538 /* bad destination or loop */
1539 - if (i == udc_cnt) {
1540 - BUGPRINT("bad destination\n");
1541 + if (i == udc_cnt)
1542 return -1;
1543 - }
1544 - if (cl_s[i].cs.n) {
1545 - BUGPRINT("loop\n");
1546 +
1547 + if (cl_s[i].cs.n)
1548 return -1;
1549 - }
1550 +
1551 if (cl_s[i].hookmask & (1 << hooknr))
1552 goto letscontinue;
1553 /* this can't be 0, so the loop test is correct */
1554 @@ -865,24 +839,21 @@ static int translate_table(struct net *net, const char *name,
1555 i = 0;
1556 while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
1557 i++;
1558 - if (i == NF_BR_NUMHOOKS) {
1559 - BUGPRINT("No valid hooks specified\n");
1560 + if (i == NF_BR_NUMHOOKS)
1561 return -EINVAL;
1562 - }
1563 - if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
1564 - BUGPRINT("Chains don't start at beginning\n");
1565 +
1566 + if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries)
1567 return -EINVAL;
1568 - }
1569 +
1570 /* make sure chains are ordered after each other in same order
1571 * as their corresponding hooks
1572 */
1573 for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
1574 if (!newinfo->hook_entry[j])
1575 continue;
1576 - if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
1577 - BUGPRINT("Hook order must be followed\n");
1578 + if (newinfo->hook_entry[j] <= newinfo->hook_entry[i])
1579 return -EINVAL;
1580 - }
1581 +
1582 i = j;
1583 }
1584
1585 @@ -900,15 +871,11 @@ static int translate_table(struct net *net, const char *name,
1586 if (ret != 0)
1587 return ret;
1588
1589 - if (i != j) {
1590 - BUGPRINT("nentries does not equal the nr of entries in the "
1591 - "(last) chain\n");
1592 + if (i != j)
1593 return -EINVAL;
1594 - }
1595 - if (k != newinfo->nentries) {
1596 - BUGPRINT("Total nentries is wrong\n");
1597 +
1598 + if (k != newinfo->nentries)
1599 return -EINVAL;
1600 - }
1601
1602 /* get the location of the udc, put them in an array
1603 * while we're at it, allocate the chainstack
1604 @@ -942,7 +909,6 @@ static int translate_table(struct net *net, const char *name,
1605 ebt_get_udc_positions, newinfo, &i, cl_s);
1606 /* sanity check */
1607 if (i != udc_cnt) {
1608 - BUGPRINT("i != udc_cnt\n");
1609 vfree(cl_s);
1610 return -EFAULT;
1611 }
1612 @@ -1042,7 +1008,6 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
1613 goto free_unlock;
1614
1615 if (repl->num_counters && repl->num_counters != t->private->nentries) {
1616 - BUGPRINT("Wrong nr. of counters requested\n");
1617 ret = -EINVAL;
1618 goto free_unlock;
1619 }
1620 @@ -1118,15 +1083,12 @@ static int do_replace(struct net *net, const void __user *user,
1621 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1622 return -EFAULT;
1623
1624 - if (len != sizeof(tmp) + tmp.entries_size) {
1625 - BUGPRINT("Wrong len argument\n");
1626 + if (len != sizeof(tmp) + tmp.entries_size)
1627 return -EINVAL;
1628 - }
1629
1630 - if (tmp.entries_size == 0) {
1631 - BUGPRINT("Entries_size never zero\n");
1632 + if (tmp.entries_size == 0)
1633 return -EINVAL;
1634 - }
1635 +
1636 /* overflow check */
1637 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1638 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1639 @@ -1153,7 +1115,6 @@ static int do_replace(struct net *net, const void __user *user,
1640 }
1641 if (copy_from_user(
1642 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1643 - BUGPRINT("Couldn't copy entries from userspace\n");
1644 ret = -EFAULT;
1645 goto free_entries;
1646 }
1647 @@ -1194,10 +1155,8 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
1648
1649 if (input_table == NULL || (repl = input_table->table) == NULL ||
1650 repl->entries == NULL || repl->entries_size == 0 ||
1651 - repl->counters != NULL || input_table->private != NULL) {
1652 - BUGPRINT("Bad table data for ebt_register_table!!!\n");
1653 + repl->counters != NULL || input_table->private != NULL)
1654 return -EINVAL;
1655 - }
1656
1657 /* Don't add one table to multiple lists. */
1658 table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
1659 @@ -1235,13 +1194,10 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
1660 ((char *)repl->hook_entry[i] - repl->entries);
1661 }
1662 ret = translate_table(net, repl->name, newinfo);
1663 - if (ret != 0) {
1664 - BUGPRINT("Translate_table failed\n");
1665 + if (ret != 0)
1666 goto free_chainstack;
1667 - }
1668
1669 if (table->check && table->check(newinfo, table->valid_hooks)) {
1670 - BUGPRINT("The table doesn't like its own initial data, lol\n");
1671 ret = -EINVAL;
1672 goto free_chainstack;
1673 }
1674 @@ -1252,7 +1208,6 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
1675 list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
1676 if (strcmp(t->name, table->name) == 0) {
1677 ret = -EEXIST;
1678 - BUGPRINT("Table name already exists\n");
1679 goto free_unlock;
1680 }
1681 }
1682 @@ -1320,7 +1275,6 @@ static int do_update_counters(struct net *net, const char *name,
1683 goto free_tmp;
1684
1685 if (num_counters != t->private->nentries) {
1686 - BUGPRINT("Wrong nr of counters\n");
1687 ret = -EINVAL;
1688 goto unlock_mutex;
1689 }
1690 @@ -1447,10 +1401,8 @@ static int copy_counters_to_user(struct ebt_table *t,
1691 if (num_counters == 0)
1692 return 0;
1693
1694 - if (num_counters != nentries) {
1695 - BUGPRINT("Num_counters wrong\n");
1696 + if (num_counters != nentries)
1697 return -EINVAL;
1698 - }
1699
1700 counterstmp = vmalloc(array_size(nentries, sizeof(*counterstmp)));
1701 if (!counterstmp)
1702 @@ -1496,15 +1448,11 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1703 (tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0))
1704 return -EINVAL;
1705
1706 - if (tmp.nentries != nentries) {
1707 - BUGPRINT("Nentries wrong\n");
1708 + if (tmp.nentries != nentries)
1709 return -EINVAL;
1710 - }
1711
1712 - if (tmp.entries_size != entries_size) {
1713 - BUGPRINT("Wrong size\n");
1714 + if (tmp.entries_size != entries_size)
1715 return -EINVAL;
1716 - }
1717
1718 ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1719 tmp.num_counters, nentries);
1720 @@ -1576,7 +1524,6 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1721 }
1722 mutex_unlock(&ebt_mutex);
1723 if (copy_to_user(user, &tmp, *len) != 0) {
1724 - BUGPRINT("c2u Didn't work\n");
1725 ret = -EFAULT;
1726 break;
1727 }
1728 diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
1729 index 9cab80207ced..79eac465ec65 100644
1730 --- a/net/ceph/ceph_common.c
1731 +++ b/net/ceph/ceph_common.c
1732 @@ -738,7 +738,6 @@ int __ceph_open_session(struct ceph_client *client, unsigned long started)
1733 }
1734 EXPORT_SYMBOL(__ceph_open_session);
1735
1736 -
1737 int ceph_open_session(struct ceph_client *client)
1738 {
1739 int ret;
1740 @@ -754,6 +753,23 @@ int ceph_open_session(struct ceph_client *client)
1741 }
1742 EXPORT_SYMBOL(ceph_open_session);
1743
1744 +int ceph_wait_for_latest_osdmap(struct ceph_client *client,
1745 + unsigned long timeout)
1746 +{
1747 + u64 newest_epoch;
1748 + int ret;
1749 +
1750 + ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
1751 + if (ret)
1752 + return ret;
1753 +
1754 + if (client->osdc.osdmap->epoch >= newest_epoch)
1755 + return 0;
1756 +
1757 + ceph_osdc_maybe_request_map(&client->osdc);
1758 + return ceph_monc_wait_osdmap(&client->monc, newest_epoch, timeout);
1759 +}
1760 +EXPORT_SYMBOL(ceph_wait_for_latest_osdmap);
1761
1762 static int __init init_ceph_lib(void)
1763 {
1764 diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
1765 index 18deb3d889c4..a53e4fbb6319 100644
1766 --- a/net/ceph/mon_client.c
1767 +++ b/net/ceph/mon_client.c
1768 @@ -922,6 +922,15 @@ int ceph_monc_blacklist_add(struct ceph_mon_client *monc,
1769 mutex_unlock(&monc->mutex);
1770
1771 ret = wait_generic_request(req);
1772 + if (!ret)
1773 + /*
1774 + * Make sure we have the osdmap that includes the blacklist
1775 + * entry. This is needed to ensure that the OSDs pick up the
1776 + * new blacklist before processing any future requests from
1777 + * this client.
1778 + */
1779 + ret = ceph_wait_for_latest_osdmap(monc->client, 0);
1780 +
1781 out:
1782 put_generic_request(req);
1783 return ret;
1784 diff --git a/sound/ac97/bus.c b/sound/ac97/bus.c
1785 index 9f0c480489ef..9cbf6927abe9 100644
1786 --- a/sound/ac97/bus.c
1787 +++ b/sound/ac97/bus.c
1788 @@ -84,7 +84,7 @@ ac97_of_get_child_device(struct ac97_controller *ac97_ctrl, int idx,
1789 if ((idx != of_property_read_u32(node, "reg", &reg)) ||
1790 !of_device_is_compatible(node, compat))
1791 continue;
1792 - return of_node_get(node);
1793 + return node;
1794 }
1795
1796 return NULL;
1797 diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
1798 index 220e61926ea4..513291ba0ab0 100644
1799 --- a/sound/firewire/motu/motu.c
1800 +++ b/sound/firewire/motu/motu.c
1801 @@ -36,7 +36,7 @@ static void name_card(struct snd_motu *motu)
1802 fw_csr_iterator_init(&it, motu->unit->directory);
1803 while (fw_csr_iterator_next(&it, &key, &val)) {
1804 switch (key) {
1805 - case CSR_VERSION:
1806 + case CSR_MODEL:
1807 version = val;
1808 break;
1809 }
1810 @@ -46,7 +46,7 @@ static void name_card(struct snd_motu *motu)
1811 strcpy(motu->card->shortname, motu->spec->name);
1812 strcpy(motu->card->mixername, motu->spec->name);
1813 snprintf(motu->card->longname, sizeof(motu->card->longname),
1814 - "MOTU %s (version:%d), GUID %08x%08x at %s, S%d",
1815 + "MOTU %s (version:%06x), GUID %08x%08x at %s, S%d",
1816 motu->spec->name, version,
1817 fw_dev->config_rom[3], fw_dev->config_rom[4],
1818 dev_name(&motu->unit->device), 100 << fw_dev->max_speed);
1819 @@ -237,20 +237,20 @@ static const struct snd_motu_spec motu_audio_express = {
1820 #define SND_MOTU_DEV_ENTRY(model, data) \
1821 { \
1822 .match_flags = IEEE1394_MATCH_VENDOR_ID | \
1823 - IEEE1394_MATCH_MODEL_ID | \
1824 - IEEE1394_MATCH_SPECIFIER_ID, \
1825 + IEEE1394_MATCH_SPECIFIER_ID | \
1826 + IEEE1394_MATCH_VERSION, \
1827 .vendor_id = OUI_MOTU, \
1828 - .model_id = model, \
1829 .specifier_id = OUI_MOTU, \
1830 + .version = model, \
1831 .driver_data = (kernel_ulong_t)data, \
1832 }
1833
1834 static const struct ieee1394_device_id motu_id_table[] = {
1835 - SND_MOTU_DEV_ENTRY(0x101800, &motu_828mk2),
1836 - SND_MOTU_DEV_ENTRY(0x107800, &snd_motu_spec_traveler),
1837 - SND_MOTU_DEV_ENTRY(0x106800, &motu_828mk3), /* FireWire only. */
1838 - SND_MOTU_DEV_ENTRY(0x100800, &motu_828mk3), /* Hybrid. */
1839 - SND_MOTU_DEV_ENTRY(0x104800, &motu_audio_express),
1840 + SND_MOTU_DEV_ENTRY(0x000003, &motu_828mk2),
1841 + SND_MOTU_DEV_ENTRY(0x000009, &snd_motu_spec_traveler),
1842 + SND_MOTU_DEV_ENTRY(0x000015, &motu_828mk3), /* FireWire only. */
1843 + SND_MOTU_DEV_ENTRY(0x000035, &motu_828mk3), /* Hybrid. */
1844 + SND_MOTU_DEV_ENTRY(0x000033, &motu_audio_express),
1845 { }
1846 };
1847 MODULE_DEVICE_TABLE(ieee1394, motu_id_table);
1848 diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
1849 index 9f8d59e7e89f..b238e903b9d7 100644
1850 --- a/sound/pci/hda/hda_codec.c
1851 +++ b/sound/pci/hda/hda_codec.c
1852 @@ -2917,6 +2917,7 @@ static void hda_call_codec_resume(struct hda_codec *codec)
1853 hda_jackpoll_work(&codec->jackpoll_work.work);
1854 else
1855 snd_hda_jack_report_sync(codec);
1856 + codec->core.dev.power.power_state = PMSG_ON;
1857 snd_hdac_leave_pm(&codec->core);
1858 }
1859
1860 @@ -2950,10 +2951,62 @@ static int hda_codec_runtime_resume(struct device *dev)
1861 }
1862 #endif /* CONFIG_PM */
1863
1864 +#ifdef CONFIG_PM_SLEEP
1865 +static int hda_codec_force_resume(struct device *dev)
1866 +{
1867 + int ret;
1868 +
1869 + /* The get/put pair below enforces the runtime resume even if the
1870 + * device hasn't been used at suspend time. This trick is needed to
1871 + * update the jack state change during the sleep.
1872 + */
1873 + pm_runtime_get_noresume(dev);
1874 + ret = pm_runtime_force_resume(dev);
1875 + pm_runtime_put(dev);
1876 + return ret;
1877 +}
1878 +
1879 +static int hda_codec_pm_suspend(struct device *dev)
1880 +{
1881 + dev->power.power_state = PMSG_SUSPEND;
1882 + return pm_runtime_force_suspend(dev);
1883 +}
1884 +
1885 +static int hda_codec_pm_resume(struct device *dev)
1886 +{
1887 + dev->power.power_state = PMSG_RESUME;
1888 + return hda_codec_force_resume(dev);
1889 +}
1890 +
1891 +static int hda_codec_pm_freeze(struct device *dev)
1892 +{
1893 + dev->power.power_state = PMSG_FREEZE;
1894 + return pm_runtime_force_suspend(dev);
1895 +}
1896 +
1897 +static int hda_codec_pm_thaw(struct device *dev)
1898 +{
1899 + dev->power.power_state = PMSG_THAW;
1900 + return hda_codec_force_resume(dev);
1901 +}
1902 +
1903 +static int hda_codec_pm_restore(struct device *dev)
1904 +{
1905 + dev->power.power_state = PMSG_RESTORE;
1906 + return hda_codec_force_resume(dev);
1907 +}
1908 +#endif /* CONFIG_PM_SLEEP */
1909 +
1910 /* referred in hda_bind.c */
1911 const struct dev_pm_ops hda_codec_driver_pm = {
1912 - SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1913 - pm_runtime_force_resume)
1914 +#ifdef CONFIG_PM_SLEEP
1915 + .suspend = hda_codec_pm_suspend,
1916 + .resume = hda_codec_pm_resume,
1917 + .freeze = hda_codec_pm_freeze,
1918 + .thaw = hda_codec_pm_thaw,
1919 + .poweroff = hda_codec_pm_suspend,
1920 + .restore = hda_codec_pm_restore,
1921 +#endif /* CONFIG_PM_SLEEP */
1922 SET_RUNTIME_PM_OPS(hda_codec_runtime_suspend, hda_codec_runtime_resume,
1923 NULL)
1924 };
1925 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
1926 index e5c49003e75f..ece256a3b48f 100644
1927 --- a/sound/pci/hda/hda_intel.c
1928 +++ b/sound/pci/hda/hda_intel.c
1929 @@ -947,7 +947,7 @@ static void __azx_runtime_suspend(struct azx *chip)
1930 display_power(chip, false);
1931 }
1932
1933 -static void __azx_runtime_resume(struct azx *chip)
1934 +static void __azx_runtime_resume(struct azx *chip, bool from_rt)
1935 {
1936 struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
1937 struct hdac_bus *bus = azx_bus(chip);
1938 @@ -964,7 +964,7 @@ static void __azx_runtime_resume(struct azx *chip)
1939 azx_init_pci(chip);
1940 hda_intel_init_chip(chip, true);
1941
1942 - if (status) {
1943 + if (status && from_rt) {
1944 list_for_each_codec(codec, &chip->bus)
1945 if (status & (1 << codec->addr))
1946 schedule_delayed_work(&codec->jackpoll_work,
1947 @@ -1016,7 +1016,7 @@ static int azx_resume(struct device *dev)
1948 chip->msi = 0;
1949 if (azx_acquire_irq(chip, 1) < 0)
1950 return -EIO;
1951 - __azx_runtime_resume(chip);
1952 + __azx_runtime_resume(chip, false);
1953 snd_power_change_state(card, SNDRV_CTL_POWER_D0);
1954
1955 trace_azx_resume(chip);
1956 @@ -1081,7 +1081,7 @@ static int azx_runtime_resume(struct device *dev)
1957 chip = card->private_data;
1958 if (!azx_has_pm_runtime(chip))
1959 return 0;
1960 - __azx_runtime_resume(chip);
1961 + __azx_runtime_resume(chip, true);
1962
1963 /* disable controller Wake Up event*/
1964 azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
1965 @@ -2144,10 +2144,12 @@ static struct snd_pci_quirk power_save_blacklist[] = {
1966 SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0),
1967 /* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */
1968 SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0),
1969 - /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
1970 - SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
1971 /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
1972 SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
1973 + /* https://bugzilla.redhat.com/show_bug.cgi?id=1689623 */
1974 + SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0),
1975 + /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
1976 + SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
1977 {}
1978 };
1979 #endif /* CONFIG_PM */
1980 diff --git a/tools/objtool/check.c b/tools/objtool/check.c
1981 index 0414a0d52262..5dde107083c6 100644
1982 --- a/tools/objtool/check.c
1983 +++ b/tools/objtool/check.c
1984 @@ -2184,9 +2184,10 @@ static void cleanup(struct objtool_file *file)
1985 elf_close(file->elf);
1986 }
1987
1988 +static struct objtool_file file;
1989 +
1990 int check(const char *_objname, bool orc)
1991 {
1992 - struct objtool_file file;
1993 int ret, warnings = 0;
1994
1995 objname = _objname;
1996 diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
1997 index 18a59fba97ff..cc4773157b9b 100644
1998 --- a/tools/perf/util/probe-event.c
1999 +++ b/tools/perf/util/probe-event.c
2000 @@ -157,8 +157,10 @@ static struct map *kernel_get_module_map(const char *module)
2001 if (module && strchr(module, '/'))
2002 return dso__new_map(module);
2003
2004 - if (!module)
2005 - module = "kernel";
2006 + if (!module) {
2007 + pos = machine__kernel_map(host_machine);
2008 + return map__get(pos);
2009 + }
2010
2011 for (pos = maps__first(maps); pos; pos = map__next(pos)) {
2012 /* short_name is "[module]" */