Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0159-5.4.60-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3614 - (show annotations) (download)
Tue Aug 25 10:59:10 2020 UTC (3 years, 8 months ago) by niro
File size: 165586 byte(s)
-linux-5.4.60
1 diff --git a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
2 index c82794002595f..89647d7143879 100644
3 --- a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
4 +++ b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
5 @@ -21,7 +21,7 @@ controller state. The mux controller state is described in
6
7 Example:
8 mux: mux-controller {
9 - compatible = "mux-gpio";
10 + compatible = "gpio-mux";
11 #mux-control-cells = <0>;
12
13 mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
14 diff --git a/Makefile b/Makefile
15 index cc72b8472f24a..7c001e21e28e7 100644
16 --- a/Makefile
17 +++ b/Makefile
18 @@ -1,7 +1,7 @@
19 # SPDX-License-Identifier: GPL-2.0
20 VERSION = 5
21 PATCHLEVEL = 4
22 -SUBLEVEL = 59
23 +SUBLEVEL = 60
24 EXTRAVERSION =
25 NAME = Kleptomaniac Octopus
26
27 diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
28 index fbcf03f86c967..05dc58c13fa41 100644
29 --- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
30 +++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
31 @@ -19,6 +19,12 @@
32 model = "Globalscale Marvell ESPRESSOBin Board";
33 compatible = "globalscale,espressobin", "marvell,armada3720", "marvell,armada3710";
34
35 + aliases {
36 + ethernet0 = &eth0;
37 + serial0 = &uart0;
38 + serial1 = &uart1;
39 + };
40 +
41 chosen {
42 stdout-path = "serial0:115200n8";
43 };
44 diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
45 index a0b4f1bca4917..19128d994ee97 100644
46 --- a/arch/arm64/kernel/perf_event.c
47 +++ b/arch/arm64/kernel/perf_event.c
48 @@ -155,7 +155,7 @@ armv8pmu_events_sysfs_show(struct device *dev,
49
50 pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
51
52 - return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
53 + return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
54 }
55
56 #define ARMV8_EVENT_ATTR(name, config) \
57 @@ -303,10 +303,13 @@ armv8pmu_event_attr_is_visible(struct kobject *kobj,
58 test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
59 return attr->mode;
60
61 - pmu_attr->id -= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
62 - if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
63 - test_bit(pmu_attr->id, cpu_pmu->pmceid_ext_bitmap))
64 - return attr->mode;
65 + if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) {
66 + u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
67 +
68 + if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
69 + test_bit(id, cpu_pmu->pmceid_ext_bitmap))
70 + return attr->mode;
71 + }
72
73 return 0;
74 }
75 diff --git a/arch/mips/boot/dts/ingenic/qi_lb60.dts b/arch/mips/boot/dts/ingenic/qi_lb60.dts
76 index 7a371d9c5a33f..eda37fb516f0e 100644
77 --- a/arch/mips/boot/dts/ingenic/qi_lb60.dts
78 +++ b/arch/mips/boot/dts/ingenic/qi_lb60.dts
79 @@ -69,7 +69,7 @@
80 "Speaker", "OUTL",
81 "Speaker", "OUTR",
82 "INL", "LOUT",
83 - "INL", "ROUT";
84 + "INR", "ROUT";
85
86 simple-audio-card,aux-devs = <&amp>;
87
88 diff --git a/arch/mips/kernel/topology.c b/arch/mips/kernel/topology.c
89 index cd3e1f82e1a5d..08ad6371fbe08 100644
90 --- a/arch/mips/kernel/topology.c
91 +++ b/arch/mips/kernel/topology.c
92 @@ -20,7 +20,7 @@ static int __init topology_init(void)
93 for_each_present_cpu(i) {
94 struct cpu *c = &per_cpu(cpu_devices, i);
95
96 - c->hotpluggable = 1;
97 + c->hotpluggable = !!i;
98 ret = register_cpu(c, i);
99 if (ret)
100 printk(KERN_WARNING "topology_init: register_cpu %d "
101 diff --git a/arch/openrisc/kernel/stacktrace.c b/arch/openrisc/kernel/stacktrace.c
102 index 43f140a28bc72..54d38809e22cb 100644
103 --- a/arch/openrisc/kernel/stacktrace.c
104 +++ b/arch/openrisc/kernel/stacktrace.c
105 @@ -13,6 +13,7 @@
106 #include <linux/export.h>
107 #include <linux/sched.h>
108 #include <linux/sched/debug.h>
109 +#include <linux/sched/task_stack.h>
110 #include <linux/stacktrace.h>
111
112 #include <asm/processor.h>
113 @@ -68,12 +69,25 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
114 {
115 unsigned long *sp = NULL;
116
117 + if (!try_get_task_stack(tsk))
118 + return;
119 +
120 if (tsk == current)
121 sp = (unsigned long *) &sp;
122 - else
123 - sp = (unsigned long *) KSTK_ESP(tsk);
124 + else {
125 + unsigned long ksp;
126 +
127 + /* Locate stack from kernel context */
128 + ksp = task_thread_info(tsk)->ksp;
129 + ksp += STACK_FRAME_OVERHEAD; /* redzone */
130 + ksp += sizeof(struct pt_regs);
131 +
132 + sp = (unsigned long *) ksp;
133 + }
134
135 unwind_stack(trace, sp, save_stack_address_nosched);
136 +
137 + put_task_stack(tsk);
138 }
139 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
140
141 diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
142 index dce863a7635cd..8e5b7d0b851c6 100644
143 --- a/arch/powerpc/include/asm/percpu.h
144 +++ b/arch/powerpc/include/asm/percpu.h
145 @@ -10,8 +10,6 @@
146
147 #ifdef CONFIG_SMP
148
149 -#include <asm/paca.h>
150 -
151 #define __my_cpu_offset local_paca->data_offset
152
153 #endif /* CONFIG_SMP */
154 @@ -19,4 +17,6 @@
155
156 #include <asm-generic/percpu.h>
157
158 +#include <asm/paca.h>
159 +
160 #endif /* _ASM_POWERPC_PERCPU_H_ */
161 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
162 index 881a026a603a6..187047592d53c 100644
163 --- a/arch/powerpc/mm/fault.c
164 +++ b/arch/powerpc/mm/fault.c
165 @@ -241,6 +241,9 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
166 return false;
167 }
168
169 +// This comes from 64-bit struct rt_sigframe + __SIGNAL_FRAMESIZE
170 +#define SIGFRAME_MAX_SIZE (4096 + 128)
171 +
172 static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
173 struct vm_area_struct *vma, unsigned int flags,
174 bool *must_retry)
175 @@ -248,7 +251,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
176 /*
177 * N.B. The POWER/Open ABI allows programs to access up to
178 * 288 bytes below the stack pointer.
179 - * The kernel signal delivery code writes up to about 1.5kB
180 + * The kernel signal delivery code writes a bit over 4KB
181 * below the stack pointer (r1) before decrementing it.
182 * The exec code can write slightly over 640kB to the stack
183 * before setting the user r1. Thus we allow the stack to
184 @@ -273,7 +276,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
185 * between the last mapped region and the stack will
186 * expand the stack rather than segfaulting.
187 */
188 - if (address + 2048 >= uregs->gpr[1])
189 + if (address + SIGFRAME_MAX_SIZE >= uregs->gpr[1])
190 return false;
191
192 if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) &&
193 diff --git a/arch/powerpc/mm/ptdump/hashpagetable.c b/arch/powerpc/mm/ptdump/hashpagetable.c
194 index a07278027c6f4..a2e8c3b2cf351 100644
195 --- a/arch/powerpc/mm/ptdump/hashpagetable.c
196 +++ b/arch/powerpc/mm/ptdump/hashpagetable.c
197 @@ -259,7 +259,7 @@ static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *
198 for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
199 lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
200
201 - if (lpar_rc != H_SUCCESS)
202 + if (lpar_rc)
203 continue;
204 for (j = 0; j < 4; j++) {
205 if (HPTE_V_COMPARE(ptes[j].v, want_v) &&
206 diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
207 index f1888352b4e0b..e7d23a933a0d3 100644
208 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c
209 +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
210 @@ -27,7 +27,7 @@ static bool rtas_hp_event;
211 unsigned long pseries_memory_block_size(void)
212 {
213 struct device_node *np;
214 - unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
215 + u64 memblock_size = MIN_MEMORY_BLOCK_SIZE;
216 struct resource r;
217
218 np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
219 diff --git a/arch/sh/boards/mach-landisk/setup.c b/arch/sh/boards/mach-landisk/setup.c
220 index 16b4d8b0bb850..2c44b94f82fb2 100644
221 --- a/arch/sh/boards/mach-landisk/setup.c
222 +++ b/arch/sh/boards/mach-landisk/setup.c
223 @@ -82,6 +82,9 @@ device_initcall(landisk_devices_setup);
224
225 static void __init landisk_setup(char **cmdline_p)
226 {
227 + /* I/O port identity mapping */
228 + __set_io_port_base(0);
229 +
230 /* LED ON */
231 __raw_writeb(__raw_readb(PA_LED) | 0x03, PA_LED);
232
233 diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
234 index 3c222d6fdee3b..187c72a58e69c 100644
235 --- a/arch/x86/events/rapl.c
236 +++ b/arch/x86/events/rapl.c
237 @@ -642,7 +642,7 @@ static const struct attribute_group *rapl_attr_update[] = {
238 &rapl_events_pkg_group,
239 &rapl_events_ram_group,
240 &rapl_events_gpu_group,
241 - &rapl_events_gpu_group,
242 + &rapl_events_psys_group,
243 NULL,
244 };
245
246 diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
247 index df4d5385e6ddd..c8203694d9ce4 100644
248 --- a/arch/x86/kernel/apic/vector.c
249 +++ b/arch/x86/kernel/apic/vector.c
250 @@ -554,6 +554,10 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
251 irqd->chip_data = apicd;
252 irqd->hwirq = virq + i;
253 irqd_set_single_target(irqd);
254 +
255 + /* Don't invoke affinity setter on deactivated interrupts */
256 + irqd_set_affinity_on_activate(irqd);
257 +
258 /*
259 * Legacy vectors are already assigned when the IOAPIC
260 * takes them over. They stay on the same vector. This is
261 diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
262 index c65adaf813848..41200706e6da1 100644
263 --- a/arch/x86/kernel/tsc_msr.c
264 +++ b/arch/x86/kernel/tsc_msr.c
265 @@ -133,10 +133,15 @@ static const struct freq_desc freq_desc_ann = {
266 .mask = 0x0f,
267 };
268
269 -/* 24 MHz crystal? : 24 * 13 / 4 = 78 MHz */
270 +/*
271 + * 24 MHz crystal? : 24 * 13 / 4 = 78 MHz
272 + * Frequency step for Lightning Mountain SoC is fixed to 78 MHz,
273 + * so all the frequency entries are 78000.
274 + */
275 static const struct freq_desc freq_desc_lgm = {
276 .use_msr_plat = true,
277 - .freqs = { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 },
278 + .freqs = { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000,
279 + 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 },
280 .mask = 0x0f,
281 };
282
283 diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
284 index f092cc3f4e66d..956d4d47c6cd1 100644
285 --- a/arch/xtensa/include/asm/thread_info.h
286 +++ b/arch/xtensa/include/asm/thread_info.h
287 @@ -55,6 +55,10 @@ struct thread_info {
288 mm_segment_t addr_limit; /* thread address space */
289
290 unsigned long cpenable;
291 +#if XCHAL_HAVE_EXCLUSIVE
292 + /* result of the most recent exclusive store */
293 + unsigned long atomctl8;
294 +#endif
295
296 /* Allocate storage for extra user states and coprocessor states. */
297 #if XTENSA_HAVE_COPROCESSORS
298 diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
299 index 33a257b33723a..dc5c83cad9be8 100644
300 --- a/arch/xtensa/kernel/asm-offsets.c
301 +++ b/arch/xtensa/kernel/asm-offsets.c
302 @@ -93,6 +93,9 @@ int main(void)
303 DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
304 DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
305 DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
306 +#if XCHAL_HAVE_EXCLUSIVE
307 + DEFINE(THREAD_ATOMCTL8, offsetof (struct thread_info, atomctl8));
308 +#endif
309 #if XTENSA_HAVE_COPROCESSORS
310 DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
311 DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
312 diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
313 index 9e3676879168a..59671603c9c62 100644
314 --- a/arch/xtensa/kernel/entry.S
315 +++ b/arch/xtensa/kernel/entry.S
316 @@ -374,6 +374,11 @@ common_exception:
317 s32i a2, a1, PT_LCOUNT
318 #endif
319
320 +#if XCHAL_HAVE_EXCLUSIVE
321 + /* Clear exclusive access monitor set by interrupted code */
322 + clrex
323 +#endif
324 +
325 /* It is now save to restore the EXC_TABLE_FIXUP variable. */
326
327 rsr a2, exccause
328 @@ -2024,6 +2029,12 @@ ENTRY(_switch_to)
329 s32i a3, a4, THREAD_CPENABLE
330 #endif
331
332 +#if XCHAL_HAVE_EXCLUSIVE
333 + l32i a3, a5, THREAD_ATOMCTL8
334 + getex a3
335 + s32i a3, a4, THREAD_ATOMCTL8
336 +#endif
337 +
338 /* Flush register file. */
339
340 spill_registers_kernel
341 diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
342 index 9bae79f703013..86c9ba9631551 100644
343 --- a/arch/xtensa/kernel/perf_event.c
344 +++ b/arch/xtensa/kernel/perf_event.c
345 @@ -401,7 +401,7 @@ static struct pmu xtensa_pmu = {
346 .read = xtensa_pmu_read,
347 };
348
349 -static int xtensa_pmu_setup(int cpu)
350 +static int xtensa_pmu_setup(unsigned int cpu)
351 {
352 unsigned i;
353
354 diff --git a/crypto/af_alg.c b/crypto/af_alg.c
355 index a3b9df99af6de..35e026ba2c7ed 100644
356 --- a/crypto/af_alg.c
357 +++ b/crypto/af_alg.c
358 @@ -635,6 +635,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
359
360 if (!ctx->used)
361 ctx->merge = 0;
362 + ctx->init = ctx->more;
363 }
364 EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);
365
366 @@ -734,9 +735,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
367 *
368 * @sk socket of connection to user space
369 * @flags If MSG_DONTWAIT is set, then only report if function would sleep
370 + * @min Set to minimum request size if partial requests are allowed.
371 * @return 0 when writable memory is available, < 0 upon error
372 */
373 -int af_alg_wait_for_data(struct sock *sk, unsigned flags)
374 +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min)
375 {
376 DEFINE_WAIT_FUNC(wait, woken_wake_function);
377 struct alg_sock *ask = alg_sk(sk);
378 @@ -754,7 +756,9 @@ int af_alg_wait_for_data(struct sock *sk, unsigned flags)
379 if (signal_pending(current))
380 break;
381 timeout = MAX_SCHEDULE_TIMEOUT;
382 - if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more),
383 + if (sk_wait_event(sk, &timeout,
384 + ctx->init && (!ctx->more ||
385 + (min && ctx->used >= min)),
386 &wait)) {
387 err = 0;
388 break;
389 @@ -843,10 +847,11 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
390 }
391
392 lock_sock(sk);
393 - if (!ctx->more && ctx->used) {
394 + if (ctx->init && (init || !ctx->more)) {
395 err = -EINVAL;
396 goto unlock;
397 }
398 + ctx->init = true;
399
400 if (init) {
401 ctx->enc = enc;
402 diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
403 index 0ae000a61c7f5..43c6aa784858b 100644
404 --- a/crypto/algif_aead.c
405 +++ b/crypto/algif_aead.c
406 @@ -106,8 +106,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
407 size_t usedpages = 0; /* [in] RX bufs to be used from user */
408 size_t processed = 0; /* [in] TX bufs to be consumed */
409
410 - if (!ctx->used) {
411 - err = af_alg_wait_for_data(sk, flags);
412 + if (!ctx->init || ctx->more) {
413 + err = af_alg_wait_for_data(sk, flags, 0);
414 if (err)
415 return err;
416 }
417 @@ -558,12 +558,6 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
418
419 INIT_LIST_HEAD(&ctx->tsgl_list);
420 ctx->len = len;
421 - ctx->used = 0;
422 - atomic_set(&ctx->rcvused, 0);
423 - ctx->more = 0;
424 - ctx->merge = 0;
425 - ctx->enc = 0;
426 - ctx->aead_assoclen = 0;
427 crypto_init_wait(&ctx->wait);
428
429 ask->private = ctx;
430 diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
431 index ec5567c87a6df..81c4022285a7c 100644
432 --- a/crypto/algif_skcipher.c
433 +++ b/crypto/algif_skcipher.c
434 @@ -61,8 +61,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
435 int err = 0;
436 size_t len = 0;
437
438 - if (!ctx->used) {
439 - err = af_alg_wait_for_data(sk, flags);
440 + if (!ctx->init || (ctx->more && ctx->used < bs)) {
441 + err = af_alg_wait_for_data(sk, flags, bs);
442 if (err)
443 return err;
444 }
445 @@ -333,6 +333,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
446 ctx = sock_kmalloc(sk, len, GFP_KERNEL);
447 if (!ctx)
448 return -ENOMEM;
449 + memset(ctx, 0, len);
450
451 ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm),
452 GFP_KERNEL);
453 @@ -340,16 +341,10 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
454 sock_kfree_s(sk, ctx, len);
455 return -ENOMEM;
456 }
457 -
458 memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm));
459
460 INIT_LIST_HEAD(&ctx->tsgl_list);
461 ctx->len = len;
462 - ctx->used = 0;
463 - atomic_set(&ctx->rcvused, 0);
464 - ctx->more = 0;
465 - ctx->merge = 0;
466 - ctx->enc = 0;
467 crypto_init_wait(&ctx->wait);
468
469 ask->private = ctx;
470 diff --git a/drivers/base/dd.c b/drivers/base/dd.c
471 index b25bcab2a26bd..1d5dd37f3abe4 100644
472 --- a/drivers/base/dd.c
473 +++ b/drivers/base/dd.c
474 @@ -872,7 +872,9 @@ static int __device_attach(struct device *dev, bool allow_async)
475 int ret = 0;
476
477 device_lock(dev);
478 - if (dev->driver) {
479 + if (dev->p->dead) {
480 + goto out_unlock;
481 + } else if (dev->driver) {
482 if (device_is_bound(dev)) {
483 ret = 1;
484 goto out_unlock;
485 diff --git a/drivers/clk/actions/owl-s500.c b/drivers/clk/actions/owl-s500.c
486 index e2007ac4d235d..0eb83a0b70bcc 100644
487 --- a/drivers/clk/actions/owl-s500.c
488 +++ b/drivers/clk/actions/owl-s500.c
489 @@ -183,7 +183,7 @@ static OWL_GATE(timer_clk, "timer_clk", "hosc", CMU_DEVCLKEN1, 27, 0, 0);
490 static OWL_GATE(hdmi_clk, "hdmi_clk", "hosc", CMU_DEVCLKEN1, 3, 0, 0);
491
492 /* divider clocks */
493 -static OWL_DIVIDER(h_clk, "h_clk", "ahbprevdiv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
494 +static OWL_DIVIDER(h_clk, "h_clk", "ahbprediv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
495 static OWL_DIVIDER(rmii_ref_clk, "rmii_ref_clk", "ethernet_pll_clk", CMU_ETHERNETPLL, 1, 1, rmii_ref_div_table, 0, 0);
496
497 /* factor clocks */
498 diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
499 index 6e5d635f030f4..45420b514149f 100644
500 --- a/drivers/clk/bcm/clk-bcm2835.c
501 +++ b/drivers/clk/bcm/clk-bcm2835.c
502 @@ -314,6 +314,7 @@ struct bcm2835_cprman {
503 struct device *dev;
504 void __iomem *regs;
505 spinlock_t regs_lock; /* spinlock for all clocks */
506 + unsigned int soc;
507
508 /*
509 * Real names of cprman clock parents looked up through
510 @@ -525,6 +526,20 @@ static int bcm2835_pll_is_on(struct clk_hw *hw)
511 A2W_PLL_CTRL_PRST_DISABLE;
512 }
513
514 +static u32 bcm2835_pll_get_prediv_mask(struct bcm2835_cprman *cprman,
515 + const struct bcm2835_pll_data *data)
516 +{
517 + /*
518 + * On BCM2711 there isn't a pre-divisor available in the PLL feedback
519 + * loop. Bits 13:14 of ANA1 (PLLA,PLLB,PLLC,PLLD) have been re-purposed
520 + * for to for VCO RANGE bits.
521 + */
522 + if (cprman->soc & SOC_BCM2711)
523 + return 0;
524 +
525 + return data->ana->fb_prediv_mask;
526 +}
527 +
528 static void bcm2835_pll_choose_ndiv_and_fdiv(unsigned long rate,
529 unsigned long parent_rate,
530 u32 *ndiv, u32 *fdiv)
531 @@ -582,7 +597,7 @@ static unsigned long bcm2835_pll_get_rate(struct clk_hw *hw,
532 ndiv = (a2wctrl & A2W_PLL_CTRL_NDIV_MASK) >> A2W_PLL_CTRL_NDIV_SHIFT;
533 pdiv = (a2wctrl & A2W_PLL_CTRL_PDIV_MASK) >> A2W_PLL_CTRL_PDIV_SHIFT;
534 using_prediv = cprman_read(cprman, data->ana_reg_base + 4) &
535 - data->ana->fb_prediv_mask;
536 + bcm2835_pll_get_prediv_mask(cprman, data);
537
538 if (using_prediv) {
539 ndiv *= 2;
540 @@ -665,6 +680,7 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
541 struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
542 struct bcm2835_cprman *cprman = pll->cprman;
543 const struct bcm2835_pll_data *data = pll->data;
544 + u32 prediv_mask = bcm2835_pll_get_prediv_mask(cprman, data);
545 bool was_using_prediv, use_fb_prediv, do_ana_setup_first;
546 u32 ndiv, fdiv, a2w_ctl;
547 u32 ana[4];
548 @@ -682,7 +698,7 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
549 for (i = 3; i >= 0; i--)
550 ana[i] = cprman_read(cprman, data->ana_reg_base + i * 4);
551
552 - was_using_prediv = ana[1] & data->ana->fb_prediv_mask;
553 + was_using_prediv = ana[1] & prediv_mask;
554
555 ana[0] &= ~data->ana->mask0;
556 ana[0] |= data->ana->set0;
557 @@ -692,10 +708,10 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
558 ana[3] |= data->ana->set3;
559
560 if (was_using_prediv && !use_fb_prediv) {
561 - ana[1] &= ~data->ana->fb_prediv_mask;
562 + ana[1] &= ~prediv_mask;
563 do_ana_setup_first = true;
564 } else if (!was_using_prediv && use_fb_prediv) {
565 - ana[1] |= data->ana->fb_prediv_mask;
566 + ana[1] |= prediv_mask;
567 do_ana_setup_first = false;
568 } else {
569 do_ana_setup_first = true;
570 @@ -2234,6 +2250,7 @@ static int bcm2835_clk_probe(struct platform_device *pdev)
571 platform_set_drvdata(pdev, cprman);
572
573 cprman->onecell.num = asize;
574 + cprman->soc = pdata->soc;
575 hws = cprman->onecell.hws;
576
577 for (i = 0; i < asize; i++) {
578 diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
579 index 055318f979915..a69f53e435ed5 100644
580 --- a/drivers/clk/qcom/clk-alpha-pll.c
581 +++ b/drivers/clk/qcom/clk-alpha-pll.c
582 @@ -55,7 +55,6 @@
583 #define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS])
584 #define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE])
585 #define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC])
586 -#define PLL_CAL_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_CAL_VAL])
587
588 const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
589 [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
590 @@ -114,7 +113,6 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
591 [PLL_OFF_STATUS] = 0x30,
592 [PLL_OFF_OPMODE] = 0x38,
593 [PLL_OFF_ALPHA_VAL] = 0x40,
594 - [PLL_OFF_CAL_VAL] = 0x44,
595 },
596 };
597 EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
598 diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
599 index bf5730832ef3d..c6fb57cd576f5 100644
600 --- a/drivers/clk/qcom/gcc-sdm660.c
601 +++ b/drivers/clk/qcom/gcc-sdm660.c
602 @@ -1715,6 +1715,9 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = {
603
604 static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = {
605 .halt_reg = 0x8a004,
606 + .halt_check = BRANCH_HALT,
607 + .hwcg_reg = 0x8a004,
608 + .hwcg_bit = 1,
609 .clkr = {
610 .enable_reg = 0x8a004,
611 .enable_mask = BIT(0),
612 diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
613 index fad42897a7a7f..ee908fbfeab17 100644
614 --- a/drivers/clk/qcom/gcc-sm8150.c
615 +++ b/drivers/clk/qcom/gcc-sm8150.c
616 @@ -1616,6 +1616,7 @@ static struct clk_branch gcc_gpu_cfg_ahb_clk = {
617 };
618
619 static struct clk_branch gcc_gpu_gpll0_clk_src = {
620 + .halt_check = BRANCH_HALT_SKIP,
621 .clkr = {
622 .enable_reg = 0x52004,
623 .enable_mask = BIT(15),
624 @@ -1631,13 +1632,14 @@ static struct clk_branch gcc_gpu_gpll0_clk_src = {
625 };
626
627 static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
628 + .halt_check = BRANCH_HALT_SKIP,
629 .clkr = {
630 .enable_reg = 0x52004,
631 .enable_mask = BIT(16),
632 .hw.init = &(struct clk_init_data){
633 .name = "gcc_gpu_gpll0_div_clk_src",
634 .parent_hws = (const struct clk_hw *[]){
635 - &gcc_gpu_gpll0_clk_src.clkr.hw },
636 + &gpll0_out_even.clkr.hw },
637 .num_parents = 1,
638 .flags = CLK_SET_RATE_PARENT,
639 .ops = &clk_branch2_ops,
640 @@ -1728,6 +1730,7 @@ static struct clk_branch gcc_npu_cfg_ahb_clk = {
641 };
642
643 static struct clk_branch gcc_npu_gpll0_clk_src = {
644 + .halt_check = BRANCH_HALT_SKIP,
645 .clkr = {
646 .enable_reg = 0x52004,
647 .enable_mask = BIT(18),
648 @@ -1743,13 +1746,14 @@ static struct clk_branch gcc_npu_gpll0_clk_src = {
649 };
650
651 static struct clk_branch gcc_npu_gpll0_div_clk_src = {
652 + .halt_check = BRANCH_HALT_SKIP,
653 .clkr = {
654 .enable_reg = 0x52004,
655 .enable_mask = BIT(19),
656 .hw.init = &(struct clk_init_data){
657 .name = "gcc_npu_gpll0_div_clk_src",
658 .parent_hws = (const struct clk_hw *[]){
659 - &gcc_npu_gpll0_clk_src.clkr.hw },
660 + &gpll0_out_even.clkr.hw },
661 .num_parents = 1,
662 .flags = CLK_SET_RATE_PARENT,
663 .ops = &clk_branch2_ops,
664 diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c
665 index c84d5bab7ac28..b95483bb6a5ec 100644
666 --- a/drivers/clk/sirf/clk-atlas6.c
667 +++ b/drivers/clk/sirf/clk-atlas6.c
668 @@ -135,7 +135,7 @@ static void __init atlas6_clk_init(struct device_node *np)
669
670 for (i = pll1; i < maxclk; i++) {
671 atlas6_clks[i] = clk_register(NULL, atlas6_clk_hw_array[i]);
672 - BUG_ON(!atlas6_clks[i]);
673 + BUG_ON(IS_ERR(atlas6_clks[i]));
674 }
675 clk_register_clkdev(atlas6_clks[cpu], NULL, "cpu");
676 clk_register_clkdev(atlas6_clks[io], NULL, "io");
677 diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
678 index 4ce9c2b4544a2..fdd994ee55e22 100644
679 --- a/drivers/crypto/caam/caamalg.c
680 +++ b/drivers/crypto/caam/caamalg.c
681 @@ -818,12 +818,6 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
682 return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
683 }
684
685 -static int arc4_skcipher_setkey(struct crypto_skcipher *skcipher,
686 - const u8 *key, unsigned int keylen)
687 -{
688 - return skcipher_setkey(skcipher, key, keylen, 0);
689 -}
690 -
691 static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
692 const u8 *key, unsigned int keylen)
693 {
694 @@ -2058,21 +2052,6 @@ static struct caam_skcipher_alg driver_algs[] = {
695 },
696 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
697 },
698 - {
699 - .skcipher = {
700 - .base = {
701 - .cra_name = "ecb(arc4)",
702 - .cra_driver_name = "ecb-arc4-caam",
703 - .cra_blocksize = ARC4_BLOCK_SIZE,
704 - },
705 - .setkey = arc4_skcipher_setkey,
706 - .encrypt = skcipher_encrypt,
707 - .decrypt = skcipher_decrypt,
708 - .min_keysize = ARC4_MIN_KEY_SIZE,
709 - .max_keysize = ARC4_MAX_KEY_SIZE,
710 - },
711 - .caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB,
712 - },
713 };
714
715 static struct caam_aead_alg driver_aeads[] = {
716 @@ -3533,7 +3512,6 @@ int caam_algapi_init(struct device *ctrldev)
717 struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
718 int i = 0, err = 0;
719 u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
720 - u32 arc4_inst;
721 unsigned int md_limit = SHA512_DIGEST_SIZE;
722 bool registered = false, gcm_support;
723
724 @@ -3553,8 +3531,6 @@ int caam_algapi_init(struct device *ctrldev)
725 CHA_ID_LS_DES_SHIFT;
726 aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
727 md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
728 - arc4_inst = (cha_inst & CHA_ID_LS_ARC4_MASK) >>
729 - CHA_ID_LS_ARC4_SHIFT;
730 ccha_inst = 0;
731 ptha_inst = 0;
732
733 @@ -3575,7 +3551,6 @@ int caam_algapi_init(struct device *ctrldev)
734 md_inst = mdha & CHA_VER_NUM_MASK;
735 ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
736 ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
737 - arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK;
738
739 gcm_support = aesa & CHA_VER_MISC_AES_GCM;
740 }
741 @@ -3598,10 +3573,6 @@ int caam_algapi_init(struct device *ctrldev)
742 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
743 continue;
744
745 - /* Skip ARC4 algorithms if not supported by device */
746 - if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4)
747 - continue;
748 -
749 /*
750 * Check support for AES modes not available
751 * on LP devices.
752 diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
753 index 60e2a54c19f11..c3c22a8de4c00 100644
754 --- a/drivers/crypto/caam/compat.h
755 +++ b/drivers/crypto/caam/compat.h
756 @@ -43,7 +43,6 @@
757 #include <crypto/akcipher.h>
758 #include <crypto/scatterwalk.h>
759 #include <crypto/skcipher.h>
760 -#include <crypto/arc4.h>
761 #include <crypto/internal/skcipher.h>
762 #include <crypto/internal/hash.h>
763 #include <crypto/internal/rsa.h>
764 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
765 index 47f529ce280ae..2718396083ee4 100644
766 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
767 +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
768 @@ -85,12 +85,77 @@ static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, stru
769 return disp_clk_threshold;
770 }
771
772 -static void ramp_up_dispclk_with_dpp(struct clk_mgr_internal *clk_mgr, struct dc *dc, struct dc_clocks *new_clocks)
773 +static void ramp_up_dispclk_with_dpp(
774 + struct clk_mgr_internal *clk_mgr,
775 + struct dc *dc,
776 + struct dc_clocks *new_clocks,
777 + bool safe_to_lower)
778 {
779 int i;
780 int dispclk_to_dpp_threshold = rv1_determine_dppclk_threshold(clk_mgr, new_clocks);
781 bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
782
783 + /* this function is to change dispclk, dppclk and dprefclk according to
784 + * bandwidth requirement. Its call stack is rv1_update_clocks -->
785 + * update_clocks --> dcn10_prepare_bandwidth / dcn10_optimize_bandwidth
786 + * --> prepare_bandwidth / optimize_bandwidth. before change dcn hw,
787 + * prepare_bandwidth will be called first to allow enough clock,
788 + * watermark for change, after end of dcn hw change, optimize_bandwidth
789 + * is executed to lower clock to save power for new dcn hw settings.
790 + *
791 + * below is sequence of commit_planes_for_stream:
792 + *
793 + * step 1: prepare_bandwidth - raise clock to have enough bandwidth
794 + * step 2: lock_doublebuffer_enable
795 + * step 3: pipe_control_lock(true) - make dchubp register change will
796 + * not take effect right way
797 + * step 4: apply_ctx_for_surface - program dchubp
798 + * step 5: pipe_control_lock(false) - dchubp register change take effect
799 + * step 6: optimize_bandwidth --> dc_post_update_surfaces_to_stream
800 + * for full_date, optimize clock to save power
801 + *
802 + * at end of step 1, dcn clocks (dprefclk, dispclk, dppclk) may be
803 + * changed for new dchubp configuration. but real dcn hub dchubps are
804 + * still running with old configuration until end of step 5. this need
805 + * clocks settings at step 1 should not less than that before step 1.
806 + * this is checked by two conditions: 1. if (should_set_clock(safe_to_lower
807 + * , new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) ||
808 + * new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz)
809 + * 2. request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz
810 + *
811 + * the second condition is based on new dchubp configuration. dppclk
812 + * for new dchubp may be different from dppclk before step 1.
813 + * for example, before step 1, dchubps are as below:
814 + * pipe 0: recout=(0,40,1920,980) viewport=(0,0,1920,979)
815 + * pipe 1: recout=(0,0,1920,1080) viewport=(0,0,1920,1080)
816 + * for dppclk for pipe0 need dppclk = dispclk
817 + *
818 + * new dchubp pipe split configuration:
819 + * pipe 0: recout=(0,0,960,1080) viewport=(0,0,960,1080)
820 + * pipe 1: recout=(960,0,960,1080) viewport=(960,0,960,1080)
821 + * dppclk only needs dppclk = dispclk /2.
822 + *
823 + * dispclk, dppclk are not lock by otg master lock. they take effect
824 + * after step 1. during this transition, dispclk are the same, but
825 + * dppclk is changed to half of previous clock for old dchubp
826 + * configuration between step 1 and step 6. This may cause p-state
827 + * warning intermittently.
828 + *
829 + * for new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz, we
830 + * need make sure dppclk are not changed to less between step 1 and 6.
831 + * for new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz,
832 + * new display clock is raised, but we do not know ratio of
833 + * new_clocks->dispclk_khz and clk_mgr_base->clks.dispclk_khz,
834 + * new_clocks->dispclk_khz /2 does not guarantee equal or higher than
835 + * old dppclk. we could ignore power saving different between
836 + * dppclk = displck and dppclk = dispclk / 2 between step 1 and step 6.
837 + * as long as safe_to_lower = false, set dpclk = dispclk to simplify
838 + * condition check.
839 + * todo: review this change for other asic.
840 + **/
841 + if (!safe_to_lower)
842 + request_dpp_div = false;
843 +
844 /* set disp clk to dpp clk threshold */
845
846 clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_to_dpp_threshold);
847 @@ -206,7 +271,7 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
848 /* program dispclk on = as a w/a for sleep resume clock ramping issues */
849 if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)
850 || new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) {
851 - ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks);
852 + ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks, safe_to_lower);
853 clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
854 send_request_to_lower = true;
855 }
856 diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
857 index 2e71ca3e19f58..09a3d8ae44491 100644
858 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
859 +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
860 @@ -2725,7 +2725,10 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
861
862 static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
863 {
864 - return ci_is_smc_ram_running(hwmgr);
865 + return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
866 + CGS_IND_REG__SMC, FEATURE_STATUS,
867 + VOLTAGE_CONTROLLER_ON))
868 + ? true : false;
869 }
870
871 static int ci_smu_init(struct pp_hwmgr *hwmgr)
872 diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
873 index 006d6087700fb..2de1eebe591f9 100644
874 --- a/drivers/gpu/drm/drm_dp_mst_topology.c
875 +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
876 @@ -3369,11 +3369,11 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
877 {
878 int ret;
879
880 - port = drm_dp_mst_topology_get_port_validated(mgr, port);
881 - if (!port)
882 + if (slots < 0)
883 return false;
884
885 - if (slots < 0)
886 + port = drm_dp_mst_topology_get_port_validated(mgr, port);
887 + if (!port)
888 return false;
889
890 if (port->vcpi.vcpi > 0) {
891 @@ -3389,6 +3389,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
892 if (ret) {
893 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
894 DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
895 + drm_dp_mst_topology_put_port(port);
896 goto out;
897 }
898 DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
899 diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
900 index d00ea384dcbfe..58f5dc2f6dd52 100644
901 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
902 +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
903 @@ -121,6 +121,12 @@ static const struct dmi_system_id orientation_data[] = {
904 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"),
905 },
906 .driver_data = (void *)&lcd800x1280_rightside_up,
907 + }, { /* Asus T103HAF */
908 + .matches = {
909 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
910 + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
911 + },
912 + .driver_data = (void *)&lcd800x1280_rightside_up,
913 }, { /* GPD MicroPC (generic strings, also match on bios date) */
914 .matches = {
915 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
916 diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
917 index 9af5a08d5490f..d6629fc869f3f 100644
918 --- a/drivers/gpu/drm/imx/imx-ldb.c
919 +++ b/drivers/gpu/drm/imx/imx-ldb.c
920 @@ -302,18 +302,19 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
921 {
922 struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
923 struct imx_ldb *ldb = imx_ldb_ch->ldb;
924 + int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
925 int mux, ret;
926
927 drm_panel_disable(imx_ldb_ch->panel);
928
929 - if (imx_ldb_ch == &ldb->channel[0])
930 + if (imx_ldb_ch == &ldb->channel[0] || dual)
931 ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
932 - else if (imx_ldb_ch == &ldb->channel[1])
933 + if (imx_ldb_ch == &ldb->channel[1] || dual)
934 ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK;
935
936 regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl);
937
938 - if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
939 + if (dual) {
940 clk_disable_unprepare(ldb->clk[0]);
941 clk_disable_unprepare(ldb->clk[1]);
942 }
943 diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
944 index 77c3a3855c682..c05e013bb8e3d 100644
945 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c
946 +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
947 @@ -46,7 +46,7 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
948 sg_free_table(&bo->sgts[i]);
949 }
950 }
951 - kfree(bo->sgts);
952 + kvfree(bo->sgts);
953 }
954
955 drm_gem_shmem_free_object(obj);
956 diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
957 index 5d75f8cf64776..3dc9b30a64b01 100644
958 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
959 +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
960 @@ -486,7 +486,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
961 pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
962 sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
963 if (!pages) {
964 - kfree(bo->sgts);
965 + kvfree(bo->sgts);
966 bo->sgts = NULL;
967 mutex_unlock(&bo->base.pages_lock);
968 ret = -ENOMEM;
969 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
970 index f47d5710cc951..33b1519887474 100644
971 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
972 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
973 @@ -2666,7 +2666,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
974 ++i;
975 }
976
977 - if (i != unit) {
978 + if (&con->head == &dev_priv->dev->mode_config.connector_list) {
979 DRM_ERROR("Could not find initial display unit.\n");
980 ret = -EINVAL;
981 goto out_unlock;
982 @@ -2690,13 +2690,13 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
983 break;
984 }
985
986 - if (mode->type & DRM_MODE_TYPE_PREFERRED)
987 - *p_mode = mode;
988 - else {
989 + if (&mode->head == &con->modes) {
990 WARN_ONCE(true, "Could not find initial preferred mode.\n");
991 *p_mode = list_first_entry(&con->modes,
992 struct drm_display_mode,
993 head);
994 + } else {
995 + *p_mode = mode;
996 }
997
998 out_unlock:
999 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
1000 index 5702219ec38f6..7b54c1f56208f 100644
1001 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
1002 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
1003 @@ -81,7 +81,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
1004 struct vmw_legacy_display_unit *entry;
1005 struct drm_framebuffer *fb = NULL;
1006 struct drm_crtc *crtc = NULL;
1007 - int i = 0;
1008 + int i;
1009
1010 /* If there is no display topology the host just assumes
1011 * that the guest will set the same layout as the host.
1012 @@ -92,12 +92,11 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
1013 crtc = &entry->base.crtc;
1014 w = max(w, crtc->x + crtc->mode.hdisplay);
1015 h = max(h, crtc->y + crtc->mode.vdisplay);
1016 - i++;
1017 }
1018
1019 if (crtc == NULL)
1020 return 0;
1021 - fb = entry->base.crtc.primary->state->fb;
1022 + fb = crtc->primary->state->fb;
1023
1024 return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
1025 fb->format->cpp[0] * 8,
1026 diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
1027 index eeca50d9a1ee4..aa1d4b6d278f7 100644
1028 --- a/drivers/gpu/ipu-v3/ipu-image-convert.c
1029 +++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
1030 @@ -137,6 +137,17 @@ struct ipu_image_convert_ctx;
1031 struct ipu_image_convert_chan;
1032 struct ipu_image_convert_priv;
1033
1034 +enum eof_irq_mask {
1035 + EOF_IRQ_IN = BIT(0),
1036 + EOF_IRQ_ROT_IN = BIT(1),
1037 + EOF_IRQ_OUT = BIT(2),
1038 + EOF_IRQ_ROT_OUT = BIT(3),
1039 +};
1040 +
1041 +#define EOF_IRQ_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT)
1042 +#define EOF_IRQ_ROT_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT | \
1043 + EOF_IRQ_ROT_IN | EOF_IRQ_ROT_OUT)
1044 +
1045 struct ipu_image_convert_ctx {
1046 struct ipu_image_convert_chan *chan;
1047
1048 @@ -173,6 +184,9 @@ struct ipu_image_convert_ctx {
1049 /* where to place converted tile in dest image */
1050 unsigned int out_tile_map[MAX_TILES];
1051
1052 + /* mask of completed EOF irqs at every tile conversion */
1053 + enum eof_irq_mask eof_mask;
1054 +
1055 struct list_head list;
1056 };
1057
1058 @@ -189,6 +203,8 @@ struct ipu_image_convert_chan {
1059 struct ipuv3_channel *rotation_out_chan;
1060
1061 /* the IPU end-of-frame irqs */
1062 + int in_eof_irq;
1063 + int rot_in_eof_irq;
1064 int out_eof_irq;
1065 int rot_out_eof_irq;
1066
1067 @@ -1380,6 +1396,9 @@ static int convert_start(struct ipu_image_convert_run *run, unsigned int tile)
1068 dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n",
1069 __func__, chan->ic_task, ctx, run, tile, dst_tile);
1070
1071 + /* clear EOF irq mask */
1072 + ctx->eof_mask = 0;
1073 +
1074 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1075 /* swap width/height for resizer */
1076 dest_width = d_image->tile[dst_tile].height;
1077 @@ -1615,7 +1634,7 @@ static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx)
1078 }
1079
1080 /* hold irqlock when calling */
1081 -static irqreturn_t do_irq(struct ipu_image_convert_run *run)
1082 +static irqreturn_t do_tile_complete(struct ipu_image_convert_run *run)
1083 {
1084 struct ipu_image_convert_ctx *ctx = run->ctx;
1085 struct ipu_image_convert_chan *chan = ctx->chan;
1086 @@ -1700,6 +1719,7 @@ static irqreturn_t do_irq(struct ipu_image_convert_run *run)
1087 ctx->cur_buf_num ^= 1;
1088 }
1089
1090 + ctx->eof_mask = 0; /* clear EOF irq mask for next tile */
1091 ctx->next_tile++;
1092 return IRQ_HANDLED;
1093 done:
1094 @@ -1709,13 +1729,15 @@ done:
1095 return IRQ_WAKE_THREAD;
1096 }
1097
1098 -static irqreturn_t norotate_irq(int irq, void *data)
1099 +static irqreturn_t eof_irq(int irq, void *data)
1100 {
1101 struct ipu_image_convert_chan *chan = data;
1102 + struct ipu_image_convert_priv *priv = chan->priv;
1103 struct ipu_image_convert_ctx *ctx;
1104 struct ipu_image_convert_run *run;
1105 + irqreturn_t ret = IRQ_HANDLED;
1106 + bool tile_complete = false;
1107 unsigned long flags;
1108 - irqreturn_t ret;
1109
1110 spin_lock_irqsave(&chan->irqlock, flags);
1111
1112 @@ -1728,46 +1750,33 @@ static irqreturn_t norotate_irq(int irq, void *data)
1113
1114 ctx = run->ctx;
1115
1116 - if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1117 - /* this is a rotation operation, just ignore */
1118 - spin_unlock_irqrestore(&chan->irqlock, flags);
1119 - return IRQ_HANDLED;
1120 - }
1121 -
1122 - ret = do_irq(run);
1123 -out:
1124 - spin_unlock_irqrestore(&chan->irqlock, flags);
1125 - return ret;
1126 -}
1127 -
1128 -static irqreturn_t rotate_irq(int irq, void *data)
1129 -{
1130 - struct ipu_image_convert_chan *chan = data;
1131 - struct ipu_image_convert_priv *priv = chan->priv;
1132 - struct ipu_image_convert_ctx *ctx;
1133 - struct ipu_image_convert_run *run;
1134 - unsigned long flags;
1135 - irqreturn_t ret;
1136 -
1137 - spin_lock_irqsave(&chan->irqlock, flags);
1138 -
1139 - /* get current run and its context */
1140 - run = chan->current_run;
1141 - if (!run) {
1142 + if (irq == chan->in_eof_irq) {
1143 + ctx->eof_mask |= EOF_IRQ_IN;
1144 + } else if (irq == chan->out_eof_irq) {
1145 + ctx->eof_mask |= EOF_IRQ_OUT;
1146 + } else if (irq == chan->rot_in_eof_irq ||
1147 + irq == chan->rot_out_eof_irq) {
1148 + if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
1149 + /* this was NOT a rotation op, shouldn't happen */
1150 + dev_err(priv->ipu->dev,
1151 + "Unexpected rotation interrupt\n");
1152 + goto out;
1153 + }
1154 + ctx->eof_mask |= (irq == chan->rot_in_eof_irq) ?
1155 + EOF_IRQ_ROT_IN : EOF_IRQ_ROT_OUT;
1156 + } else {
1157 + dev_err(priv->ipu->dev, "Received unknown irq %d\n", irq);
1158 ret = IRQ_NONE;
1159 goto out;
1160 }
1161
1162 - ctx = run->ctx;
1163 -
1164 - if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
1165 - /* this was NOT a rotation operation, shouldn't happen */
1166 - dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
1167 - spin_unlock_irqrestore(&chan->irqlock, flags);
1168 - return IRQ_HANDLED;
1169 - }
1170 + if (ipu_rot_mode_is_irt(ctx->rot_mode))
1171 + tile_complete = (ctx->eof_mask == EOF_IRQ_ROT_COMPLETE);
1172 + else
1173 + tile_complete = (ctx->eof_mask == EOF_IRQ_COMPLETE);
1174
1175 - ret = do_irq(run);
1176 + if (tile_complete)
1177 + ret = do_tile_complete(run);
1178 out:
1179 spin_unlock_irqrestore(&chan->irqlock, flags);
1180 return ret;
1181 @@ -1801,6 +1810,10 @@ static void force_abort(struct ipu_image_convert_ctx *ctx)
1182
1183 static void release_ipu_resources(struct ipu_image_convert_chan *chan)
1184 {
1185 + if (chan->in_eof_irq >= 0)
1186 + free_irq(chan->in_eof_irq, chan);
1187 + if (chan->rot_in_eof_irq >= 0)
1188 + free_irq(chan->rot_in_eof_irq, chan);
1189 if (chan->out_eof_irq >= 0)
1190 free_irq(chan->out_eof_irq, chan);
1191 if (chan->rot_out_eof_irq >= 0)
1192 @@ -1819,7 +1832,27 @@ static void release_ipu_resources(struct ipu_image_convert_chan *chan)
1193
1194 chan->in_chan = chan->out_chan = chan->rotation_in_chan =
1195 chan->rotation_out_chan = NULL;
1196 - chan->out_eof_irq = chan->rot_out_eof_irq = -1;
1197 + chan->in_eof_irq = -1;
1198 + chan->rot_in_eof_irq = -1;
1199 + chan->out_eof_irq = -1;
1200 + chan->rot_out_eof_irq = -1;
1201 +}
1202 +
1203 +static int get_eof_irq(struct ipu_image_convert_chan *chan,
1204 + struct ipuv3_channel *channel)
1205 +{
1206 + struct ipu_image_convert_priv *priv = chan->priv;
1207 + int ret, irq;
1208 +
1209 + irq = ipu_idmac_channel_irq(priv->ipu, channel, IPU_IRQ_EOF);
1210 +
1211 + ret = request_threaded_irq(irq, eof_irq, do_bh, 0, "ipu-ic", chan);
1212 + if (ret < 0) {
1213 + dev_err(priv->ipu->dev, "could not acquire irq %d\n", irq);
1214 + return ret;
1215 + }
1216 +
1217 + return irq;
1218 }
1219
1220 static int get_ipu_resources(struct ipu_image_convert_chan *chan)
1221 @@ -1855,31 +1888,33 @@ static int get_ipu_resources(struct ipu_image_convert_chan *chan)
1222 }
1223
1224 /* acquire the EOF interrupts */
1225 - chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
1226 - chan->out_chan,
1227 - IPU_IRQ_EOF);
1228 + ret = get_eof_irq(chan, chan->in_chan);
1229 + if (ret < 0) {
1230 + chan->in_eof_irq = -1;
1231 + goto err;
1232 + }
1233 + chan->in_eof_irq = ret;
1234
1235 - ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
1236 - 0, "ipu-ic", chan);
1237 + ret = get_eof_irq(chan, chan->rotation_in_chan);
1238 if (ret < 0) {
1239 - dev_err(priv->ipu->dev, "could not acquire irq %d\n",
1240 - chan->out_eof_irq);
1241 - chan->out_eof_irq = -1;
1242 + chan->rot_in_eof_irq = -1;
1243 goto err;
1244 }
1245 + chan->rot_in_eof_irq = ret;
1246
1247 - chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
1248 - chan->rotation_out_chan,
1249 - IPU_IRQ_EOF);
1250 + ret = get_eof_irq(chan, chan->out_chan);
1251 + if (ret < 0) {
1252 + chan->out_eof_irq = -1;
1253 + goto err;
1254 + }
1255 + chan->out_eof_irq = ret;
1256
1257 - ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
1258 - 0, "ipu-ic", chan);
1259 + ret = get_eof_irq(chan, chan->rotation_out_chan);
1260 if (ret < 0) {
1261 - dev_err(priv->ipu->dev, "could not acquire irq %d\n",
1262 - chan->rot_out_eof_irq);
1263 chan->rot_out_eof_irq = -1;
1264 goto err;
1265 }
1266 + chan->rot_out_eof_irq = ret;
1267
1268 return 0;
1269 err:
1270 @@ -2458,6 +2493,8 @@ int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
1271 chan->ic_task = i;
1272 chan->priv = priv;
1273 chan->dma_ch = &image_convert_dma_chan[i];
1274 + chan->in_eof_irq = -1;
1275 + chan->rot_in_eof_irq = -1;
1276 chan->out_eof_irq = -1;
1277 chan->rot_out_eof_irq = -1;
1278
1279 diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
1280 index 03475f1799730..dd9661c11782a 100644
1281 --- a/drivers/i2c/busses/i2c-bcm-iproc.c
1282 +++ b/drivers/i2c/busses/i2c-bcm-iproc.c
1283 @@ -1037,7 +1037,7 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
1284 if (!iproc_i2c->slave)
1285 return -EINVAL;
1286
1287 - iproc_i2c->slave = NULL;
1288 + disable_irq(iproc_i2c->irq);
1289
1290 /* disable all slave interrupts */
1291 tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
1292 @@ -1050,6 +1050,17 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
1293 tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
1294 iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp);
1295
1296 + /* flush TX/RX FIFOs */
1297 + tmp = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT));
1298 + iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, tmp);
1299 +
1300 + /* clear all pending slave interrupts */
1301 + iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE);
1302 +
1303 + iproc_i2c->slave = NULL;
1304 +
1305 + enable_irq(iproc_i2c->irq);
1306 +
1307 return 0;
1308 }
1309
1310 diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
1311 index 36af8fdb66586..0b90aa0318df3 100644
1312 --- a/drivers/i2c/busses/i2c-rcar.c
1313 +++ b/drivers/i2c/busses/i2c-rcar.c
1314 @@ -580,13 +580,14 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
1315 rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR);
1316 }
1317
1318 - rcar_i2c_write(priv, ICSSR, ~SAR & 0xff);
1319 + /* Clear SSR, too, because of old STOPs to other clients than us */
1320 + rcar_i2c_write(priv, ICSSR, ~(SAR | SSR) & 0xff);
1321 }
1322
1323 /* master sent stop */
1324 if (ssr_filtered & SSR) {
1325 i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value);
1326 - rcar_i2c_write(priv, ICSIER, SAR | SSR);
1327 + rcar_i2c_write(priv, ICSIER, SAR);
1328 rcar_i2c_write(priv, ICSSR, ~SSR & 0xff);
1329 }
1330
1331 @@ -850,7 +851,7 @@ static int rcar_reg_slave(struct i2c_client *slave)
1332 priv->slave = slave;
1333 rcar_i2c_write(priv, ICSAR, slave->addr);
1334 rcar_i2c_write(priv, ICSSR, 0);
1335 - rcar_i2c_write(priv, ICSIER, SAR | SSR);
1336 + rcar_i2c_write(priv, ICSIER, SAR);
1337 rcar_i2c_write(priv, ICSCR, SIE | SDBS);
1338
1339 return 0;
1340 @@ -862,12 +863,14 @@ static int rcar_unreg_slave(struct i2c_client *slave)
1341
1342 WARN_ON(!priv->slave);
1343
1344 - /* disable irqs and ensure none is running before clearing ptr */
1345 + /* ensure no irq is running before clearing ptr */
1346 + disable_irq(priv->irq);
1347 rcar_i2c_write(priv, ICSIER, 0);
1348 - rcar_i2c_write(priv, ICSCR, 0);
1349 + rcar_i2c_write(priv, ICSSR, 0);
1350 + enable_irq(priv->irq);
1351 + rcar_i2c_write(priv, ICSCR, SDBS);
1352 rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
1353
1354 - synchronize_irq(priv->irq);
1355 priv->slave = NULL;
1356
1357 pm_runtime_put(rcar_i2c_priv_to_dev(priv));
1358 diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
1359 index 2d897e64c6a9e..424922cad1e39 100644
1360 --- a/drivers/iio/dac/ad5592r-base.c
1361 +++ b/drivers/iio/dac/ad5592r-base.c
1362 @@ -416,7 +416,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
1363 s64 tmp = *val * (3767897513LL / 25LL);
1364 *val = div_s64_rem(tmp, 1000000000LL, val2);
1365
1366 - ret = IIO_VAL_INT_PLUS_MICRO;
1367 + return IIO_VAL_INT_PLUS_MICRO;
1368 } else {
1369 int mult;
1370
1371 @@ -447,7 +447,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
1372 ret = IIO_VAL_INT;
1373 break;
1374 default:
1375 - ret = -EINVAL;
1376 + return -EINVAL;
1377 }
1378
1379 unlock:
1380 diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
1381 index 11210bf7fd61b..f454d63008d69 100644
1382 --- a/drivers/infiniband/core/counters.c
1383 +++ b/drivers/infiniband/core/counters.c
1384 @@ -284,7 +284,7 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port)
1385 struct rdma_counter *counter;
1386 int ret;
1387
1388 - if (!qp->res.valid)
1389 + if (!qp->res.valid || rdma_is_kernel_res(&qp->res))
1390 return 0;
1391
1392 if (!rdma_is_port_valid(dev, port))
1393 @@ -487,7 +487,7 @@ int rdma_counter_bind_qpn(struct ib_device *dev, u8 port,
1394 goto err;
1395 }
1396
1397 - if (counter->res.task != qp->res.task) {
1398 + if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res)) {
1399 ret = -EINVAL;
1400 goto err_task;
1401 }
1402 diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
1403 index e2ddcb0dc4ee3..c398d1a64614c 100644
1404 --- a/drivers/infiniband/core/uverbs_cmd.c
1405 +++ b/drivers/infiniband/core/uverbs_cmd.c
1406 @@ -757,6 +757,7 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
1407 mr->uobject = uobj;
1408 atomic_inc(&pd->usecnt);
1409 mr->res.type = RDMA_RESTRACK_MR;
1410 + mr->iova = cmd.hca_va;
1411 rdma_restrack_uadd(&mr->res);
1412
1413 uobj->object = mr;
1414 @@ -847,6 +848,9 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
1415 atomic_dec(&old_pd->usecnt);
1416 }
1417
1418 + if (cmd.flags & IB_MR_REREG_TRANS)
1419 + mr->iova = cmd.hca_va;
1420 +
1421 memset(&resp, 0, sizeof(resp));
1422 resp.lkey = mr->lkey;
1423 resp.rkey = mr->rkey;
1424 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
1425 index 35c284af574da..dcb58cef336d9 100644
1426 --- a/drivers/infiniband/hw/cxgb4/mem.c
1427 +++ b/drivers/infiniband/hw/cxgb4/mem.c
1428 @@ -399,7 +399,6 @@ static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
1429 mmid = stag >> 8;
1430 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
1431 mhp->ibmr.length = mhp->attr.len;
1432 - mhp->ibmr.iova = mhp->attr.va_fbo;
1433 mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12);
1434 pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
1435 return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
1436 diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
1437 index 6ae503cfc5264..9114cb7307692 100644
1438 --- a/drivers/infiniband/hw/mlx4/mr.c
1439 +++ b/drivers/infiniband/hw/mlx4/mr.c
1440 @@ -439,7 +439,6 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1441
1442 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
1443 mr->ibmr.length = length;
1444 - mr->ibmr.iova = virt_addr;
1445 mr->ibmr.page_size = 1U << shift;
1446
1447 return &mr->ibmr;
1448 diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
1449 index 0e5f27caf2b2d..50a3557386090 100644
1450 --- a/drivers/infiniband/ulp/ipoib/ipoib.h
1451 +++ b/drivers/infiniband/ulp/ipoib/ipoib.h
1452 @@ -515,7 +515,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev);
1453
1454 int ipoib_ib_dev_open_default(struct net_device *dev);
1455 int ipoib_ib_dev_open(struct net_device *dev);
1456 -int ipoib_ib_dev_stop(struct net_device *dev);
1457 +void ipoib_ib_dev_stop(struct net_device *dev);
1458 void ipoib_ib_dev_up(struct net_device *dev);
1459 void ipoib_ib_dev_down(struct net_device *dev);
1460 int ipoib_ib_dev_stop_default(struct net_device *dev);
1461 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
1462 index da3c5315bbb51..494f413dc3c6c 100644
1463 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
1464 +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
1465 @@ -670,13 +670,12 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
1466 return rc;
1467 }
1468
1469 -static void __ipoib_reap_ah(struct net_device *dev)
1470 +static void ipoib_reap_dead_ahs(struct ipoib_dev_priv *priv)
1471 {
1472 - struct ipoib_dev_priv *priv = ipoib_priv(dev);
1473 struct ipoib_ah *ah, *tah;
1474 unsigned long flags;
1475
1476 - netif_tx_lock_bh(dev);
1477 + netif_tx_lock_bh(priv->dev);
1478 spin_lock_irqsave(&priv->lock, flags);
1479
1480 list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
1481 @@ -687,37 +686,37 @@ static void __ipoib_reap_ah(struct net_device *dev)
1482 }
1483
1484 spin_unlock_irqrestore(&priv->lock, flags);
1485 - netif_tx_unlock_bh(dev);
1486 + netif_tx_unlock_bh(priv->dev);
1487 }
1488
1489 void ipoib_reap_ah(struct work_struct *work)
1490 {
1491 struct ipoib_dev_priv *priv =
1492 container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
1493 - struct net_device *dev = priv->dev;
1494
1495 - __ipoib_reap_ah(dev);
1496 + ipoib_reap_dead_ahs(priv);
1497
1498 if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
1499 queue_delayed_work(priv->wq, &priv->ah_reap_task,
1500 round_jiffies_relative(HZ));
1501 }
1502
1503 -static void ipoib_flush_ah(struct net_device *dev)
1504 +static void ipoib_start_ah_reaper(struct ipoib_dev_priv *priv)
1505 {
1506 - struct ipoib_dev_priv *priv = ipoib_priv(dev);
1507 -
1508 - cancel_delayed_work(&priv->ah_reap_task);
1509 - flush_workqueue(priv->wq);
1510 - ipoib_reap_ah(&priv->ah_reap_task.work);
1511 + clear_bit(IPOIB_STOP_REAPER, &priv->flags);
1512 + queue_delayed_work(priv->wq, &priv->ah_reap_task,
1513 + round_jiffies_relative(HZ));
1514 }
1515
1516 -static void ipoib_stop_ah(struct net_device *dev)
1517 +static void ipoib_stop_ah_reaper(struct ipoib_dev_priv *priv)
1518 {
1519 - struct ipoib_dev_priv *priv = ipoib_priv(dev);
1520 -
1521 set_bit(IPOIB_STOP_REAPER, &priv->flags);
1522 - ipoib_flush_ah(dev);
1523 + cancel_delayed_work(&priv->ah_reap_task);
1524 + /*
1525 + * After ipoib_stop_ah_reaper() we always go through
1526 + * ipoib_reap_dead_ahs() which ensures the work is really stopped and
1527 + * does a final flush out of the dead_ah's list
1528 + */
1529 }
1530
1531 static int recvs_pending(struct net_device *dev)
1532 @@ -846,18 +845,6 @@ timeout:
1533 return 0;
1534 }
1535
1536 -int ipoib_ib_dev_stop(struct net_device *dev)
1537 -{
1538 - struct ipoib_dev_priv *priv = ipoib_priv(dev);
1539 -
1540 - priv->rn_ops->ndo_stop(dev);
1541 -
1542 - clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
1543 - ipoib_flush_ah(dev);
1544 -
1545 - return 0;
1546 -}
1547 -
1548 int ipoib_ib_dev_open_default(struct net_device *dev)
1549 {
1550 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1551 @@ -901,10 +888,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
1552 return -1;
1553 }
1554
1555 - clear_bit(IPOIB_STOP_REAPER, &priv->flags);
1556 - queue_delayed_work(priv->wq, &priv->ah_reap_task,
1557 - round_jiffies_relative(HZ));
1558 -
1559 + ipoib_start_ah_reaper(priv);
1560 if (priv->rn_ops->ndo_open(dev)) {
1561 pr_warn("%s: Failed to open dev\n", dev->name);
1562 goto dev_stop;
1563 @@ -915,13 +899,20 @@ int ipoib_ib_dev_open(struct net_device *dev)
1564 return 0;
1565
1566 dev_stop:
1567 - set_bit(IPOIB_STOP_REAPER, &priv->flags);
1568 - cancel_delayed_work(&priv->ah_reap_task);
1569 - set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
1570 - ipoib_ib_dev_stop(dev);
1571 + ipoib_stop_ah_reaper(priv);
1572 return -1;
1573 }
1574
1575 +void ipoib_ib_dev_stop(struct net_device *dev)
1576 +{
1577 + struct ipoib_dev_priv *priv = ipoib_priv(dev);
1578 +
1579 + priv->rn_ops->ndo_stop(dev);
1580 +
1581 + clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
1582 + ipoib_stop_ah_reaper(priv);
1583 +}
1584 +
1585 void ipoib_pkey_dev_check_presence(struct net_device *dev)
1586 {
1587 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1588 @@ -1232,7 +1223,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
1589 ipoib_mcast_dev_flush(dev);
1590 if (oper_up)
1591 set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
1592 - ipoib_flush_ah(dev);
1593 + ipoib_reap_dead_ahs(priv);
1594 }
1595
1596 if (level >= IPOIB_FLUSH_NORMAL)
1597 @@ -1307,7 +1298,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
1598 * the neighbor garbage collection is stopped and reaped.
1599 * That should all be done now, so make a final ah flush.
1600 */
1601 - ipoib_stop_ah(dev);
1602 + ipoib_reap_dead_ahs(priv);
1603
1604 clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
1605
1606 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
1607 index 4fd095fd63b6f..044bcacad6e48 100644
1608 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
1609 +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
1610 @@ -1979,6 +1979,8 @@ static void ipoib_ndo_uninit(struct net_device *dev)
1611
1612 /* no more works over the priv->wq */
1613 if (priv->wq) {
1614 + /* See ipoib_mcast_carrier_on_task() */
1615 + WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags));
1616 flush_workqueue(priv->wq);
1617 destroy_workqueue(priv->wq);
1618 priv->wq = NULL;
1619 diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
1620 index e99d9bf1a267d..e78c4c7eda34d 100644
1621 --- a/drivers/input/mouse/sentelic.c
1622 +++ b/drivers/input/mouse/sentelic.c
1623 @@ -441,7 +441,7 @@ static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
1624
1625 fsp_reg_write_enable(psmouse, false);
1626
1627 - return count;
1628 + return retval;
1629 }
1630
1631 PSMOUSE_DEFINE_WO_ATTR(setreg, S_IWUSR, NULL, fsp_attr_set_setreg);
1632 diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
1633 index 8e19bfa94121e..a99afb5d9011c 100644
1634 --- a/drivers/iommu/omap-iommu-debug.c
1635 +++ b/drivers/iommu/omap-iommu-debug.c
1636 @@ -98,8 +98,11 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
1637 mutex_lock(&iommu_debug_lock);
1638
1639 bytes = omap_iommu_dump_ctx(obj, p, count);
1640 + if (bytes < 0)
1641 + goto err;
1642 bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes);
1643
1644 +err:
1645 mutex_unlock(&iommu_debug_lock);
1646 kfree(buf);
1647
1648 diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
1649 index 263cf9240b168..7966b19ceba79 100644
1650 --- a/drivers/irqchip/irq-gic-v3-its.c
1651 +++ b/drivers/irqchip/irq-gic-v3-its.c
1652 @@ -2581,6 +2581,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1653 msi_alloc_info_t *info = args;
1654 struct its_device *its_dev = info->scratchpad[0].ptr;
1655 struct its_node *its = its_dev->its;
1656 + struct irq_data *irqd;
1657 irq_hw_number_t hwirq;
1658 int err;
1659 int i;
1660 @@ -2600,7 +2601,9 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1661
1662 irq_domain_set_hwirq_and_chip(domain, virq + i,
1663 hwirq + i, &its_irq_chip, its_dev);
1664 - irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
1665 + irqd = irq_get_irq_data(virq + i);
1666 + irqd_set_single_target(irqd);
1667 + irqd_set_affinity_on_activate(irqd);
1668 pr_debug("ID:%d pID:%d vID:%d\n",
1669 (int)(hwirq + i - its_dev->event_map.lpi_base),
1670 (int)(hwirq + i), virq + i);
1671 diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
1672 index 3d2b63585da95..217c838a1b405 100644
1673 --- a/drivers/md/bcache/bcache.h
1674 +++ b/drivers/md/bcache/bcache.h
1675 @@ -264,7 +264,7 @@ struct bcache_device {
1676 #define BCACHE_DEV_UNLINK_DONE 2
1677 #define BCACHE_DEV_WB_RUNNING 3
1678 #define BCACHE_DEV_RATE_DW_RUNNING 4
1679 - unsigned int nr_stripes;
1680 + int nr_stripes;
1681 unsigned int stripe_size;
1682 atomic_t *stripe_sectors_dirty;
1683 unsigned long *full_dirty_stripes;
1684 diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
1685 index 08768796b5439..fda68c00ddd53 100644
1686 --- a/drivers/md/bcache/bset.c
1687 +++ b/drivers/md/bcache/bset.c
1688 @@ -321,7 +321,7 @@ int bch_btree_keys_alloc(struct btree_keys *b,
1689
1690 b->page_order = page_order;
1691
1692 - t->data = (void *) __get_free_pages(gfp, b->page_order);
1693 + t->data = (void *) __get_free_pages(__GFP_COMP|gfp, b->page_order);
1694 if (!t->data)
1695 goto err;
1696
1697 diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
1698 index 3c1109fceb2fb..46556bde032e2 100644
1699 --- a/drivers/md/bcache/btree.c
1700 +++ b/drivers/md/bcache/btree.c
1701 @@ -840,7 +840,7 @@ int bch_btree_cache_alloc(struct cache_set *c)
1702 mutex_init(&c->verify_lock);
1703
1704 c->verify_ondisk = (void *)
1705 - __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
1706 + __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(bucket_pages(c)));
1707
1708 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
1709
1710 diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
1711 index 6730820780b06..8250d2d1d780c 100644
1712 --- a/drivers/md/bcache/journal.c
1713 +++ b/drivers/md/bcache/journal.c
1714 @@ -1002,8 +1002,8 @@ int bch_journal_alloc(struct cache_set *c)
1715 j->w[1].c = c;
1716
1717 if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
1718 - !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
1719 - !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
1720 + !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) ||
1721 + !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)))
1722 return -ENOMEM;
1723
1724 return 0;
1725 diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
1726 index 168d647078591..25ad64a3919f6 100644
1727 --- a/drivers/md/bcache/super.c
1728 +++ b/drivers/md/bcache/super.c
1729 @@ -1754,7 +1754,7 @@ void bch_cache_set_unregister(struct cache_set *c)
1730 }
1731
1732 #define alloc_bucket_pages(gfp, c) \
1733 - ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
1734 + ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c))))
1735
1736 struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
1737 {
1738 diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
1739 index d60268fe49e10..0b02210ab4355 100644
1740 --- a/drivers/md/bcache/writeback.c
1741 +++ b/drivers/md/bcache/writeback.c
1742 @@ -519,15 +519,19 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
1743 uint64_t offset, int nr_sectors)
1744 {
1745 struct bcache_device *d = c->devices[inode];
1746 - unsigned int stripe_offset, stripe, sectors_dirty;
1747 + unsigned int stripe_offset, sectors_dirty;
1748 + int stripe;
1749
1750 if (!d)
1751 return;
1752
1753 + stripe = offset_to_stripe(d, offset);
1754 + if (stripe < 0)
1755 + return;
1756 +
1757 if (UUID_FLASH_ONLY(&c->uuids[inode]))
1758 atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);
1759
1760 - stripe = offset_to_stripe(d, offset);
1761 stripe_offset = offset & (d->stripe_size - 1);
1762
1763 while (nr_sectors) {
1764 @@ -567,12 +571,12 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
1765 static void refill_full_stripes(struct cached_dev *dc)
1766 {
1767 struct keybuf *buf = &dc->writeback_keys;
1768 - unsigned int start_stripe, stripe, next_stripe;
1769 + unsigned int start_stripe, next_stripe;
1770 + int stripe;
1771 bool wrapped = false;
1772
1773 stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
1774 -
1775 - if (stripe >= dc->disk.nr_stripes)
1776 + if (stripe < 0)
1777 stripe = 0;
1778
1779 start_stripe = stripe;
1780 diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
1781 index 4e4c6810dc3c7..c4ff76037227b 100644
1782 --- a/drivers/md/bcache/writeback.h
1783 +++ b/drivers/md/bcache/writeback.h
1784 @@ -33,10 +33,22 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
1785 return ret;
1786 }
1787
1788 -static inline unsigned int offset_to_stripe(struct bcache_device *d,
1789 +static inline int offset_to_stripe(struct bcache_device *d,
1790 uint64_t offset)
1791 {
1792 do_div(offset, d->stripe_size);
1793 +
1794 + /* d->nr_stripes is in range [1, INT_MAX] */
1795 + if (unlikely(offset >= d->nr_stripes)) {
1796 + pr_err("Invalid stripe %llu (>= nr_stripes %d).\n",
1797 + offset, d->nr_stripes);
1798 + return -EINVAL;
1799 + }
1800 +
1801 + /*
1802 + * Here offset is definitely smaller than INT_MAX,
1803 + * return it as int will never overflow.
1804 + */
1805 return offset;
1806 }
1807
1808 @@ -44,7 +56,10 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
1809 uint64_t offset,
1810 unsigned int nr_sectors)
1811 {
1812 - unsigned int stripe = offset_to_stripe(&dc->disk, offset);
1813 + int stripe = offset_to_stripe(&dc->disk, offset);
1814 +
1815 + if (stripe < 0)
1816 + return false;
1817
1818 while (1) {
1819 if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
1820 diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
1821 index 3f8577e2c13be..2bd2444ad99c6 100644
1822 --- a/drivers/md/dm-rq.c
1823 +++ b/drivers/md/dm-rq.c
1824 @@ -70,9 +70,6 @@ void dm_start_queue(struct request_queue *q)
1825
1826 void dm_stop_queue(struct request_queue *q)
1827 {
1828 - if (blk_mq_queue_stopped(q))
1829 - return;
1830 -
1831 blk_mq_quiesce_queue(q);
1832 }
1833
1834 diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
1835 index 73fd50e779754..d50737ec40394 100644
1836 --- a/drivers/md/md-cluster.c
1837 +++ b/drivers/md/md-cluster.c
1838 @@ -1139,6 +1139,7 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz
1839 bitmap = get_bitmap_from_slot(mddev, i);
1840 if (IS_ERR(bitmap)) {
1841 pr_err("can't get bitmap from slot %d\n", i);
1842 + bitmap = NULL;
1843 goto out;
1844 }
1845 counts = &bitmap->counts;
1846 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1847 index a3cbc9f4fec17..02acd5d5a8488 100644
1848 --- a/drivers/md/raid5.c
1849 +++ b/drivers/md/raid5.c
1850 @@ -3604,6 +3604,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
1851 * is missing/faulty, then we need to read everything we can.
1852 */
1853 if (sh->raid_conf->level != 6 &&
1854 + sh->raid_conf->rmw_level != PARITY_DISABLE_RMW &&
1855 sh->sector < sh->raid_conf->mddev->recovery_cp)
1856 /* reconstruct-write isn't being forced */
1857 return 0;
1858 @@ -4839,7 +4840,7 @@ static void handle_stripe(struct stripe_head *sh)
1859 * or to load a block that is being partially written.
1860 */
1861 if (s.to_read || s.non_overwrite
1862 - || (conf->level == 6 && s.to_write && s.failed)
1863 + || (s.to_write && s.failed)
1864 || (s.syncing && (s.uptodate + s.compute < disks))
1865 || s.replacing
1866 || s.expanding)
1867 diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c
1868 index 4be6dcf292fff..aaa96f256356b 100644
1869 --- a/drivers/media/platform/rockchip/rga/rga-hw.c
1870 +++ b/drivers/media/platform/rockchip/rga/rga-hw.c
1871 @@ -200,22 +200,25 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx)
1872 dst_info.data.format = ctx->out.fmt->hw_format;
1873 dst_info.data.swap = ctx->out.fmt->color_swap;
1874
1875 - if (ctx->in.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) {
1876 - if (ctx->out.fmt->hw_format < RGA_COLOR_FMT_YUV422SP) {
1877 - switch (ctx->in.colorspace) {
1878 - case V4L2_COLORSPACE_REC709:
1879 - src_info.data.csc_mode =
1880 - RGA_SRC_CSC_MODE_BT709_R0;
1881 - break;
1882 - default:
1883 - src_info.data.csc_mode =
1884 - RGA_SRC_CSC_MODE_BT601_R0;
1885 - break;
1886 - }
1887 + /*
1888 + * CSC mode must only be set when the colorspace families differ between
1889 + * input and output. It must remain unset (zeroed) if both are the same.
1890 + */
1891 +
1892 + if (RGA_COLOR_FMT_IS_YUV(ctx->in.fmt->hw_format) &&
1893 + RGA_COLOR_FMT_IS_RGB(ctx->out.fmt->hw_format)) {
1894 + switch (ctx->in.colorspace) {
1895 + case V4L2_COLORSPACE_REC709:
1896 + src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0;
1897 + break;
1898 + default:
1899 + src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT601_R0;
1900 + break;
1901 }
1902 }
1903
1904 - if (ctx->out.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) {
1905 + if (RGA_COLOR_FMT_IS_RGB(ctx->in.fmt->hw_format) &&
1906 + RGA_COLOR_FMT_IS_YUV(ctx->out.fmt->hw_format)) {
1907 switch (ctx->out.colorspace) {
1908 case V4L2_COLORSPACE_REC709:
1909 dst_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0;
1910 diff --git a/drivers/media/platform/rockchip/rga/rga-hw.h b/drivers/media/platform/rockchip/rga/rga-hw.h
1911 index 96cb0314dfa70..e8917e5630a48 100644
1912 --- a/drivers/media/platform/rockchip/rga/rga-hw.h
1913 +++ b/drivers/media/platform/rockchip/rga/rga-hw.h
1914 @@ -95,6 +95,11 @@
1915 #define RGA_COLOR_FMT_CP_8BPP 15
1916 #define RGA_COLOR_FMT_MASK 15
1917
1918 +#define RGA_COLOR_FMT_IS_YUV(fmt) \
1919 + (((fmt) >= RGA_COLOR_FMT_YUV422SP) && ((fmt) < RGA_COLOR_FMT_CP_1BPP))
1920 +#define RGA_COLOR_FMT_IS_RGB(fmt) \
1921 + ((fmt) < RGA_COLOR_FMT_YUV422SP)
1922 +
1923 #define RGA_COLOR_NONE_SWAP 0
1924 #define RGA_COLOR_RB_SWAP 1
1925 #define RGA_COLOR_ALPHA_SWAP 2
1926 diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
1927 index d7b43037e500a..e07b135613eb5 100644
1928 --- a/drivers/media/platform/vsp1/vsp1_dl.c
1929 +++ b/drivers/media/platform/vsp1/vsp1_dl.c
1930 @@ -431,6 +431,8 @@ vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type,
1931 if (!pool)
1932 return NULL;
1933
1934 + pool->vsp1 = vsp1;
1935 +
1936 spin_lock_init(&pool->lock);
1937 INIT_LIST_HEAD(&pool->free);
1938
1939 diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
1940 index 4a31907a4525f..3ff872c205eeb 100644
1941 --- a/drivers/mfd/arizona-core.c
1942 +++ b/drivers/mfd/arizona-core.c
1943 @@ -1430,6 +1430,15 @@ err_irq:
1944 arizona_irq_exit(arizona);
1945 err_pm:
1946 pm_runtime_disable(arizona->dev);
1947 +
1948 + switch (arizona->pdata.clk32k_src) {
1949 + case ARIZONA_32KZ_MCLK1:
1950 + case ARIZONA_32KZ_MCLK2:
1951 + arizona_clk32k_disable(arizona);
1952 + break;
1953 + default:
1954 + break;
1955 + }
1956 err_reset:
1957 arizona_enable_reset(arizona);
1958 regulator_disable(arizona->dcvdd);
1959 @@ -1452,6 +1461,15 @@ int arizona_dev_exit(struct arizona *arizona)
1960 regulator_disable(arizona->dcvdd);
1961 regulator_put(arizona->dcvdd);
1962
1963 + switch (arizona->pdata.clk32k_src) {
1964 + case ARIZONA_32KZ_MCLK1:
1965 + case ARIZONA_32KZ_MCLK2:
1966 + arizona_clk32k_disable(arizona);
1967 + break;
1968 + default:
1969 + break;
1970 + }
1971 +
1972 mfd_remove_devices(arizona->dev);
1973 arizona_free_irq(arizona, ARIZONA_IRQ_UNDERCLOCKED, arizona);
1974 arizona_free_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, arizona);
1975 diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
1976 index 4faa8d2e5d045..707f4287ab4a0 100644
1977 --- a/drivers/mfd/dln2.c
1978 +++ b/drivers/mfd/dln2.c
1979 @@ -287,7 +287,11 @@ static void dln2_rx(struct urb *urb)
1980 len = urb->actual_length - sizeof(struct dln2_header);
1981
1982 if (handle == DLN2_HANDLE_EVENT) {
1983 + unsigned long flags;
1984 +
1985 + spin_lock_irqsave(&dln2->event_cb_lock, flags);
1986 dln2_run_event_callbacks(dln2, id, echo, data, len);
1987 + spin_unlock_irqrestore(&dln2->event_cb_lock, flags);
1988 } else {
1989 /* URB will be re-submitted in _dln2_transfer (free_rx_slot) */
1990 if (dln2_transfer_complete(dln2, urb, handle, echo))
1991 diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
1992 index a66f8d6d61d1b..cb89f0578d425 100644
1993 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
1994 +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
1995 @@ -229,15 +229,12 @@ static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg)
1996 DTRAN_CTRL_DM_START);
1997 }
1998
1999 -static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
2000 +static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host)
2001 {
2002 - struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
2003 enum dma_data_direction dir;
2004
2005 - spin_lock_irq(&host->lock);
2006 -
2007 if (!host->data)
2008 - goto out;
2009 + return false;
2010
2011 if (host->data->flags & MMC_DATA_READ)
2012 dir = DMA_FROM_DEVICE;
2013 @@ -250,6 +247,17 @@ static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
2014 if (dir == DMA_FROM_DEVICE)
2015 clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
2016
2017 + return true;
2018 +}
2019 +
2020 +static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
2021 +{
2022 + struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
2023 +
2024 + spin_lock_irq(&host->lock);
2025 + if (!renesas_sdhi_internal_dmac_complete(host))
2026 + goto out;
2027 +
2028 tmio_mmc_do_data_irq(host);
2029 out:
2030 spin_unlock_irq(&host->lock);
2031 diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
2032 index 1054cc070747e..20b0ee174dc61 100644
2033 --- a/drivers/mtd/nand/raw/fsl_upm.c
2034 +++ b/drivers/mtd/nand/raw/fsl_upm.c
2035 @@ -62,7 +62,6 @@ static int fun_chip_ready(struct nand_chip *chip)
2036 static void fun_wait_rnb(struct fsl_upm_nand *fun)
2037 {
2038 if (fun->rnb_gpio[fun->mchip_number] >= 0) {
2039 - struct mtd_info *mtd = nand_to_mtd(&fun->chip);
2040 int cnt = 1000000;
2041
2042 while (--cnt && !fun_chip_ready(&fun->chip))
2043 diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
2044 index 413c3f254cf85..c881a573da662 100644
2045 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
2046 +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
2047 @@ -43,7 +43,7 @@ struct qmem {
2048 void *base;
2049 dma_addr_t iova;
2050 int alloc_sz;
2051 - u8 entry_sz;
2052 + u16 entry_sz;
2053 u8 align;
2054 u32 qsize;
2055 };
2056 diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
2057 index c84ab052ef265..3eee8df359a12 100644
2058 --- a/drivers/net/ethernet/qualcomm/emac/emac.c
2059 +++ b/drivers/net/ethernet/qualcomm/emac/emac.c
2060 @@ -485,13 +485,24 @@ static int emac_clks_phase1_init(struct platform_device *pdev,
2061
2062 ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]);
2063 if (ret)
2064 - return ret;
2065 + goto disable_clk_axi;
2066
2067 ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
2068 if (ret)
2069 - return ret;
2070 + goto disable_clk_cfg_ahb;
2071 +
2072 + ret = clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
2073 + if (ret)
2074 + goto disable_clk_cfg_ahb;
2075
2076 - return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
2077 + return 0;
2078 +
2079 +disable_clk_cfg_ahb:
2080 + clk_disable_unprepare(adpt->clk[EMAC_CLK_CFG_AHB]);
2081 +disable_clk_axi:
2082 + clk_disable_unprepare(adpt->clk[EMAC_CLK_AXI]);
2083 +
2084 + return ret;
2085 }
2086
2087 /* Enable clocks; needs emac_clks_phase1_init to be called before */
2088 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
2089 index 4d75158c64b29..826626e870d5c 100644
2090 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
2091 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
2092 @@ -350,6 +350,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
2093 plat_dat->has_gmac = true;
2094 plat_dat->bsp_priv = gmac;
2095 plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
2096 + plat_dat->multicast_filter_bins = 0;
2097
2098 err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
2099 if (err)
2100 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
2101 index bc9b01376e807..1d0b64bd1e1a9 100644
2102 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
2103 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
2104 @@ -166,6 +166,9 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
2105 value = GMAC_FRAME_FILTER_PR | GMAC_FRAME_FILTER_PCF;
2106 } else if (dev->flags & IFF_ALLMULTI) {
2107 value = GMAC_FRAME_FILTER_PM; /* pass all multi */
2108 + } else if (!netdev_mc_empty(dev) && (mcbitslog2 == 0)) {
2109 + /* Fall back to all multicast if we've no filter */
2110 + value = GMAC_FRAME_FILTER_PM;
2111 } else if (!netdev_mc_empty(dev)) {
2112 struct netdev_hw_addr *ha;
2113
2114 diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
2115 index 89b85970912db..35d265014e1ec 100644
2116 --- a/drivers/nvdimm/security.c
2117 +++ b/drivers/nvdimm/security.c
2118 @@ -450,14 +450,19 @@ void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
2119 else
2120 dev_dbg(&nvdimm->dev, "overwrite completed\n");
2121
2122 - if (nvdimm->sec.overwrite_state)
2123 - sysfs_notify_dirent(nvdimm->sec.overwrite_state);
2124 + /*
2125 + * Mark the overwrite work done and update dimm security flags,
2126 + * then send a sysfs event notification to wake up userspace
2127 + * poll threads to pick up the changed state.
2128 + */
2129 nvdimm->sec.overwrite_tmo = 0;
2130 clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
2131 clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
2132 - put_device(&nvdimm->dev);
2133 nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
2134 - nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
2135 + nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
2136 + if (nvdimm->sec.overwrite_state)
2137 + sysfs_notify_dirent(nvdimm->sec.overwrite_state);
2138 + put_device(&nvdimm->dev);
2139 }
2140
2141 void nvdimm_security_overwrite_query(struct work_struct *work)
2142 diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
2143 index 8e40b3e6da77d..3cef835b375fd 100644
2144 --- a/drivers/pci/bus.c
2145 +++ b/drivers/pci/bus.c
2146 @@ -322,12 +322,8 @@ void pci_bus_add_device(struct pci_dev *dev)
2147
2148 dev->match_driver = true;
2149 retval = device_attach(&dev->dev);
2150 - if (retval < 0 && retval != -EPROBE_DEFER) {
2151 + if (retval < 0 && retval != -EPROBE_DEFER)
2152 pci_warn(dev, "device attach failed (%d)\n", retval);
2153 - pci_proc_detach_device(dev);
2154 - pci_remove_sysfs_dev_files(dev);
2155 - return;
2156 - }
2157
2158 pci_dev_assign_added(dev, true);
2159 }
2160 diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
2161 index 70ded8900e285..270d502b8cd50 100644
2162 --- a/drivers/pci/controller/dwc/pcie-qcom.c
2163 +++ b/drivers/pci/controller/dwc/pcie-qcom.c
2164 @@ -45,7 +45,13 @@
2165 #define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10
2166
2167 #define PCIE20_PARF_PHY_CTRL 0x40
2168 +#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16)
2169 +#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16)
2170 +
2171 #define PCIE20_PARF_PHY_REFCLK 0x4C
2172 +#define PHY_REFCLK_SSP_EN BIT(16)
2173 +#define PHY_REFCLK_USE_PAD BIT(12)
2174 +
2175 #define PCIE20_PARF_DBI_BASE_ADDR 0x168
2176 #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
2177 #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
2178 @@ -76,6 +82,18 @@
2179 #define DBI_RO_WR_EN 1
2180
2181 #define PERST_DELAY_US 1000
2182 +/* PARF registers */
2183 +#define PCIE20_PARF_PCS_DEEMPH 0x34
2184 +#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16)
2185 +#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8)
2186 +#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0)
2187 +
2188 +#define PCIE20_PARF_PCS_SWING 0x38
2189 +#define PCS_SWING_TX_SWING_FULL(x) ((x) << 8)
2190 +#define PCS_SWING_TX_SWING_LOW(x) ((x) << 0)
2191 +
2192 +#define PCIE20_PARF_CONFIG_BITS 0x50
2193 +#define PHY_RX0_EQ(x) ((x) << 24)
2194
2195 #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
2196 #define SLV_ADDR_SPACE_SZ 0x10000000
2197 @@ -275,6 +293,7 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
2198 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
2199 struct dw_pcie *pci = pcie->pci;
2200 struct device *dev = pci->dev;
2201 + struct device_node *node = dev->of_node;
2202 u32 val;
2203 int ret;
2204
2205 @@ -319,9 +338,29 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
2206 val &= ~BIT(0);
2207 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
2208
2209 + if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
2210 + writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
2211 + PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
2212 + PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
2213 + pcie->parf + PCIE20_PARF_PCS_DEEMPH);
2214 + writel(PCS_SWING_TX_SWING_FULL(120) |
2215 + PCS_SWING_TX_SWING_LOW(120),
2216 + pcie->parf + PCIE20_PARF_PCS_SWING);
2217 + writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
2218 + }
2219 +
2220 + if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
2221 + /* set TX termination offset */
2222 + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
2223 + val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
2224 + val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
2225 + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
2226 + }
2227 +
2228 /* enable external reference clock */
2229 val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
2230 - val |= BIT(16);
2231 + val &= ~PHY_REFCLK_USE_PAD;
2232 + val |= PHY_REFCLK_SSP_EN;
2233 writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
2234
2235 ret = reset_control_deassert(res->phy_reset);
2236 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
2237 index b3869951c0eb7..6e60b4b1bf53b 100644
2238 --- a/drivers/pci/hotplug/acpiphp_glue.c
2239 +++ b/drivers/pci/hotplug/acpiphp_glue.c
2240 @@ -122,13 +122,21 @@ static struct acpiphp_context *acpiphp_grab_context(struct acpi_device *adev)
2241 struct acpiphp_context *context;
2242
2243 acpi_lock_hp_context();
2244 +
2245 context = acpiphp_get_context(adev);
2246 - if (!context || context->func.parent->is_going_away) {
2247 - acpi_unlock_hp_context();
2248 - return NULL;
2249 + if (!context)
2250 + goto unlock;
2251 +
2252 + if (context->func.parent->is_going_away) {
2253 + acpiphp_put_context(context);
2254 + context = NULL;
2255 + goto unlock;
2256 }
2257 +
2258 get_bridge(context->func.parent);
2259 acpiphp_put_context(context);
2260 +
2261 +unlock:
2262 acpi_unlock_hp_context();
2263 return context;
2264 }
2265 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2266 index 9bc0f321aaf0e..c98067579e9f3 100644
2267 --- a/drivers/pci/quirks.c
2268 +++ b/drivers/pci/quirks.c
2269 @@ -5208,7 +5208,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
2270 */
2271 static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
2272 {
2273 - if (pdev->device == 0x7340 && pdev->revision != 0xc5)
2274 + if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
2275 + (pdev->device == 0x7340 && pdev->revision != 0xc5))
2276 return;
2277
2278 pci_info(pdev, "disabling ATS\n");
2279 @@ -5219,6 +5220,8 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
2280 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
2281 /* AMD Iceland dGPU */
2282 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
2283 +/* AMD Navi10 dGPU */
2284 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
2285 /* AMD Navi14 dGPU */
2286 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
2287 #endif /* CONFIG_PCI_ATS */
2288 diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
2289 index 6e2683016c1f0..8bd0a078bfc47 100644
2290 --- a/drivers/pinctrl/pinctrl-ingenic.c
2291 +++ b/drivers/pinctrl/pinctrl-ingenic.c
2292 @@ -1500,9 +1500,9 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
2293 */
2294 high = ingenic_gpio_get_value(jzgc, irq);
2295 if (high)
2296 - irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_FALLING);
2297 + irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_LOW);
2298 else
2299 - irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_RISING);
2300 + irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH);
2301 }
2302
2303 if (jzgc->jzpc->version >= ID_JZ4760)
2304 @@ -1538,7 +1538,7 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
2305 */
2306 bool high = ingenic_gpio_get_value(jzgc, irqd->hwirq);
2307
2308 - type = high ? IRQ_TYPE_EDGE_FALLING : IRQ_TYPE_EDGE_RISING;
2309 + type = high ? IRQ_TYPE_LEVEL_LOW : IRQ_TYPE_LEVEL_HIGH;
2310 }
2311
2312 irq_set_type(jzgc, irqd->hwirq, type);
2313 @@ -1644,7 +1644,8 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
2314 unsigned int pin = gc->base + offset;
2315
2316 if (jzpc->version >= ID_JZ4760)
2317 - return ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1);
2318 + return ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_INT) ||
2319 + ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1);
2320
2321 if (ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_SELECT))
2322 return true;
2323 diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
2324 index 25ca2c894b4de..ab0662a33b41a 100644
2325 --- a/drivers/platform/chrome/cros_ec_ishtp.c
2326 +++ b/drivers/platform/chrome/cros_ec_ishtp.c
2327 @@ -645,8 +645,10 @@ static int cros_ec_ishtp_probe(struct ishtp_cl_device *cl_device)
2328
2329 /* Register croc_ec_dev mfd */
2330 rv = cros_ec_dev_init(client_data);
2331 - if (rv)
2332 + if (rv) {
2333 + down_write(&init_lock);
2334 goto end_cros_ec_dev_init_error;
2335 + }
2336
2337 return 0;
2338
2339 diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
2340 index 1f829edd8ee70..d392a828fc493 100644
2341 --- a/drivers/pwm/pwm-bcm-iproc.c
2342 +++ b/drivers/pwm/pwm-bcm-iproc.c
2343 @@ -85,8 +85,6 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
2344 u64 tmp, multi, rate;
2345 u32 value, prescale;
2346
2347 - rate = clk_get_rate(ip->clk);
2348 -
2349 value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
2350
2351 if (value & BIT(IPROC_PWM_CTRL_EN_SHIFT(pwm->hwpwm)))
2352 @@ -99,6 +97,13 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
2353 else
2354 state->polarity = PWM_POLARITY_INVERSED;
2355
2356 + rate = clk_get_rate(ip->clk);
2357 + if (rate == 0) {
2358 + state->period = 0;
2359 + state->duty_cycle = 0;
2360 + return;
2361 + }
2362 +
2363 value = readl(ip->base + IPROC_PWM_PRESCALE_OFFSET);
2364 prescale = value >> IPROC_PWM_PRESCALE_SHIFT(pwm->hwpwm);
2365 prescale &= IPROC_PWM_PRESCALE_MAX;
2366 diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
2367 index cb0f4a0be0322..eaeb6aee6da5c 100644
2368 --- a/drivers/remoteproc/qcom_q6v5.c
2369 +++ b/drivers/remoteproc/qcom_q6v5.c
2370 @@ -151,6 +151,8 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5)
2371 {
2372 int ret;
2373
2374 + q6v5->running = false;
2375 +
2376 qcom_smem_state_update_bits(q6v5->state,
2377 BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
2378
2379 diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
2380 index d84e9f306086b..a67c55785b4de 100644
2381 --- a/drivers/remoteproc/qcom_q6v5_mss.c
2382 +++ b/drivers/remoteproc/qcom_q6v5_mss.c
2383 @@ -381,6 +381,12 @@ static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
2384 {
2385 struct q6v5 *qproc = rproc->priv;
2386
2387 + /* MBA is restricted to a maximum size of 1M */
2388 + if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
2389 + dev_err(qproc->dev, "MBA firmware load failed\n");
2390 + return -EINVAL;
2391 + }
2392 +
2393 memcpy(qproc->mba_region, fw->data, fw->size);
2394
2395 return 0;
2396 @@ -1028,15 +1034,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
2397 } else if (phdr->p_filesz) {
2398 /* Replace "xxx.xxx" with "xxx.bxx" */
2399 sprintf(fw_name + fw_name_len - 3, "b%02d", i);
2400 - ret = request_firmware(&seg_fw, fw_name, qproc->dev);
2401 + ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
2402 + ptr, phdr->p_filesz);
2403 if (ret) {
2404 dev_err(qproc->dev, "failed to load %s\n", fw_name);
2405 iounmap(ptr);
2406 goto release_firmware;
2407 }
2408
2409 - memcpy(ptr, seg_fw->data, seg_fw->size);
2410 -
2411 release_firmware(seg_fw);
2412 }
2413
2414 diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
2415 index 9884228800a50..f14394ab0e037 100644
2416 --- a/drivers/scsi/lpfc/lpfc_nvmet.c
2417 +++ b/drivers/scsi/lpfc/lpfc_nvmet.c
2418 @@ -1923,7 +1923,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
2419 }
2420 tgtp->tport_unreg_cmp = &tport_unreg_cmp;
2421 nvmet_fc_unregister_targetport(phba->targetport);
2422 - if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
2423 + if (!wait_for_completion_timeout(&tport_unreg_cmp,
2424 msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
2425 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2426 "6179 Unreg targetport x%px timeout "
2427 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2428 index 9ad44a96dfe3a..33f1cca7eaa61 100644
2429 --- a/drivers/usb/serial/ftdi_sio.c
2430 +++ b/drivers/usb/serial/ftdi_sio.c
2431 @@ -2480,12 +2480,11 @@ static int ftdi_prepare_write_buffer(struct usb_serial_port *port,
2432 #define FTDI_RS_ERR_MASK (FTDI_RS_BI | FTDI_RS_PE | FTDI_RS_FE | FTDI_RS_OE)
2433
2434 static int ftdi_process_packet(struct usb_serial_port *port,
2435 - struct ftdi_private *priv, char *packet, int len)
2436 + struct ftdi_private *priv, unsigned char *buf, int len)
2437 {
2438 + unsigned char status;
2439 int i;
2440 - char status;
2441 char flag;
2442 - char *ch;
2443
2444 if (len < 2) {
2445 dev_dbg(&port->dev, "malformed packet\n");
2446 @@ -2495,7 +2494,7 @@ static int ftdi_process_packet(struct usb_serial_port *port,
2447 /* Compare new line status to the old one, signal if different/
2448 N.B. packet may be processed more than once, but differences
2449 are only processed once. */
2450 - status = packet[0] & FTDI_STATUS_B0_MASK;
2451 + status = buf[0] & FTDI_STATUS_B0_MASK;
2452 if (status != priv->prev_status) {
2453 char diff_status = status ^ priv->prev_status;
2454
2455 @@ -2521,13 +2520,12 @@ static int ftdi_process_packet(struct usb_serial_port *port,
2456 }
2457
2458 /* save if the transmitter is empty or not */
2459 - if (packet[1] & FTDI_RS_TEMT)
2460 + if (buf[1] & FTDI_RS_TEMT)
2461 priv->transmit_empty = 1;
2462 else
2463 priv->transmit_empty = 0;
2464
2465 - len -= 2;
2466 - if (!len)
2467 + if (len == 2)
2468 return 0; /* status only */
2469
2470 /*
2471 @@ -2535,40 +2533,41 @@ static int ftdi_process_packet(struct usb_serial_port *port,
2472 * data payload to avoid over-reporting.
2473 */
2474 flag = TTY_NORMAL;
2475 - if (packet[1] & FTDI_RS_ERR_MASK) {
2476 + if (buf[1] & FTDI_RS_ERR_MASK) {
2477 /* Break takes precedence over parity, which takes precedence
2478 * over framing errors */
2479 - if (packet[1] & FTDI_RS_BI) {
2480 + if (buf[1] & FTDI_RS_BI) {
2481 flag = TTY_BREAK;
2482 port->icount.brk++;
2483 usb_serial_handle_break(port);
2484 - } else if (packet[1] & FTDI_RS_PE) {
2485 + } else if (buf[1] & FTDI_RS_PE) {
2486 flag = TTY_PARITY;
2487 port->icount.parity++;
2488 - } else if (packet[1] & FTDI_RS_FE) {
2489 + } else if (buf[1] & FTDI_RS_FE) {
2490 flag = TTY_FRAME;
2491 port->icount.frame++;
2492 }
2493 /* Overrun is special, not associated with a char */
2494 - if (packet[1] & FTDI_RS_OE) {
2495 + if (buf[1] & FTDI_RS_OE) {
2496 port->icount.overrun++;
2497 tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
2498 }
2499 }
2500
2501 - port->icount.rx += len;
2502 - ch = packet + 2;
2503 + port->icount.rx += len - 2;
2504
2505 if (port->port.console && port->sysrq) {
2506 - for (i = 0; i < len; i++, ch++) {
2507 - if (!usb_serial_handle_sysrq_char(port, *ch))
2508 - tty_insert_flip_char(&port->port, *ch, flag);
2509 + for (i = 2; i < len; i++) {
2510 + if (usb_serial_handle_sysrq_char(port, buf[i]))
2511 + continue;
2512 + tty_insert_flip_char(&port->port, buf[i], flag);
2513 }
2514 } else {
2515 - tty_insert_flip_string_fixed_flag(&port->port, ch, flag, len);
2516 + tty_insert_flip_string_fixed_flag(&port->port, buf + 2, flag,
2517 + len - 2);
2518 }
2519
2520 - return len;
2521 + return len - 2;
2522 }
2523
2524 static void ftdi_process_read_urb(struct urb *urb)
2525 diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
2526 index e46104c2fd94e..893cef70c1599 100644
2527 --- a/drivers/watchdog/f71808e_wdt.c
2528 +++ b/drivers/watchdog/f71808e_wdt.c
2529 @@ -689,9 +689,9 @@ static int __init watchdog_init(int sioaddr)
2530 * into the module have been registered yet.
2531 */
2532 watchdog.sioaddr = sioaddr;
2533 - watchdog.ident.options = WDIOC_SETTIMEOUT
2534 - | WDIOF_MAGICCLOSE
2535 - | WDIOF_KEEPALIVEPING;
2536 + watchdog.ident.options = WDIOF_MAGICCLOSE
2537 + | WDIOF_KEEPALIVEPING
2538 + | WDIOF_CARDRESET;
2539
2540 snprintf(watchdog.ident.identity,
2541 sizeof(watchdog.ident.identity), "%s watchdog",
2542 @@ -705,6 +705,13 @@ static int __init watchdog_init(int sioaddr)
2543 wdt_conf = superio_inb(sioaddr, F71808FG_REG_WDT_CONF);
2544 watchdog.caused_reboot = wdt_conf & BIT(F71808FG_FLAG_WDTMOUT_STS);
2545
2546 + /*
2547 + * We don't want WDTMOUT_STS to stick around till regular reboot.
2548 + * Write 1 to the bit to clear it to zero.
2549 + */
2550 + superio_outb(sioaddr, F71808FG_REG_WDT_CONF,
2551 + wdt_conf | BIT(F71808FG_FLAG_WDTMOUT_STS));
2552 +
2553 superio_exit(sioaddr);
2554
2555 err = watchdog_set_timeout(timeout);
2556 diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
2557 index c4147e93aa7d4..3729f99fd8eca 100644
2558 --- a/drivers/watchdog/watchdog_dev.c
2559 +++ b/drivers/watchdog/watchdog_dev.c
2560 @@ -974,6 +974,15 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
2561 if (IS_ERR_OR_NULL(watchdog_kworker))
2562 return -ENODEV;
2563
2564 + device_initialize(&wd_data->dev);
2565 + wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
2566 + wd_data->dev.class = &watchdog_class;
2567 + wd_data->dev.parent = wdd->parent;
2568 + wd_data->dev.groups = wdd->groups;
2569 + wd_data->dev.release = watchdog_core_data_release;
2570 + dev_set_drvdata(&wd_data->dev, wdd);
2571 + dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
2572 +
2573 kthread_init_work(&wd_data->work, watchdog_ping_work);
2574 hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
2575 wd_data->timer.function = watchdog_timer_expired;
2576 @@ -994,15 +1003,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
2577 }
2578 }
2579
2580 - device_initialize(&wd_data->dev);
2581 - wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
2582 - wd_data->dev.class = &watchdog_class;
2583 - wd_data->dev.parent = wdd->parent;
2584 - wd_data->dev.groups = wdd->groups;
2585 - wd_data->dev.release = watchdog_core_data_release;
2586 - dev_set_drvdata(&wd_data->dev, wdd);
2587 - dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
2588 -
2589 /* Fill in the data structures */
2590 cdev_init(&wd_data->cdev, &watchdog_fops);
2591
2592 diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
2593 index 36cd210ee2ef7..2374f3f6f3b70 100644
2594 --- a/fs/btrfs/ctree.h
2595 +++ b/fs/btrfs/ctree.h
2596 @@ -990,8 +990,10 @@ struct btrfs_root {
2597 wait_queue_head_t log_writer_wait;
2598 wait_queue_head_t log_commit_wait[2];
2599 struct list_head log_ctxs[2];
2600 + /* Used only for log trees of subvolumes, not for the log root tree */
2601 atomic_t log_writers;
2602 atomic_t log_commit[2];
2603 + /* Used only for log trees of subvolumes, not for the log root tree */
2604 atomic_t log_batch;
2605 int log_transid;
2606 /* No matter the commit succeeds or not*/
2607 @@ -3164,7 +3166,7 @@ do { \
2608 /* Report first abort since mount */ \
2609 if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \
2610 &((trans)->fs_info->fs_state))) { \
2611 - if ((errno) != -EIO) { \
2612 + if ((errno) != -EIO && (errno) != -EROFS) { \
2613 WARN(1, KERN_DEBUG \
2614 "BTRFS: Transaction aborted (error %d)\n", \
2615 (errno)); \
2616 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
2617 index 273d1ccdd45df..ad1c8e3b8133a 100644
2618 --- a/fs/btrfs/disk-io.c
2619 +++ b/fs/btrfs/disk-io.c
2620 @@ -1475,9 +1475,16 @@ int btrfs_init_fs_root(struct btrfs_root *root)
2621 spin_lock_init(&root->ino_cache_lock);
2622 init_waitqueue_head(&root->ino_cache_wait);
2623
2624 - ret = get_anon_bdev(&root->anon_dev);
2625 - if (ret)
2626 - goto fail;
2627 + /*
2628 + * Don't assign anonymous block device to roots that are not exposed to
2629 + * userspace, the id pool is limited to 1M
2630 + */
2631 + if (is_fstree(root->root_key.objectid) &&
2632 + btrfs_root_refs(&root->root_item) > 0) {
2633 + ret = get_anon_bdev(&root->anon_dev);
2634 + if (ret)
2635 + goto fail;
2636 + }
2637
2638 mutex_lock(&root->objectid_mutex);
2639 ret = btrfs_find_highest_objectid(root,
2640 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2641 index 47ecf7216b3e5..739332b462059 100644
2642 --- a/fs/btrfs/extent-tree.c
2643 +++ b/fs/btrfs/extent-tree.c
2644 @@ -5221,7 +5221,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
2645 goto out;
2646 }
2647
2648 - trans = btrfs_start_transaction(tree_root, 0);
2649 + /*
2650 + * Use join to avoid potential EINTR from transaction start. See
2651 + * wait_reserve_ticket and the whole reservation callchain.
2652 + */
2653 + if (for_reloc)
2654 + trans = btrfs_join_transaction(tree_root);
2655 + else
2656 + trans = btrfs_start_transaction(tree_root, 0);
2657 if (IS_ERR(trans)) {
2658 err = PTR_ERR(trans);
2659 goto out_free;
2660 diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
2661 index 99dcb38976592..035ea5bc692ad 100644
2662 --- a/fs/btrfs/extent_io.c
2663 +++ b/fs/btrfs/extent_io.c
2664 @@ -4467,15 +4467,25 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
2665 free_extent_map(em);
2666 break;
2667 }
2668 - if (!test_range_bit(tree, em->start,
2669 - extent_map_end(em) - 1,
2670 - EXTENT_LOCKED, 0, NULL)) {
2671 + if (test_range_bit(tree, em->start,
2672 + extent_map_end(em) - 1,
2673 + EXTENT_LOCKED, 0, NULL))
2674 + goto next;
2675 + /*
2676 + * If it's not in the list of modified extents, used
2677 + * by a fast fsync, we can remove it. If it's being
2678 + * logged we can safely remove it since fsync took an
2679 + * extra reference on the em.
2680 + */
2681 + if (list_empty(&em->list) ||
2682 + test_bit(EXTENT_FLAG_LOGGING, &em->flags)) {
2683 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2684 &btrfs_inode->runtime_flags);
2685 remove_extent_mapping(map, em);
2686 /* once for the rb tree */
2687 free_extent_map(em);
2688 }
2689 +next:
2690 start = extent_map_end(em);
2691 write_unlock(&map->lock);
2692
2693 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
2694 index d86ada9c3c541..8bfc0f348ad55 100644
2695 --- a/fs/btrfs/free-space-cache.c
2696 +++ b/fs/btrfs/free-space-cache.c
2697 @@ -2166,7 +2166,7 @@ out:
2698 static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
2699 struct btrfs_free_space *info, bool update_stat)
2700 {
2701 - struct btrfs_free_space *left_info;
2702 + struct btrfs_free_space *left_info = NULL;
2703 struct btrfs_free_space *right_info;
2704 bool merged = false;
2705 u64 offset = info->offset;
2706 @@ -2181,7 +2181,7 @@ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
2707 if (right_info && rb_prev(&right_info->offset_index))
2708 left_info = rb_entry(rb_prev(&right_info->offset_index),
2709 struct btrfs_free_space, offset_index);
2710 - else
2711 + else if (!right_info)
2712 left_info = tree_search_offset(ctl, offset - 1, 0, 0);
2713
2714 if (right_info && !right_info->bitmap) {
2715 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
2716 index e408181a5eba3..fa7f3a59813ea 100644
2717 --- a/fs/btrfs/inode.c
2718 +++ b/fs/btrfs/inode.c
2719 @@ -641,12 +641,18 @@ cont:
2720 page_error_op |
2721 PAGE_END_WRITEBACK);
2722
2723 - for (i = 0; i < nr_pages; i++) {
2724 - WARN_ON(pages[i]->mapping);
2725 - put_page(pages[i]);
2726 + /*
2727 + * Ensure we only free the compressed pages if we have
2728 + * them allocated, as we can still reach here with
2729 + * inode_need_compress() == false.
2730 + */
2731 + if (pages) {
2732 + for (i = 0; i < nr_pages; i++) {
2733 + WARN_ON(pages[i]->mapping);
2734 + put_page(pages[i]);
2735 + }
2736 + kfree(pages);
2737 }
2738 - kfree(pages);
2739 -
2740 return 0;
2741 }
2742 }
2743 @@ -4681,6 +4687,8 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
2744 }
2745 }
2746
2747 + free_anon_bdev(dest->anon_dev);
2748 + dest->anon_dev = 0;
2749 out_end_trans:
2750 trans->block_rsv = NULL;
2751 trans->bytes_reserved = 0;
2752 @@ -7186,7 +7194,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
2753 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
2754 /* Only regular file could have regular/prealloc extent */
2755 if (!S_ISREG(inode->vfs_inode.i_mode)) {
2756 - ret = -EUCLEAN;
2757 + err = -EUCLEAN;
2758 btrfs_crit(fs_info,
2759 "regular/prealloc extent found for non-regular inode %llu",
2760 btrfs_ino(inode));
2761 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
2762 index d88b8d8897cc5..88745b5182126 100644
2763 --- a/fs/btrfs/ioctl.c
2764 +++ b/fs/btrfs/ioctl.c
2765 @@ -167,8 +167,11 @@ static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
2766 return 0;
2767 }
2768
2769 -/* Check if @flags are a supported and valid set of FS_*_FL flags */
2770 -static int check_fsflags(unsigned int flags)
2771 +/*
2772 + * Check if @flags are a supported and valid set of FS_*_FL flags and that
2773 + * the old and new flags are not conflicting
2774 + */
2775 +static int check_fsflags(unsigned int old_flags, unsigned int flags)
2776 {
2777 if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
2778 FS_NOATIME_FL | FS_NODUMP_FL | \
2779 @@ -177,9 +180,19 @@ static int check_fsflags(unsigned int flags)
2780 FS_NOCOW_FL))
2781 return -EOPNOTSUPP;
2782
2783 + /* COMPR and NOCOMP on new/old are valid */
2784 if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
2785 return -EINVAL;
2786
2787 + if ((flags & FS_COMPR_FL) && (flags & FS_NOCOW_FL))
2788 + return -EINVAL;
2789 +
2790 + /* NOCOW and compression options are mutually exclusive */
2791 + if ((old_flags & FS_NOCOW_FL) && (flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
2792 + return -EINVAL;
2793 + if ((flags & FS_NOCOW_FL) && (old_flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
2794 + return -EINVAL;
2795 +
2796 return 0;
2797 }
2798
2799 @@ -193,7 +206,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
2800 unsigned int fsflags, old_fsflags;
2801 int ret;
2802 const char *comp = NULL;
2803 - u32 binode_flags = binode->flags;
2804 + u32 binode_flags;
2805
2806 if (!inode_owner_or_capable(inode))
2807 return -EPERM;
2808 @@ -204,22 +217,23 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
2809 if (copy_from_user(&fsflags, arg, sizeof(fsflags)))
2810 return -EFAULT;
2811
2812 - ret = check_fsflags(fsflags);
2813 - if (ret)
2814 - return ret;
2815 -
2816 ret = mnt_want_write_file(file);
2817 if (ret)
2818 return ret;
2819
2820 inode_lock(inode);
2821 -
2822 fsflags = btrfs_mask_fsflags_for_type(inode, fsflags);
2823 old_fsflags = btrfs_inode_flags_to_fsflags(binode->flags);
2824 +
2825 ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
2826 if (ret)
2827 goto out_unlock;
2828
2829 + ret = check_fsflags(old_fsflags, fsflags);
2830 + if (ret)
2831 + goto out_unlock;
2832 +
2833 + binode_flags = binode->flags;
2834 if (fsflags & FS_SYNC_FL)
2835 binode_flags |= BTRFS_INODE_SYNC;
2836 else
2837 diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
2838 index 454a1015d026b..9a2f15f4c80e0 100644
2839 --- a/fs/btrfs/ref-verify.c
2840 +++ b/fs/btrfs/ref-verify.c
2841 @@ -286,6 +286,8 @@ static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info,
2842 exist_re = insert_root_entry(&exist->roots, re);
2843 if (exist_re)
2844 kfree(re);
2845 + } else {
2846 + kfree(re);
2847 }
2848 kfree(be);
2849 return exist;
2850 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
2851 index 1b087ee338ccb..af3605a0bf2e0 100644
2852 --- a/fs/btrfs/relocation.c
2853 +++ b/fs/btrfs/relocation.c
2854 @@ -2312,12 +2312,20 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
2855 btrfs_unlock_up_safe(path, 0);
2856 }
2857
2858 - min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2859 + /*
2860 + * In merge_reloc_root(), we modify the upper level pointer to swap the
2861 + * tree blocks between reloc tree and subvolume tree. Thus for tree
2862 + * block COW, we COW at most from level 1 to root level for each tree.
2863 + *
2864 + * Thus the needed metadata size is at most root_level * nodesize,
2865 + * and * 2 since we have two trees to COW.
2866 + */
2867 + min_reserved = fs_info->nodesize * btrfs_root_level(root_item) * 2;
2868 memset(&next_key, 0, sizeof(next_key));
2869
2870 while (1) {
2871 ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
2872 - BTRFS_RESERVE_FLUSH_ALL);
2873 + BTRFS_RESERVE_FLUSH_LIMIT);
2874 if (ret) {
2875 err = ret;
2876 goto out;
2877 diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
2878 index aea24202cd355..4b0ee34aa65d5 100644
2879 --- a/fs/btrfs/super.c
2880 +++ b/fs/btrfs/super.c
2881 @@ -435,6 +435,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
2882 char *compress_type;
2883 bool compress_force = false;
2884 enum btrfs_compression_type saved_compress_type;
2885 + int saved_compress_level;
2886 bool saved_compress_force;
2887 int no_compress = 0;
2888
2889 @@ -517,6 +518,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
2890 info->compress_type : BTRFS_COMPRESS_NONE;
2891 saved_compress_force =
2892 btrfs_test_opt(info, FORCE_COMPRESS);
2893 + saved_compress_level = info->compress_level;
2894 if (token == Opt_compress ||
2895 token == Opt_compress_force ||
2896 strncmp(args[0].from, "zlib", 4) == 0) {
2897 @@ -561,6 +563,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
2898 no_compress = 0;
2899 } else if (strncmp(args[0].from, "no", 2) == 0) {
2900 compress_type = "no";
2901 + info->compress_level = 0;
2902 + info->compress_type = 0;
2903 btrfs_clear_opt(info->mount_opt, COMPRESS);
2904 btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
2905 compress_force = false;
2906 @@ -581,11 +585,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
2907 */
2908 btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
2909 }
2910 - if ((btrfs_test_opt(info, COMPRESS) &&
2911 - (info->compress_type != saved_compress_type ||
2912 - compress_force != saved_compress_force)) ||
2913 - (!btrfs_test_opt(info, COMPRESS) &&
2914 - no_compress == 1)) {
2915 + if (no_compress == 1) {
2916 + btrfs_info(info, "use no compression");
2917 + } else if ((info->compress_type != saved_compress_type) ||
2918 + (compress_force != saved_compress_force) ||
2919 + (info->compress_level != saved_compress_level)) {
2920 btrfs_info(info, "%s %s compression, level %d",
2921 (compress_force) ? "force" : "use",
2922 compress_type, info->compress_level);
2923 @@ -1848,6 +1852,12 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
2924 set_bit(BTRFS_FS_OPEN, &fs_info->flags);
2925 }
2926 out:
2927 + /*
2928 + * We need to set SB_I_VERSION here otherwise it'll get cleared by VFS,
2929 + * since the absence of the flag means it can be toggled off by remount.
2930 + */
2931 + *flags |= SB_I_VERSION;
2932 +
2933 wake_up_process(fs_info->transaction_kthread);
2934 btrfs_remount_cleanup(fs_info, old_opts);
2935 return 0;
2936 @@ -2254,9 +2264,7 @@ static int btrfs_unfreeze(struct super_block *sb)
2937 static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
2938 {
2939 struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
2940 - struct btrfs_fs_devices *cur_devices;
2941 struct btrfs_device *dev, *first_dev = NULL;
2942 - struct list_head *head;
2943
2944 /*
2945 * Lightweight locking of the devices. We should not need
2946 @@ -2266,18 +2274,13 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
2947 * least until the rcu_read_unlock.
2948 */
2949 rcu_read_lock();
2950 - cur_devices = fs_info->fs_devices;
2951 - while (cur_devices) {
2952 - head = &cur_devices->devices;
2953 - list_for_each_entry_rcu(dev, head, dev_list) {
2954 - if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
2955 - continue;
2956 - if (!dev->name)
2957 - continue;
2958 - if (!first_dev || dev->devid < first_dev->devid)
2959 - first_dev = dev;
2960 - }
2961 - cur_devices = cur_devices->seed;
2962 + list_for_each_entry_rcu(dev, &fs_info->fs_devices->devices, dev_list) {
2963 + if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
2964 + continue;
2965 + if (!dev->name)
2966 + continue;
2967 + if (!first_dev || dev->devid < first_dev->devid)
2968 + first_dev = dev;
2969 }
2970
2971 if (first_dev)
2972 diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
2973 index f6d3c80f2e289..5c299e1f2297e 100644
2974 --- a/fs/btrfs/sysfs.c
2975 +++ b/fs/btrfs/sysfs.c
2976 @@ -975,7 +975,9 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
2977 {
2978 int error = 0;
2979 struct btrfs_device *dev;
2980 + unsigned int nofs_flag;
2981
2982 + nofs_flag = memalloc_nofs_save();
2983 list_for_each_entry(dev, &fs_devices->devices, dev_list) {
2984 struct hd_struct *disk;
2985 struct kobject *disk_kobj;
2986 @@ -994,6 +996,7 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
2987 if (error)
2988 break;
2989 }
2990 + memalloc_nofs_restore(nofs_flag);
2991
2992 return error;
2993 }
2994 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
2995 index f46afbff668eb..3c090549ed07d 100644
2996 --- a/fs/btrfs/tree-log.c
2997 +++ b/fs/btrfs/tree-log.c
2998 @@ -3140,29 +3140,17 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2999 btrfs_init_log_ctx(&root_log_ctx, NULL);
3000
3001 mutex_lock(&log_root_tree->log_mutex);
3002 - atomic_inc(&log_root_tree->log_batch);
3003 - atomic_inc(&log_root_tree->log_writers);
3004
3005 index2 = log_root_tree->log_transid % 2;
3006 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
3007 root_log_ctx.log_transid = log_root_tree->log_transid;
3008
3009 - mutex_unlock(&log_root_tree->log_mutex);
3010 -
3011 - mutex_lock(&log_root_tree->log_mutex);
3012 -
3013 /*
3014 * Now we are safe to update the log_root_tree because we're under the
3015 * log_mutex, and we're a current writer so we're holding the commit
3016 * open until we drop the log_mutex.
3017 */
3018 ret = update_log_root(trans, log, &new_root_item);
3019 -
3020 - if (atomic_dec_and_test(&log_root_tree->log_writers)) {
3021 - /* atomic_dec_and_test implies a barrier */
3022 - cond_wake_up_nomb(&log_root_tree->log_writer_wait);
3023 - }
3024 -
3025 if (ret) {
3026 if (!list_empty(&root_log_ctx.list))
3027 list_del_init(&root_log_ctx.list);
3028 @@ -3208,8 +3196,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
3029 root_log_ctx.log_transid - 1);
3030 }
3031
3032 - wait_for_writer(log_root_tree);
3033 -
3034 /*
3035 * now that we've moved on to the tree of log tree roots,
3036 * check the full commit flag again
3037 @@ -4054,11 +4040,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
3038 fs_info->csum_root,
3039 ds + cs, ds + cs + cl - 1,
3040 &ordered_sums, 0);
3041 - if (ret) {
3042 - btrfs_release_path(dst_path);
3043 - kfree(ins_data);
3044 - return ret;
3045 - }
3046 + if (ret)
3047 + break;
3048 }
3049 }
3050 }
3051 @@ -4071,7 +4054,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
3052 * we have to do this after the loop above to avoid changing the
3053 * log tree while trying to change the log tree.
3054 */
3055 - ret = 0;
3056 while (!list_empty(&ordered_sums)) {
3057 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3058 struct btrfs_ordered_sum,
3059 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
3060 index 1e6e3c1d97dfa..196ddbcd29360 100644
3061 --- a/fs/btrfs/volumes.c
3062 +++ b/fs/btrfs/volumes.c
3063 @@ -219,7 +219,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
3064 *
3065 * global::fs_devs - add, remove, updates to the global list
3066 *
3067 - * does not protect: manipulation of the fs_devices::devices list!
3068 + * does not protect: manipulation of the fs_devices::devices list in general
3069 + * but in mount context it could be used to exclude list modifications by eg.
3070 + * scan ioctl
3071 *
3072 * btrfs_device::name - renames (write side), read is RCU
3073 *
3074 @@ -232,6 +234,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
3075 * may be used to exclude some operations from running concurrently without any
3076 * modifications to the list (see write_all_supers)
3077 *
3078 + * Is not required at mount and close times, because our device list is
3079 + * protected by the uuid_mutex at that point.
3080 + *
3081 * balance_mutex
3082 * -------------
3083 * protects balance structures (status, state) and context accessed from
3084 @@ -778,6 +783,11 @@ static int btrfs_free_stale_devices(const char *path,
3085 return ret;
3086 }
3087
3088 +/*
3089 + * This is only used on mount, and we are protected from competing things
3090 + * messing with our fs_devices by the uuid_mutex, thus we do not need the
3091 + * fs_devices->device_list_mutex here.
3092 + */
3093 static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
3094 struct btrfs_device *device, fmode_t flags,
3095 void *holder)
3096 @@ -1418,8 +1428,14 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
3097 int ret;
3098
3099 lockdep_assert_held(&uuid_mutex);
3100 + /*
3101 + * The device_list_mutex cannot be taken here in case opening the
3102 + * underlying device takes further locks like bd_mutex.
3103 + *
3104 + * We also don't need the lock here as this is called during mount and
3105 + * exclusion is provided by uuid_mutex
3106 + */
3107
3108 - mutex_lock(&fs_devices->device_list_mutex);
3109 if (fs_devices->opened) {
3110 fs_devices->opened++;
3111 ret = 0;
3112 @@ -1427,7 +1443,6 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
3113 list_sort(NULL, &fs_devices->devices, devid_cmp);
3114 ret = open_fs_devices(fs_devices, flags, holder);
3115 }
3116 - mutex_unlock(&fs_devices->device_list_mutex);
3117
3118 return ret;
3119 }
3120 @@ -3283,7 +3298,7 @@ static int insert_balance_item(struct btrfs_fs_info *fs_info,
3121 if (!path)
3122 return -ENOMEM;
3123
3124 - trans = btrfs_start_transaction(root, 0);
3125 + trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3126 if (IS_ERR(trans)) {
3127 btrfs_free_path(path);
3128 return PTR_ERR(trans);
3129 @@ -4246,7 +4261,22 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
3130 mutex_lock(&fs_info->balance_mutex);
3131 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
3132 btrfs_info(fs_info, "balance: paused");
3133 - else if (ret == -ECANCELED && atomic_read(&fs_info->balance_cancel_req))
3134 + /*
3135 + * Balance can be canceled by:
3136 + *
3137 + * - Regular cancel request
3138 + * Then ret == -ECANCELED and balance_cancel_req > 0
3139 + *
3140 + * - Fatal signal to "btrfs" process
3141 + * Either the signal caught by wait_reserve_ticket() and callers
3142 + * got -EINTR, or caught by btrfs_should_cancel_balance() and
3143 + * got -ECANCELED.
3144 + * Either way, in this case balance_cancel_req = 0, and
3145 + * ret == -EINTR or ret == -ECANCELED.
3146 + *
3147 + * So here we only check the return value to catch canceled balance.
3148 + */
3149 + else if (ret == -ECANCELED || ret == -EINTR)
3150 btrfs_info(fs_info, "balance: canceled");
3151 else
3152 btrfs_info(fs_info, "balance: ended with status: %d", ret);
3153 @@ -7267,7 +7297,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
3154 * otherwise we don't need it.
3155 */
3156 mutex_lock(&uuid_mutex);
3157 - mutex_lock(&fs_info->chunk_mutex);
3158
3159 /*
3160 * It is possible for mount and umount to race in such a way that
3161 @@ -7312,7 +7341,9 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
3162 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
3163 struct btrfs_chunk *chunk;
3164 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3165 + mutex_lock(&fs_info->chunk_mutex);
3166 ret = read_one_chunk(&found_key, leaf, chunk);
3167 + mutex_unlock(&fs_info->chunk_mutex);
3168 if (ret)
3169 goto error;
3170 }
3171 @@ -7342,7 +7373,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
3172 }
3173 ret = 0;
3174 error:
3175 - mutex_unlock(&fs_info->chunk_mutex);
3176 mutex_unlock(&uuid_mutex);
3177
3178 btrfs_free_path(path);
3179 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
3180 index 2e4764fd18727..3367a8194f24b 100644
3181 --- a/fs/ceph/dir.c
3182 +++ b/fs/ceph/dir.c
3183 @@ -920,6 +920,10 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry,
3184 req->r_num_caps = 2;
3185 req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
3186 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
3187 + if (as_ctx.pagelist) {
3188 + req->r_pagelist = as_ctx.pagelist;
3189 + as_ctx.pagelist = NULL;
3190 + }
3191 err = ceph_mdsc_do_request(mdsc, dir, req);
3192 if (!err && !req->r_reply_info.head->is_dentry)
3193 err = ceph_handle_notrace_create(dir, dentry);
3194 diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
3195 index b79fe6549df6f..701bc3f4d4ba1 100644
3196 --- a/fs/ceph/mds_client.c
3197 +++ b/fs/ceph/mds_client.c
3198 @@ -3091,8 +3091,10 @@ static void handle_session(struct ceph_mds_session *session,
3199 goto bad;
3200 /* version >= 3, feature bits */
3201 ceph_decode_32_safe(&p, end, len, bad);
3202 - ceph_decode_64_safe(&p, end, features, bad);
3203 - p += len - sizeof(features);
3204 + if (len) {
3205 + ceph_decode_64_safe(&p, end, features, bad);
3206 + p += len - sizeof(features);
3207 + }
3208 }
3209
3210 mutex_lock(&mdsc->mutex);
3211 diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
3212 index 14265b4bbcc00..2fc96f7923ee5 100644
3213 --- a/fs/cifs/smb2misc.c
3214 +++ b/fs/cifs/smb2misc.c
3215 @@ -509,15 +509,31 @@ cifs_ses_oplock_break(struct work_struct *work)
3216 kfree(lw);
3217 }
3218
3219 +static void
3220 +smb2_queue_pending_open_break(struct tcon_link *tlink, __u8 *lease_key,
3221 + __le32 new_lease_state)
3222 +{
3223 + struct smb2_lease_break_work *lw;
3224 +
3225 + lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
3226 + if (!lw) {
3227 + cifs_put_tlink(tlink);
3228 + return;
3229 + }
3230 +
3231 + INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
3232 + lw->tlink = tlink;
3233 + lw->lease_state = new_lease_state;
3234 + memcpy(lw->lease_key, lease_key, SMB2_LEASE_KEY_SIZE);
3235 + queue_work(cifsiod_wq, &lw->lease_break);
3236 +}
3237 +
3238 static bool
3239 -smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
3240 - struct smb2_lease_break_work *lw)
3241 +smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
3242 {
3243 - bool found;
3244 __u8 lease_state;
3245 struct list_head *tmp;
3246 struct cifsFileInfo *cfile;
3247 - struct cifs_pending_open *open;
3248 struct cifsInodeInfo *cinode;
3249 int ack_req = le32_to_cpu(rsp->Flags &
3250 SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
3251 @@ -556,22 +572,29 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
3252 &cinode->flags);
3253
3254 cifs_queue_oplock_break(cfile);
3255 - kfree(lw);
3256 return true;
3257 }
3258
3259 - found = false;
3260 + return false;
3261 +}
3262 +
3263 +static struct cifs_pending_open *
3264 +smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon,
3265 + struct smb2_lease_break *rsp)
3266 +{
3267 + __u8 lease_state = le32_to_cpu(rsp->NewLeaseState);
3268 + int ack_req = le32_to_cpu(rsp->Flags &
3269 + SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
3270 + struct cifs_pending_open *open;
3271 + struct cifs_pending_open *found = NULL;
3272 +
3273 list_for_each_entry(open, &tcon->pending_opens, olist) {
3274 if (memcmp(open->lease_key, rsp->LeaseKey,
3275 SMB2_LEASE_KEY_SIZE))
3276 continue;
3277
3278 if (!found && ack_req) {
3279 - found = true;
3280 - memcpy(lw->lease_key, open->lease_key,
3281 - SMB2_LEASE_KEY_SIZE);
3282 - lw->tlink = cifs_get_tlink(open->tlink);
3283 - queue_work(cifsiod_wq, &lw->lease_break);
3284 + found = open;
3285 }
3286
3287 cifs_dbg(FYI, "found in the pending open list\n");
3288 @@ -592,14 +615,7 @@ smb2_is_valid_lease_break(char *buffer)
3289 struct TCP_Server_Info *server;
3290 struct cifs_ses *ses;
3291 struct cifs_tcon *tcon;
3292 - struct smb2_lease_break_work *lw;
3293 -
3294 - lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
3295 - if (!lw)
3296 - return false;
3297 -
3298 - INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
3299 - lw->lease_state = rsp->NewLeaseState;
3300 + struct cifs_pending_open *open;
3301
3302 cifs_dbg(FYI, "Checking for lease break\n");
3303
3304 @@ -617,11 +633,27 @@ smb2_is_valid_lease_break(char *buffer)
3305 spin_lock(&tcon->open_file_lock);
3306 cifs_stats_inc(
3307 &tcon->stats.cifs_stats.num_oplock_brks);
3308 - if (smb2_tcon_has_lease(tcon, rsp, lw)) {
3309 + if (smb2_tcon_has_lease(tcon, rsp)) {
3310 spin_unlock(&tcon->open_file_lock);
3311 spin_unlock(&cifs_tcp_ses_lock);
3312 return true;
3313 }
3314 + open = smb2_tcon_find_pending_open_lease(tcon,
3315 + rsp);
3316 + if (open) {
3317 + __u8 lease_key[SMB2_LEASE_KEY_SIZE];
3318 + struct tcon_link *tlink;
3319 +
3320 + tlink = cifs_get_tlink(open->tlink);
3321 + memcpy(lease_key, open->lease_key,
3322 + SMB2_LEASE_KEY_SIZE);
3323 + spin_unlock(&tcon->open_file_lock);
3324 + spin_unlock(&cifs_tcp_ses_lock);
3325 + smb2_queue_pending_open_break(tlink,
3326 + lease_key,
3327 + rsp->NewLeaseState);
3328 + return true;
3329 + }
3330 spin_unlock(&tcon->open_file_lock);
3331
3332 if (tcon->crfid.is_valid &&
3333 @@ -639,7 +671,6 @@ smb2_is_valid_lease_break(char *buffer)
3334 }
3335 }
3336 spin_unlock(&cifs_tcp_ses_lock);
3337 - kfree(lw);
3338 cifs_dbg(FYI, "Can not process lease break - no lease matched\n");
3339 return false;
3340 }
3341 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3342 index 06b1a86d76b18..7ff05c06f2a4c 100644
3343 --- a/fs/cifs/smb2pdu.c
3344 +++ b/fs/cifs/smb2pdu.c
3345 @@ -1323,6 +1323,8 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
3346 spnego_key = cifs_get_spnego_key(ses);
3347 if (IS_ERR(spnego_key)) {
3348 rc = PTR_ERR(spnego_key);
3349 + if (rc == -ENOKEY)
3350 + cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n");
3351 spnego_key = NULL;
3352 goto out;
3353 }
3354 diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
3355 index fda7d3f5b4be5..432c3febea6df 100644
3356 --- a/fs/ext2/ialloc.c
3357 +++ b/fs/ext2/ialloc.c
3358 @@ -80,6 +80,7 @@ static void ext2_release_inode(struct super_block *sb, int group, int dir)
3359 if (dir)
3360 le16_add_cpu(&desc->bg_used_dirs_count, -1);
3361 spin_unlock(sb_bgl_lock(EXT2_SB(sb), group));
3362 + percpu_counter_inc(&EXT2_SB(sb)->s_freeinodes_counter);
3363 if (dir)
3364 percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter);
3365 mark_buffer_dirty(bh);
3366 @@ -528,7 +529,7 @@ got:
3367 goto fail;
3368 }
3369
3370 - percpu_counter_add(&sbi->s_freeinodes_counter, -1);
3371 + percpu_counter_dec(&sbi->s_freeinodes_counter);
3372 if (S_ISDIR(mode))
3373 percpu_counter_inc(&sbi->s_dirs_counter);
3374
3375 diff --git a/fs/minix/inode.c b/fs/minix/inode.c
3376 index 0dd929346f3f3..7b09a9158e401 100644
3377 --- a/fs/minix/inode.c
3378 +++ b/fs/minix/inode.c
3379 @@ -150,8 +150,10 @@ static int minix_remount (struct super_block * sb, int * flags, char * data)
3380 return 0;
3381 }
3382
3383 -static bool minix_check_superblock(struct minix_sb_info *sbi)
3384 +static bool minix_check_superblock(struct super_block *sb)
3385 {
3386 + struct minix_sb_info *sbi = minix_sb(sb);
3387 +
3388 if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
3389 return false;
3390
3391 @@ -161,7 +163,7 @@ static bool minix_check_superblock(struct minix_sb_info *sbi)
3392 * of indirect blocks which places the limit well above U32_MAX.
3393 */
3394 if (sbi->s_version == MINIX_V1 &&
3395 - sbi->s_max_size > (7 + 512 + 512*512) * BLOCK_SIZE)
3396 + sb->s_maxbytes > (7 + 512 + 512*512) * BLOCK_SIZE)
3397 return false;
3398
3399 return true;
3400 @@ -202,7 +204,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
3401 sbi->s_zmap_blocks = ms->s_zmap_blocks;
3402 sbi->s_firstdatazone = ms->s_firstdatazone;
3403 sbi->s_log_zone_size = ms->s_log_zone_size;
3404 - sbi->s_max_size = ms->s_max_size;
3405 + s->s_maxbytes = ms->s_max_size;
3406 s->s_magic = ms->s_magic;
3407 if (s->s_magic == MINIX_SUPER_MAGIC) {
3408 sbi->s_version = MINIX_V1;
3409 @@ -233,7 +235,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
3410 sbi->s_zmap_blocks = m3s->s_zmap_blocks;
3411 sbi->s_firstdatazone = m3s->s_firstdatazone;
3412 sbi->s_log_zone_size = m3s->s_log_zone_size;
3413 - sbi->s_max_size = m3s->s_max_size;
3414 + s->s_maxbytes = m3s->s_max_size;
3415 sbi->s_ninodes = m3s->s_ninodes;
3416 sbi->s_nzones = m3s->s_zones;
3417 sbi->s_dirsize = 64;
3418 @@ -245,7 +247,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
3419 } else
3420 goto out_no_fs;
3421
3422 - if (!minix_check_superblock(sbi))
3423 + if (!minix_check_superblock(s))
3424 goto out_illegal_sb;
3425
3426 /*
3427 diff --git a/fs/minix/itree_v1.c b/fs/minix/itree_v1.c
3428 index 046cc96ee7adb..1fed906042aa8 100644
3429 --- a/fs/minix/itree_v1.c
3430 +++ b/fs/minix/itree_v1.c
3431 @@ -29,12 +29,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
3432 if (block < 0) {
3433 printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n",
3434 block, inode->i_sb->s_bdev);
3435 - } else if (block >= (minix_sb(inode->i_sb)->s_max_size/BLOCK_SIZE)) {
3436 - if (printk_ratelimit())
3437 - printk("MINIX-fs: block_to_path: "
3438 - "block %ld too big on dev %pg\n",
3439 - block, inode->i_sb->s_bdev);
3440 - } else if (block < 7) {
3441 + return 0;
3442 + }
3443 + if ((u64)block * BLOCK_SIZE >= inode->i_sb->s_maxbytes)
3444 + return 0;
3445 +
3446 + if (block < 7) {
3447 offsets[n++] = block;
3448 } else if ((block -= 7) < 512) {
3449 offsets[n++] = 7;
3450 diff --git a/fs/minix/itree_v2.c b/fs/minix/itree_v2.c
3451 index f7fc7eccccccd..9d00f31a2d9d1 100644
3452 --- a/fs/minix/itree_v2.c
3453 +++ b/fs/minix/itree_v2.c
3454 @@ -32,13 +32,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
3455 if (block < 0) {
3456 printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n",
3457 block, sb->s_bdev);
3458 - } else if ((u64)block * (u64)sb->s_blocksize >=
3459 - minix_sb(sb)->s_max_size) {
3460 - if (printk_ratelimit())
3461 - printk("MINIX-fs: block_to_path: "
3462 - "block %ld too big on dev %pg\n",
3463 - block, sb->s_bdev);
3464 - } else if (block < DIRCOUNT) {
3465 + return 0;
3466 + }
3467 + if ((u64)block * (u64)sb->s_blocksize >= sb->s_maxbytes)
3468 + return 0;
3469 +
3470 + if (block < DIRCOUNT) {
3471 offsets[n++] = block;
3472 } else if ((block -= DIRCOUNT) < INDIRCOUNT(sb)) {
3473 offsets[n++] = DIRCOUNT;
3474 diff --git a/fs/minix/minix.h b/fs/minix/minix.h
3475 index df081e8afcc3c..168d45d3de73e 100644
3476 --- a/fs/minix/minix.h
3477 +++ b/fs/minix/minix.h
3478 @@ -32,7 +32,6 @@ struct minix_sb_info {
3479 unsigned long s_zmap_blocks;
3480 unsigned long s_firstdatazone;
3481 unsigned long s_log_zone_size;
3482 - unsigned long s_max_size;
3483 int s_dirsize;
3484 int s_namelen;
3485 struct buffer_head ** s_imap;
3486 diff --git a/fs/nfs/file.c b/fs/nfs/file.c
3487 index 95dc90570786c..387a2cfa7e172 100644
3488 --- a/fs/nfs/file.c
3489 +++ b/fs/nfs/file.c
3490 @@ -140,6 +140,7 @@ static int
3491 nfs_file_flush(struct file *file, fl_owner_t id)
3492 {
3493 struct inode *inode = file_inode(file);
3494 + errseq_t since;
3495
3496 dprintk("NFS: flush(%pD2)\n", file);
3497
3498 @@ -148,7 +149,9 @@ nfs_file_flush(struct file *file, fl_owner_t id)
3499 return 0;
3500
3501 /* Flush writes to the server and return any errors */
3502 - return nfs_wb_all(inode);
3503 + since = filemap_sample_wb_err(file->f_mapping);
3504 + nfs_wb_all(inode);
3505 + return filemap_check_wb_err(file->f_mapping, since);
3506 }
3507
3508 ssize_t
3509 @@ -580,12 +583,14 @@ static const struct vm_operations_struct nfs_file_vm_ops = {
3510 .page_mkwrite = nfs_vm_page_mkwrite,
3511 };
3512
3513 -static int nfs_need_check_write(struct file *filp, struct inode *inode)
3514 +static int nfs_need_check_write(struct file *filp, struct inode *inode,
3515 + int error)
3516 {
3517 struct nfs_open_context *ctx;
3518
3519 ctx = nfs_file_open_context(filp);
3520 - if (nfs_ctx_key_to_expire(ctx, inode))
3521 + if (nfs_error_is_fatal_on_server(error) ||
3522 + nfs_ctx_key_to_expire(ctx, inode))
3523 return 1;
3524 return 0;
3525 }
3526 @@ -596,6 +601,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
3527 struct inode *inode = file_inode(file);
3528 unsigned long written = 0;
3529 ssize_t result;
3530 + errseq_t since;
3531 + int error;
3532
3533 result = nfs_key_timeout_notify(file, inode);
3534 if (result)
3535 @@ -620,6 +627,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
3536 if (iocb->ki_pos > i_size_read(inode))
3537 nfs_revalidate_mapping(inode, file->f_mapping);
3538
3539 + since = filemap_sample_wb_err(file->f_mapping);
3540 nfs_start_io_write(inode);
3541 result = generic_write_checks(iocb, from);
3542 if (result > 0) {
3543 @@ -638,7 +646,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
3544 goto out;
3545
3546 /* Return error values */
3547 - if (nfs_need_check_write(file, inode)) {
3548 + error = filemap_check_wb_err(file->f_mapping, since);
3549 + if (nfs_need_check_write(file, inode, error)) {
3550 int err = nfs_wb_all(inode);
3551 if (err < 0)
3552 result = err;
3553 diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
3554 index fb55c04cdc6bd..534b6fd70ffdb 100644
3555 --- a/fs/nfs/nfs4file.c
3556 +++ b/fs/nfs/nfs4file.c
3557 @@ -109,6 +109,7 @@ static int
3558 nfs4_file_flush(struct file *file, fl_owner_t id)
3559 {
3560 struct inode *inode = file_inode(file);
3561 + errseq_t since;
3562
3563 dprintk("NFS: flush(%pD2)\n", file);
3564
3565 @@ -124,7 +125,9 @@ nfs4_file_flush(struct file *file, fl_owner_t id)
3566 return filemap_fdatawrite(file->f_mapping);
3567
3568 /* Flush writes to the server and return any errors */
3569 - return nfs_wb_all(inode);
3570 + since = filemap_sample_wb_err(file->f_mapping);
3571 + nfs_wb_all(inode);
3572 + return filemap_check_wb_err(file->f_mapping, since);
3573 }
3574
3575 #ifdef CONFIG_NFS_V4_2
3576 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3577 index 1a1bd2fe6e98d..d0cb827b72cfa 100644
3578 --- a/fs/nfs/nfs4proc.c
3579 +++ b/fs/nfs/nfs4proc.c
3580 @@ -5811,8 +5811,6 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
3581 return ret;
3582 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
3583 return -ENOENT;
3584 - if (buflen < label.len)
3585 - return -ERANGE;
3586 return 0;
3587 }
3588
3589 diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
3590 index 7c0ff1a3b5914..677751bc3a334 100644
3591 --- a/fs/nfs/nfs4xdr.c
3592 +++ b/fs/nfs/nfs4xdr.c
3593 @@ -4169,7 +4169,11 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap,
3594 return -EIO;
3595 if (len < NFS4_MAXLABELLEN) {
3596 if (label) {
3597 - memcpy(label->label, p, len);
3598 + if (label->len) {
3599 + if (label->len < len)
3600 + return -ERANGE;
3601 + memcpy(label->label, p, len);
3602 + }
3603 label->len = len;
3604 label->pi = pi;
3605 label->lfs = lfs;
3606 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
3607 index 9461bd3e1c0c8..0a8cd8e59a92c 100644
3608 --- a/fs/ocfs2/ocfs2.h
3609 +++ b/fs/ocfs2/ocfs2.h
3610 @@ -326,8 +326,8 @@ struct ocfs2_super
3611 spinlock_t osb_lock;
3612 u32 s_next_generation;
3613 unsigned long osb_flags;
3614 - s16 s_inode_steal_slot;
3615 - s16 s_meta_steal_slot;
3616 + u16 s_inode_steal_slot;
3617 + u16 s_meta_steal_slot;
3618 atomic_t s_num_inodes_stolen;
3619 atomic_t s_num_meta_stolen;
3620
3621 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
3622 index 503e724d39f53..5e0eaea474055 100644
3623 --- a/fs/ocfs2/suballoc.c
3624 +++ b/fs/ocfs2/suballoc.c
3625 @@ -879,9 +879,9 @@ static void __ocfs2_set_steal_slot(struct ocfs2_super *osb, int slot, int type)
3626 {
3627 spin_lock(&osb->osb_lock);
3628 if (type == INODE_ALLOC_SYSTEM_INODE)
3629 - osb->s_inode_steal_slot = slot;
3630 + osb->s_inode_steal_slot = (u16)slot;
3631 else if (type == EXTENT_ALLOC_SYSTEM_INODE)
3632 - osb->s_meta_steal_slot = slot;
3633 + osb->s_meta_steal_slot = (u16)slot;
3634 spin_unlock(&osb->osb_lock);
3635 }
3636
3637 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
3638 index c81e86c623807..70d8857b161df 100644
3639 --- a/fs/ocfs2/super.c
3640 +++ b/fs/ocfs2/super.c
3641 @@ -78,7 +78,7 @@ struct mount_options
3642 unsigned long commit_interval;
3643 unsigned long mount_opt;
3644 unsigned int atime_quantum;
3645 - signed short slot;
3646 + unsigned short slot;
3647 int localalloc_opt;
3648 unsigned int resv_level;
3649 int dir_resv_level;
3650 @@ -1334,7 +1334,7 @@ static int ocfs2_parse_options(struct super_block *sb,
3651 goto bail;
3652 }
3653 if (option)
3654 - mopt->slot = (s16)option;
3655 + mopt->slot = (u16)option;
3656 break;
3657 case Opt_commit:
3658 if (match_int(&args[0], &option)) {
3659 diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
3660 index a5612abc09363..bcd4fd5ad1751 100644
3661 --- a/fs/orangefs/file.c
3662 +++ b/fs/orangefs/file.c
3663 @@ -311,23 +311,8 @@ static ssize_t orangefs_file_read_iter(struct kiocb *iocb,
3664 struct iov_iter *iter)
3665 {
3666 int ret;
3667 - struct orangefs_read_options *ro;
3668 -
3669 orangefs_stats.reads++;
3670
3671 - /*
3672 - * Remember how they set "count" in read(2) or pread(2) or whatever -
3673 - * users can use count as a knob to control orangefs io size and later
3674 - * we can try to help them fill as many pages as possible in readpage.
3675 - */
3676 - if (!iocb->ki_filp->private_data) {
3677 - iocb->ki_filp->private_data = kmalloc(sizeof *ro, GFP_KERNEL);
3678 - if (!iocb->ki_filp->private_data)
3679 - return(ENOMEM);
3680 - ro = iocb->ki_filp->private_data;
3681 - ro->blksiz = iter->count;
3682 - }
3683 -
3684 down_read(&file_inode(iocb->ki_filp)->i_rwsem);
3685 ret = orangefs_revalidate_mapping(file_inode(iocb->ki_filp));
3686 if (ret)
3687 @@ -615,12 +600,6 @@ static int orangefs_lock(struct file *filp, int cmd, struct file_lock *fl)
3688 return rc;
3689 }
3690
3691 -static int orangefs_file_open(struct inode * inode, struct file *file)
3692 -{
3693 - file->private_data = NULL;
3694 - return generic_file_open(inode, file);
3695 -}
3696 -
3697 static int orangefs_flush(struct file *file, fl_owner_t id)
3698 {
3699 /*
3700 @@ -634,9 +613,6 @@ static int orangefs_flush(struct file *file, fl_owner_t id)
3701 struct inode *inode = file->f_mapping->host;
3702 int r;
3703
3704 - kfree(file->private_data);
3705 - file->private_data = NULL;
3706 -
3707 if (inode->i_state & I_DIRTY_TIME) {
3708 spin_lock(&inode->i_lock);
3709 inode->i_state &= ~I_DIRTY_TIME;
3710 @@ -659,7 +635,7 @@ const struct file_operations orangefs_file_operations = {
3711 .lock = orangefs_lock,
3712 .unlocked_ioctl = orangefs_ioctl,
3713 .mmap = orangefs_file_mmap,
3714 - .open = orangefs_file_open,
3715 + .open = generic_file_open,
3716 .flush = orangefs_flush,
3717 .release = orangefs_file_release,
3718 .fsync = orangefs_fsync,
3719 diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
3720 index efb12197da181..636892ffec0ba 100644
3721 --- a/fs/orangefs/inode.c
3722 +++ b/fs/orangefs/inode.c
3723 @@ -259,46 +259,19 @@ static int orangefs_readpage(struct file *file, struct page *page)
3724 pgoff_t index; /* which page */
3725 struct page *next_page;
3726 char *kaddr;
3727 - struct orangefs_read_options *ro = file->private_data;
3728 loff_t read_size;
3729 - loff_t roundedup;
3730 int buffer_index = -1; /* orangefs shared memory slot */
3731 int slot_index; /* index into slot */
3732 int remaining;
3733
3734 /*
3735 - * If they set some miniscule size for "count" in read(2)
3736 - * (for example) then let's try to read a page, or the whole file
3737 - * if it is smaller than a page. Once "count" goes over a page
3738 - * then lets round up to the highest page size multiple that is
3739 - * less than or equal to "count" and do that much orangefs IO and
3740 - * try to fill as many pages as we can from it.
3741 - *
3742 - * "count" should be represented in ro->blksiz.
3743 - *
3744 - * inode->i_size = file size.
3745 + * Get up to this many bytes from Orangefs at a time and try
3746 + * to fill them into the page cache at once. Tests with dd made
3747 + * this seem like a reasonable static number, if there was
3748 + * interest perhaps this number could be made setable through
3749 + * sysfs...
3750 */
3751 - if (ro) {
3752 - if (ro->blksiz < PAGE_SIZE) {
3753 - if (inode->i_size < PAGE_SIZE)
3754 - read_size = inode->i_size;
3755 - else
3756 - read_size = PAGE_SIZE;
3757 - } else {
3758 - roundedup = ((PAGE_SIZE - 1) & ro->blksiz) ?
3759 - ((ro->blksiz + PAGE_SIZE) & ~(PAGE_SIZE -1)) :
3760 - ro->blksiz;
3761 - if (roundedup > inode->i_size)
3762 - read_size = inode->i_size;
3763 - else
3764 - read_size = roundedup;
3765 -
3766 - }
3767 - } else {
3768 - read_size = PAGE_SIZE;
3769 - }
3770 - if (!read_size)
3771 - read_size = PAGE_SIZE;
3772 + read_size = 524288;
3773
3774 if (PageDirty(page))
3775 orangefs_launder_page(page);
3776 diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
3777 index 34a6c99fa29bd..3003007681a05 100644
3778 --- a/fs/orangefs/orangefs-kernel.h
3779 +++ b/fs/orangefs/orangefs-kernel.h
3780 @@ -239,10 +239,6 @@ struct orangefs_write_range {
3781 kgid_t gid;
3782 };
3783
3784 -struct orangefs_read_options {
3785 - ssize_t blksiz;
3786 -};
3787 -
3788 extern struct orangefs_stats orangefs_stats;
3789
3790 /*
3791 diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
3792 index 826dad0243dcc..a6ae2428e4c96 100644
3793 --- a/fs/ubifs/journal.c
3794 +++ b/fs/ubifs/journal.c
3795 @@ -539,7 +539,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
3796 const struct fscrypt_name *nm, const struct inode *inode,
3797 int deletion, int xent)
3798 {
3799 - int err, dlen, ilen, len, lnum, ino_offs, dent_offs;
3800 + int err, dlen, ilen, len, lnum, ino_offs, dent_offs, orphan_added = 0;
3801 int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir);
3802 int last_reference = !!(deletion && inode->i_nlink == 0);
3803 struct ubifs_inode *ui = ubifs_inode(inode);
3804 @@ -630,6 +630,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
3805 goto out_finish;
3806 }
3807 ui->del_cmtno = c->cmt_no;
3808 + orphan_added = 1;
3809 }
3810
3811 err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync);
3812 @@ -702,7 +703,7 @@ out_release:
3813 kfree(dent);
3814 out_ro:
3815 ubifs_ro_mode(c, err);
3816 - if (last_reference)
3817 + if (orphan_added)
3818 ubifs_delete_orphan(c, inode->i_ino);
3819 finish_reservation(c);
3820 return err;
3821 @@ -1217,7 +1218,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
3822 void *p;
3823 union ubifs_key key;
3824 struct ubifs_dent_node *dent, *dent2;
3825 - int err, dlen1, dlen2, ilen, lnum, offs, len;
3826 + int err, dlen1, dlen2, ilen, lnum, offs, len, orphan_added = 0;
3827 int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
3828 int last_reference = !!(new_inode && new_inode->i_nlink == 0);
3829 int move = (old_dir != new_dir);
3830 @@ -1333,6 +1334,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
3831 goto out_finish;
3832 }
3833 new_ui->del_cmtno = c->cmt_no;
3834 + orphan_added = 1;
3835 }
3836
3837 err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync);
3838 @@ -1414,7 +1416,7 @@ out_release:
3839 release_head(c, BASEHD);
3840 out_ro:
3841 ubifs_ro_mode(c, err);
3842 - if (last_reference)
3843 + if (orphan_added)
3844 ubifs_delete_orphan(c, new_inode->i_ino);
3845 out_finish:
3846 finish_reservation(c);
3847 diff --git a/fs/ufs/super.c b/fs/ufs/super.c
3848 index 1da0be667409b..e3b69fb280e8c 100644
3849 --- a/fs/ufs/super.c
3850 +++ b/fs/ufs/super.c
3851 @@ -101,7 +101,7 @@ static struct inode *ufs_nfs_get_inode(struct super_block *sb, u64 ino, u32 gene
3852 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
3853 struct inode *inode;
3854
3855 - if (ino < UFS_ROOTINO || ino > uspi->s_ncg * uspi->s_ipg)
3856 + if (ino < UFS_ROOTINO || ino > (u64)uspi->s_ncg * uspi->s_ipg)
3857 return ERR_PTR(-ESTALE);
3858
3859 inode = ufs_iget(sb, ino);
3860 diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
3861 index 864849e942c45..c1a8d4a41bb16 100644
3862 --- a/include/crypto/if_alg.h
3863 +++ b/include/crypto/if_alg.h
3864 @@ -135,6 +135,7 @@ struct af_alg_async_req {
3865 * SG?
3866 * @enc: Cryptographic operation to be performed when
3867 * recvmsg is invoked.
3868 + * @init: True if metadata has been sent.
3869 * @len: Length of memory allocated for this data structure.
3870 */
3871 struct af_alg_ctx {
3872 @@ -151,6 +152,7 @@ struct af_alg_ctx {
3873 bool more;
3874 bool merge;
3875 bool enc;
3876 + bool init;
3877
3878 unsigned int len;
3879 };
3880 @@ -226,7 +228,7 @@ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
3881 void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
3882 size_t dst_offset);
3883 void af_alg_wmem_wakeup(struct sock *sk);
3884 -int af_alg_wait_for_data(struct sock *sk, unsigned flags);
3885 +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min);
3886 int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
3887 unsigned int ivsize);
3888 ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
3889 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
3890 index 1e5dad8b8e59b..ed870da78326b 100644
3891 --- a/include/linux/intel-iommu.h
3892 +++ b/include/linux/intel-iommu.h
3893 @@ -359,8 +359,8 @@ enum {
3894
3895 #define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
3896 #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
3897 -#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
3898 -#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
3899 +#define QI_DEV_EIOTLB_GLOB(g) ((u64)(g) & 0x1)
3900 +#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32)
3901 #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
3902 #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
3903 #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
3904 diff --git a/include/linux/irq.h b/include/linux/irq.h
3905 index f8755e5fcd742..e9e69c511ea92 100644
3906 --- a/include/linux/irq.h
3907 +++ b/include/linux/irq.h
3908 @@ -211,6 +211,8 @@ struct irq_data {
3909 * IRQD_CAN_RESERVE - Can use reservation mode
3910 * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change
3911 * required
3912 + * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call
3913 + * irq_chip::irq_set_affinity() when deactivated.
3914 */
3915 enum {
3916 IRQD_TRIGGER_MASK = 0xf,
3917 @@ -234,6 +236,7 @@ enum {
3918 IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
3919 IRQD_CAN_RESERVE = (1 << 26),
3920 IRQD_MSI_NOMASK_QUIRK = (1 << 27),
3921 + IRQD_AFFINITY_ON_ACTIVATE = (1 << 29),
3922 };
3923
3924 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
3925 @@ -408,6 +411,16 @@ static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
3926 return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
3927 }
3928
3929 +static inline void irqd_set_affinity_on_activate(struct irq_data *d)
3930 +{
3931 + __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
3932 +}
3933 +
3934 +static inline bool irqd_affinity_on_activate(struct irq_data *d)
3935 +{
3936 + return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
3937 +}
3938 +
3939 #undef __irqd_to_state
3940
3941 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
3942 diff --git a/include/net/sock.h b/include/net/sock.h
3943 index 8263bbf756a22..6d9c1131fe5c8 100644
3944 --- a/include/net/sock.h
3945 +++ b/include/net/sock.h
3946 @@ -849,6 +849,8 @@ static inline int sk_memalloc_socks(void)
3947 {
3948 return static_branch_unlikely(&memalloc_socks_key);
3949 }
3950 +
3951 +void __receive_sock(struct file *file);
3952 #else
3953
3954 static inline int sk_memalloc_socks(void)
3955 @@ -856,6 +858,8 @@ static inline int sk_memalloc_socks(void)
3956 return 0;
3957 }
3958
3959 +static inline void __receive_sock(struct file *file)
3960 +{ }
3961 #endif
3962
3963 static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
3964 diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
3965 index df73685de1144..3b1d0a4725a49 100644
3966 --- a/kernel/irq/manage.c
3967 +++ b/kernel/irq/manage.c
3968 @@ -281,12 +281,16 @@ static bool irq_set_affinity_deactivated(struct irq_data *data,
3969 struct irq_desc *desc = irq_data_to_desc(data);
3970
3971 /*
3972 + * Handle irq chips which can handle affinity only in activated
3973 + * state correctly
3974 + *
3975 * If the interrupt is not yet activated, just store the affinity
3976 * mask and do not call the chip driver at all. On activation the
3977 * driver has to make sure anyway that the interrupt is in a
3978 * useable state so startup works.
3979 */
3980 - if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || irqd_is_activated(data))
3981 + if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
3982 + irqd_is_activated(data) || !irqd_affinity_on_activate(data))
3983 return false;
3984
3985 cpumask_copy(desc->irq_common_data.affinity, mask);
3986 diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
3987 index 8f557fa1f4fe4..c6c7e187ae748 100644
3988 --- a/kernel/irq/pm.c
3989 +++ b/kernel/irq/pm.c
3990 @@ -185,14 +185,18 @@ void rearm_wake_irq(unsigned int irq)
3991 unsigned long flags;
3992 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
3993
3994 - if (!desc || !(desc->istate & IRQS_SUSPENDED) ||
3995 - !irqd_is_wakeup_set(&desc->irq_data))
3996 + if (!desc)
3997 return;
3998
3999 + if (!(desc->istate & IRQS_SUSPENDED) ||
4000 + !irqd_is_wakeup_set(&desc->irq_data))
4001 + goto unlock;
4002 +
4003 desc->istate &= ~IRQS_SUSPENDED;
4004 irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED);
4005 __enable_irq(desc);
4006
4007 +unlock:
4008 irq_put_desc_busunlock(desc, flags);
4009 }
4010
4011 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
4012 index 0a967db226d8a..bbff4bccb885d 100644
4013 --- a/kernel/kprobes.c
4014 +++ b/kernel/kprobes.c
4015 @@ -2104,6 +2104,13 @@ static void kill_kprobe(struct kprobe *p)
4016 * the original probed function (which will be freed soon) any more.
4017 */
4018 arch_remove_kprobe(p);
4019 +
4020 + /*
4021 + * The module is going away. We should disarm the kprobe which
4022 + * is using ftrace.
4023 + */
4024 + if (kprobe_ftrace(p))
4025 + disarm_kprobe_ftrace(p);
4026 }
4027
4028 /* Disable one kprobe */
4029 diff --git a/kernel/module.c b/kernel/module.c
4030 index 6baa1080cdb76..819c5d3b4c295 100644
4031 --- a/kernel/module.c
4032 +++ b/kernel/module.c
4033 @@ -1517,18 +1517,34 @@ struct module_sect_attrs {
4034 struct module_sect_attr attrs[0];
4035 };
4036
4037 +#define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4))
4038 static ssize_t module_sect_read(struct file *file, struct kobject *kobj,
4039 struct bin_attribute *battr,
4040 char *buf, loff_t pos, size_t count)
4041 {
4042 struct module_sect_attr *sattr =
4043 container_of(battr, struct module_sect_attr, battr);
4044 + char bounce[MODULE_SECT_READ_SIZE + 1];
4045 + size_t wrote;
4046
4047 if (pos != 0)
4048 return -EINVAL;
4049
4050 - return sprintf(buf, "0x%px\n",
4051 - kallsyms_show_value(file->f_cred) ? (void *)sattr->address : NULL);
4052 + /*
4053 + * Since we're a binary read handler, we must account for the
4054 + * trailing NUL byte that sprintf will write: if "buf" is
4055 + * too small to hold the NUL, or the NUL is exactly the last
4056 + * byte, the read will look like it got truncated by one byte.
4057 + * Since there is no way to ask sprintf nicely to not write
4058 + * the NUL, we have to use a bounce buffer.
4059 + */
4060 + wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n",
4061 + kallsyms_show_value(file->f_cred)
4062 + ? (void *)sattr->address : NULL);
4063 + count = min(count, wrote);
4064 + memcpy(buf, bounce, count);
4065 +
4066 + return count;
4067 }
4068
4069 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
4070 @@ -1577,7 +1593,7 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info)
4071 goto out;
4072 sect_attrs->nsections++;
4073 sattr->battr.read = module_sect_read;
4074 - sattr->battr.size = 3 /* "0x", "\n" */ + (BITS_PER_LONG / 4);
4075 + sattr->battr.size = MODULE_SECT_READ_SIZE;
4076 sattr->battr.attr.mode = 0400;
4077 *(gattr++) = &(sattr++)->battr;
4078 }
4079 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
4080 index 15160d707da45..705852c1724aa 100644
4081 --- a/kernel/trace/ftrace.c
4082 +++ b/kernel/trace/ftrace.c
4083 @@ -5699,8 +5699,11 @@ static int referenced_filters(struct dyn_ftrace *rec)
4084 int cnt = 0;
4085
4086 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
4087 - if (ops_references_rec(ops, rec))
4088 - cnt++;
4089 + if (ops_references_rec(ops, rec)) {
4090 + cnt++;
4091 + if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
4092 + rec->flags |= FTRACE_FL_REGS;
4093 + }
4094 }
4095
4096 return cnt;
4097 @@ -5877,8 +5880,8 @@ void ftrace_module_enable(struct module *mod)
4098 if (ftrace_start_up)
4099 cnt += referenced_filters(rec);
4100
4101 - /* This clears FTRACE_FL_DISABLED */
4102 - rec->flags = cnt;
4103 + rec->flags &= ~FTRACE_FL_DISABLED;
4104 + rec->flags += cnt;
4105
4106 if (ftrace_start_up && cnt) {
4107 int failed = __ftrace_replace_code(rec, 1);
4108 @@ -6459,12 +6462,12 @@ void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
4109 if (enable) {
4110 register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
4111 tr);
4112 - register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
4113 + register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
4114 tr);
4115 } else {
4116 unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
4117 tr);
4118 - unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
4119 + unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
4120 tr);
4121 }
4122 }
4123 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
4124 index 721947b9962db..f9c2bdbbd8936 100644
4125 --- a/kernel/trace/trace.c
4126 +++ b/kernel/trace/trace.c
4127 @@ -5686,7 +5686,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4128 }
4129
4130 /* If trace pipe files are being read, we can't change the tracer */
4131 - if (tr->current_trace->ref) {
4132 + if (tr->trace_ref) {
4133 ret = -EBUSY;
4134 goto out;
4135 }
4136 @@ -5902,7 +5902,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
4137
4138 nonseekable_open(inode, filp);
4139
4140 - tr->current_trace->ref++;
4141 + tr->trace_ref++;
4142 out:
4143 mutex_unlock(&trace_types_lock);
4144 return ret;
4145 @@ -5921,7 +5921,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
4146
4147 mutex_lock(&trace_types_lock);
4148
4149 - tr->current_trace->ref--;
4150 + tr->trace_ref--;
4151
4152 if (iter->trace->pipe_close)
4153 iter->trace->pipe_close(iter);
4154 @@ -7230,7 +7230,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
4155
4156 filp->private_data = info;
4157
4158 - tr->current_trace->ref++;
4159 + tr->trace_ref++;
4160
4161 mutex_unlock(&trace_types_lock);
4162
4163 @@ -7331,7 +7331,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
4164
4165 mutex_lock(&trace_types_lock);
4166
4167 - iter->tr->current_trace->ref--;
4168 + iter->tr->trace_ref--;
4169
4170 __trace_array_put(iter->tr);
4171
4172 @@ -8470,7 +8470,7 @@ static int __remove_instance(struct trace_array *tr)
4173 {
4174 int i;
4175
4176 - if (tr->ref || (tr->current_trace && tr->current_trace->ref))
4177 + if (tr->ref || (tr->current_trace && tr->trace_ref))
4178 return -EBUSY;
4179
4180 list_del(&tr->list);
4181 diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
4182 index a3c29d5fcc616..4055158c1dd25 100644
4183 --- a/kernel/trace/trace.h
4184 +++ b/kernel/trace/trace.h
4185 @@ -309,6 +309,7 @@ struct trace_array {
4186 struct trace_event_file *trace_marker_file;
4187 cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
4188 int ref;
4189 + int trace_ref;
4190 #ifdef CONFIG_FUNCTION_TRACER
4191 struct ftrace_ops *ops;
4192 struct trace_pid_list __rcu *function_pids;
4193 @@ -498,7 +499,6 @@ struct tracer {
4194 struct tracer *next;
4195 struct tracer_flags *flags;
4196 int enabled;
4197 - int ref;
4198 bool print_max;
4199 bool allow_instances;
4200 #ifdef CONFIG_TRACER_MAX_TRACE
4201 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
4202 index 995061bb2deca..ed9eb97b64b47 100644
4203 --- a/kernel/trace/trace_events.c
4204 +++ b/kernel/trace/trace_events.c
4205 @@ -527,12 +527,12 @@ void trace_event_follow_fork(struct trace_array *tr, bool enable)
4206 if (enable) {
4207 register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
4208 tr, INT_MIN);
4209 - register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit,
4210 + register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit,
4211 tr, INT_MAX);
4212 } else {
4213 unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
4214 tr);
4215 - unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit,
4216 + unregister_trace_sched_process_free(event_filter_pid_sched_process_exit,
4217 tr);
4218 }
4219 }
4220 diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
4221 index 862f4b0139fcb..35512ed26d9ff 100644
4222 --- a/kernel/trace/trace_hwlat.c
4223 +++ b/kernel/trace/trace_hwlat.c
4224 @@ -270,6 +270,7 @@ static bool disable_migrate;
4225 static void move_to_next_cpu(void)
4226 {
4227 struct cpumask *current_mask = &save_cpumask;
4228 + struct trace_array *tr = hwlat_trace;
4229 int next_cpu;
4230
4231 if (disable_migrate)
4232 @@ -283,7 +284,7 @@ static void move_to_next_cpu(void)
4233 goto disable;
4234
4235 get_online_cpus();
4236 - cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
4237 + cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
4238 next_cpu = cpumask_next(smp_processor_id(), current_mask);
4239 put_online_cpus();
4240
4241 @@ -360,7 +361,7 @@ static int start_kthread(struct trace_array *tr)
4242 /* Just pick the first CPU on first iteration */
4243 current_mask = &save_cpumask;
4244 get_online_cpus();
4245 - cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
4246 + cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
4247 put_online_cpus();
4248 next_cpu = cpumask_first(current_mask);
4249
4250 diff --git a/lib/devres.c b/lib/devres.c
4251 index 17624d35e82d4..77c80ca9e4856 100644
4252 --- a/lib/devres.c
4253 +++ b/lib/devres.c
4254 @@ -155,6 +155,7 @@ void __iomem *devm_ioremap_resource(struct device *dev,
4255 {
4256 resource_size_t size;
4257 void __iomem *dest_ptr;
4258 + char *pretty_name;
4259
4260 BUG_ON(!dev);
4261
4262 @@ -165,7 +166,15 @@ void __iomem *devm_ioremap_resource(struct device *dev,
4263
4264 size = resource_size(res);
4265
4266 - if (!devm_request_mem_region(dev, res->start, size, dev_name(dev))) {
4267 + if (res->name)
4268 + pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
4269 + dev_name(dev), res->name);
4270 + else
4271 + pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
4272 + if (!pretty_name)
4273 + return IOMEM_ERR_PTR(-ENOMEM);
4274 +
4275 + if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
4276 dev_err(dev, "can't request region for resource %pR\n", res);
4277 return IOMEM_ERR_PTR(-EBUSY);
4278 }
4279 diff --git a/lib/test_kmod.c b/lib/test_kmod.c
4280 index 9cf77628fc913..87a0cc750ea23 100644
4281 --- a/lib/test_kmod.c
4282 +++ b/lib/test_kmod.c
4283 @@ -745,7 +745,7 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev,
4284 break;
4285 case TEST_KMOD_FS_TYPE:
4286 kfree_const(config->test_fs);
4287 - config->test_driver = NULL;
4288 + config->test_fs = NULL;
4289 copied = config_copy_test_fs(config, test_str,
4290 strlen(test_str));
4291 break;
4292 diff --git a/mm/khugepaged.c b/mm/khugepaged.c
4293 index 5977f7824a9ac..719f49d1fba2f 100644
4294 --- a/mm/khugepaged.c
4295 +++ b/mm/khugepaged.c
4296 @@ -1294,7 +1294,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
4297 {
4298 unsigned long haddr = addr & HPAGE_PMD_MASK;
4299 struct vm_area_struct *vma = find_vma(mm, haddr);
4300 - struct page *hpage = NULL;
4301 + struct page *hpage;
4302 pte_t *start_pte, *pte;
4303 pmd_t *pmd, _pmd;
4304 spinlock_t *ptl;
4305 @@ -1314,9 +1314,17 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
4306 if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
4307 return;
4308
4309 + hpage = find_lock_page(vma->vm_file->f_mapping,
4310 + linear_page_index(vma, haddr));
4311 + if (!hpage)
4312 + return;
4313 +
4314 + if (!PageHead(hpage))
4315 + goto drop_hpage;
4316 +
4317 pmd = mm_find_pmd(mm, haddr);
4318 if (!pmd)
4319 - return;
4320 + goto drop_hpage;
4321
4322 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
4323
4324 @@ -1335,30 +1343,11 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
4325
4326 page = vm_normal_page(vma, addr, *pte);
4327
4328 - if (!page || !PageCompound(page))
4329 - goto abort;
4330 -
4331 - if (!hpage) {
4332 - hpage = compound_head(page);
4333 - /*
4334 - * The mapping of the THP should not change.
4335 - *
4336 - * Note that uprobe, debugger, or MAP_PRIVATE may
4337 - * change the page table, but the new page will
4338 - * not pass PageCompound() check.
4339 - */
4340 - if (WARN_ON(hpage->mapping != vma->vm_file->f_mapping))
4341 - goto abort;
4342 - }
4343 -
4344 /*
4345 - * Confirm the page maps to the correct subpage.
4346 - *
4347 - * Note that uprobe, debugger, or MAP_PRIVATE may change
4348 - * the page table, but the new page will not pass
4349 - * PageCompound() check.
4350 + * Note that uprobe, debugger, or MAP_PRIVATE may change the
4351 + * page table, but the new page will not be a subpage of hpage.
4352 */
4353 - if (WARN_ON(hpage + i != page))
4354 + if (hpage + i != page)
4355 goto abort;
4356 count++;
4357 }
4358 @@ -1377,21 +1366,26 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
4359 pte_unmap_unlock(start_pte, ptl);
4360
4361 /* step 3: set proper refcount and mm_counters. */
4362 - if (hpage) {
4363 + if (count) {
4364 page_ref_sub(hpage, count);
4365 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
4366 }
4367
4368 /* step 4: collapse pmd */
4369 ptl = pmd_lock(vma->vm_mm, pmd);
4370 - _pmd = pmdp_collapse_flush(vma, addr, pmd);
4371 + _pmd = pmdp_collapse_flush(vma, haddr, pmd);
4372 spin_unlock(ptl);
4373 mm_dec_nr_ptes(mm);
4374 pte_free(mm, pmd_pgtable(_pmd));
4375 +
4376 +drop_hpage:
4377 + unlock_page(hpage);
4378 + put_page(hpage);
4379 return;
4380
4381 abort:
4382 pte_unmap_unlock(start_pte, ptl);
4383 + goto drop_hpage;
4384 }
4385
4386 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
4387 @@ -1420,6 +1414,7 @@ out:
4388 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
4389 {
4390 struct vm_area_struct *vma;
4391 + struct mm_struct *mm;
4392 unsigned long addr;
4393 pmd_t *pmd, _pmd;
4394
4395 @@ -1448,7 +1443,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
4396 continue;
4397 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
4398 continue;
4399 - pmd = mm_find_pmd(vma->vm_mm, addr);
4400 + mm = vma->vm_mm;
4401 + pmd = mm_find_pmd(mm, addr);
4402 if (!pmd)
4403 continue;
4404 /*
4405 @@ -1458,17 +1454,19 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
4406 * mmap_sem while holding page lock. Fault path does it in
4407 * reverse order. Trylock is a way to avoid deadlock.
4408 */
4409 - if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
4410 - spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
4411 - /* assume page table is clear */
4412 - _pmd = pmdp_collapse_flush(vma, addr, pmd);
4413 - spin_unlock(ptl);
4414 - up_write(&vma->vm_mm->mmap_sem);
4415 - mm_dec_nr_ptes(vma->vm_mm);
4416 - pte_free(vma->vm_mm, pmd_pgtable(_pmd));
4417 + if (down_write_trylock(&mm->mmap_sem)) {
4418 + if (!khugepaged_test_exit(mm)) {
4419 + spinlock_t *ptl = pmd_lock(mm, pmd);
4420 + /* assume page table is clear */
4421 + _pmd = pmdp_collapse_flush(vma, addr, pmd);
4422 + spin_unlock(ptl);
4423 + mm_dec_nr_ptes(mm);
4424 + pte_free(mm, pmd_pgtable(_pmd));
4425 + }
4426 + up_write(&mm->mmap_sem);
4427 } else {
4428 /* Try again later */
4429 - khugepaged_add_pte_mapped_thp(vma->vm_mm, addr);
4430 + khugepaged_add_pte_mapped_thp(mm, addr);
4431 }
4432 }
4433 i_mmap_unlock_write(mapping);
4434 diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
4435 index c054945a9a742..3128d95847125 100644
4436 --- a/mm/memory_hotplug.c
4437 +++ b/mm/memory_hotplug.c
4438 @@ -1751,7 +1751,7 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
4439 */
4440 rc = walk_memory_blocks(start, size, NULL, check_memblock_offlined_cb);
4441 if (rc)
4442 - goto done;
4443 + return rc;
4444
4445 /* remove memmap entry */
4446 firmware_map_remove(start, start + size, "System RAM");
4447 @@ -1771,9 +1771,8 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
4448
4449 try_offline_node(nid);
4450
4451 -done:
4452 mem_hotplug_done();
4453 - return rc;
4454 + return 0;
4455 }
4456
4457 /**
4458 diff --git a/mm/page_counter.c b/mm/page_counter.c
4459 index de31470655f66..147ff99187b81 100644
4460 --- a/mm/page_counter.c
4461 +++ b/mm/page_counter.c
4462 @@ -77,7 +77,7 @@ void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
4463 long new;
4464
4465 new = atomic_long_add_return(nr_pages, &c->usage);
4466 - propagate_protected_usage(counter, new);
4467 + propagate_protected_usage(c, new);
4468 /*
4469 * This is indeed racy, but we can live with some
4470 * inaccuracy in the watermark.
4471 @@ -121,7 +121,7 @@ bool page_counter_try_charge(struct page_counter *counter,
4472 new = atomic_long_add_return(nr_pages, &c->usage);
4473 if (new > c->max) {
4474 atomic_long_sub(nr_pages, &c->usage);
4475 - propagate_protected_usage(counter, new);
4476 + propagate_protected_usage(c, new);
4477 /*
4478 * This is racy, but we can live with some
4479 * inaccuracy in the failcnt.
4480 @@ -130,7 +130,7 @@ bool page_counter_try_charge(struct page_counter *counter,
4481 *fail = c;
4482 goto failed;
4483 }
4484 - propagate_protected_usage(counter, new);
4485 + propagate_protected_usage(c, new);
4486 /*
4487 * Just like with failcnt, we can live with some
4488 * inaccuracy in the watermark.
4489 diff --git a/net/compat.c b/net/compat.c
4490 index 0f7ded26059ec..c848bcb517f3e 100644
4491 --- a/net/compat.c
4492 +++ b/net/compat.c
4493 @@ -291,6 +291,7 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
4494 break;
4495 }
4496 /* Bump the usage count and install the file. */
4497 + __receive_sock(fp[i]);
4498 fd_install(new_fd, get_file(fp[i]));
4499 }
4500
4501 diff --git a/net/core/sock.c b/net/core/sock.c
4502 index 991ab80234cec..919f1a1739e90 100644
4503 --- a/net/core/sock.c
4504 +++ b/net/core/sock.c
4505 @@ -2736,6 +2736,27 @@ int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *
4506 }
4507 EXPORT_SYMBOL(sock_no_mmap);
4508
4509 +/*
4510 + * When a file is received (via SCM_RIGHTS, etc), we must bump the
4511 + * various sock-based usage counts.
4512 + */
4513 +void __receive_sock(struct file *file)
4514 +{
4515 + struct socket *sock;
4516 + int error;
4517 +
4518 + /*
4519 + * The resulting value of "error" is ignored here since we only
4520 + * need to take action when the file is a socket and testing
4521 + * "sock" for NULL is sufficient.
4522 + */
4523 + sock = sock_from_file(file, &error);
4524 + if (sock) {
4525 + sock_update_netprioidx(&sock->sk->sk_cgrp_data);
4526 + sock_update_classid(&sock->sk->sk_cgrp_data);
4527 + }
4528 +}
4529 +
4530 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
4531 {
4532 ssize_t res;
4533 diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
4534 index b1669f0244706..f5d96107af6de 100644
4535 --- a/net/mac80211/sta_info.c
4536 +++ b/net/mac80211/sta_info.c
4537 @@ -1033,7 +1033,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
4538 might_sleep();
4539 lockdep_assert_held(&local->sta_mtx);
4540
4541 - while (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
4542 + if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
4543 ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
4544 WARN_ON_ONCE(ret);
4545 }
4546 diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
4547 index e59022b3f1254..b9c2ee7ab43fa 100644
4548 --- a/scripts/recordmcount.c
4549 +++ b/scripts/recordmcount.c
4550 @@ -42,6 +42,8 @@
4551 #define R_ARM_THM_CALL 10
4552 #define R_ARM_CALL 28
4553
4554 +#define R_AARCH64_CALL26 283
4555 +
4556 static int fd_map; /* File descriptor for file being modified. */
4557 static int mmap_failed; /* Boolean flag. */
4558 static char gpfx; /* prefix for global symbol name (sometimes '_') */
4559 diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
4560 index ca9125726be24..8596ae4c2bdef 100644
4561 --- a/sound/pci/echoaudio/echoaudio.c
4562 +++ b/sound/pci/echoaudio/echoaudio.c
4563 @@ -2198,7 +2198,6 @@ static int snd_echo_resume(struct device *dev)
4564 if (err < 0) {
4565 kfree(commpage_bak);
4566 dev_err(dev, "resume init_hw err=%d\n", err);
4567 - snd_echo_free(chip);
4568 return err;
4569 }
4570
4571 @@ -2225,7 +2224,6 @@ static int snd_echo_resume(struct device *dev)
4572 if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED,
4573 KBUILD_MODNAME, chip)) {
4574 dev_err(chip->card->dev, "cannot grab irq\n");
4575 - snd_echo_free(chip);
4576 return -EBUSY;
4577 }
4578 chip->irq = pci->irq;
4579 diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
4580 index 8a19753cc26aa..8c6e1ea67f213 100644
4581 --- a/tools/build/Makefile.feature
4582 +++ b/tools/build/Makefile.feature
4583 @@ -8,7 +8,7 @@ endif
4584
4585 feature_check = $(eval $(feature_check_code))
4586 define feature_check_code
4587 - feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
4588 + feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CC="$(CC)" CXX="$(CXX)" CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
4589 endef
4590
4591 feature_set = $(eval $(feature_set_code))
4592 diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
4593 index 8499385365c02..054e09ab4a9e4 100644
4594 --- a/tools/build/feature/Makefile
4595 +++ b/tools/build/feature/Makefile
4596 @@ -70,8 +70,6 @@ FILES= \
4597
4598 FILES := $(addprefix $(OUTPUT),$(FILES))
4599
4600 -CC ?= $(CROSS_COMPILE)gcc
4601 -CXX ?= $(CROSS_COMPILE)g++
4602 PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config
4603 LLVM_CONFIG ?= llvm-config
4604
4605 diff --git a/tools/perf/bench/mem-functions.c b/tools/perf/bench/mem-functions.c
4606 index 9235b76501be8..19d45c377ac18 100644
4607 --- a/tools/perf/bench/mem-functions.c
4608 +++ b/tools/perf/bench/mem-functions.c
4609 @@ -223,12 +223,8 @@ static int bench_mem_common(int argc, const char **argv, struct bench_mem_info *
4610 return 0;
4611 }
4612
4613 -static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst)
4614 +static void memcpy_prefault(memcpy_t fn, size_t size, void *src, void *dst)
4615 {
4616 - u64 cycle_start = 0ULL, cycle_end = 0ULL;
4617 - memcpy_t fn = r->fn.memcpy;
4618 - int i;
4619 -
4620 /* Make sure to always prefault zero pages even if MMAP_THRESH is crossed: */
4621 memset(src, 0, size);
4622
4623 @@ -237,6 +233,15 @@ static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, vo
4624 * to not measure page fault overhead:
4625 */
4626 fn(dst, src, size);
4627 +}
4628 +
4629 +static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst)
4630 +{
4631 + u64 cycle_start = 0ULL, cycle_end = 0ULL;
4632 + memcpy_t fn = r->fn.memcpy;
4633 + int i;
4634 +
4635 + memcpy_prefault(fn, size, src, dst);
4636
4637 cycle_start = get_cycles();
4638 for (i = 0; i < nr_loops; ++i)
4639 @@ -252,11 +257,7 @@ static double do_memcpy_gettimeofday(const struct function *r, size_t size, void
4640 memcpy_t fn = r->fn.memcpy;
4641 int i;
4642
4643 - /*
4644 - * We prefault the freshly allocated memory range here,
4645 - * to not measure page fault overhead:
4646 - */
4647 - fn(dst, src, size);
4648 + memcpy_prefault(fn, size, src, dst);
4649
4650 BUG_ON(gettimeofday(&tv_start, NULL));
4651 for (i = 0; i < nr_loops; ++i)
4652 diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4653 index f8ccfd6be0eee..7ffcbd6fcd1ae 100644
4654 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4655 +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4656 @@ -1164,6 +1164,7 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
4657 return 0;
4658 if (err == -EAGAIN ||
4659 intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
4660 + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
4661 if (intel_pt_fup_event(decoder))
4662 return 0;
4663 return -EAGAIN;
4664 @@ -1942,17 +1943,13 @@ next:
4665 }
4666 if (decoder->set_fup_mwait)
4667 no_tip = true;
4668 + if (no_tip)
4669 + decoder->pkt_state = INTEL_PT_STATE_FUP_NO_TIP;
4670 + else
4671 + decoder->pkt_state = INTEL_PT_STATE_FUP;
4672 err = intel_pt_walk_fup(decoder);
4673 - if (err != -EAGAIN) {
4674 - if (err)
4675 - return err;
4676 - if (no_tip)
4677 - decoder->pkt_state =
4678 - INTEL_PT_STATE_FUP_NO_TIP;
4679 - else
4680 - decoder->pkt_state = INTEL_PT_STATE_FUP;
4681 - return 0;
4682 - }
4683 + if (err != -EAGAIN)
4684 + return err;
4685 if (no_tip) {
4686 no_tip = false;
4687 break;
4688 @@ -1980,8 +1977,10 @@ next:
4689 * possibility of another CBR change that gets caught up
4690 * in the PSB+.
4691 */
4692 - if (decoder->cbr != decoder->cbr_seen)
4693 + if (decoder->cbr != decoder->cbr_seen) {
4694 + decoder->state.type = 0;
4695 return 0;
4696 + }
4697 break;
4698
4699 case INTEL_PT_PIP:
4700 @@ -2022,8 +2021,10 @@ next:
4701
4702 case INTEL_PT_CBR:
4703 intel_pt_calc_cbr(decoder);
4704 - if (decoder->cbr != decoder->cbr_seen)
4705 + if (decoder->cbr != decoder->cbr_seen) {
4706 + decoder->state.type = 0;
4707 return 0;
4708 + }
4709 break;
4710
4711 case INTEL_PT_MODE_EXEC:
4712 @@ -2599,15 +2600,11 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
4713 err = intel_pt_walk_tip(decoder);
4714 break;
4715 case INTEL_PT_STATE_FUP:
4716 - decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
4717 err = intel_pt_walk_fup(decoder);
4718 if (err == -EAGAIN)
4719 err = intel_pt_walk_fup_tip(decoder);
4720 - else if (!err)
4721 - decoder->pkt_state = INTEL_PT_STATE_FUP;
4722 break;
4723 case INTEL_PT_STATE_FUP_NO_TIP:
4724 - decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
4725 err = intel_pt_walk_fup(decoder);
4726 if (err == -EAGAIN)
4727 err = intel_pt_walk_trace(decoder);
4728 diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
4729 index 8cb3469dd11f2..48bbe8e0ce48d 100644
4730 --- a/tools/testing/selftests/bpf/test_progs.c
4731 +++ b/tools/testing/selftests/bpf/test_progs.c
4732 @@ -7,6 +7,8 @@
4733 #include <argp.h>
4734 #include <string.h>
4735
4736 +#define EXIT_NO_TEST 2
4737 +
4738 /* defined in test_progs.h */
4739 struct test_env env;
4740
4741 @@ -584,5 +586,8 @@ int main(int argc, char **argv)
4742 free(env.test_selector.num_set);
4743 free(env.subtest_selector.num_set);
4744
4745 + if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
4746 + return EXIT_NO_TEST;
4747 +
4748 return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
4749 }
4750 diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c
4751 index bdbbbe8431e03..3694613f418f6 100644
4752 --- a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c
4753 +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c
4754 @@ -44,7 +44,7 @@ struct shared_info {
4755 unsigned long amr2;
4756
4757 /* AMR value that ptrace should refuse to write to the child. */
4758 - unsigned long amr3;
4759 + unsigned long invalid_amr;
4760
4761 /* IAMR value the parent expects to read from the child. */
4762 unsigned long expected_iamr;
4763 @@ -57,8 +57,8 @@ struct shared_info {
4764 * (even though they're valid ones) because userspace doesn't have
4765 * access to those registers.
4766 */
4767 - unsigned long new_iamr;
4768 - unsigned long new_uamor;
4769 + unsigned long invalid_iamr;
4770 + unsigned long invalid_uamor;
4771 };
4772
4773 static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights)
4774 @@ -66,11 +66,6 @@ static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights)
4775 return syscall(__NR_pkey_alloc, flags, init_access_rights);
4776 }
4777
4778 -static int sys_pkey_free(int pkey)
4779 -{
4780 - return syscall(__NR_pkey_free, pkey);
4781 -}
4782 -
4783 static int child(struct shared_info *info)
4784 {
4785 unsigned long reg;
4786 @@ -100,28 +95,32 @@ static int child(struct shared_info *info)
4787
4788 info->amr1 |= 3ul << pkeyshift(pkey1);
4789 info->amr2 |= 3ul << pkeyshift(pkey2);
4790 - info->amr3 |= info->amr2 | 3ul << pkeyshift(pkey3);
4791 + /*
4792 + * invalid amr value where we try to force write
4793 + * things which are deined by a uamor setting.
4794 + */
4795 + info->invalid_amr = info->amr2 | (~0x0UL & ~info->expected_uamor);
4796
4797 + /*
4798 + * if PKEY_DISABLE_EXECUTE succeeded we should update the expected_iamr
4799 + */
4800 if (disable_execute)
4801 info->expected_iamr |= 1ul << pkeyshift(pkey1);
4802 else
4803 info->expected_iamr &= ~(1ul << pkeyshift(pkey1));
4804
4805 - info->expected_iamr &= ~(1ul << pkeyshift(pkey2) | 1ul << pkeyshift(pkey3));
4806 -
4807 - info->expected_uamor |= 3ul << pkeyshift(pkey1) |
4808 - 3ul << pkeyshift(pkey2);
4809 - info->new_iamr |= 1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2);
4810 - info->new_uamor |= 3ul << pkeyshift(pkey1);
4811 + /*
4812 + * We allocated pkey2 and pkey 3 above. Clear the IAMR bits.
4813 + */
4814 + info->expected_iamr &= ~(1ul << pkeyshift(pkey2));
4815 + info->expected_iamr &= ~(1ul << pkeyshift(pkey3));
4816
4817 /*
4818 - * We won't use pkey3. We just want a plausible but invalid key to test
4819 - * whether ptrace will let us write to AMR bits we are not supposed to.
4820 - *
4821 - * This also tests whether the kernel restores the UAMOR permissions
4822 - * after a key is freed.
4823 + * Create an IAMR value different from expected value.
4824 + * Kernel will reject an IAMR and UAMOR change.
4825 */
4826 - sys_pkey_free(pkey3);
4827 + info->invalid_iamr = info->expected_iamr | (1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2));
4828 + info->invalid_uamor = info->expected_uamor & ~(0x3ul << pkeyshift(pkey1));
4829
4830 printf("%-30s AMR: %016lx pkey1: %d pkey2: %d pkey3: %d\n",
4831 user_write, info->amr1, pkey1, pkey2, pkey3);
4832 @@ -196,9 +195,9 @@ static int parent(struct shared_info *info, pid_t pid)
4833 PARENT_SKIP_IF_UNSUPPORTED(ret, &info->child_sync);
4834 PARENT_FAIL_IF(ret, &info->child_sync);
4835
4836 - info->amr1 = info->amr2 = info->amr3 = regs[0];
4837 - info->expected_iamr = info->new_iamr = regs[1];
4838 - info->expected_uamor = info->new_uamor = regs[2];
4839 + info->amr1 = info->amr2 = regs[0];
4840 + info->expected_iamr = regs[1];
4841 + info->expected_uamor = regs[2];
4842
4843 /* Wake up child so that it can set itself up. */
4844 ret = prod_child(&info->child_sync);
4845 @@ -234,10 +233,10 @@ static int parent(struct shared_info *info, pid_t pid)
4846 return ret;
4847
4848 /* Write invalid AMR value in child. */
4849 - ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->amr3, 1);
4850 + ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->invalid_amr, 1);
4851 PARENT_FAIL_IF(ret, &info->child_sync);
4852
4853 - printf("%-30s AMR: %016lx\n", ptrace_write_running, info->amr3);
4854 + printf("%-30s AMR: %016lx\n", ptrace_write_running, info->invalid_amr);
4855
4856 /* Wake up child so that it can verify it didn't change. */
4857 ret = prod_child(&info->child_sync);
4858 @@ -249,7 +248,7 @@ static int parent(struct shared_info *info, pid_t pid)
4859
4860 /* Try to write to IAMR. */
4861 regs[0] = info->amr1;
4862 - regs[1] = info->new_iamr;
4863 + regs[1] = info->invalid_iamr;
4864 ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 2);
4865 PARENT_FAIL_IF(!ret, &info->child_sync);
4866
4867 @@ -257,7 +256,7 @@ static int parent(struct shared_info *info, pid_t pid)
4868 ptrace_write_running, regs[0], regs[1]);
4869
4870 /* Try to write to IAMR and UAMOR. */
4871 - regs[2] = info->new_uamor;
4872 + regs[2] = info->invalid_uamor;
4873 ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 3);
4874 PARENT_FAIL_IF(!ret, &info->child_sync);
4875