Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0242-4.9.143-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3295 - (show annotations) (download)
Tue Mar 12 10:43:05 2019 UTC (5 years, 1 month ago) by niro
File size: 59228 byte(s)
-linux-4.9.143
1 diff --git a/Makefile b/Makefile
2 index 72ed8ff90329..8ec52cd19526 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 142
9 +SUBLEVEL = 143
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 @@ -509,6 +509,39 @@ ifneq ($(filter install,$(MAKECMDGOALS)),)
14 endif
15 endif
16
17 +ifeq ($(cc-name),clang)
18 +ifneq ($(CROSS_COMPILE),)
19 +CLANG_TARGET := -target $(notdir $(CROSS_COMPILE:%-=%))
20 +GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
21 +endif
22 +ifneq ($(GCC_TOOLCHAIN),)
23 +CLANG_GCC_TC := -gcc-toolchain $(GCC_TOOLCHAIN)
24 +endif
25 +KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
26 +KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
27 +KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
28 +KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
29 +KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
30 +KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
31 +KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
32 +# Quiet clang warning: comparison of unsigned expression < 0 is always false
33 +KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
34 +# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
35 +# source of a reference will be _MergedGlobals and not one of the whitelisted names.
36 +# See modpost pattern 2
37 +KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
38 +KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
39 +KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
40 +KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
41 +else
42 +
43 +# These warnings generated too much noise in a regular build.
44 +# Use make W=1 to enable them (see scripts/Makefile.build)
45 +KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
46 +KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
47 +endif
48 +
49 +
50 ifeq ($(mixed-targets),1)
51 # ===========================================================================
52 # We're called with mixed targets (*config and build targets).
53 @@ -704,38 +737,6 @@ ifdef CONFIG_CC_STACKPROTECTOR
54 endif
55 KBUILD_CFLAGS += $(stackp-flag)
56
57 -ifeq ($(cc-name),clang)
58 -ifneq ($(CROSS_COMPILE),)
59 -CLANG_TARGET := -target $(notdir $(CROSS_COMPILE:%-=%))
60 -GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
61 -endif
62 -ifneq ($(GCC_TOOLCHAIN),)
63 -CLANG_GCC_TC := -gcc-toolchain $(GCC_TOOLCHAIN)
64 -endif
65 -KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
66 -KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
67 -KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
68 -KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
69 -KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
70 -KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
71 -KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
72 -# Quiet clang warning: comparison of unsigned expression < 0 is always false
73 -KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
74 -# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
75 -# source of a reference will be _MergedGlobals and not on of the whitelisted names.
76 -# See modpost pattern 2
77 -KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
78 -KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
79 -KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
80 -KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
81 -else
82 -
83 -# These warnings generated too much noise in a regular build.
84 -# Use make W=1 to enable them (see scripts/Makefile.build)
85 -KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
86 -KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
87 -endif
88 -
89 ifdef CONFIG_FRAME_POINTER
90 KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
91 else
92 diff --git a/arch/arm/Makefile b/arch/arm/Makefile
93 index 6be9ee148b78..e14ddca59d02 100644
94 --- a/arch/arm/Makefile
95 +++ b/arch/arm/Makefile
96 @@ -104,7 +104,7 @@ tune-$(CONFIG_CPU_V6K) =$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
97 tune-y := $(tune-y)
98
99 ifeq ($(CONFIG_AEABI),y)
100 -CFLAGS_ABI :=-mabi=aapcs-linux -mno-thumb-interwork -mfpu=vfp
101 +CFLAGS_ABI :=-mabi=aapcs-linux -mfpu=vfp
102 else
103 CFLAGS_ABI :=$(call cc-option,-mapcs-32,-mabi=apcs-gnu) $(call cc-option,-mno-thumb-interwork,)
104 endif
105 diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
106 index d50430c40045..552c7d7f84ce 100644
107 --- a/arch/arm/boot/compressed/Makefile
108 +++ b/arch/arm/boot/compressed/Makefile
109 @@ -112,7 +112,7 @@ CFLAGS_fdt_ro.o := $(nossp_flags)
110 CFLAGS_fdt_rw.o := $(nossp_flags)
111 CFLAGS_fdt_wip.o := $(nossp_flags)
112
113 -ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
114 +ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
115 asflags-y := -DZIMAGE
116
117 # Supply kernel BSS size to the decompressor via a linker symbol.
118 diff --git a/arch/arm/firmware/trusted_foundations.c b/arch/arm/firmware/trusted_foundations.c
119 index 3fb1b5a1dce9..689e6565abfc 100644
120 --- a/arch/arm/firmware/trusted_foundations.c
121 +++ b/arch/arm/firmware/trusted_foundations.c
122 @@ -31,21 +31,25 @@
123
124 static unsigned long cpu_boot_addr;
125
126 -static void __naked tf_generic_smc(u32 type, u32 arg1, u32 arg2)
127 +static void tf_generic_smc(u32 type, u32 arg1, u32 arg2)
128 {
129 + register u32 r0 asm("r0") = type;
130 + register u32 r1 asm("r1") = arg1;
131 + register u32 r2 asm("r2") = arg2;
132 +
133 asm volatile(
134 ".arch_extension sec\n\t"
135 - "stmfd sp!, {r4 - r11, lr}\n\t"
136 + "stmfd sp!, {r4 - r11}\n\t"
137 __asmeq("%0", "r0")
138 __asmeq("%1", "r1")
139 __asmeq("%2", "r2")
140 "mov r3, #0\n\t"
141 "mov r4, #0\n\t"
142 "smc #0\n\t"
143 - "ldmfd sp!, {r4 - r11, pc}"
144 + "ldmfd sp!, {r4 - r11}\n\t"
145 :
146 - : "r" (type), "r" (arg1), "r" (arg2)
147 - : "memory");
148 + : "r" (r0), "r" (r1), "r" (r2)
149 + : "memory", "r3", "r12", "lr");
150 }
151
152 static int tf_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
153 diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
154 index 655a65eaf105..cadf99923600 100644
155 --- a/arch/x86/events/core.c
156 +++ b/arch/x86/events/core.c
157 @@ -437,26 +437,6 @@ int x86_setup_perfctr(struct perf_event *event)
158 if (config == -1LL)
159 return -EINVAL;
160
161 - /*
162 - * Branch tracing:
163 - */
164 - if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
165 - !attr->freq && hwc->sample_period == 1) {
166 - /* BTS is not supported by this architecture. */
167 - if (!x86_pmu.bts_active)
168 - return -EOPNOTSUPP;
169 -
170 - /* BTS is currently only allowed for user-mode. */
171 - if (!attr->exclude_kernel)
172 - return -EOPNOTSUPP;
173 -
174 - /* disallow bts if conflicting events are present */
175 - if (x86_add_exclusive(x86_lbr_exclusive_lbr))
176 - return -EBUSY;
177 -
178 - event->destroy = hw_perf_lbr_event_destroy;
179 - }
180 -
181 hwc->config |= config;
182
183 return 0;
184 diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
185 index 815039327932..4f8560774082 100644
186 --- a/arch/x86/events/intel/core.c
187 +++ b/arch/x86/events/intel/core.c
188 @@ -2198,16 +2198,7 @@ done:
189 static struct event_constraint *
190 intel_bts_constraints(struct perf_event *event)
191 {
192 - struct hw_perf_event *hwc = &event->hw;
193 - unsigned int hw_event, bts_event;
194 -
195 - if (event->attr.freq)
196 - return NULL;
197 -
198 - hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
199 - bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
200 -
201 - if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
202 + if (unlikely(intel_pmu_has_bts(event)))
203 return &bts_constraint;
204
205 return NULL;
206 @@ -2822,10 +2813,47 @@ static unsigned long intel_pmu_free_running_flags(struct perf_event *event)
207 return flags;
208 }
209
210 +static int intel_pmu_bts_config(struct perf_event *event)
211 +{
212 + struct perf_event_attr *attr = &event->attr;
213 +
214 + if (unlikely(intel_pmu_has_bts(event))) {
215 + /* BTS is not supported by this architecture. */
216 + if (!x86_pmu.bts_active)
217 + return -EOPNOTSUPP;
218 +
219 + /* BTS is currently only allowed for user-mode. */
220 + if (!attr->exclude_kernel)
221 + return -EOPNOTSUPP;
222 +
223 + /* disallow bts if conflicting events are present */
224 + if (x86_add_exclusive(x86_lbr_exclusive_lbr))
225 + return -EBUSY;
226 +
227 + event->destroy = hw_perf_lbr_event_destroy;
228 + }
229 +
230 + return 0;
231 +}
232 +
233 +static int core_pmu_hw_config(struct perf_event *event)
234 +{
235 + int ret = x86_pmu_hw_config(event);
236 +
237 + if (ret)
238 + return ret;
239 +
240 + return intel_pmu_bts_config(event);
241 +}
242 +
243 static int intel_pmu_hw_config(struct perf_event *event)
244 {
245 int ret = x86_pmu_hw_config(event);
246
247 + if (ret)
248 + return ret;
249 +
250 + ret = intel_pmu_bts_config(event);
251 if (ret)
252 return ret;
253
254 @@ -2848,7 +2876,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
255 /*
256 * BTS is set up earlier in this path, so don't account twice
257 */
258 - if (!intel_pmu_has_bts(event)) {
259 + if (!unlikely(intel_pmu_has_bts(event))) {
260 /* disallow lbr if conflicting events are present */
261 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
262 return -EBUSY;
263 @@ -3265,7 +3293,7 @@ static __initconst const struct x86_pmu core_pmu = {
264 .enable_all = core_pmu_enable_all,
265 .enable = core_pmu_enable_event,
266 .disable = x86_pmu_disable_event,
267 - .hw_config = x86_pmu_hw_config,
268 + .hw_config = core_pmu_hw_config,
269 .schedule_events = x86_schedule_events,
270 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
271 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
272 diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
273 index 1bfebbc4d156..7ace39c51ff7 100644
274 --- a/arch/x86/events/perf_event.h
275 +++ b/arch/x86/events/perf_event.h
276 @@ -835,11 +835,16 @@ static inline int amd_pmu_init(void)
277
278 static inline bool intel_pmu_has_bts(struct perf_event *event)
279 {
280 - if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
281 - !event->attr.freq && event->hw.sample_period == 1)
282 - return true;
283 + struct hw_perf_event *hwc = &event->hw;
284 + unsigned int hw_event, bts_event;
285 +
286 + if (event->attr.freq)
287 + return false;
288 +
289 + hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
290 + bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
291
292 - return false;
293 + return hw_event == bts_event && hwc->sample_period == 1;
294 }
295
296 int intel_pmu_save_and_restart(struct perf_event *event);
297 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
298 index 8a4d6bc8fed0..676edfc19a95 100644
299 --- a/arch/x86/kvm/mmu.c
300 +++ b/arch/x86/kvm/mmu.c
301 @@ -4297,9 +4297,9 @@ static bool need_remote_flush(u64 old, u64 new)
302 }
303
304 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
305 - const u8 *new, int *bytes)
306 + int *bytes)
307 {
308 - u64 gentry;
309 + u64 gentry = 0;
310 int r;
311
312 /*
313 @@ -4311,22 +4311,12 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
314 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
315 *gpa &= ~(gpa_t)7;
316 *bytes = 8;
317 - r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8);
318 - if (r)
319 - gentry = 0;
320 - new = (const u8 *)&gentry;
321 }
322
323 - switch (*bytes) {
324 - case 4:
325 - gentry = *(const u32 *)new;
326 - break;
327 - case 8:
328 - gentry = *(const u64 *)new;
329 - break;
330 - default:
331 - gentry = 0;
332 - break;
333 + if (*bytes == 4 || *bytes == 8) {
334 + r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
335 + if (r)
336 + gentry = 0;
337 }
338
339 return gentry;
340 @@ -4437,8 +4427,6 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
341
342 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
343
344 - gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);
345 -
346 /*
347 * No need to care whether allocation memory is successful
348 * or not since pte prefetch is skiped if it does not have
349 @@ -4447,6 +4435,9 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
350 mmu_topup_memory_caches(vcpu);
351
352 spin_lock(&vcpu->kvm->mmu_lock);
353 +
354 + gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
355 +
356 ++vcpu->kvm->stat.mmu_pte_write;
357 kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
358
359 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
360 index 5f44d63a9d69..4bc35ac28d11 100644
361 --- a/arch/x86/kvm/svm.c
362 +++ b/arch/x86/kvm/svm.c
363 @@ -1672,21 +1672,31 @@ out:
364 return ERR_PTR(err);
365 }
366
367 +static void svm_clear_current_vmcb(struct vmcb *vmcb)
368 +{
369 + int i;
370 +
371 + for_each_online_cpu(i)
372 + cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
373 +}
374 +
375 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
376 {
377 struct vcpu_svm *svm = to_svm(vcpu);
378
379 + /*
380 + * The vmcb page can be recycled, causing a false negative in
381 + * svm_vcpu_load(). So, ensure that no logical CPU has this
382 + * vmcb page recorded as its current vmcb.
383 + */
384 + svm_clear_current_vmcb(svm->vmcb);
385 +
386 __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
387 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
388 __free_page(virt_to_page(svm->nested.hsave));
389 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
390 kvm_vcpu_uninit(vcpu);
391 kmem_cache_free(kvm_vcpu_cache, svm);
392 - /*
393 - * The vmcb page can be recycled, causing a false negative in
394 - * svm_vcpu_load(). So do a full IBPB now.
395 - */
396 - indirect_branch_prediction_barrier();
397 }
398
399 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
400 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
401 index 5013ef165f44..27d13b870e07 100644
402 --- a/arch/x86/kvm/x86.c
403 +++ b/arch/x86/kvm/x86.c
404 @@ -6661,7 +6661,8 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
405 else {
406 if (vcpu->arch.apicv_active)
407 kvm_x86_ops->sync_pir_to_irr(vcpu);
408 - kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
409 + if (ioapic_in_kernel(vcpu->kvm))
410 + kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
411 }
412 bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
413 vcpu_to_synic(vcpu)->vec_bitmap, 256);
414 diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
415 index 8e10e357ee32..f1af06b8f3cd 100644
416 --- a/arch/xtensa/kernel/asm-offsets.c
417 +++ b/arch/xtensa/kernel/asm-offsets.c
418 @@ -91,14 +91,14 @@ int main(void)
419 DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
420 DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
421 #if XTENSA_HAVE_COPROCESSORS
422 - DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp));
423 - DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp));
424 - DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp));
425 - DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp));
426 - DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp));
427 - DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp));
428 - DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp));
429 - DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp));
430 + DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
431 + DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
432 + DEFINE(THREAD_XTREGS_CP2, offsetof(struct thread_info, xtregs_cp.cp2));
433 + DEFINE(THREAD_XTREGS_CP3, offsetof(struct thread_info, xtregs_cp.cp3));
434 + DEFINE(THREAD_XTREGS_CP4, offsetof(struct thread_info, xtregs_cp.cp4));
435 + DEFINE(THREAD_XTREGS_CP5, offsetof(struct thread_info, xtregs_cp.cp5));
436 + DEFINE(THREAD_XTREGS_CP6, offsetof(struct thread_info, xtregs_cp.cp6));
437 + DEFINE(THREAD_XTREGS_CP7, offsetof(struct thread_info, xtregs_cp.cp7));
438 #endif
439 DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
440 DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
441 diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
442 index e0ded48561db..570307c91846 100644
443 --- a/arch/xtensa/kernel/process.c
444 +++ b/arch/xtensa/kernel/process.c
445 @@ -85,18 +85,21 @@ void coprocessor_release_all(struct thread_info *ti)
446
447 void coprocessor_flush_all(struct thread_info *ti)
448 {
449 - unsigned long cpenable;
450 + unsigned long cpenable, old_cpenable;
451 int i;
452
453 preempt_disable();
454
455 + RSR_CPENABLE(old_cpenable);
456 cpenable = ti->cpenable;
457 + WSR_CPENABLE(cpenable);
458
459 for (i = 0; i < XCHAL_CP_MAX; i++) {
460 if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
461 coprocessor_flush(ti, i);
462 cpenable >>= 1;
463 }
464 + WSR_CPENABLE(old_cpenable);
465
466 preempt_enable();
467 }
468 diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
469 index 10f56133b281..8e08cb4fd7df 100644
470 --- a/drivers/bus/arm-cci.c
471 +++ b/drivers/bus/arm-cci.c
472 @@ -2103,8 +2103,6 @@ asmlinkage void __naked cci_enable_port_for_self(void)
473 [sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)),
474 [sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)),
475 [offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) );
476 -
477 - unreachable();
478 }
479
480 /**
481 diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
482 index e2cec5b357fd..a32cd71f94bb 100644
483 --- a/drivers/dma/at_hdmac.c
484 +++ b/drivers/dma/at_hdmac.c
485 @@ -1774,6 +1774,12 @@ static void atc_free_chan_resources(struct dma_chan *chan)
486 atchan->descs_allocated = 0;
487 atchan->status = 0;
488
489 + /*
490 + * Free atslave allocated in at_dma_xlate()
491 + */
492 + kfree(chan->private);
493 + chan->private = NULL;
494 +
495 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
496 }
497
498 @@ -1808,7 +1814,7 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
499 dma_cap_zero(mask);
500 dma_cap_set(DMA_SLAVE, mask);
501
502 - atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
503 + atslave = kzalloc(sizeof(*atslave), GFP_KERNEL);
504 if (!atslave)
505 return NULL;
506
507 @@ -2139,6 +2145,8 @@ static int at_dma_remove(struct platform_device *pdev)
508 struct resource *io;
509
510 at_dma_off(atdma);
511 + if (pdev->dev.of_node)
512 + of_dma_controller_free(pdev->dev.of_node);
513 dma_async_device_unregister(&atdma->dma_common);
514
515 dma_pool_destroy(atdma->memset_pool);
516 diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
517 index 2cd9496eb696..310f8feb5174 100644
518 --- a/drivers/firmware/efi/libstub/Makefile
519 +++ b/drivers/firmware/efi/libstub/Makefile
520 @@ -12,7 +12,8 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
521
522 cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
523 cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
524 - -fno-builtin -fpic -mno-single-pic-base
525 + -fno-builtin -fpic \
526 + $(call cc-option,-mno-single-pic-base)
527
528 cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
529
530 diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
531 index aded10662020..09d10dcf1fc6 100644
532 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c
533 +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
534 @@ -355,6 +355,14 @@ efi_status_t efi_parse_options(char *cmdline)
535 {
536 char *str;
537
538 + /*
539 + * Currently, the only efi= option we look for is 'nochunk', which
540 + * is intended to work around known issues on certain x86 UEFI
541 + * versions. So ignore for now on other architectures.
542 + */
543 + if (!IS_ENABLED(CONFIG_X86))
544 + return EFI_SUCCESS;
545 +
546 /*
547 * If no EFI parameters were specified on the cmdline we've got
548 * nothing to do.
549 @@ -528,7 +536,8 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
550 size = files[j].size;
551 while (size) {
552 unsigned long chunksize;
553 - if (size > __chunk_size)
554 +
555 + if (IS_ENABLED(CONFIG_X86) && size > __chunk_size)
556 chunksize = __chunk_size;
557 else
558 chunksize = size;
559 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
560 index 1606e7f08f4b..784c45484825 100644
561 --- a/drivers/hv/channel.c
562 +++ b/drivers/hv/channel.c
563 @@ -448,6 +448,14 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
564 }
565 wait_for_completion(&msginfo->waitevent);
566
567 + if (msginfo->response.gpadl_created.creation_status != 0) {
568 + pr_err("Failed to establish GPADL: err = 0x%x\n",
569 + msginfo->response.gpadl_created.creation_status);
570 +
571 + ret = -EDQUOT;
572 + goto cleanup;
573 + }
574 +
575 if (channel->rescind) {
576 ret = -ENODEV;
577 goto cleanup;
578 diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c
579 index 0a9e8fadfa9d..37ab30566464 100644
580 --- a/drivers/iio/magnetometer/st_magn_buffer.c
581 +++ b/drivers/iio/magnetometer/st_magn_buffer.c
582 @@ -30,11 +30,6 @@ int st_magn_trig_set_state(struct iio_trigger *trig, bool state)
583 return st_sensors_set_dataready_irq(indio_dev, state);
584 }
585
586 -static int st_magn_buffer_preenable(struct iio_dev *indio_dev)
587 -{
588 - return st_sensors_set_enable(indio_dev, true);
589 -}
590 -
591 static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
592 {
593 int err;
594 @@ -50,7 +45,7 @@ static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
595 if (err < 0)
596 goto st_magn_buffer_postenable_error;
597
598 - return err;
599 + return st_sensors_set_enable(indio_dev, true);
600
601 st_magn_buffer_postenable_error:
602 kfree(mdata->buffer_data);
603 @@ -63,11 +58,11 @@ static int st_magn_buffer_predisable(struct iio_dev *indio_dev)
604 int err;
605 struct st_sensor_data *mdata = iio_priv(indio_dev);
606
607 - err = iio_triggered_buffer_predisable(indio_dev);
608 + err = st_sensors_set_enable(indio_dev, false);
609 if (err < 0)
610 goto st_magn_buffer_predisable_error;
611
612 - err = st_sensors_set_enable(indio_dev, false);
613 + err = iio_triggered_buffer_predisable(indio_dev);
614
615 st_magn_buffer_predisable_error:
616 kfree(mdata->buffer_data);
617 @@ -75,7 +70,6 @@ st_magn_buffer_predisable_error:
618 }
619
620 static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = {
621 - .preenable = &st_magn_buffer_preenable,
622 .postenable = &st_magn_buffer_postenable,
623 .predisable = &st_magn_buffer_predisable,
624 };
625 diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
626 index 8cedef0daae4..b0aea48907b7 100644
627 --- a/drivers/media/usb/em28xx/em28xx-dvb.c
628 +++ b/drivers/media/usb/em28xx/em28xx-dvb.c
629 @@ -2016,6 +2016,8 @@ static int em28xx_dvb_fini(struct em28xx *dev)
630 }
631 }
632
633 + em28xx_unregister_dvb(dvb);
634 +
635 /* remove I2C SEC */
636 client = dvb->i2c_client_sec;
637 if (client) {
638 @@ -2037,7 +2039,6 @@ static int em28xx_dvb_fini(struct em28xx *dev)
639 i2c_unregister_device(client);
640 }
641
642 - em28xx_unregister_dvb(dvb);
643 kfree(dvb);
644 dev->dvb = NULL;
645 kref_put(&dev->ref, em28xx_free_device);
646 diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
647 index f806a4471eb9..32ab0f43f506 100644
648 --- a/drivers/misc/mic/scif/scif_rma.c
649 +++ b/drivers/misc/mic/scif/scif_rma.c
650 @@ -414,7 +414,7 @@ static int scif_create_remote_lookup(struct scif_dev *remote_dev,
651 if (err)
652 goto error_window;
653 err = scif_map_page(&window->num_pages_lookup.lookup[j],
654 - vmalloc_dma_phys ?
655 + vmalloc_num_pages ?
656 vmalloc_to_page(&window->num_pages[i]) :
657 virt_to_page(&window->num_pages[i]),
658 remote_dev);
659 diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
660 index a31f4610b493..2c2604e3f633 100644
661 --- a/drivers/net/rionet.c
662 +++ b/drivers/net/rionet.c
663 @@ -216,9 +216,9 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
664 * it just report sending a packet to the target
665 * (without actual packet transfer).
666 */
667 - dev_kfree_skb_any(skb);
668 ndev->stats.tx_packets++;
669 ndev->stats.tx_bytes += skb->len;
670 + dev_kfree_skb_any(skb);
671 }
672 }
673
674 diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
675 index 76465b117b72..f1f8227e7342 100644
676 --- a/drivers/net/usb/ipheth.c
677 +++ b/drivers/net/usb/ipheth.c
678 @@ -140,7 +140,6 @@ struct ipheth_device {
679 struct usb_device *udev;
680 struct usb_interface *intf;
681 struct net_device *net;
682 - struct sk_buff *tx_skb;
683 struct urb *tx_urb;
684 struct urb *rx_urb;
685 unsigned char *tx_buf;
686 @@ -229,6 +228,7 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
687 case -ENOENT:
688 case -ECONNRESET:
689 case -ESHUTDOWN:
690 + case -EPROTO:
691 return;
692 case 0:
693 break;
694 @@ -280,7 +280,6 @@ static void ipheth_sndbulk_callback(struct urb *urb)
695 dev_err(&dev->intf->dev, "%s: urb status: %d\n",
696 __func__, status);
697
698 - dev_kfree_skb_irq(dev->tx_skb);
699 netif_wake_queue(dev->net);
700 }
701
702 @@ -410,7 +409,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
703 if (skb->len > IPHETH_BUF_SIZE) {
704 WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
705 dev->net->stats.tx_dropped++;
706 - dev_kfree_skb_irq(skb);
707 + dev_kfree_skb_any(skb);
708 return NETDEV_TX_OK;
709 }
710
711 @@ -430,12 +429,11 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
712 dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n",
713 __func__, retval);
714 dev->net->stats.tx_errors++;
715 - dev_kfree_skb_irq(skb);
716 + dev_kfree_skb_any(skb);
717 } else {
718 - dev->tx_skb = skb;
719 -
720 dev->net->stats.tx_packets++;
721 dev->net->stats.tx_bytes += skb->len;
722 + dev_consume_skb_any(skb);
723 netif_stop_queue(net);
724 }
725
726 diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
727 index 96f83f09b8c5..7f4da727bb7b 100644
728 --- a/drivers/net/wireless/ti/wlcore/cmd.c
729 +++ b/drivers/net/wireless/ti/wlcore/cmd.c
730 @@ -35,7 +35,6 @@
731 #include "wl12xx_80211.h"
732 #include "cmd.h"
733 #include "event.h"
734 -#include "ps.h"
735 #include "tx.h"
736 #include "hw_ops.h"
737
738 @@ -192,10 +191,6 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
739
740 timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
741
742 - ret = wl1271_ps_elp_wakeup(wl);
743 - if (ret < 0)
744 - return ret;
745 -
746 do {
747 if (time_after(jiffies, timeout_time)) {
748 wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
749 @@ -227,7 +222,6 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
750 } while (!event);
751
752 out:
753 - wl1271_ps_elp_sleep(wl);
754 kfree(events_vector);
755 return ret;
756 }
757 diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
758 index a5e603062ee0..8f77fc0630ce 100644
759 --- a/drivers/s390/net/qeth_core_main.c
760 +++ b/drivers/s390/net/qeth_core_main.c
761 @@ -4540,8 +4540,8 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
762 {
763 struct qeth_ipa_cmd *cmd;
764 struct qeth_arp_query_info *qinfo;
765 - struct qeth_snmp_cmd *snmp;
766 unsigned char *data;
767 + void *snmp_data;
768 __u16 data_len;
769
770 QETH_CARD_TEXT(card, 3, "snpcmdcb");
771 @@ -4549,7 +4549,6 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
772 cmd = (struct qeth_ipa_cmd *) sdata;
773 data = (unsigned char *)((char *)cmd - reply->offset);
774 qinfo = (struct qeth_arp_query_info *) reply->param;
775 - snmp = &cmd->data.setadapterparms.data.snmp;
776
777 if (cmd->hdr.return_code) {
778 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
779 @@ -4562,10 +4561,15 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
780 return 0;
781 }
782 data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
783 - if (cmd->data.setadapterparms.hdr.seq_no == 1)
784 - data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
785 - else
786 - data_len -= (__u16)((char *)&snmp->request - (char *)cmd);
787 + if (cmd->data.setadapterparms.hdr.seq_no == 1) {
788 + snmp_data = &cmd->data.setadapterparms.data.snmp;
789 + data_len -= offsetof(struct qeth_ipa_cmd,
790 + data.setadapterparms.data.snmp);
791 + } else {
792 + snmp_data = &cmd->data.setadapterparms.data.snmp.request;
793 + data_len -= offsetof(struct qeth_ipa_cmd,
794 + data.setadapterparms.data.snmp.request);
795 + }
796
797 /* check if there is enough room in userspace */
798 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
799 @@ -4578,16 +4582,9 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
800 QETH_CARD_TEXT_(card, 4, "sseqn%i",
801 cmd->data.setadapterparms.hdr.seq_no);
802 /*copy entries to user buffer*/
803 - if (cmd->data.setadapterparms.hdr.seq_no == 1) {
804 - memcpy(qinfo->udata + qinfo->udata_offset,
805 - (char *)snmp,
806 - data_len + offsetof(struct qeth_snmp_cmd, data));
807 - qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
808 - } else {
809 - memcpy(qinfo->udata + qinfo->udata_offset,
810 - (char *)&snmp->request, data_len);
811 - }
812 + memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
813 qinfo->udata_offset += data_len;
814 +
815 /* check if all replies received ... */
816 QETH_CARD_TEXT_(card, 4, "srtot%i",
817 cmd->data.setadapterparms.hdr.used_total);
818 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
819 index 1e8f68960014..808437c5ec49 100644
820 --- a/drivers/usb/core/quirks.c
821 +++ b/drivers/usb/core/quirks.c
822 @@ -64,6 +64,9 @@ static const struct usb_device_id usb_quirk_list[] = {
823 /* Microsoft LifeCam-VX700 v2.0 */
824 { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
825
826 + /* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */
827 + { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },
828 +
829 /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
830 { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
831 { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
832 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
833 index 26efe8c7535f..ed6b9bfe3759 100644
834 --- a/drivers/usb/dwc3/gadget.c
835 +++ b/drivers/usb/dwc3/gadget.c
836 @@ -1280,9 +1280,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
837 unsigned transfer_in_flight;
838 unsigned started;
839
840 - if (dep->flags & DWC3_EP_STALL)
841 - return 0;
842 -
843 if (dep->number > 1)
844 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
845 else
846 @@ -1307,8 +1304,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
847 else
848 dep->flags |= DWC3_EP_STALL;
849 } else {
850 - if (!(dep->flags & DWC3_EP_STALL))
851 - return 0;
852
853 ret = dwc3_send_clear_stall_ep_cmd(dep);
854 if (ret)
855 diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
856 index 8fe624ad302a..7ca779493671 100644
857 --- a/drivers/usb/storage/unusual_realtek.h
858 +++ b/drivers/usb/storage/unusual_realtek.h
859 @@ -39,4 +39,14 @@ UNUSUAL_DEV(0x0bda, 0x0159, 0x0000, 0x9999,
860 "USB Card Reader",
861 USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
862
863 +UNUSUAL_DEV(0x0bda, 0x0177, 0x0000, 0x9999,
864 + "Realtek",
865 + "USB Card Reader",
866 + USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
867 +
868 +UNUSUAL_DEV(0x0bda, 0x0184, 0x0000, 0x9999,
869 + "Realtek",
870 + "USB Card Reader",
871 + USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
872 +
873 #endif /* defined(CONFIG_USB_STORAGE_REALTEK) || ... */
874 diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
875 index f6e111984ce2..a7b69deb6d70 100644
876 --- a/fs/btrfs/super.c
877 +++ b/fs/btrfs/super.c
878 @@ -2226,6 +2226,7 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
879 vol = memdup_user((void __user *)arg, sizeof(*vol));
880 if (IS_ERR(vol))
881 return PTR_ERR(vol);
882 + vol->name[BTRFS_PATH_NAME_MAX] = '\0';
883
884 switch (cmd) {
885 case BTRFS_IOC_SCAN_DEV:
886 diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
887 index 9517de0e668c..fd6c74662e9a 100644
888 --- a/fs/btrfs/transaction.c
889 +++ b/fs/btrfs/transaction.c
890 @@ -1924,6 +1924,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
891 return ret;
892 }
893
894 + btrfs_trans_release_metadata(trans, root);
895 + trans->block_rsv = NULL;
896 +
897 /* make a pass through all the delayed refs we have so far
898 * any runnings procs may add more while we are here
899 */
900 @@ -1933,9 +1936,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
901 return ret;
902 }
903
904 - btrfs_trans_release_metadata(trans, root);
905 - trans->block_rsv = NULL;
906 -
907 cur_trans = trans->transaction;
908
909 /*
910 diff --git a/fs/direct-io.c b/fs/direct-io.c
911 index c6220a2daefd..07cc38ec66ca 100644
912 --- a/fs/direct-io.c
913 +++ b/fs/direct-io.c
914 @@ -278,8 +278,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
915 */
916 dio->iocb->ki_pos += transferred;
917
918 - if (dio->op == REQ_OP_WRITE)
919 - ret = generic_write_sync(dio->iocb, transferred);
920 + if (ret > 0 && dio->op == REQ_OP_WRITE)
921 + ret = generic_write_sync(dio->iocb, ret);
922 dio->iocb->ki_complete(dio->iocb, ret, 0);
923 }
924
925 diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
926 index fbdb8f171893..babef30d440b 100644
927 --- a/fs/ext2/xattr.c
928 +++ b/fs/ext2/xattr.c
929 @@ -609,9 +609,9 @@ skip_replace:
930 }
931
932 cleanup:
933 - brelse(bh);
934 if (!(bh && header == HDR(bh)))
935 kfree(header);
936 + brelse(bh);
937 up_write(&EXT2_I(inode)->xattr_sem);
938
939 return error;
940 diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
941 index 1def337b16d4..8e880f7f67b2 100644
942 --- a/include/linux/workqueue.h
943 +++ b/include/linux/workqueue.h
944 @@ -106,9 +106,9 @@ struct work_struct {
945 #endif
946 };
947
948 -#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
949 +#define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
950 #define WORK_DATA_STATIC_INIT() \
951 - ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)
952 + ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))
953
954 struct delayed_work {
955 struct work_struct work;
956 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
957 index 9f7bba700e4e..7ea8da990b9d 100644
958 --- a/mm/huge_memory.c
959 +++ b/mm/huge_memory.c
960 @@ -1839,7 +1839,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
961 }
962 }
963
964 -static void freeze_page(struct page *page)
965 +static void unmap_page(struct page *page)
966 {
967 enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
968 TTU_RMAP_LOCKED;
969 @@ -1862,7 +1862,7 @@ static void freeze_page(struct page *page)
970 VM_BUG_ON_PAGE(ret, page + i - 1);
971 }
972
973 -static void unfreeze_page(struct page *page)
974 +static void remap_page(struct page *page)
975 {
976 int i;
977
978 @@ -1876,26 +1876,13 @@ static void __split_huge_page_tail(struct page *head, int tail,
979 struct page *page_tail = head + tail;
980
981 VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
982 - VM_BUG_ON_PAGE(page_ref_count(page_tail) != 0, page_tail);
983
984 /*
985 - * tail_page->_refcount is zero and not changing from under us. But
986 - * get_page_unless_zero() may be running from under us on the
987 - * tail_page. If we used atomic_set() below instead of atomic_inc() or
988 - * atomic_add(), we would then run atomic_set() concurrently with
989 - * get_page_unless_zero(), and atomic_set() is implemented in C not
990 - * using locked ops. spin_unlock on x86 sometime uses locked ops
991 - * because of PPro errata 66, 92, so unless somebody can guarantee
992 - * atomic_set() here would be safe on all archs (and not only on x86),
993 - * it's safer to use atomic_inc()/atomic_add().
994 + * Clone page flags before unfreezing refcount.
995 + *
996 + * After successful get_page_unless_zero() might follow flags change,
997 + * for exmaple lock_page() which set PG_waiters.
998 */
999 - if (PageAnon(head)) {
1000 - page_ref_inc(page_tail);
1001 - } else {
1002 - /* Additional pin to radix tree */
1003 - page_ref_add(page_tail, 2);
1004 - }
1005 -
1006 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1007 page_tail->flags |= (head->flags &
1008 ((1L << PG_referenced) |
1009 @@ -1907,36 +1894,42 @@ static void __split_huge_page_tail(struct page *head, int tail,
1010 (1L << PG_unevictable) |
1011 (1L << PG_dirty)));
1012
1013 - /*
1014 - * After clearing PageTail the gup refcount can be released.
1015 - * Page flags also must be visible before we make the page non-compound.
1016 - */
1017 + /* ->mapping in first tail page is compound_mapcount */
1018 + VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
1019 + page_tail);
1020 + page_tail->mapping = head->mapping;
1021 + page_tail->index = head->index + tail;
1022 +
1023 + /* Page flags must be visible before we make the page non-compound. */
1024 smp_wmb();
1025
1026 + /*
1027 + * Clear PageTail before unfreezing page refcount.
1028 + *
1029 + * After successful get_page_unless_zero() might follow put_page()
1030 + * which needs correct compound_head().
1031 + */
1032 clear_compound_head(page_tail);
1033
1034 + /* Finally unfreeze refcount. Additional reference from page cache. */
1035 + page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
1036 + PageSwapCache(head)));
1037 +
1038 if (page_is_young(head))
1039 set_page_young(page_tail);
1040 if (page_is_idle(head))
1041 set_page_idle(page_tail);
1042
1043 - /* ->mapping in first tail page is compound_mapcount */
1044 - VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
1045 - page_tail);
1046 - page_tail->mapping = head->mapping;
1047 -
1048 - page_tail->index = head->index + tail;
1049 page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
1050 lru_add_page_tail(head, page_tail, lruvec, list);
1051 }
1052
1053 static void __split_huge_page(struct page *page, struct list_head *list,
1054 - unsigned long flags)
1055 + pgoff_t end, unsigned long flags)
1056 {
1057 struct page *head = compound_head(page);
1058 struct zone *zone = page_zone(head);
1059 struct lruvec *lruvec;
1060 - pgoff_t end = -1;
1061 int i;
1062
1063 lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);
1064 @@ -1944,9 +1937,6 @@ static void __split_huge_page(struct page *page, struct list_head *list,
1065 /* complete memcg works before add pages to LRU */
1066 mem_cgroup_split_huge_fixup(head);
1067
1068 - if (!PageAnon(page))
1069 - end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE);
1070 -
1071 for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
1072 __split_huge_page_tail(head, i, lruvec, list);
1073 /* Some pages can be beyond i_size: drop them from page cache */
1074 @@ -1971,7 +1961,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
1075
1076 spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
1077
1078 - unfreeze_page(head);
1079 + remap_page(head);
1080
1081 for (i = 0; i < HPAGE_PMD_NR; i++) {
1082 struct page *subpage = head + i;
1083 @@ -2099,6 +2089,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
1084 int count, mapcount, extra_pins, ret;
1085 bool mlocked;
1086 unsigned long flags;
1087 + pgoff_t end;
1088
1089 VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
1090 VM_BUG_ON_PAGE(!PageLocked(page), page);
1091 @@ -2120,6 +2111,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
1092 goto out;
1093 }
1094 extra_pins = 0;
1095 + end = -1;
1096 mapping = NULL;
1097 anon_vma_lock_write(anon_vma);
1098 } else {
1099 @@ -2135,10 +2127,19 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
1100 extra_pins = HPAGE_PMD_NR;
1101 anon_vma = NULL;
1102 i_mmap_lock_read(mapping);
1103 +
1104 + /*
1105 + *__split_huge_page() may need to trim off pages beyond EOF:
1106 + * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
1107 + * which cannot be nested inside the page tree lock. So note
1108 + * end now: i_size itself may be changed at any moment, but
1109 + * head page lock is good enough to serialize the trimming.
1110 + */
1111 + end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
1112 }
1113
1114 /*
1115 - * Racy check if we can split the page, before freeze_page() will
1116 + * Racy check if we can split the page, before unmap_page() will
1117 * split PMDs
1118 */
1119 if (total_mapcount(head) != page_count(head) - extra_pins - 1) {
1120 @@ -2147,7 +2148,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
1121 }
1122
1123 mlocked = PageMlocked(page);
1124 - freeze_page(head);
1125 + unmap_page(head);
1126 VM_BUG_ON_PAGE(compound_mapcount(head), head);
1127
1128 /* Make sure the page is not on per-CPU pagevec as it takes pin */
1129 @@ -2184,7 +2185,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
1130 if (mapping)
1131 __dec_node_page_state(page, NR_SHMEM_THPS);
1132 spin_unlock(&pgdata->split_queue_lock);
1133 - __split_huge_page(page, list, flags);
1134 + __split_huge_page(page, list, end, flags);
1135 ret = 0;
1136 } else {
1137 if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
1138 @@ -2199,7 +2200,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
1139 fail: if (mapping)
1140 spin_unlock(&mapping->tree_lock);
1141 spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
1142 - unfreeze_page(head);
1143 + remap_page(head);
1144 ret = -EBUSY;
1145 }
1146
1147 diff --git a/mm/khugepaged.c b/mm/khugepaged.c
1148 index 1df37ee996d5..e0cfc3a54b6a 100644
1149 --- a/mm/khugepaged.c
1150 +++ b/mm/khugepaged.c
1151 @@ -1286,7 +1286,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1152 * collapse_shmem - collapse small tmpfs/shmem pages into huge one.
1153 *
1154 * Basic scheme is simple, details are more complex:
1155 - * - allocate and freeze a new huge page;
1156 + * - allocate and lock a new huge page;
1157 * - scan over radix tree replacing old pages the new one
1158 * + swap in pages if necessary;
1159 * + fill in gaps;
1160 @@ -1294,11 +1294,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1161 * - if replacing succeed:
1162 * + copy data over;
1163 * + free old pages;
1164 - * + unfreeze huge page;
1165 + * + unlock huge page;
1166 * - if replacing failed;
1167 * + put all pages back and unfreeze them;
1168 * + restore gaps in the radix-tree;
1169 - * + free huge page;
1170 + * + unlock and free huge page;
1171 */
1172 static void collapse_shmem(struct mm_struct *mm,
1173 struct address_space *mapping, pgoff_t start,
1174 @@ -1332,18 +1332,15 @@ static void collapse_shmem(struct mm_struct *mm,
1175 goto out;
1176 }
1177
1178 + __SetPageLocked(new_page);
1179 + __SetPageSwapBacked(new_page);
1180 new_page->index = start;
1181 new_page->mapping = mapping;
1182 - __SetPageSwapBacked(new_page);
1183 - __SetPageLocked(new_page);
1184 - BUG_ON(!page_ref_freeze(new_page, 1));
1185 -
1186
1187 /*
1188 - * At this point the new_page is 'frozen' (page_count() is zero), locked
1189 - * and not up-to-date. It's safe to insert it into radix tree, because
1190 - * nobody would be able to map it or use it in other way until we
1191 - * unfreeze it.
1192 + * At this point the new_page is locked and not up-to-date.
1193 + * It's safe to insert it into the page cache, because nobody would
1194 + * be able to map it or use it in another way until we unlock it.
1195 */
1196
1197 index = start;
1198 @@ -1351,19 +1348,29 @@ static void collapse_shmem(struct mm_struct *mm,
1199 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1200 int n = min(iter.index, end) - index;
1201
1202 + /*
1203 + * Stop if extent has been hole-punched, and is now completely
1204 + * empty (the more obvious i_size_read() check would take an
1205 + * irq-unsafe seqlock on 32-bit).
1206 + */
1207 + if (n >= HPAGE_PMD_NR) {
1208 + result = SCAN_TRUNCATED;
1209 + goto tree_locked;
1210 + }
1211 +
1212 /*
1213 * Handle holes in the radix tree: charge it from shmem and
1214 * insert relevant subpage of new_page into the radix-tree.
1215 */
1216 if (n && !shmem_charge(mapping->host, n)) {
1217 result = SCAN_FAIL;
1218 - break;
1219 + goto tree_locked;
1220 }
1221 - nr_none += n;
1222 for (; index < min(iter.index, end); index++) {
1223 radix_tree_insert(&mapping->page_tree, index,
1224 new_page + (index % HPAGE_PMD_NR));
1225 }
1226 + nr_none += n;
1227
1228 /* We are done. */
1229 if (index >= end)
1230 @@ -1379,12 +1386,12 @@ static void collapse_shmem(struct mm_struct *mm,
1231 result = SCAN_FAIL;
1232 goto tree_unlocked;
1233 }
1234 - spin_lock_irq(&mapping->tree_lock);
1235 } else if (trylock_page(page)) {
1236 get_page(page);
1237 + spin_unlock_irq(&mapping->tree_lock);
1238 } else {
1239 result = SCAN_PAGE_LOCK;
1240 - break;
1241 + goto tree_locked;
1242 }
1243
1244 /*
1245 @@ -1393,17 +1400,24 @@ static void collapse_shmem(struct mm_struct *mm,
1246 */
1247 VM_BUG_ON_PAGE(!PageLocked(page), page);
1248 VM_BUG_ON_PAGE(!PageUptodate(page), page);
1249 - VM_BUG_ON_PAGE(PageTransCompound(page), page);
1250 +
1251 + /*
1252 + * If file was truncated then extended, or hole-punched, before
1253 + * we locked the first page, then a THP might be there already.
1254 + */
1255 + if (PageTransCompound(page)) {
1256 + result = SCAN_PAGE_COMPOUND;
1257 + goto out_unlock;
1258 + }
1259
1260 if (page_mapping(page) != mapping) {
1261 result = SCAN_TRUNCATED;
1262 goto out_unlock;
1263 }
1264 - spin_unlock_irq(&mapping->tree_lock);
1265
1266 if (isolate_lru_page(page)) {
1267 result = SCAN_DEL_PAGE_LRU;
1268 - goto out_isolate_failed;
1269 + goto out_unlock;
1270 }
1271
1272 if (page_mapped(page))
1273 @@ -1425,7 +1439,9 @@ static void collapse_shmem(struct mm_struct *mm,
1274 */
1275 if (!page_ref_freeze(page, 3)) {
1276 result = SCAN_PAGE_COUNT;
1277 - goto out_lru;
1278 + spin_unlock_irq(&mapping->tree_lock);
1279 + putback_lru_page(page);
1280 + goto out_unlock;
1281 }
1282
1283 /*
1284 @@ -1441,17 +1457,10 @@ static void collapse_shmem(struct mm_struct *mm,
1285 slot = radix_tree_iter_next(&iter);
1286 index++;
1287 continue;
1288 -out_lru:
1289 - spin_unlock_irq(&mapping->tree_lock);
1290 - putback_lru_page(page);
1291 -out_isolate_failed:
1292 - unlock_page(page);
1293 - put_page(page);
1294 - goto tree_unlocked;
1295 out_unlock:
1296 unlock_page(page);
1297 put_page(page);
1298 - break;
1299 + goto tree_unlocked;
1300 }
1301
1302 /*
1303 @@ -1459,14 +1468,18 @@ out_unlock:
1304 * This code only triggers if there's nothing in radix tree
1305 * beyond 'end'.
1306 */
1307 - if (result == SCAN_SUCCEED && index < end) {
1308 + if (index < end) {
1309 int n = end - index;
1310
1311 + /* Stop if extent has been truncated, and is now empty */
1312 + if (n >= HPAGE_PMD_NR) {
1313 + result = SCAN_TRUNCATED;
1314 + goto tree_locked;
1315 + }
1316 if (!shmem_charge(mapping->host, n)) {
1317 result = SCAN_FAIL;
1318 goto tree_locked;
1319 }
1320 -
1321 for (; index < end; index++) {
1322 radix_tree_insert(&mapping->page_tree, index,
1323 new_page + (index % HPAGE_PMD_NR));
1324 @@ -1474,57 +1487,62 @@ out_unlock:
1325 nr_none += n;
1326 }
1327
1328 + __inc_node_page_state(new_page, NR_SHMEM_THPS);
1329 + if (nr_none) {
1330 + struct zone *zone = page_zone(new_page);
1331 +
1332 + __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1333 + __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1334 + }
1335 +
1336 tree_locked:
1337 spin_unlock_irq(&mapping->tree_lock);
1338 tree_unlocked:
1339
1340 if (result == SCAN_SUCCEED) {
1341 - unsigned long flags;
1342 - struct zone *zone = page_zone(new_page);
1343 -
1344 /*
1345 * Replacing old pages with new one has succeed, now we need to
1346 * copy the content and free old pages.
1347 */
1348 + index = start;
1349 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1350 + while (index < page->index) {
1351 + clear_highpage(new_page + (index % HPAGE_PMD_NR));
1352 + index++;
1353 + }
1354 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1355 page);
1356 list_del(&page->lru);
1357 - unlock_page(page);
1358 - page_ref_unfreeze(page, 1);
1359 page->mapping = NULL;
1360 + page_ref_unfreeze(page, 1);
1361 ClearPageActive(page);
1362 ClearPageUnevictable(page);
1363 + unlock_page(page);
1364 put_page(page);
1365 + index++;
1366 }
1367 -
1368 - local_irq_save(flags);
1369 - __inc_node_page_state(new_page, NR_SHMEM_THPS);
1370 - if (nr_none) {
1371 - __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1372 - __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1373 + while (index < end) {
1374 + clear_highpage(new_page + (index % HPAGE_PMD_NR));
1375 + index++;
1376 }
1377 - local_irq_restore(flags);
1378
1379 - /*
1380 - * Remove pte page tables, so we can re-faulti
1381 - * the page as huge.
1382 - */
1383 - retract_page_tables(mapping, start);
1384 -
1385 - /* Everything is ready, let's unfreeze the new_page */
1386 - set_page_dirty(new_page);
1387 SetPageUptodate(new_page);
1388 - page_ref_unfreeze(new_page, HPAGE_PMD_NR);
1389 + page_ref_add(new_page, HPAGE_PMD_NR - 1);
1390 + set_page_dirty(new_page);
1391 mem_cgroup_commit_charge(new_page, memcg, false, true);
1392 lru_cache_add_anon(new_page);
1393 - unlock_page(new_page);
1394
1395 + /*
1396 + * Remove pte page tables, so we can re-fault the page as huge.
1397 + */
1398 + retract_page_tables(mapping, start);
1399 *hpage = NULL;
1400 } else {
1401 /* Something went wrong: rollback changes to the radix-tree */
1402 - shmem_uncharge(mapping->host, nr_none);
1403 spin_lock_irq(&mapping->tree_lock);
1404 + mapping->nrpages -= nr_none;
1405 + shmem_uncharge(mapping->host, nr_none);
1406 +
1407 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
1408 start) {
1409 if (iter.index >= end)
1410 @@ -1549,20 +1567,19 @@ tree_unlocked:
1411 page_ref_unfreeze(page, 2);
1412 radix_tree_replace_slot(slot, page);
1413 spin_unlock_irq(&mapping->tree_lock);
1414 - putback_lru_page(page);
1415 unlock_page(page);
1416 + putback_lru_page(page);
1417 spin_lock_irq(&mapping->tree_lock);
1418 slot = radix_tree_iter_next(&iter);
1419 }
1420 VM_BUG_ON(nr_none);
1421 spin_unlock_irq(&mapping->tree_lock);
1422
1423 - /* Unfreeze new_page, caller would take care about freeing it */
1424 - page_ref_unfreeze(new_page, 1);
1425 mem_cgroup_cancel_charge(new_page, memcg, true);
1426 - unlock_page(new_page);
1427 new_page->mapping = NULL;
1428 }
1429 +
1430 + unlock_page(new_page);
1431 out:
1432 VM_BUG_ON(!list_empty(&pagelist));
1433 /* TODO: tracepoints */
1434 diff --git a/mm/shmem.c b/mm/shmem.c
1435 index 358a92be43eb..9b17bd4cbc5e 100644
1436 --- a/mm/shmem.c
1437 +++ b/mm/shmem.c
1438 @@ -181,6 +181,38 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages)
1439 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
1440 }
1441
1442 +static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
1443 +{
1444 + struct shmem_inode_info *info = SHMEM_I(inode);
1445 + struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1446 +
1447 + if (shmem_acct_block(info->flags, pages))
1448 + return false;
1449 +
1450 + if (sbinfo->max_blocks) {
1451 + if (percpu_counter_compare(&sbinfo->used_blocks,
1452 + sbinfo->max_blocks - pages) > 0)
1453 + goto unacct;
1454 + percpu_counter_add(&sbinfo->used_blocks, pages);
1455 + }
1456 +
1457 + return true;
1458 +
1459 +unacct:
1460 + shmem_unacct_blocks(info->flags, pages);
1461 + return false;
1462 +}
1463 +
1464 +static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
1465 +{
1466 + struct shmem_inode_info *info = SHMEM_I(inode);
1467 + struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1468 +
1469 + if (sbinfo->max_blocks)
1470 + percpu_counter_sub(&sbinfo->used_blocks, pages);
1471 + shmem_unacct_blocks(info->flags, pages);
1472 +}
1473 +
1474 static const struct super_operations shmem_ops;
1475 static const struct address_space_operations shmem_aops;
1476 static const struct file_operations shmem_file_operations;
1477 @@ -237,61 +269,46 @@ static void shmem_recalc_inode(struct inode *inode)
1478
1479 freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
1480 if (freed > 0) {
1481 - struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1482 - if (sbinfo->max_blocks)
1483 - percpu_counter_add(&sbinfo->used_blocks, -freed);
1484 info->alloced -= freed;
1485 inode->i_blocks -= freed * BLOCKS_PER_PAGE;
1486 - shmem_unacct_blocks(info->flags, freed);
1487 + shmem_inode_unacct_blocks(inode, freed);
1488 }
1489 }
1490
1491 bool shmem_charge(struct inode *inode, long pages)
1492 {
1493 struct shmem_inode_info *info = SHMEM_I(inode);
1494 - struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1495 unsigned long flags;
1496
1497 - if (shmem_acct_block(info->flags, pages))
1498 + if (!shmem_inode_acct_block(inode, pages))
1499 return false;
1500 +
1501 + /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
1502 + inode->i_mapping->nrpages += pages;
1503 +
1504 spin_lock_irqsave(&info->lock, flags);
1505 info->alloced += pages;
1506 inode->i_blocks += pages * BLOCKS_PER_PAGE;
1507 shmem_recalc_inode(inode);
1508 spin_unlock_irqrestore(&info->lock, flags);
1509 - inode->i_mapping->nrpages += pages;
1510
1511 - if (!sbinfo->max_blocks)
1512 - return true;
1513 - if (percpu_counter_compare(&sbinfo->used_blocks,
1514 - sbinfo->max_blocks - pages) > 0) {
1515 - inode->i_mapping->nrpages -= pages;
1516 - spin_lock_irqsave(&info->lock, flags);
1517 - info->alloced -= pages;
1518 - shmem_recalc_inode(inode);
1519 - spin_unlock_irqrestore(&info->lock, flags);
1520 - shmem_unacct_blocks(info->flags, pages);
1521 - return false;
1522 - }
1523 - percpu_counter_add(&sbinfo->used_blocks, pages);
1524 return true;
1525 }
1526
1527 void shmem_uncharge(struct inode *inode, long pages)
1528 {
1529 struct shmem_inode_info *info = SHMEM_I(inode);
1530 - struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1531 unsigned long flags;
1532
1533 + /* nrpages adjustment done by __delete_from_page_cache() or caller */
1534 +
1535 spin_lock_irqsave(&info->lock, flags);
1536 info->alloced -= pages;
1537 inode->i_blocks -= pages * BLOCKS_PER_PAGE;
1538 shmem_recalc_inode(inode);
1539 spin_unlock_irqrestore(&info->lock, flags);
1540
1541 - if (sbinfo->max_blocks)
1542 - percpu_counter_sub(&sbinfo->used_blocks, pages);
1543 - shmem_unacct_blocks(info->flags, pages);
1544 + shmem_inode_unacct_blocks(inode, pages);
1545 }
1546
1547 /*
1548 @@ -1424,9 +1441,10 @@ static struct page *shmem_alloc_page(gfp_t gfp,
1549 }
1550
1551 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1552 - struct shmem_inode_info *info, struct shmem_sb_info *sbinfo,
1553 + struct inode *inode,
1554 pgoff_t index, bool huge)
1555 {
1556 + struct shmem_inode_info *info = SHMEM_I(inode);
1557 struct page *page;
1558 int nr;
1559 int err = -ENOSPC;
1560 @@ -1435,14 +1453,8 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1561 huge = false;
1562 nr = huge ? HPAGE_PMD_NR : 1;
1563
1564 - if (shmem_acct_block(info->flags, nr))
1565 + if (!shmem_inode_acct_block(inode, nr))
1566 goto failed;
1567 - if (sbinfo->max_blocks) {
1568 - if (percpu_counter_compare(&sbinfo->used_blocks,
1569 - sbinfo->max_blocks - nr) > 0)
1570 - goto unacct;
1571 - percpu_counter_add(&sbinfo->used_blocks, nr);
1572 - }
1573
1574 if (huge)
1575 page = shmem_alloc_hugepage(gfp, info, index);
1576 @@ -1455,10 +1467,7 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1577 }
1578
1579 err = -ENOMEM;
1580 - if (sbinfo->max_blocks)
1581 - percpu_counter_add(&sbinfo->used_blocks, -nr);
1582 -unacct:
1583 - shmem_unacct_blocks(info->flags, nr);
1584 + shmem_inode_unacct_blocks(inode, nr);
1585 failed:
1586 return ERR_PTR(err);
1587 }
1588 @@ -1485,11 +1494,13 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1589 {
1590 struct page *oldpage, *newpage;
1591 struct address_space *swap_mapping;
1592 + swp_entry_t entry;
1593 pgoff_t swap_index;
1594 int error;
1595
1596 oldpage = *pagep;
1597 - swap_index = page_private(oldpage);
1598 + entry.val = page_private(oldpage);
1599 + swap_index = swp_offset(entry);
1600 swap_mapping = page_mapping(oldpage);
1601
1602 /*
1603 @@ -1508,7 +1519,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1604 __SetPageLocked(newpage);
1605 __SetPageSwapBacked(newpage);
1606 SetPageUptodate(newpage);
1607 - set_page_private(newpage, swap_index);
1608 + set_page_private(newpage, entry.val);
1609 SetPageSwapCache(newpage);
1610
1611 /*
1612 @@ -1718,10 +1729,9 @@ repeat:
1613 }
1614
1615 alloc_huge:
1616 - page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
1617 - index, true);
1618 + page = shmem_alloc_and_acct_page(gfp, inode, index, true);
1619 if (IS_ERR(page)) {
1620 -alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
1621 +alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, inode,
1622 index, false);
1623 }
1624 if (IS_ERR(page)) {
1625 @@ -1843,10 +1853,7 @@ clear:
1626 * Error recovery.
1627 */
1628 unacct:
1629 - if (sbinfo->max_blocks)
1630 - percpu_counter_sub(&sbinfo->used_blocks,
1631 - 1 << compound_order(page));
1632 - shmem_unacct_blocks(info->flags, 1 << compound_order(page));
1633 + shmem_inode_unacct_blocks(inode, 1 << compound_order(page));
1634
1635 if (PageTransHuge(page)) {
1636 unlock_page(page);
1637 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
1638 index 68ecb7d71c2b..dca1fed0d7da 100644
1639 --- a/net/core/skbuff.c
1640 +++ b/net/core/skbuff.c
1641 @@ -4421,6 +4421,10 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
1642 nf_reset(skb);
1643 nf_reset_trace(skb);
1644
1645 +#ifdef CONFIG_NET_SWITCHDEV
1646 + skb->offload_fwd_mark = 0;
1647 +#endif
1648 +
1649 if (!xnet)
1650 return;
1651
1652 diff --git a/sound/core/control.c b/sound/core/control.c
1653 index 995cde48c1be..511368fe974e 100644
1654 --- a/sound/core/control.c
1655 +++ b/sound/core/control.c
1656 @@ -346,6 +346,40 @@ static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
1657 return 0;
1658 }
1659
1660 +/* add a new kcontrol object; call with card->controls_rwsem locked */
1661 +static int __snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
1662 +{
1663 + struct snd_ctl_elem_id id;
1664 + unsigned int idx;
1665 + unsigned int count;
1666 +
1667 + id = kcontrol->id;
1668 + if (id.index > UINT_MAX - kcontrol->count)
1669 + return -EINVAL;
1670 +
1671 + if (snd_ctl_find_id(card, &id)) {
1672 + dev_err(card->dev,
1673 + "control %i:%i:%i:%s:%i is already present\n",
1674 + id.iface, id.device, id.subdevice, id.name, id.index);
1675 + return -EBUSY;
1676 + }
1677 +
1678 + if (snd_ctl_find_hole(card, kcontrol->count) < 0)
1679 + return -ENOMEM;
1680 +
1681 + list_add_tail(&kcontrol->list, &card->controls);
1682 + card->controls_count += kcontrol->count;
1683 + kcontrol->id.numid = card->last_numid + 1;
1684 + card->last_numid += kcontrol->count;
1685 +
1686 + id = kcontrol->id;
1687 + count = kcontrol->count;
1688 + for (idx = 0; idx < count; idx++, id.index++, id.numid++)
1689 + snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
1690 +
1691 + return 0;
1692 +}
1693 +
1694 /**
1695 * snd_ctl_add - add the control instance to the card
1696 * @card: the card instance
1697 @@ -362,45 +396,18 @@ static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
1698 */
1699 int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
1700 {
1701 - struct snd_ctl_elem_id id;
1702 - unsigned int idx;
1703 - unsigned int count;
1704 int err = -EINVAL;
1705
1706 if (! kcontrol)
1707 return err;
1708 if (snd_BUG_ON(!card || !kcontrol->info))
1709 goto error;
1710 - id = kcontrol->id;
1711 - if (id.index > UINT_MAX - kcontrol->count)
1712 - goto error;
1713
1714 down_write(&card->controls_rwsem);
1715 - if (snd_ctl_find_id(card, &id)) {
1716 - up_write(&card->controls_rwsem);
1717 - dev_err(card->dev, "control %i:%i:%i:%s:%i is already present\n",
1718 - id.iface,
1719 - id.device,
1720 - id.subdevice,
1721 - id.name,
1722 - id.index);
1723 - err = -EBUSY;
1724 - goto error;
1725 - }
1726 - if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
1727 - up_write(&card->controls_rwsem);
1728 - err = -ENOMEM;
1729 - goto error;
1730 - }
1731 - list_add_tail(&kcontrol->list, &card->controls);
1732 - card->controls_count += kcontrol->count;
1733 - kcontrol->id.numid = card->last_numid + 1;
1734 - card->last_numid += kcontrol->count;
1735 - id = kcontrol->id;
1736 - count = kcontrol->count;
1737 + err = __snd_ctl_add(card, kcontrol);
1738 up_write(&card->controls_rwsem);
1739 - for (idx = 0; idx < count; idx++, id.index++, id.numid++)
1740 - snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
1741 + if (err < 0)
1742 + goto error;
1743 return 0;
1744
1745 error:
1746 @@ -1354,9 +1361,12 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
1747 kctl->tlv.c = snd_ctl_elem_user_tlv;
1748
1749 /* This function manage to free the instance on failure. */
1750 - err = snd_ctl_add(card, kctl);
1751 - if (err < 0)
1752 - return err;
1753 + down_write(&card->controls_rwsem);
1754 + err = __snd_ctl_add(card, kctl);
1755 + if (err < 0) {
1756 + snd_ctl_free_one(kctl);
1757 + goto unlock;
1758 + }
1759 offset = snd_ctl_get_ioff(kctl, &info->id);
1760 snd_ctl_build_ioff(&info->id, kctl, offset);
1761 /*
1762 @@ -1367,10 +1377,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
1763 * which locks the element.
1764 */
1765
1766 - down_write(&card->controls_rwsem);
1767 card->user_ctl_count++;
1768 - up_write(&card->controls_rwsem);
1769
1770 + unlock:
1771 + up_write(&card->controls_rwsem);
1772 return 0;
1773 }
1774
1775 diff --git a/sound/isa/wss/wss_lib.c b/sound/isa/wss/wss_lib.c
1776 index 913b731d2236..f40330ddb9b2 100644
1777 --- a/sound/isa/wss/wss_lib.c
1778 +++ b/sound/isa/wss/wss_lib.c
1779 @@ -1531,7 +1531,6 @@ static int snd_wss_playback_open(struct snd_pcm_substream *substream)
1780 if (err < 0) {
1781 if (chip->release_dma)
1782 chip->release_dma(chip, chip->dma_private_data, chip->dma1);
1783 - snd_free_pages(runtime->dma_area, runtime->dma_bytes);
1784 return err;
1785 }
1786 chip->playback_substream = substream;
1787 @@ -1572,7 +1571,6 @@ static int snd_wss_capture_open(struct snd_pcm_substream *substream)
1788 if (err < 0) {
1789 if (chip->release_dma)
1790 chip->release_dma(chip, chip->dma_private_data, chip->dma2);
1791 - snd_free_pages(runtime->dma_area, runtime->dma_bytes);
1792 return err;
1793 }
1794 chip->capture_substream = substream;
1795 diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
1796 index 82259ca61e64..c4840fda44b4 100644
1797 --- a/sound/pci/ac97/ac97_codec.c
1798 +++ b/sound/pci/ac97/ac97_codec.c
1799 @@ -824,7 +824,7 @@ static int snd_ac97_put_spsa(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
1800 {
1801 struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
1802 int reg = kcontrol->private_value & 0xff;
1803 - int shift = (kcontrol->private_value >> 8) & 0xff;
1804 + int shift = (kcontrol->private_value >> 8) & 0x0f;
1805 int mask = (kcontrol->private_value >> 16) & 0xff;
1806 // int invert = (kcontrol->private_value >> 24) & 0xff;
1807 unsigned short value, old, new;
1808 diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c
1809 index 30bdc971883b..017e241b0ec9 100644
1810 --- a/sound/sparc/cs4231.c
1811 +++ b/sound/sparc/cs4231.c
1812 @@ -1146,10 +1146,8 @@ static int snd_cs4231_playback_open(struct snd_pcm_substream *substream)
1813 runtime->hw = snd_cs4231_playback;
1814
1815 err = snd_cs4231_open(chip, CS4231_MODE_PLAY);
1816 - if (err < 0) {
1817 - snd_free_pages(runtime->dma_area, runtime->dma_bytes);
1818 + if (err < 0)
1819 return err;
1820 - }
1821 chip->playback_substream = substream;
1822 chip->p_periods_sent = 0;
1823 snd_pcm_set_sync(substream);
1824 @@ -1167,10 +1165,8 @@ static int snd_cs4231_capture_open(struct snd_pcm_substream *substream)
1825 runtime->hw = snd_cs4231_capture;
1826
1827 err = snd_cs4231_open(chip, CS4231_MODE_RECORD);
1828 - if (err < 0) {
1829 - snd_free_pages(runtime->dma_area, runtime->dma_bytes);
1830 + if (err < 0)
1831 return err;
1832 - }
1833 chip->capture_substream = substream;
1834 chip->c_periods_sent = 0;
1835 snd_pcm_set_sync(substream);