Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0152-4.19.53-all-fixes.patch

Revision 3431
Fri Aug 2 11:47:59 2019 UTC by niro
File size: 106293 bytes
-linux-4.19.53
1 diff --git a/Makefile b/Makefile
2 index c82ee02ad9be..bedcb121dc3d 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 19
9 -SUBLEVEL = 52
10 +SUBLEVEL = 53
11 EXTRAVERSION =
12 NAME = "People's Front"
13
14 diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
15 index d2b5ec9c4b92..ba88b1eca93c 100644
16 --- a/arch/arm/kvm/hyp/Makefile
17 +++ b/arch/arm/kvm/hyp/Makefile
18 @@ -11,6 +11,7 @@ CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve)
19
20 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
21 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
22 +obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o
23
24 obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
25 obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
26 diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
27 index 2fabc2dc1966..feef06fc7c5a 100644
28 --- a/arch/arm64/kvm/hyp/Makefile
29 +++ b/arch/arm64/kvm/hyp/Makefile
30 @@ -10,6 +10,7 @@ KVM=../../../../virt/kvm
31
32 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
33 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
34 +obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o
35
36 obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-cpuif-proxy.o
37 obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
38 diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
39 index 8080c9f489c3..0fa558176fb1 100644
40 --- a/arch/arm64/mm/mmu.c
41 +++ b/arch/arm64/mm/mmu.c
42 @@ -921,13 +921,18 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
43
44 int __init arch_ioremap_pud_supported(void)
45 {
46 - /* only 4k granule supports level 1 block mappings */
47 - return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
48 + /*
49 + * Only 4k granule supports level 1 block mappings.
50 + * SW table walks can't handle removal of intermediate entries.
51 + */
52 + return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
53 + !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
54 }
55
56 int __init arch_ioremap_pmd_supported(void)
57 {
58 - return 1;
59 + /* See arch_ioremap_pud_supported() */
60 + return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
61 }
62
63 int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
64 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
65 index ad6b91013a05..5332f628c1ed 100644
66 --- a/arch/s390/include/asm/uaccess.h
67 +++ b/arch/s390/include/asm/uaccess.h
68 @@ -56,8 +56,10 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n);
69 unsigned long __must_check
70 raw_copy_to_user(void __user *to, const void *from, unsigned long n);
71
72 +#ifndef CONFIG_KASAN
73 #define INLINE_COPY_FROM_USER
74 #define INLINE_COPY_TO_USER
75 +#endif
76
77 #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
78
79 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
80 index f538e3fac7ad..fc7de27960e7 100644
81 --- a/arch/s390/kvm/kvm-s390.c
82 +++ b/arch/s390/kvm/kvm-s390.c
83 @@ -4156,21 +4156,28 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
84 const struct kvm_memory_slot *new,
85 enum kvm_mr_change change)
86 {
87 - int rc;
88 -
89 - /* If the basics of the memslot do not change, we do not want
90 - * to update the gmap. Every update causes several unnecessary
91 - * segment translation exceptions. This is usually handled just
92 - * fine by the normal fault handler + gmap, but it will also
93 - * cause faults on the prefix page of running guest CPUs.
94 - */
95 - if (old->userspace_addr == mem->userspace_addr &&
96 - old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
97 - old->npages * PAGE_SIZE == mem->memory_size)
98 - return;
99 + int rc = 0;
100
101 - rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
102 - mem->guest_phys_addr, mem->memory_size);
103 + switch (change) {
104 + case KVM_MR_DELETE:
105 + rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
106 + old->npages * PAGE_SIZE);
107 + break;
108 + case KVM_MR_MOVE:
109 + rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
110 + old->npages * PAGE_SIZE);
111 + if (rc)
112 + break;
113 + /* FALLTHROUGH */
114 + case KVM_MR_CREATE:
115 + rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
116 + mem->guest_phys_addr, mem->memory_size);
117 + break;
118 + case KVM_MR_FLAGS_ONLY:
119 + break;
120 + default:
121 + WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
122 + }
123 if (rc)
124 pr_warn("failed to commit memory region\n");
125 return;
126 diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/intel_rdt_monitor.c
127 index b0f3aed76b75..3d4ec80a6bb9 100644
128 --- a/arch/x86/kernel/cpu/intel_rdt_monitor.c
129 +++ b/arch/x86/kernel/cpu/intel_rdt_monitor.c
130 @@ -371,6 +371,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
131 struct list_head *head;
132 struct rdtgroup *entry;
133
134 + if (!is_mbm_local_enabled())
135 + return;
136 +
137 r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
138 closid = rgrp->closid;
139 rmid = rgrp->mon.rmid;
140 diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
141 index b43ddefd77f4..b7027e667604 100644
142 --- a/arch/x86/kernel/cpu/microcode/core.c
143 +++ b/arch/x86/kernel/cpu/microcode/core.c
144 @@ -873,7 +873,7 @@ int __init microcode_init(void)
145 goto out_ucode_group;
146
147 register_syscore_ops(&mc_syscore_ops);
148 - cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
149 + cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online",
150 mc_cpu_online, mc_cpu_down_prep);
151
152 pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
153 diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
154 index 58ead7db71a3..952aebd0a8a3 100644
155 --- a/arch/x86/kvm/pmu.c
156 +++ b/arch/x86/kvm/pmu.c
157 @@ -282,20 +282,16 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
158 {
159 bool fast_mode = idx & (1u << 31);
160 struct kvm_pmc *pmc;
161 - u64 ctr_val;
162 + u64 mask = fast_mode ? ~0u : ~0ull;
163
164 if (is_vmware_backdoor_pmc(idx))
165 return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
166
167 - pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx);
168 + pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask);
169 if (!pmc)
170 return 1;
171
172 - ctr_val = pmc_read_counter(pmc);
173 - if (fast_mode)
174 - ctr_val = (u32)ctr_val;
175 -
176 - *data = ctr_val;
177 + *data = pmc_read_counter(pmc) & mask;
178 return 0;
179 }
180
181 diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
182 index ba8898e1a854..22dff661145a 100644
183 --- a/arch/x86/kvm/pmu.h
184 +++ b/arch/x86/kvm/pmu.h
185 @@ -25,7 +25,8 @@ struct kvm_pmu_ops {
186 unsigned (*find_fixed_event)(int idx);
187 bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
188 struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
189 - struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx);
190 + struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx,
191 + u64 *mask);
192 int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
193 bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
194 int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
195 diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
196 index 1495a735b38e..41dff881e0f0 100644
197 --- a/arch/x86/kvm/pmu_amd.c
198 +++ b/arch/x86/kvm/pmu_amd.c
199 @@ -186,7 +186,7 @@ static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
200 }
201
202 /* idx is the ECX register of RDPMC instruction */
203 -static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx)
204 +static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *mask)
205 {
206 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
207 struct kvm_pmc *counters;
208 diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
209 index 5ab4a364348e..c3f103e2b08e 100644
210 --- a/arch/x86/kvm/pmu_intel.c
211 +++ b/arch/x86/kvm/pmu_intel.c
212 @@ -126,7 +126,7 @@ static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
213 }
214
215 static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
216 - unsigned idx)
217 + unsigned idx, u64 *mask)
218 {
219 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
220 bool fixed = idx & (1u << 30);
221 @@ -138,6 +138,7 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
222 if (fixed && idx >= pmu->nr_arch_fixed_counters)
223 return NULL;
224 counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
225 + *mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
226
227 return &counters[idx];
228 }
229 @@ -183,9 +184,13 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
230 *data = pmu->global_ovf_ctrl;
231 return 0;
232 default:
233 - if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
234 - (pmc = get_fixed_pmc(pmu, msr))) {
235 - *data = pmc_read_counter(pmc);
236 + if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
237 + u64 val = pmc_read_counter(pmc);
238 + *data = val & pmu->counter_bitmask[KVM_PMC_GP];
239 + return 0;
240 + } else if ((pmc = get_fixed_pmc(pmu, msr))) {
241 + u64 val = pmc_read_counter(pmc);
242 + *data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
243 return 0;
244 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
245 *data = pmc->eventsel;
246 @@ -235,11 +240,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
247 }
248 break;
249 default:
250 - if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
251 - (pmc = get_fixed_pmc(pmu, msr))) {
252 - if (!msr_info->host_initiated)
253 - data = (s64)(s32)data;
254 - pmc->counter += data - pmc_read_counter(pmc);
255 + if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
256 + if (msr_info->host_initiated)
257 + pmc->counter = data;
258 + else
259 + pmc->counter = (s32)data;
260 + return 0;
261 + } else if ((pmc = get_fixed_pmc(pmu, msr))) {
262 + pmc->counter = data;
263 return 0;
264 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
265 if (data == pmc->eventsel)
266 diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
267 index e3e77527f8df..4bfd14d5da8e 100644
268 --- a/arch/x86/mm/kasan_init_64.c
269 +++ b/arch/x86/mm/kasan_init_64.c
270 @@ -198,7 +198,7 @@ static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
271 if (!pgtable_l5_enabled())
272 return (p4d_t *)pgd;
273
274 - p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
275 + p4d = pgd_val(*pgd) & PTE_PFN_MASK;
276 p4d += __START_KERNEL_map - phys_base;
277 return (p4d_t *)p4d + p4d_index(addr);
278 }
279 diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
280 index 0988971069c9..bfe769209eae 100644
281 --- a/arch/x86/mm/kaslr.c
282 +++ b/arch/x86/mm/kaslr.c
283 @@ -51,7 +51,7 @@ static __initdata struct kaslr_memory_region {
284 } kaslr_regions[] = {
285 { &page_offset_base, 0 },
286 { &vmalloc_base, 0 },
287 - { &vmemmap_base, 1 },
288 + { &vmemmap_base, 0 },
289 };
290
291 /* Get size in bytes used by the memory region */
292 @@ -77,6 +77,7 @@ void __init kernel_randomize_memory(void)
293 unsigned long rand, memory_tb;
294 struct rnd_state rand_state;
295 unsigned long remain_entropy;
296 + unsigned long vmemmap_size;
297
298 vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
299 vaddr = vaddr_start;
300 @@ -108,6 +109,14 @@ void __init kernel_randomize_memory(void)
301 if (memory_tb < kaslr_regions[0].size_tb)
302 kaslr_regions[0].size_tb = memory_tb;
303
304 + /*
305 + * Calculate the vmemmap region size in TBs, aligned to a TB
306 + * boundary.
307 + */
308 + vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) *
309 + sizeof(struct page);
310 + kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT);
311 +
312 /* Calculate entropy available between regions */
313 remain_entropy = vaddr_end - vaddr_start;
314 for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
315 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
316 index adf28788cab5..133fed8e4a8b 100644
317 --- a/drivers/ata/libata-core.c
318 +++ b/drivers/ata/libata-core.c
319 @@ -4476,9 +4476,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
320 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
321 ATA_HORKAGE_FIRMWARE_WARN },
322
323 - /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
324 - { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
325 - { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
326 + /* drives which fail FPDMA_AA activation (some may freeze afterwards)
327 + the ST disks also have LPM issues */
328 + { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA |
329 + ATA_HORKAGE_NOLPM, },
330 + { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA |
331 + ATA_HORKAGE_NOLPM, },
332 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
333
334 /* Blacklist entries taken from Silicon Image 3124/3132
335 diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
336 index 7c581f4c2b94..5965f6383ada 100644
337 --- a/drivers/gpu/drm/drm_edid.c
338 +++ b/drivers/gpu/drm/drm_edid.c
339 @@ -1580,6 +1580,50 @@ static void connector_bad_edid(struct drm_connector *connector,
340 }
341 }
342
343 +/* Get override or firmware EDID */
344 +static struct edid *drm_get_override_edid(struct drm_connector *connector)
345 +{
346 + struct edid *override = NULL;
347 +
348 + if (connector->override_edid)
349 + override = drm_edid_duplicate(connector->edid_blob_ptr->data);
350 +
351 + if (!override)
352 + override = drm_load_edid_firmware(connector);
353 +
354 + return IS_ERR(override) ? NULL : override;
355 +}
356 +
357 +/**
358 + * drm_add_override_edid_modes - add modes from override/firmware EDID
359 + * @connector: connector we're probing
360 + *
361 + * Add modes from the override/firmware EDID, if available. Only to be used from
362 + * drm_helper_probe_single_connector_modes() as a fallback for when DDC probe
363 + * failed during drm_get_edid() and caused the override/firmware EDID to be
364 + * skipped.
365 + *
366 + * Return: The number of modes added or 0 if we couldn't find any.
367 + */
368 +int drm_add_override_edid_modes(struct drm_connector *connector)
369 +{
370 + struct edid *override;
371 + int num_modes = 0;
372 +
373 + override = drm_get_override_edid(connector);
374 + if (override) {
375 + drm_connector_update_edid_property(connector, override);
376 + num_modes = drm_add_edid_modes(connector, override);
377 + kfree(override);
378 +
379 + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] adding %d modes via fallback override/firmware EDID\n",
380 + connector->base.id, connector->name, num_modes);
381 + }
382 +
383 + return num_modes;
384 +}
385 +EXPORT_SYMBOL(drm_add_override_edid_modes);
386 +
387 /**
388 * drm_do_get_edid - get EDID data using a custom EDID block read function
389 * @connector: connector we're probing
390 @@ -1607,15 +1651,10 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
391 {
392 int i, j = 0, valid_extensions = 0;
393 u8 *edid, *new;
394 - struct edid *override = NULL;
395 -
396 - if (connector->override_edid)
397 - override = drm_edid_duplicate(connector->edid_blob_ptr->data);
398 -
399 - if (!override)
400 - override = drm_load_edid_firmware(connector);
401 + struct edid *override;
402
403 - if (!IS_ERR_OR_NULL(override))
404 + override = drm_get_override_edid(connector);
405 + if (override)
406 return override;
407
408 if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
409 diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
410 index a1bb157bfdfa..d18b7e27ef64 100644
411 --- a/drivers/gpu/drm/drm_probe_helper.c
412 +++ b/drivers/gpu/drm/drm_probe_helper.c
413 @@ -479,6 +479,13 @@ retry:
414
415 count = (*connector_funcs->get_modes)(connector);
416
417 + /*
418 + * Fallback for when DDC probe failed in drm_get_edid() and thus skipped
419 + * override/firmware EDID.
420 + */
421 + if (count == 0 && connector->status == connector_status_connected)
422 + count = drm_add_override_edid_modes(connector);
423 +
424 if (count == 0 && connector->status == connector_status_connected)
425 count = drm_add_modes_noedid(connector, 1024, 768);
426 count += drm_helper_probe_add_cmdline_mode(connector);
427 diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
428 index 812fe7b06f87..1817a5c0c80f 100644
429 --- a/drivers/gpu/drm/i915/intel_sdvo.c
430 +++ b/drivers/gpu/drm/i915/intel_sdvo.c
431 @@ -925,6 +925,13 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
432 return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
433 }
434
435 +static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo,
436 + u8 audio_state)
437 +{
438 + return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_AUDIO_STAT,
439 + &audio_state, 1);
440 +}
441 +
442 #if 0
443 static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
444 {
445 @@ -1371,11 +1378,6 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
446 else
447 sdvox |= SDVO_PIPE_SEL(crtc->pipe);
448
449 - if (crtc_state->has_audio) {
450 - WARN_ON_ONCE(INTEL_GEN(dev_priv) < 4);
451 - sdvox |= SDVO_AUDIO_ENABLE;
452 - }
453 -
454 if (INTEL_GEN(dev_priv) >= 4) {
455 /* done in crtc_mode_set as the dpll_md reg must be written early */
456 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
457 @@ -1515,8 +1517,13 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
458 if (sdvox & HDMI_COLOR_RANGE_16_235)
459 pipe_config->limited_color_range = true;
460
461 - if (sdvox & SDVO_AUDIO_ENABLE)
462 - pipe_config->has_audio = true;
463 + if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT,
464 + &val, 1)) {
465 + u8 mask = SDVO_AUDIO_ELD_VALID | SDVO_AUDIO_PRESENCE_DETECT;
466 +
467 + if ((val & mask) == mask)
468 + pipe_config->has_audio = true;
469 + }
470
471 if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
472 &val, 1)) {
473 @@ -1529,6 +1536,32 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
474 pipe_config->pixel_multiplier, encoder_pixel_multiplier);
475 }
476
477 +static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo)
478 +{
479 + intel_sdvo_set_audio_state(intel_sdvo, 0);
480 +}
481 +
482 +static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
483 + const struct intel_crtc_state *crtc_state,
484 + const struct drm_connector_state *conn_state)
485 +{
486 + const struct drm_display_mode *adjusted_mode =
487 + &crtc_state->base.adjusted_mode;
488 + struct drm_connector *connector = conn_state->connector;
489 + u8 *eld = connector->eld;
490 +
491 + eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
492 +
493 + intel_sdvo_set_audio_state(intel_sdvo, 0);
494 +
495 + intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD,
496 + SDVO_HBUF_TX_DISABLED,
497 + eld, drm_eld_size(eld));
498 +
499 + intel_sdvo_set_audio_state(intel_sdvo, SDVO_AUDIO_ELD_VALID |
500 + SDVO_AUDIO_PRESENCE_DETECT);
501 +}
502 +
503 static void intel_disable_sdvo(struct intel_encoder *encoder,
504 const struct intel_crtc_state *old_crtc_state,
505 const struct drm_connector_state *conn_state)
506 @@ -1538,6 +1571,9 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
507 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
508 u32 temp;
509
510 + if (old_crtc_state->has_audio)
511 + intel_sdvo_disable_audio(intel_sdvo);
512 +
513 intel_sdvo_set_active_outputs(intel_sdvo, 0);
514 if (0)
515 intel_sdvo_set_encoder_power_state(intel_sdvo,
516 @@ -1623,6 +1659,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
517 intel_sdvo_set_encoder_power_state(intel_sdvo,
518 DRM_MODE_DPMS_ON);
519 intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
520 +
521 + if (pipe_config->has_audio)
522 + intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state);
523 }
524
525 static enum drm_mode_status
526 @@ -2514,7 +2553,6 @@ static bool
527 intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
528 {
529 struct drm_encoder *encoder = &intel_sdvo->base.base;
530 - struct drm_i915_private *dev_priv = to_i915(encoder->dev);
531 struct drm_connector *connector;
532 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
533 struct intel_connector *intel_connector;
534 @@ -2551,9 +2589,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
535 encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
536 connector->connector_type = DRM_MODE_CONNECTOR_DVID;
537
538 - /* gen3 doesn't do the hdmi bits in the SDVO register */
539 - if (INTEL_GEN(dev_priv) >= 4 &&
540 - intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
541 + if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
542 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
543 intel_sdvo->is_hdmi = true;
544 }
545 diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
546 index db0ed499268a..e9ba3b047f93 100644
547 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
548 +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
549 @@ -707,6 +707,9 @@ struct intel_sdvo_enhancements_arg {
550 #define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
551 #define SDVO_CMD_SET_AUDIO_STAT 0x91
552 #define SDVO_CMD_GET_AUDIO_STAT 0x92
553 + #define SDVO_AUDIO_ELD_VALID (1 << 0)
554 + #define SDVO_AUDIO_PRESENCE_DETECT (1 << 1)
555 + #define SDVO_AUDIO_CP_READY (1 << 2)
556 #define SDVO_CMD_SET_HBUF_INDEX 0x93
557 #define SDVO_HBUF_INDEX_ELD 0
558 #define SDVO_HBUF_INDEX_AVI_IF 1
559 diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
560 index 4b75ad40dd80..00d9d77f583a 100644
561 --- a/drivers/gpu/drm/nouveau/Kconfig
562 +++ b/drivers/gpu/drm/nouveau/Kconfig
563 @@ -16,10 +16,21 @@ config DRM_NOUVEAU
564 select INPUT if ACPI && X86
565 select THERMAL if ACPI && X86
566 select ACPI_VIDEO if ACPI && X86
567 - select DRM_VM
568 help
569 Choose this option for open-source NVIDIA support.
570
571 +config NOUVEAU_LEGACY_CTX_SUPPORT
572 + bool "Nouveau legacy context support"
573 + depends on DRM_NOUVEAU
574 + select DRM_VM
575 + default y
576 + help
577 + There was a version of the nouveau DDX that relied on legacy
578 + ctx ioctls not erroring out. But that was back in time a long
579 + ways, so offer a way to disable it now. For uapi compat with
580 + old nouveau ddx this should be on by default, but modern distros
581 + should consider turning it off.
582 +
583 config NOUVEAU_PLATFORM_DRIVER
584 bool "Nouveau (NVIDIA) SoC GPUs"
585 depends on DRM_NOUVEAU && ARCH_TEGRA
586 diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
587 index 74d2283f2c28..2b7a54cc3c9e 100644
588 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c
589 +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
590 @@ -1015,8 +1015,11 @@ nouveau_driver_fops = {
591 static struct drm_driver
592 driver_stub = {
593 .driver_features =
594 - DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
595 - DRIVER_KMS_LEGACY_CONTEXT,
596 + DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER
597 +#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
598 + | DRIVER_KMS_LEGACY_CONTEXT
599 +#endif
600 + ,
601
602 .load = nouveau_drm_load,
603 .unload = nouveau_drm_unload,
604 diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
605 index 8edb9f2a4269..e4b977cc8452 100644
606 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
607 +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
608 @@ -169,7 +169,11 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
609 struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
610
611 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
612 +#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
613 return drm_legacy_mmap(filp, vma);
614 +#else
615 + return -EINVAL;
616 +#endif
617
618 return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
619 }
620 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
621 index c3e2022bda5d..3834aa71c9c4 100644
622 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
623 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
624 @@ -2493,7 +2493,8 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
625
626 cmd = container_of(header, typeof(*cmd), header);
627
628 - if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
629 + if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
630 + cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
631 DRM_ERROR("Illegal shader type %u.\n",
632 (unsigned) cmd->body.type);
633 return -EINVAL;
634 @@ -2732,6 +2733,10 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
635 if (view_type == vmw_view_max)
636 return -EINVAL;
637 cmd = container_of(header, typeof(*cmd), header);
638 + if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
639 + DRM_ERROR("Invalid surface id.\n");
640 + return -EINVAL;
641 + }
642 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
643 user_surface_converter,
644 &cmd->sid, &srf_node);
645 diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
646 index 2faf5421fdd0..184e49036e1d 100644
647 --- a/drivers/hid/hid-multitouch.c
648 +++ b/drivers/hid/hid-multitouch.c
649 @@ -641,6 +641,13 @@ static void mt_store_field(struct hid_device *hdev,
650 if (*target != DEFAULT_TRUE &&
651 *target != DEFAULT_FALSE &&
652 *target != DEFAULT_ZERO) {
653 + if (usage->contactid == DEFAULT_ZERO ||
654 + usage->x == DEFAULT_ZERO ||
655 + usage->y == DEFAULT_ZERO) {
656 + hid_dbg(hdev,
657 + "ignoring duplicate usage on incomplete");
658 + return;
659 + }
660 usage = mt_allocate_usage(hdev, application);
661 if (!usage)
662 return;
663 diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
664 index 5dd3a8245f0f..d7c3f4ac2c04 100644
665 --- a/drivers/hid/wacom_wac.c
666 +++ b/drivers/hid/wacom_wac.c
667 @@ -1234,13 +1234,13 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
668 /* Add back in missing bits of ID for non-USI pens */
669 wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF;
670 }
671 - wacom->tool[0] = wacom_intuos_get_tool_type(wacom_intuos_id_mangle(wacom->id[0]));
672
673 for (i = 0; i < pen_frames; i++) {
674 unsigned char *frame = &data[i*pen_frame_len + 1];
675 bool valid = frame[0] & 0x80;
676 bool prox = frame[0] & 0x40;
677 bool range = frame[0] & 0x20;
678 + bool invert = frame[0] & 0x10;
679
680 if (!valid)
681 continue;
682 @@ -1249,9 +1249,24 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
683 wacom->shared->stylus_in_proximity = false;
684 wacom_exit_report(wacom);
685 input_sync(pen_input);
686 +
687 + wacom->tool[0] = 0;
688 + wacom->id[0] = 0;
689 + wacom->serial[0] = 0;
690 return;
691 }
692 +
693 if (range) {
694 + if (!wacom->tool[0]) { /* first in range */
695 + /* Going into range select tool */
696 + if (invert)
697 + wacom->tool[0] = BTN_TOOL_RUBBER;
698 + else if (wacom->id[0])
699 + wacom->tool[0] = wacom_intuos_get_tool_type(wacom->id[0]);
700 + else
701 + wacom->tool[0] = BTN_TOOL_PEN;
702 + }
703 +
704 input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
705 input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
706
707 @@ -1273,23 +1288,26 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
708 get_unaligned_le16(&frame[11]));
709 }
710 }
711 - input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
712 - if (wacom->features.type == INTUOSP2_BT) {
713 - input_report_abs(pen_input, ABS_DISTANCE,
714 - range ? frame[13] : wacom->features.distance_max);
715 - } else {
716 - input_report_abs(pen_input, ABS_DISTANCE,
717 - range ? frame[7] : wacom->features.distance_max);
718 - }
719
720 - input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x01);
721 - input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02);
722 - input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04);
723 + if (wacom->tool[0]) {
724 + input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
725 + if (wacom->features.type == INTUOSP2_BT) {
726 + input_report_abs(pen_input, ABS_DISTANCE,
727 + range ? frame[13] : wacom->features.distance_max);
728 + } else {
729 + input_report_abs(pen_input, ABS_DISTANCE,
730 + range ? frame[7] : wacom->features.distance_max);
731 + }
732
733 - input_report_key(pen_input, wacom->tool[0], prox);
734 - input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]);
735 - input_report_abs(pen_input, ABS_MISC,
736 - wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */
737 + input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x09);
738 + input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02);
739 + input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04);
740 +
741 + input_report_key(pen_input, wacom->tool[0], prox);
742 + input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]);
743 + input_report_abs(pen_input, ABS_MISC,
744 + wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */
745 + }
746
747 wacom->shared->stylus_in_proximity = prox;
748
749 @@ -1351,11 +1369,17 @@ static void wacom_intuos_pro2_bt_touch(struct wacom_wac *wacom)
750 if (wacom->num_contacts_left <= 0) {
751 wacom->num_contacts_left = 0;
752 wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom);
753 + input_sync(touch_input);
754 }
755 }
756
757 - input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7));
758 - input_sync(touch_input);
759 + if (wacom->num_contacts_left == 0) {
760 + // Be careful that we don't accidentally call input_sync with
761 + // only a partial set of fingers of processed
762 + input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7));
763 + input_sync(touch_input);
764 + }
765 +
766 }
767
768 static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
769 @@ -1363,7 +1387,7 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
770 struct input_dev *pad_input = wacom->pad_input;
771 unsigned char *data = wacom->data;
772
773 - int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01);
774 + int buttons = data[282] | ((data[281] & 0x40) << 2);
775 int ring = data[285] & 0x7F;
776 bool ringstatus = data[285] & 0x80;
777 bool prox = buttons || ringstatus;
778 @@ -3832,7 +3856,7 @@ static void wacom_24hd_update_leds(struct wacom *wacom, int mask, int group)
779 static bool wacom_is_led_toggled(struct wacom *wacom, int button_count,
780 int mask, int group)
781 {
782 - int button_per_group;
783 + int group_button;
784
785 /*
786 * 21UX2 has LED group 1 to the left and LED group 0
787 @@ -3842,9 +3866,12 @@ static bool wacom_is_led_toggled(struct wacom *wacom, int button_count,
788 if (wacom->wacom_wac.features.type == WACOM_21UX2)
789 group = 1 - group;
790
791 - button_per_group = button_count/wacom->led.count;
792 + group_button = group * (button_count/wacom->led.count);
793 +
794 + if (wacom->wacom_wac.features.type == INTUOSP2_BT)
795 + group_button = 8;
796
797 - return mask & (1 << (group * button_per_group));
798 + return mask & (1 << group_button);
799 }
800
801 static void wacom_update_led(struct wacom *wacom, int button_count, int mask,
802 diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c
803 index f4a5ae69bf6a..fa3763e4b3ee 100644
804 --- a/drivers/i2c/busses/i2c-acorn.c
805 +++ b/drivers/i2c/busses/i2c-acorn.c
806 @@ -81,6 +81,7 @@ static struct i2c_algo_bit_data ioc_data = {
807
808 static struct i2c_adapter ioc_ops = {
809 .nr = 0,
810 + .name = "ioc",
811 .algo_data = &ioc_data,
812 };
813
814 diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
815 index ce119cb279c3..0c3b8f1c7225 100644
816 --- a/drivers/iommu/arm-smmu.c
817 +++ b/drivers/iommu/arm-smmu.c
818 @@ -56,6 +56,15 @@
819 #include "io-pgtable.h"
820 #include "arm-smmu-regs.h"
821
822 +/*
823 + * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
824 + * global register space are still, in fact, using a hypervisor to mediate it
825 + * by trapping and emulating register accesses. Sadly, some deployed versions
826 + * of said trapping code have bugs wherein they go horribly wrong for stores
827 + * using r31 (i.e. XZR/WZR) as the source register.
828 + */
829 +#define QCOM_DUMMY_VAL -1
830 +
831 #define ARM_MMU500_ACTLR_CPRE (1 << 1)
832
833 #define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
834 @@ -398,7 +407,7 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
835 {
836 unsigned int spin_cnt, delay;
837
838 - writel_relaxed(0, sync);
839 + writel_relaxed(QCOM_DUMMY_VAL, sync);
840 for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
841 for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
842 if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
843 @@ -1637,8 +1646,8 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
844 }
845
846 /* Invalidate the TLB, just in case */
847 - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
848 - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
849 + writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH);
850 + writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
851
852 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
853
854 diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
855 index 8f07fa6e1739..268f1b685084 100644
856 --- a/drivers/md/bcache/bset.c
857 +++ b/drivers/md/bcache/bset.c
858 @@ -887,12 +887,22 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
859 struct bset *i = bset_tree_last(b)->data;
860 struct bkey *m, *prev = NULL;
861 struct btree_iter iter;
862 + struct bkey preceding_key_on_stack = ZERO_KEY;
863 + struct bkey *preceding_key_p = &preceding_key_on_stack;
864
865 BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
866
867 - m = bch_btree_iter_init(b, &iter, b->ops->is_extents
868 - ? PRECEDING_KEY(&START_KEY(k))
869 - : PRECEDING_KEY(k));
870 + /*
871 + * If k has preceding key, preceding_key_p will be set to address
872 + * of k's preceding key; otherwise preceding_key_p will be set
873 + * to NULL inside preceding_key().
874 + */
875 + if (b->ops->is_extents)
876 + preceding_key(&START_KEY(k), &preceding_key_p);
877 + else
878 + preceding_key(k, &preceding_key_p);
879 +
880 + m = bch_btree_iter_init(b, &iter, preceding_key_p);
881
882 if (b->ops->insert_fixup(b, k, &iter, replace_key))
883 return status;
884 diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
885 index bac76aabca6d..c71365e7c1fa 100644
886 --- a/drivers/md/bcache/bset.h
887 +++ b/drivers/md/bcache/bset.h
888 @@ -434,20 +434,26 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
889 return __bch_cut_back(where, k);
890 }
891
892 -#define PRECEDING_KEY(_k) \
893 -({ \
894 - struct bkey *_ret = NULL; \
895 - \
896 - if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \
897 - _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \
898 - \
899 - if (!_ret->low) \
900 - _ret->high--; \
901 - _ret->low--; \
902 - } \
903 - \
904 - _ret; \
905 -})
906 +/*
907 + * Pointer '*preceding_key_p' points to a memory object to store preceding
908 + * key of k. If the preceding key does not exist, set '*preceding_key_p' to
909 + * NULL. So the caller of preceding_key() needs to take care of memory
910 + * which '*preceding_key_p' pointed to before calling preceding_key().
911 + * Currently the only caller of preceding_key() is bch_btree_insert_key(),
912 + * and it points to an on-stack variable, so the memory release is handled
913 + * by stackframe itself.
914 + */
915 +static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p)
916 +{
917 + if (KEY_INODE(k) || KEY_OFFSET(k)) {
918 + (**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0);
919 + if (!(*preceding_key_p)->low)
920 + (*preceding_key_p)->high--;
921 + (*preceding_key_p)->low--;
922 + } else {
923 + (*preceding_key_p) = NULL;
924 + }
925 +}
926
927 static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
928 {
929 diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
930 index d9481640b3e1..541454b4f479 100644
931 --- a/drivers/md/bcache/sysfs.c
932 +++ b/drivers/md/bcache/sysfs.c
933 @@ -393,8 +393,13 @@ STORE(bch_cached_dev)
934 if (attr == &sysfs_writeback_running)
935 bch_writeback_queue(dc);
936
937 + /*
938 + * Only set BCACHE_DEV_WB_RUNNING when cached device attached to
939 + * a cache set, otherwise it doesn't make sense.
940 + */
941 if (attr == &sysfs_writeback_percent)
942 - if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
943 + if ((dc->disk.c != NULL) &&
944 + (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
945 schedule_delayed_work(&dc->writeback_rate_update,
946 dc->writeback_rate_update_seconds * HZ);
947
948 diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
949 index 6193270e7b3d..eb4d90b7d99e 100644
950 --- a/drivers/misc/kgdbts.c
951 +++ b/drivers/misc/kgdbts.c
952 @@ -1139,7 +1139,7 @@ static void kgdbts_put_char(u8 chr)
953 static int param_set_kgdbts_var(const char *kmessage,
954 const struct kernel_param *kp)
955 {
956 - int len = strlen(kmessage);
957 + size_t len = strlen(kmessage);
958
959 if (len >= MAX_CONFIG_LEN) {
960 printk(KERN_ERR "kgdbts: config string too long\n");
961 @@ -1159,7 +1159,7 @@ static int param_set_kgdbts_var(const char *kmessage,
962
963 strcpy(config, kmessage);
964 /* Chop out \n char as a result of echo */
965 - if (config[len - 1] == '\n')
966 + if (len && config[len - 1] == '\n')
967 config[len - 1] = '\0';
968
969 /* Go and configure with the new params. */
970 diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
971 index 3d8a70d3ea9b..3d71f1716390 100644
972 --- a/drivers/net/usb/ipheth.c
973 +++ b/drivers/net/usb/ipheth.c
974 @@ -437,17 +437,18 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
975 dev);
976 dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
977
978 + netif_stop_queue(net);
979 retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC);
980 if (retval) {
981 dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n",
982 __func__, retval);
983 dev->net->stats.tx_errors++;
984 dev_kfree_skb_any(skb);
985 + netif_wake_queue(net);
986 } else {
987 dev->net->stats.tx_packets++;
988 dev->net->stats.tx_bytes += skb->len;
989 dev_consume_skb_any(skb);
990 - netif_stop_queue(net);
991 }
992
993 return NETDEV_TX_OK;
994 diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
995 index 9148015ed803..a3132a9eb91c 100644
996 --- a/drivers/nvdimm/bus.c
997 +++ b/drivers/nvdimm/bus.c
998 @@ -612,7 +612,7 @@ static struct attribute *nd_device_attributes[] = {
999 NULL,
1000 };
1001
1002 -/**
1003 +/*
1004 * nd_device_attribute_group - generic attributes for all devices on an nd bus
1005 */
1006 struct attribute_group nd_device_attribute_group = {
1007 @@ -641,7 +641,7 @@ static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a,
1008 return a->mode;
1009 }
1010
1011 -/**
1012 +/*
1013 * nd_numa_attribute_group - NUMA attributes for all devices on an nd bus
1014 */
1015 struct attribute_group nd_numa_attribute_group = {
1016 diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
1017 index 452ad379ed70..9f1b7e3153f9 100644
1018 --- a/drivers/nvdimm/label.c
1019 +++ b/drivers/nvdimm/label.c
1020 @@ -25,6 +25,8 @@ static guid_t nvdimm_btt2_guid;
1021 static guid_t nvdimm_pfn_guid;
1022 static guid_t nvdimm_dax_guid;
1023
1024 +static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
1025 +
1026 static u32 best_seq(u32 a, u32 b)
1027 {
1028 a &= NSINDEX_SEQ_MASK;
1029 diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h
1030 index 18bbe183b3a9..52f9fcada00a 100644
1031 --- a/drivers/nvdimm/label.h
1032 +++ b/drivers/nvdimm/label.h
1033 @@ -38,8 +38,6 @@ enum {
1034 ND_NSINDEX_INIT = 0x1,
1035 };
1036
1037 -static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
1038 -
1039 /**
1040 * struct nd_namespace_index - label set superblock
1041 * @sig: NAMESPACE_INDEX\0
1042 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
1043 index abfb46378cc1..a867a139bb35 100644
1044 --- a/drivers/nvme/host/core.c
1045 +++ b/drivers/nvme/host/core.c
1046 @@ -1277,9 +1277,14 @@ static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
1047 {
1048 #ifdef CONFIG_NVME_MULTIPATH
1049 if (disk->fops == &nvme_ns_head_ops) {
1050 + struct nvme_ns *ns;
1051 +
1052 *head = disk->private_data;
1053 *srcu_idx = srcu_read_lock(&(*head)->srcu);
1054 - return nvme_find_path(*head);
1055 + ns = nvme_find_path(*head);
1056 + if (!ns)
1057 + srcu_read_unlock(&(*head)->srcu, *srcu_idx);
1058 + return ns;
1059 }
1060 #endif
1061 *head = NULL;
1062 @@ -1293,42 +1298,56 @@ static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
1063 srcu_read_unlock(&head->srcu, idx);
1064 }
1065
1066 -static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned cmd, unsigned long arg)
1067 +static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
1068 + unsigned int cmd, unsigned long arg)
1069 {
1070 + struct nvme_ns_head *head = NULL;
1071 + void __user *argp = (void __user *)arg;
1072 + struct nvme_ns *ns;
1073 + int srcu_idx, ret;
1074 +
1075 + ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
1076 + if (unlikely(!ns))
1077 + return -EWOULDBLOCK;
1078 +
1079 + /*
1080 + * Handle ioctls that apply to the controller instead of the namespace
1081 + * seperately and drop the ns SRCU reference early. This avoids a
1082 + * deadlock when deleting namespaces using the passthrough interface.
1083 + */
1084 + if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) {
1085 + struct nvme_ctrl *ctrl = ns->ctrl;
1086 +
1087 + nvme_get_ctrl(ns->ctrl);
1088 + nvme_put_ns_from_disk(head, srcu_idx);
1089 +
1090 + if (cmd == NVME_IOCTL_ADMIN_CMD)
1091 + ret = nvme_user_cmd(ctrl, NULL, argp);
1092 + else
1093 + ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
1094 +
1095 + nvme_put_ctrl(ctrl);
1096 + return ret;
1097 + }
1098 +
1099 switch (cmd) {
1100 case NVME_IOCTL_ID:
1101 force_successful_syscall_return();
1102 - return ns->head->ns_id;
1103 - case NVME_IOCTL_ADMIN_CMD:
1104 - return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
1105 + ret = ns->head->ns_id;
1106 + break;
1107 case NVME_IOCTL_IO_CMD:
1108 - return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
1109 + ret = nvme_user_cmd(ns->ctrl, ns, argp);
1110 + break;
1111 case NVME_IOCTL_SUBMIT_IO:
1112 - return nvme_submit_io(ns, (void __user *)arg);
1113 + ret = nvme_submit_io(ns, argp);
1114 + break;
1115 default:
1116 -#ifdef CONFIG_NVM
1117 if (ns->ndev)
1118 - return nvme_nvm_ioctl(ns, cmd, arg);
1119 -#endif
1120 - if (is_sed_ioctl(cmd))
1121 - return sed_ioctl(ns->ctrl->opal_dev, cmd,
1122 - (void __user *) arg);
1123 - return -ENOTTY;
1124 + ret = nvme_nvm_ioctl(ns, cmd, arg);
1125 + else
1126 + ret = -ENOTTY;
1127 }
1128 -}
1129 -
1130 -static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
1131 - unsigned int cmd, unsigned long arg)
1132 -{
1133 - struct nvme_ns_head *head = NULL;
1134 - struct nvme_ns *ns;
1135 - int srcu_idx, ret;
1136
1137 - ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
1138 - if (unlikely(!ns))
1139 - ret = -EWOULDBLOCK;
1140 - else
1141 - ret = nvme_ns_ioctl(ns, cmd, arg);
1142 nvme_put_ns_from_disk(head, srcu_idx);
1143 return ret;
1144 }
1145 @@ -3506,6 +3525,7 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl);
1146
1147 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
1148 {
1149 + dev_pm_qos_hide_latency_tolerance(ctrl->device);
1150 cdev_device_del(&ctrl->cdev, ctrl->device);
1151 }
1152 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
1153 diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
1154 index c7039f52ad51..b1d804376237 100644
1155 --- a/drivers/platform/x86/pmc_atom.c
1156 +++ b/drivers/platform/x86/pmc_atom.c
1157 @@ -398,12 +398,45 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc)
1158 */
1159 static const struct dmi_system_id critclk_systems[] = {
1160 {
1161 + /* pmc_plt_clk0 is used for an external HSIC USB HUB */
1162 .ident = "MPL CEC1x",
1163 .matches = {
1164 DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"),
1165 DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"),
1166 },
1167 },
1168 + {
1169 + /* pmc_plt_clk0 - 3 are used for the 4 ethernet controllers */
1170 + .ident = "Lex 3I380D",
1171 + .matches = {
1172 + DMI_MATCH(DMI_SYS_VENDOR, "Lex BayTrail"),
1173 + DMI_MATCH(DMI_PRODUCT_NAME, "3I380D"),
1174 + },
1175 + },
1176 + {
1177 + /* pmc_plt_clk* - are used for ethernet controllers */
1178 + .ident = "Beckhoff CB3163",
1179 + .matches = {
1180 + DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
1181 + DMI_MATCH(DMI_BOARD_NAME, "CB3163"),
1182 + },
1183 + },
1184 + {
1185 + /* pmc_plt_clk* - are used for ethernet controllers */
1186 + .ident = "Beckhoff CB6263",
1187 + .matches = {
1188 + DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
1189 + DMI_MATCH(DMI_BOARD_NAME, "CB6263"),
1190 + },
1191 + },
1192 + {
1193 + /* pmc_plt_clk* - are used for ethernet controllers */
1194 + .ident = "Beckhoff CB6363",
1195 + .matches = {
1196 + DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
1197 + DMI_MATCH(DMI_BOARD_NAME, "CB6363"),
1198 + },
1199 + },
1200 { /*sentinel*/ }
1201 };
1202
1203 diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
1204 index 2d9ec378a8bc..f85d6b7a1984 100644
1205 --- a/drivers/ras/cec.c
1206 +++ b/drivers/ras/cec.c
1207 @@ -2,6 +2,7 @@
1208 #include <linux/mm.h>
1209 #include <linux/gfp.h>
1210 #include <linux/kernel.h>
1211 +#include <linux/workqueue.h>
1212
1213 #include <asm/mce.h>
1214
1215 @@ -123,16 +124,12 @@ static u64 dfs_pfn;
1216 /* Amount of errors after which we offline */
1217 static unsigned int count_threshold = COUNT_MASK;
1218
1219 -/*
1220 - * The timer "decays" element count each timer_interval which is 24hrs by
1221 - * default.
1222 - */
1223 -
1224 -#define CEC_TIMER_DEFAULT_INTERVAL 24 * 60 * 60 /* 24 hrs */
1225 -#define CEC_TIMER_MIN_INTERVAL 1 * 60 * 60 /* 1h */
1226 -#define CEC_TIMER_MAX_INTERVAL 30 * 24 * 60 * 60 /* one month */
1227 -static struct timer_list cec_timer;
1228 -static u64 timer_interval = CEC_TIMER_DEFAULT_INTERVAL;
1229 +/* Each element "decays" each decay_interval which is 24hrs by default. */
1230 +#define CEC_DECAY_DEFAULT_INTERVAL 24 * 60 * 60 /* 24 hrs */
1231 +#define CEC_DECAY_MIN_INTERVAL 1 * 60 * 60 /* 1h */
1232 +#define CEC_DECAY_MAX_INTERVAL 30 * 24 * 60 * 60 /* one month */
1233 +static struct delayed_work cec_work;
1234 +static u64 decay_interval = CEC_DECAY_DEFAULT_INTERVAL;
1235
1236 /*
1237 * Decrement decay value. We're using DECAY_BITS bits to denote decay of an
1238 @@ -160,20 +157,21 @@ static void do_spring_cleaning(struct ce_array *ca)
1239 /*
1240 * @interval in seconds
1241 */
1242 -static void cec_mod_timer(struct timer_list *t, unsigned long interval)
1243 +static void cec_mod_work(unsigned long interval)
1244 {
1245 unsigned long iv;
1246
1247 - iv = interval * HZ + jiffies;
1248 -
1249 - mod_timer(t, round_jiffies(iv));
1250 + iv = interval * HZ;
1251 + mod_delayed_work(system_wq, &cec_work, round_jiffies(iv));
1252 }
1253
1254 -static void cec_timer_fn(struct timer_list *unused)
1255 +static void cec_work_fn(struct work_struct *work)
1256 {
1257 + mutex_lock(&ce_mutex);
1258 do_spring_cleaning(&ce_arr);
1259 + mutex_unlock(&ce_mutex);
1260
1261 - cec_mod_timer(&cec_timer, timer_interval);
1262 + cec_mod_work(decay_interval);
1263 }
1264
1265 /*
1266 @@ -183,32 +181,38 @@ static void cec_timer_fn(struct timer_list *unused)
1267 */
1268 static int __find_elem(struct ce_array *ca, u64 pfn, unsigned int *to)
1269 {
1270 + int min = 0, max = ca->n - 1;
1271 u64 this_pfn;
1272 - int min = 0, max = ca->n;
1273
1274 - while (min < max) {
1275 - int tmp = (max + min) >> 1;
1276 + while (min <= max) {
1277 + int i = (min + max) >> 1;
1278
1279 - this_pfn = PFN(ca->array[tmp]);
1280 + this_pfn = PFN(ca->array[i]);
1281
1282 if (this_pfn < pfn)
1283 - min = tmp + 1;
1284 + min = i + 1;
1285 else if (this_pfn > pfn)
1286 - max = tmp;
1287 - else {
1288 - min = tmp;
1289 - break;
1290 + max = i - 1;
1291 + else if (this_pfn == pfn) {
1292 + if (to)
1293 + *to = i;
1294 +
1295 + return i;
1296 }
1297 }
1298
1299 + /*
1300 + * When the loop terminates without finding @pfn, min has the index of
1301 + * the element slot where the new @pfn should be inserted. The loop
1302 + * terminates when min > max, which means the min index points to the
1303 + * bigger element while the max index to the smaller element, in-between
1304 + * which the new @pfn belongs to.
1305 + *
1306 + * For more details, see exercise 1, Section 6.2.1 in TAOCP, vol. 3.
1307 + */
1308 if (to)
1309 *to = min;
1310
1311 - this_pfn = PFN(ca->array[min]);
1312 -
1313 - if (this_pfn == pfn)
1314 - return min;
1315 -
1316 return -ENOKEY;
1317 }
1318
1319 @@ -374,15 +378,15 @@ static int decay_interval_set(void *data, u64 val)
1320 {
1321 *(u64 *)data = val;
1322
1323 - if (val < CEC_TIMER_MIN_INTERVAL)
1324 + if (val < CEC_DECAY_MIN_INTERVAL)
1325 return -EINVAL;
1326
1327 - if (val > CEC_TIMER_MAX_INTERVAL)
1328 + if (val > CEC_DECAY_MAX_INTERVAL)
1329 return -EINVAL;
1330
1331 - timer_interval = val;
1332 + decay_interval = val;
1333
1334 - cec_mod_timer(&cec_timer, timer_interval);
1335 + cec_mod_work(decay_interval);
1336 return 0;
1337 }
1338 DEFINE_DEBUGFS_ATTRIBUTE(decay_interval_ops, u64_get, decay_interval_set, "%lld\n");
1339 @@ -426,7 +430,7 @@ static int array_dump(struct seq_file *m, void *v)
1340
1341 seq_printf(m, "Flags: 0x%x\n", ca->flags);
1342
1343 - seq_printf(m, "Timer interval: %lld seconds\n", timer_interval);
1344 + seq_printf(m, "Decay interval: %lld seconds\n", decay_interval);
1345 seq_printf(m, "Decays: %lld\n", ca->decays_done);
1346
1347 seq_printf(m, "Action threshold: %d\n", count_threshold);
1348 @@ -472,7 +476,7 @@ static int __init create_debugfs_nodes(void)
1349 }
1350
1351 decay = debugfs_create_file("decay_interval", S_IRUSR | S_IWUSR, d,
1352 - &timer_interval, &decay_interval_ops);
1353 + &decay_interval, &decay_interval_ops);
1354 if (!decay) {
1355 pr_warn("Error creating decay_interval debugfs node!\n");
1356 goto err;
1357 @@ -508,8 +512,8 @@ void __init cec_init(void)
1358 if (create_debugfs_nodes())
1359 return;
1360
1361 - timer_setup(&cec_timer, cec_timer_fn, 0);
1362 - cec_mod_timer(&cec_timer, CEC_TIMER_DEFAULT_INTERVAL);
1363 + INIT_DELAYED_WORK(&cec_work, cec_work_fn);
1364 + schedule_delayed_work(&cec_work, CEC_DECAY_DEFAULT_INTERVAL);
1365
1366 pr_info("Correctable Errors collector initialized.\n");
1367 }
1368 diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
1369 index 453615f8ac9a..3fcd2cbafc84 100644
1370 --- a/drivers/rtc/rtc-pcf8523.c
1371 +++ b/drivers/rtc/rtc-pcf8523.c
1372 @@ -85,6 +85,18 @@ static int pcf8523_write(struct i2c_client *client, u8 reg, u8 value)
1373 return 0;
1374 }
1375
1376 +static int pcf8523_voltage_low(struct i2c_client *client)
1377 +{
1378 + u8 value;
1379 + int err;
1380 +
1381 + err = pcf8523_read(client, REG_CONTROL3, &value);
1382 + if (err < 0)
1383 + return err;
1384 +
1385 + return !!(value & REG_CONTROL3_BLF);
1386 +}
1387 +
1388 static int pcf8523_select_capacitance(struct i2c_client *client, bool high)
1389 {
1390 u8 value;
1391 @@ -167,6 +179,14 @@ static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm)
1392 struct i2c_msg msgs[2];
1393 int err;
1394
1395 + err = pcf8523_voltage_low(client);
1396 + if (err < 0) {
1397 + return err;
1398 + } else if (err > 0) {
1399 + dev_err(dev, "low voltage detected, time is unreliable\n");
1400 + return -EINVAL;
1401 + }
1402 +
1403 msgs[0].addr = client->addr;
1404 msgs[0].flags = 0;
1405 msgs[0].len = 1;
1406 @@ -251,17 +271,13 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
1407 unsigned long arg)
1408 {
1409 struct i2c_client *client = to_i2c_client(dev);
1410 - u8 value;
1411 - int ret = 0, err;
1412 + int ret;
1413
1414 switch (cmd) {
1415 case RTC_VL_READ:
1416 - err = pcf8523_read(client, REG_CONTROL3, &value);
1417 - if (err < 0)
1418 - return err;
1419 -
1420 - if (value & REG_CONTROL3_BLF)
1421 - ret = 1;
1422 + ret = pcf8523_voltage_low(client);
1423 + if (ret < 0)
1424 + return ret;
1425
1426 if (copy_to_user((void __user *)arg, &ret, sizeof(int)))
1427 return -EFAULT;
1428 diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
1429 index e8ae4d671d23..097305949a95 100644
1430 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
1431 +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
1432 @@ -830,7 +830,7 @@ ret_err_rqe:
1433 ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
1434 (u64)err_entry->data.err_warn_bitmap_lo;
1435 for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
1436 - if (err_warn_bit_map & (u64) (1 << i)) {
1437 + if (err_warn_bit_map & ((u64)1 << i)) {
1438 err_warn = i;
1439 break;
1440 }
1441 diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
1442 index cb19b12e7211..55cd96e2469c 100644
1443 --- a/drivers/scsi/lpfc/lpfc_attr.c
1444 +++ b/drivers/scsi/lpfc/lpfc_attr.c
1445 @@ -341,7 +341,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
1446 phba->sli4_hba.scsi_xri_max,
1447 lpfc_sli4_get_els_iocb_cnt(phba));
1448 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
1449 - goto buffer_done;
1450 + goto rcu_unlock_buf_done;
1451
1452 /* Port state is only one of two values for now. */
1453 if (localport->port_id)
1454 @@ -357,7 +357,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
1455 wwn_to_u64(vport->fc_nodename.u.wwn),
1456 localport->port_id, statep);
1457 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
1458 - goto buffer_done;
1459 + goto rcu_unlock_buf_done;
1460
1461 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
1462 nrport = NULL;
1463 @@ -384,39 +384,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
1464
1465 /* Tab in to show lport ownership. */
1466 if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
1467 - goto buffer_done;
1468 + goto rcu_unlock_buf_done;
1469 if (phba->brd_no >= 10) {
1470 if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
1471 - goto buffer_done;
1472 + goto rcu_unlock_buf_done;
1473 }
1474
1475 scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
1476 nrport->port_name);
1477 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
1478 - goto buffer_done;
1479 + goto rcu_unlock_buf_done;
1480
1481 scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
1482 nrport->node_name);
1483 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
1484 - goto buffer_done;
1485 + goto rcu_unlock_buf_done;
1486
1487 scnprintf(tmp, sizeof(tmp), "DID x%06x ",
1488 nrport->port_id);
1489 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
1490 - goto buffer_done;
1491 + goto rcu_unlock_buf_done;
1492
1493 /* An NVME rport can have multiple roles. */
1494 if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
1495 if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
1496 - goto buffer_done;
1497 + goto rcu_unlock_buf_done;
1498 }
1499 if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
1500 if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
1501 - goto buffer_done;
1502 + goto rcu_unlock_buf_done;
1503 }
1504 if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
1505 if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
1506 - goto buffer_done;
1507 + goto rcu_unlock_buf_done;
1508 }
1509 if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
1510 FC_PORT_ROLE_NVME_TARGET |
1511 @@ -424,12 +424,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
1512 scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
1513 nrport->port_role);
1514 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
1515 - goto buffer_done;
1516 + goto rcu_unlock_buf_done;
1517 }
1518
1519 scnprintf(tmp, sizeof(tmp), "%s\n", statep);
1520 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
1521 - goto buffer_done;
1522 + goto rcu_unlock_buf_done;
1523 }
1524 rcu_read_unlock();
1525
1526 @@ -491,7 +491,13 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
1527 atomic_read(&lport->cmpl_fcp_err));
1528 strlcat(buf, tmp, PAGE_SIZE);
1529
1530 -buffer_done:
1531 + /* RCU is already unlocked. */
1532 + goto buffer_done;
1533 +
1534 + rcu_unlock_buf_done:
1535 + rcu_read_unlock();
1536 +
1537 + buffer_done:
1538 len = strnlen(buf, PAGE_SIZE);
1539
1540 if (unlikely(len >= (PAGE_SIZE - 1))) {
1541 diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
1542 index 0d214e6b8e9a..f3c6801c0b31 100644
1543 --- a/drivers/scsi/lpfc/lpfc_els.c
1544 +++ b/drivers/scsi/lpfc/lpfc_els.c
1545 @@ -7094,7 +7094,10 @@ int
1546 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
1547 {
1548 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
1549 - rrq->nlp_DID);
1550 + rrq->nlp_DID);
1551 + if (!ndlp)
1552 + return 1;
1553 +
1554 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
1555 return lpfc_issue_els_rrq(rrq->vport, ndlp,
1556 rrq->nlp_DID, rrq);
1557 diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c
1558 index 8fd28b056f73..3383314a3882 100644
1559 --- a/drivers/scsi/qedi/qedi_dbg.c
1560 +++ b/drivers/scsi/qedi/qedi_dbg.c
1561 @@ -16,10 +16,6 @@ qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
1562 {
1563 va_list va;
1564 struct va_format vaf;
1565 - char nfunc[32];
1566 -
1567 - memset(nfunc, 0, sizeof(nfunc));
1568 - memcpy(nfunc, func, sizeof(nfunc) - 1);
1569
1570 va_start(va, fmt);
1571
1572 @@ -28,9 +24,9 @@ qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
1573
1574 if (likely(qedi) && likely(qedi->pdev))
1575 pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
1576 - nfunc, line, qedi->host_no, &vaf);
1577 + func, line, qedi->host_no, &vaf);
1578 else
1579 - pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
1580 + pr_err("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
1581
1582 va_end(va);
1583 }
1584 @@ -41,10 +37,6 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
1585 {
1586 va_list va;
1587 struct va_format vaf;
1588 - char nfunc[32];
1589 -
1590 - memset(nfunc, 0, sizeof(nfunc));
1591 - memcpy(nfunc, func, sizeof(nfunc) - 1);
1592
1593 va_start(va, fmt);
1594
1595 @@ -56,9 +48,9 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
1596
1597 if (likely(qedi) && likely(qedi->pdev))
1598 pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
1599 - nfunc, line, qedi->host_no, &vaf);
1600 + func, line, qedi->host_no, &vaf);
1601 else
1602 - pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
1603 + pr_warn("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
1604
1605 ret:
1606 va_end(va);
1607 @@ -70,10 +62,6 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
1608 {
1609 va_list va;
1610 struct va_format vaf;
1611 - char nfunc[32];
1612 -
1613 - memset(nfunc, 0, sizeof(nfunc));
1614 - memcpy(nfunc, func, sizeof(nfunc) - 1);
1615
1616 va_start(va, fmt);
1617
1618 @@ -85,10 +73,10 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
1619
1620 if (likely(qedi) && likely(qedi->pdev))
1621 pr_notice("[%s]:[%s:%d]:%d: %pV",
1622 - dev_name(&qedi->pdev->dev), nfunc, line,
1623 + dev_name(&qedi->pdev->dev), func, line,
1624 qedi->host_no, &vaf);
1625 else
1626 - pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
1627 + pr_notice("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
1628
1629 ret:
1630 va_end(va);
1631 @@ -100,10 +88,6 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
1632 {
1633 va_list va;
1634 struct va_format vaf;
1635 - char nfunc[32];
1636 -
1637 - memset(nfunc, 0, sizeof(nfunc));
1638 - memcpy(nfunc, func, sizeof(nfunc) - 1);
1639
1640 va_start(va, fmt);
1641
1642 @@ -115,9 +99,9 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
1643
1644 if (likely(qedi) && likely(qedi->pdev))
1645 pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
1646 - nfunc, line, qedi->host_no, &vaf);
1647 + func, line, qedi->host_no, &vaf);
1648 else
1649 - pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
1650 + pr_info("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
1651
1652 ret:
1653 va_end(va);
1654 diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
1655 index 4130b9117055..1b7049dce169 100644
1656 --- a/drivers/scsi/qedi/qedi_iscsi.c
1657 +++ b/drivers/scsi/qedi/qedi_iscsi.c
1658 @@ -810,8 +810,6 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
1659 struct qedi_endpoint *qedi_ep;
1660 struct sockaddr_in *addr;
1661 struct sockaddr_in6 *addr6;
1662 - struct qed_dev *cdev = NULL;
1663 - struct qedi_uio_dev *udev = NULL;
1664 struct iscsi_path path_req;
1665 u32 msg_type = ISCSI_KEVENT_IF_DOWN;
1666 u32 iscsi_cid = QEDI_CID_RESERVED;
1667 @@ -831,8 +829,6 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
1668 }
1669
1670 qedi = iscsi_host_priv(shost);
1671 - cdev = qedi->cdev;
1672 - udev = qedi->udev;
1673
1674 if (test_bit(QEDI_IN_OFFLINE, &qedi->flags) ||
1675 test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
1676 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
1677 index 6082b008969b..6b6413073584 100644
1678 --- a/drivers/usb/core/quirks.c
1679 +++ b/drivers/usb/core/quirks.c
1680 @@ -215,6 +215,9 @@ static const struct usb_device_id usb_quirk_list[] = {
1681 /* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */
1682 { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },
1683
1684 + /* Logitech HD Webcam C270 */
1685 + { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
1686 +
1687 /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
1688 { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
1689 { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
1690 diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
1691 index 260010abf9d8..aad7edc29bdd 100644
1692 --- a/drivers/usb/dwc2/hcd.c
1693 +++ b/drivers/usb/dwc2/hcd.c
1694 @@ -2673,8 +2673,10 @@ static void dwc2_free_dma_aligned_buffer(struct urb *urb)
1695 return;
1696
1697 /* Restore urb->transfer_buffer from the end of the allocated area */
1698 - memcpy(&stored_xfer_buffer, urb->transfer_buffer +
1699 - urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
1700 + memcpy(&stored_xfer_buffer,
1701 + PTR_ALIGN(urb->transfer_buffer + urb->transfer_buffer_length,
1702 + dma_get_cache_alignment()),
1703 + sizeof(urb->transfer_buffer));
1704
1705 if (usb_urb_dir_in(urb)) {
1706 if (usb_pipeisoc(urb->pipe))
1707 @@ -2706,6 +2708,7 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
1708 * DMA
1709 */
1710 kmalloc_size = urb->transfer_buffer_length +
1711 + (dma_get_cache_alignment() - 1) +
1712 sizeof(urb->transfer_buffer);
1713
1714 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
1715 @@ -2716,7 +2719,8 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
1716 * Position value of original urb->transfer_buffer pointer to the end
1717 * of allocation for later referencing
1718 */
1719 - memcpy(kmalloc_ptr + urb->transfer_buffer_length,
1720 + memcpy(PTR_ALIGN(kmalloc_ptr + urb->transfer_buffer_length,
1721 + dma_get_cache_alignment()),
1722 &urb->transfer_buffer, sizeof(urb->transfer_buffer));
1723
1724 if (usb_urb_dir_out(urb))
1725 @@ -2801,7 +2805,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1726 chan->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info);
1727 chan->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info);
1728 chan->speed = qh->dev_speed;
1729 - chan->max_packet = dwc2_max_packet(qh->maxp);
1730 + chan->max_packet = qh->maxp;
1731
1732 chan->xfer_started = 0;
1733 chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
1734 @@ -2879,7 +2883,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1735 * This value may be modified when the transfer is started
1736 * to reflect the actual transfer length
1737 */
1738 - chan->multi_count = dwc2_hb_mult(qh->maxp);
1739 + chan->multi_count = qh->maxp_mult;
1740
1741 if (hsotg->params.dma_desc_enable) {
1742 chan->desc_list_addr = qh->desc_list_dma;
1743 @@ -3991,19 +3995,21 @@ static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg,
1744
1745 static void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg *hsotg,
1746 struct dwc2_hcd_urb *urb, u8 dev_addr,
1747 - u8 ep_num, u8 ep_type, u8 ep_dir, u16 mps)
1748 + u8 ep_num, u8 ep_type, u8 ep_dir,
1749 + u16 maxp, u16 maxp_mult)
1750 {
1751 if (dbg_perio() ||
1752 ep_type == USB_ENDPOINT_XFER_BULK ||
1753 ep_type == USB_ENDPOINT_XFER_CONTROL)
1754 dev_vdbg(hsotg->dev,
1755 - "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, mps=%d\n",
1756 - dev_addr, ep_num, ep_dir, ep_type, mps);
1757 + "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, maxp=%d (%d mult)\n",
1758 + dev_addr, ep_num, ep_dir, ep_type, maxp, maxp_mult);
1759 urb->pipe_info.dev_addr = dev_addr;
1760 urb->pipe_info.ep_num = ep_num;
1761 urb->pipe_info.pipe_type = ep_type;
1762 urb->pipe_info.pipe_dir = ep_dir;
1763 - urb->pipe_info.mps = mps;
1764 + urb->pipe_info.maxp = maxp;
1765 + urb->pipe_info.maxp_mult = maxp_mult;
1766 }
1767
1768 /*
1769 @@ -4094,8 +4100,9 @@ void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
1770 dwc2_hcd_is_pipe_in(&urb->pipe_info) ?
1771 "IN" : "OUT");
1772 dev_dbg(hsotg->dev,
1773 - " Max packet size: %d\n",
1774 - dwc2_hcd_get_mps(&urb->pipe_info));
1775 + " Max packet size: %d (%d mult)\n",
1776 + dwc2_hcd_get_maxp(&urb->pipe_info),
1777 + dwc2_hcd_get_maxp_mult(&urb->pipe_info));
1778 dev_dbg(hsotg->dev,
1779 " transfer_buffer: %p\n",
1780 urb->buf);
1781 @@ -4653,8 +4660,10 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
1782 }
1783
1784 dev_vdbg(hsotg->dev, " Speed: %s\n", speed);
1785 - dev_vdbg(hsotg->dev, " Max packet size: %d\n",
1786 - usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
1787 + dev_vdbg(hsotg->dev, " Max packet size: %d (%d mult)\n",
1788 + usb_endpoint_maxp(&urb->ep->desc),
1789 + usb_endpoint_maxp_mult(&urb->ep->desc));
1790 +
1791 dev_vdbg(hsotg->dev, " Data buffer length: %d\n",
1792 urb->transfer_buffer_length);
1793 dev_vdbg(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
1794 @@ -4737,8 +4746,8 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1795 dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe),
1796 usb_pipeendpoint(urb->pipe), ep_type,
1797 usb_pipein(urb->pipe),
1798 - usb_maxpacket(urb->dev, urb->pipe,
1799 - !(usb_pipein(urb->pipe))));
1800 + usb_endpoint_maxp(&ep->desc),
1801 + usb_endpoint_maxp_mult(&ep->desc));
1802
1803 buf = urb->transfer_buffer;
1804
1805 diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
1806 index c089ffa1f0a8..ce6445a06588 100644
1807 --- a/drivers/usb/dwc2/hcd.h
1808 +++ b/drivers/usb/dwc2/hcd.h
1809 @@ -171,7 +171,8 @@ struct dwc2_hcd_pipe_info {
1810 u8 ep_num;
1811 u8 pipe_type;
1812 u8 pipe_dir;
1813 - u16 mps;
1814 + u16 maxp;
1815 + u16 maxp_mult;
1816 };
1817
1818 struct dwc2_hcd_iso_packet_desc {
1819 @@ -264,6 +265,7 @@ struct dwc2_hs_transfer_time {
1820 * - USB_ENDPOINT_XFER_ISOC
1821 * @ep_is_in: Endpoint direction
1822 * @maxp: Value from wMaxPacketSize field of Endpoint Descriptor
1823 + * @maxp_mult: Multiplier for maxp
1824 * @dev_speed: Device speed. One of the following values:
1825 * - USB_SPEED_LOW
1826 * - USB_SPEED_FULL
1827 @@ -340,6 +342,7 @@ struct dwc2_qh {
1828 u8 ep_type;
1829 u8 ep_is_in;
1830 u16 maxp;
1831 + u16 maxp_mult;
1832 u8 dev_speed;
1833 u8 data_toggle;
1834 u8 ping_state;
1835 @@ -503,9 +506,14 @@ static inline u8 dwc2_hcd_get_pipe_type(struct dwc2_hcd_pipe_info *pipe)
1836 return pipe->pipe_type;
1837 }
1838
1839 -static inline u16 dwc2_hcd_get_mps(struct dwc2_hcd_pipe_info *pipe)
1840 +static inline u16 dwc2_hcd_get_maxp(struct dwc2_hcd_pipe_info *pipe)
1841 +{
1842 + return pipe->maxp;
1843 +}
1844 +
1845 +static inline u16 dwc2_hcd_get_maxp_mult(struct dwc2_hcd_pipe_info *pipe)
1846 {
1847 - return pipe->mps;
1848 + return pipe->maxp_mult;
1849 }
1850
1851 static inline u8 dwc2_hcd_get_dev_addr(struct dwc2_hcd_pipe_info *pipe)
1852 @@ -620,12 +628,6 @@ static inline bool dbg_urb(struct urb *urb)
1853 static inline bool dbg_perio(void) { return false; }
1854 #endif
1855
1856 -/* High bandwidth multiplier as encoded in highspeed endpoint descriptors */
1857 -#define dwc2_hb_mult(wmaxpacketsize) (1 + (((wmaxpacketsize) >> 11) & 0x03))
1858 -
1859 -/* Packet size for any kind of endpoint descriptor */
1860 -#define dwc2_max_packet(wmaxpacketsize) ((wmaxpacketsize) & 0x07ff)
1861 -
1862 /*
1863 * Returns true if frame1 index is greater than frame2 index. The comparison
1864 * is done modulo FRLISTEN_64_SIZE. This accounts for the rollover of the
1865 diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
1866 index 88b5dcf3aefc..a052d39b4375 100644
1867 --- a/drivers/usb/dwc2/hcd_intr.c
1868 +++ b/drivers/usb/dwc2/hcd_intr.c
1869 @@ -1617,8 +1617,9 @@ static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
1870
1871 dev_err(hsotg->dev, " Speed: %s\n", speed);
1872
1873 - dev_err(hsotg->dev, " Max packet size: %d\n",
1874 - dwc2_hcd_get_mps(&urb->pipe_info));
1875 + dev_err(hsotg->dev, " Max packet size: %d (mult %d)\n",
1876 + dwc2_hcd_get_maxp(&urb->pipe_info),
1877 + dwc2_hcd_get_maxp_mult(&urb->pipe_info));
1878 dev_err(hsotg->dev, " Data buffer length: %d\n", urb->length);
1879 dev_err(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
1880 urb->buf, (unsigned long)urb->dma);
1881 diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
1882 index ea3aa640c15c..68bbac64b753 100644
1883 --- a/drivers/usb/dwc2/hcd_queue.c
1884 +++ b/drivers/usb/dwc2/hcd_queue.c
1885 @@ -708,7 +708,7 @@ static void dwc2_hs_pmap_unschedule(struct dwc2_hsotg *hsotg,
1886 static int dwc2_uframe_schedule_split(struct dwc2_hsotg *hsotg,
1887 struct dwc2_qh *qh)
1888 {
1889 - int bytecount = dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);
1890 + int bytecount = qh->maxp_mult * qh->maxp;
1891 int ls_search_slice;
1892 int err = 0;
1893 int host_interval_in_sched;
1894 @@ -1332,7 +1332,7 @@ static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
1895 u32 max_channel_xfer_size;
1896 int status = 0;
1897
1898 - max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
1899 + max_xfer_size = qh->maxp * qh->maxp_mult;
1900 max_channel_xfer_size = hsotg->params.max_transfer_size;
1901
1902 if (max_xfer_size > max_channel_xfer_size) {
1903 @@ -1517,8 +1517,9 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
1904 u32 prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
1905 bool do_split = (prtspd == HPRT0_SPD_HIGH_SPEED &&
1906 dev_speed != USB_SPEED_HIGH);
1907 - int maxp = dwc2_hcd_get_mps(&urb->pipe_info);
1908 - int bytecount = dwc2_hb_mult(maxp) * dwc2_max_packet(maxp);
1909 + int maxp = dwc2_hcd_get_maxp(&urb->pipe_info);
1910 + int maxp_mult = dwc2_hcd_get_maxp_mult(&urb->pipe_info);
1911 + int bytecount = maxp_mult * maxp;
1912 char *speed, *type;
1913
1914 /* Initialize QH */
1915 @@ -1531,6 +1532,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
1916
1917 qh->data_toggle = DWC2_HC_PID_DATA0;
1918 qh->maxp = maxp;
1919 + qh->maxp_mult = maxp_mult;
1920 INIT_LIST_HEAD(&qh->qtd_list);
1921 INIT_LIST_HEAD(&qh->qh_list_entry);
1922
1923 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1924 index d8c474b386a8..ea891195bbdf 100644
1925 --- a/drivers/usb/serial/option.c
1926 +++ b/drivers/usb/serial/option.c
1927 @@ -1171,6 +1171,10 @@ static const struct usb_device_id option_ids[] = {
1928 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
1929 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
1930 .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
1931 + { USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
1932 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
1933 + { USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
1934 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
1935 { USB_DEVICE(TELIT_VENDOR_ID, 0x1900), /* Telit LN940 (QMI) */
1936 .driver_info = NCTRL(0) | RSVD(1) },
1937 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
1938 @@ -1772,6 +1776,8 @@ static const struct usb_device_id option_ids[] = {
1939 { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
1940 .driver_info = RSVD(5) | RSVD(6) },
1941 { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */
1942 + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */
1943 + .driver_info = RSVD(7) },
1944 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
1945 .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
1946 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
1947 diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
1948 index 5a6df6e9ad57..5d7b21ea6238 100644
1949 --- a/drivers/usb/serial/pl2303.c
1950 +++ b/drivers/usb/serial/pl2303.c
1951 @@ -106,6 +106,7 @@ static const struct usb_device_id id_table[] = {
1952 { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
1953 { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
1954 { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
1955 + { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
1956 { } /* Terminating entry */
1957 };
1958
1959 diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
1960 index 559941ca884d..b0175f17d1a2 100644
1961 --- a/drivers/usb/serial/pl2303.h
1962 +++ b/drivers/usb/serial/pl2303.h
1963 @@ -155,3 +155,6 @@
1964 #define SMART_VENDOR_ID 0x0b8c
1965 #define SMART_PRODUCT_ID 0x2303
1966
1967 +/* Allied Telesis VT-Kit3 */
1968 +#define AT_VENDOR_ID 0x0caa
1969 +#define AT_VTKIT3_PRODUCT_ID 0x3001
1970 diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
1971 index 6b2140f966ef..7e14c2d7cf73 100644
1972 --- a/drivers/usb/storage/unusual_realtek.h
1973 +++ b/drivers/usb/storage/unusual_realtek.h
1974 @@ -17,6 +17,11 @@ UNUSUAL_DEV(0x0bda, 0x0138, 0x0000, 0x9999,
1975 "USB Card Reader",
1976 USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
1977
1978 +UNUSUAL_DEV(0x0bda, 0x0153, 0x0000, 0x9999,
1979 + "Realtek",
1980 + "USB Card Reader",
1981 + USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
1982 +
1983 UNUSUAL_DEV(0x0bda, 0x0158, 0x0000, 0x9999,
1984 "Realtek",
1985 "USB Card Reader",
1986 diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
1987 index 409a637f7a92..88e30f7cf9e1 100644
1988 --- a/fs/f2fs/xattr.c
1989 +++ b/fs/f2fs/xattr.c
1990 @@ -205,12 +205,17 @@ static inline const struct xattr_handler *f2fs_xattr_handler(int index)
1991 return handler;
1992 }
1993
1994 -static struct f2fs_xattr_entry *__find_xattr(void *base_addr, int index,
1995 - size_t len, const char *name)
1996 +static struct f2fs_xattr_entry *__find_xattr(void *base_addr,
1997 + void *last_base_addr, int index,
1998 + size_t len, const char *name)
1999 {
2000 struct f2fs_xattr_entry *entry;
2001
2002 list_for_each_xattr(entry, base_addr) {
2003 + if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
2004 + (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr)
2005 + return NULL;
2006 +
2007 if (entry->e_name_index != index)
2008 continue;
2009 if (entry->e_name_len != len)
2010 @@ -300,20 +305,22 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
2011 const char *name, struct f2fs_xattr_entry **xe,
2012 void **base_addr, int *base_size)
2013 {
2014 - void *cur_addr, *txattr_addr, *last_addr = NULL;
2015 + void *cur_addr, *txattr_addr, *last_txattr_addr;
2016 + void *last_addr = NULL;
2017 nid_t xnid = F2FS_I(inode)->i_xattr_nid;
2018 - unsigned int size = xnid ? VALID_XATTR_BLOCK_SIZE : 0;
2019 unsigned int inline_size = inline_xattr_size(inode);
2020 int err = 0;
2021
2022 - if (!size && !inline_size)
2023 + if (!xnid && !inline_size)
2024 return -ENODATA;
2025
2026 - *base_size = inline_size + size + XATTR_PADDING_SIZE;
2027 + *base_size = XATTR_SIZE(xnid, inode) + XATTR_PADDING_SIZE;
2028 txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode), *base_size, GFP_NOFS);
2029 if (!txattr_addr)
2030 return -ENOMEM;
2031
2032 + last_txattr_addr = (void *)txattr_addr + XATTR_SIZE(xnid, inode);
2033 +
2034 /* read from inline xattr */
2035 if (inline_size) {
2036 err = read_inline_xattr(inode, ipage, txattr_addr);
2037 @@ -340,7 +347,11 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
2038 else
2039 cur_addr = txattr_addr;
2040
2041 - *xe = __find_xattr(cur_addr, index, len, name);
2042 + *xe = __find_xattr(cur_addr, last_txattr_addr, index, len, name);
2043 + if (!*xe) {
2044 + err = -EFAULT;
2045 + goto out;
2046 + }
2047 check:
2048 if (IS_XATTR_LAST_ENTRY(*xe)) {
2049 err = -ENODATA;
2050 @@ -584,7 +595,8 @@ static int __f2fs_setxattr(struct inode *inode, int index,
2051 struct page *ipage, int flags)
2052 {
2053 struct f2fs_xattr_entry *here, *last;
2054 - void *base_addr;
2055 + void *base_addr, *last_base_addr;
2056 + nid_t xnid = F2FS_I(inode)->i_xattr_nid;
2057 int found, newsize;
2058 size_t len;
2059 __u32 new_hsize;
2060 @@ -608,8 +620,14 @@ static int __f2fs_setxattr(struct inode *inode, int index,
2061 if (error)
2062 return error;
2063
2064 + last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode);
2065 +
2066 /* find entry with wanted name. */
2067 - here = __find_xattr(base_addr, index, len, name);
2068 + here = __find_xattr(base_addr, last_base_addr, index, len, name);
2069 + if (!here) {
2070 + error = -EFAULT;
2071 + goto exit;
2072 + }
2073
2074 found = IS_XATTR_LAST_ENTRY(here) ? 0 : 1;
2075
2076 diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
2077 index dbcd1d16e669..2a4ecaf338ea 100644
2078 --- a/fs/f2fs/xattr.h
2079 +++ b/fs/f2fs/xattr.h
2080 @@ -74,6 +74,8 @@ struct f2fs_xattr_entry {
2081 entry = XATTR_NEXT_ENTRY(entry))
2082 #define VALID_XATTR_BLOCK_SIZE (PAGE_SIZE - sizeof(struct node_footer))
2083 #define XATTR_PADDING_SIZE (sizeof(__u32))
2084 +#define XATTR_SIZE(x,i) (((x) ? VALID_XATTR_BLOCK_SIZE : 0) + \
2085 + (inline_xattr_size(i)))
2086 #define MIN_OFFSET(i) XATTR_ALIGN(inline_xattr_size(i) + \
2087 VALID_XATTR_BLOCK_SIZE)
2088
2089 diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
2090 index 290373024d9d..e8ace3b54e9c 100644
2091 --- a/fs/ocfs2/dcache.c
2092 +++ b/fs/ocfs2/dcache.c
2093 @@ -310,6 +310,18 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry,
2094
2095 out_attach:
2096 spin_lock(&dentry_attach_lock);
2097 + if (unlikely(dentry->d_fsdata && !alias)) {
2098 + /* d_fsdata is set by a racing thread which is doing
2099 + * the same thing as this thread is doing. Leave the racing
2100 + * thread going ahead and we return here.
2101 + */
2102 + spin_unlock(&dentry_attach_lock);
2103 + iput(dl->dl_inode);
2104 + ocfs2_lock_res_free(&dl->dl_lockres);
2105 + kfree(dl);
2106 + return 0;
2107 + }
2108 +
2109 dentry->d_fsdata = dl;
2110 dl->dl_count++;
2111 spin_unlock(&dentry_attach_lock);
2112 diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
2113 index e3c404833115..53be104aab5c 100644
2114 --- a/include/drm/drm_edid.h
2115 +++ b/include/drm/drm_edid.h
2116 @@ -466,6 +466,7 @@ struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
2117 struct i2c_adapter *adapter);
2118 struct edid *drm_edid_duplicate(const struct edid *edid);
2119 int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
2120 +int drm_add_override_edid_modes(struct drm_connector *connector);
2121
2122 u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
2123 enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
2124 diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
2125 index ca51b2c15bcc..8937d48a5389 100644
2126 --- a/include/linux/cgroup.h
2127 +++ b/include/linux/cgroup.h
2128 @@ -485,7 +485,7 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
2129 *
2130 * Find the css for the (@task, @subsys_id) combination, increment a
2131 * reference on and return it. This function is guaranteed to return a
2132 - * valid css.
2133 + * valid css. The returned css may already have been offlined.
2134 */
2135 static inline struct cgroup_subsys_state *
2136 task_get_css(struct task_struct *task, int subsys_id)
2137 @@ -495,7 +495,13 @@ task_get_css(struct task_struct *task, int subsys_id)
2138 rcu_read_lock();
2139 while (true) {
2140 css = task_css(task, subsys_id);
2141 - if (likely(css_tryget_online(css)))
2142 + /*
2143 + * Can't use css_tryget_online() here. A task which has
2144 + * PF_EXITING set may stay associated with an offline css.
2145 + * If such task calls this function, css_tryget_online()
2146 + * will keep failing.
2147 + */
2148 + if (likely(css_tryget(css)))
2149 break;
2150 cpu_relax();
2151 }
2152 diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
2153 index caf40ad0bbc6..d64d8c2bbdab 100644
2154 --- a/include/linux/cpuhotplug.h
2155 +++ b/include/linux/cpuhotplug.h
2156 @@ -101,6 +101,7 @@ enum cpuhp_state {
2157 CPUHP_AP_IRQ_BCM2836_STARTING,
2158 CPUHP_AP_IRQ_MIPS_GIC_STARTING,
2159 CPUHP_AP_ARM_MVEBU_COHERENCY,
2160 + CPUHP_AP_MICROCODE_LOADER,
2161 CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
2162 CPUHP_AP_PERF_X86_STARTING,
2163 CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
2164 diff --git a/kernel/Makefile b/kernel/Makefile
2165 index 7a63d567fdb5..df5e3ca30acd 100644
2166 --- a/kernel/Makefile
2167 +++ b/kernel/Makefile
2168 @@ -30,6 +30,7 @@ KCOV_INSTRUMENT_extable.o := n
2169 # Don't self-instrument.
2170 KCOV_INSTRUMENT_kcov.o := n
2171 KASAN_SANITIZE_kcov.o := n
2172 +CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
2173
2174 # cond_syscall is currently not LTO compatible
2175 CFLAGS_sys_ni.o = $(DISABLE_LTO)
2176 diff --git a/kernel/cred.c b/kernel/cred.c
2177 index ecf03657e71c..efd04b2ec84c 100644
2178 --- a/kernel/cred.c
2179 +++ b/kernel/cred.c
2180 @@ -448,6 +448,15 @@ int commit_creds(struct cred *new)
2181 if (task->mm)
2182 set_dumpable(task->mm, suid_dumpable);
2183 task->pdeath_signal = 0;
2184 + /*
2185 + * If a task drops privileges and becomes nondumpable,
2186 + * the dumpability change must become visible before
2187 + * the credential change; otherwise, a __ptrace_may_access()
2188 + * racing with this change may be able to attach to a task it
2189 + * shouldn't be able to attach to (as if the task had dropped
2190 + * privileges without becoming nondumpable).
2191 + * Pairs with a read barrier in __ptrace_may_access().
2192 + */
2193 smp_wmb();
2194 }
2195
2196 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
2197 index fc0d667f5792..5d0838c2349e 100644
2198 --- a/kernel/ptrace.c
2199 +++ b/kernel/ptrace.c
2200 @@ -323,6 +323,16 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
2201 return -EPERM;
2202 ok:
2203 rcu_read_unlock();
2204 + /*
2205 + * If a task drops privileges and becomes nondumpable (through a syscall
2206 + * like setresuid()) while we are trying to access it, we must ensure
2207 + * that the dumpability is read after the credentials; otherwise,
2208 + * we may be able to attach to a task that we shouldn't be able to
2209 + * attach to (as if the task had dropped privileges without becoming
2210 + * nondumpable).
2211 + * Pairs with a write barrier in commit_creds().
2212 + */
2213 + smp_rmb();
2214 mm = task->mm;
2215 if (mm &&
2216 ((get_dumpable(mm) != SUID_DUMP_USER) &&
2217 @@ -704,6 +714,10 @@ static int ptrace_peek_siginfo(struct task_struct *child,
2218 if (arg.nr < 0)
2219 return -EINVAL;
2220
2221 + /* Ensure arg.off fits in an unsigned long */
2222 + if (arg.off > ULONG_MAX)
2223 + return 0;
2224 +
2225 if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
2226 pending = &child->signal->shared_pending;
2227 else
2228 @@ -711,18 +725,20 @@ static int ptrace_peek_siginfo(struct task_struct *child,
2229
2230 for (i = 0; i < arg.nr; ) {
2231 siginfo_t info;
2232 - s32 off = arg.off + i;
2233 + unsigned long off = arg.off + i;
2234 + bool found = false;
2235
2236 spin_lock_irq(&child->sighand->siglock);
2237 list_for_each_entry(q, &pending->list, list) {
2238 if (!off--) {
2239 + found = true;
2240 copy_siginfo(&info, &q->info);
2241 break;
2242 }
2243 }
2244 spin_unlock_irq(&child->sighand->siglock);
2245
2246 - if (off >= 0) /* beyond the end of the list */
2247 + if (!found) /* beyond the end of the list */
2248 break;
2249
2250 #ifdef CONFIG_COMPAT
2251 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
2252 index 9a6bfcd22dc6..443edcddac8a 100644
2253 --- a/kernel/time/timekeeping.c
2254 +++ b/kernel/time/timekeeping.c
2255 @@ -812,17 +812,18 @@ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
2256 struct timekeeper *tk = &tk_core.timekeeper;
2257 unsigned int seq;
2258 ktime_t base, *offset = offsets[offs];
2259 + u64 nsecs;
2260
2261 WARN_ON(timekeeping_suspended);
2262
2263 do {
2264 seq = read_seqcount_begin(&tk_core.seq);
2265 base = ktime_add(tk->tkr_mono.base, *offset);
2266 + nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
2267
2268 } while (read_seqcount_retry(&tk_core.seq, seq));
2269
2270 - return base;
2271 -
2272 + return base + nsecs;
2273 }
2274 EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
2275
2276 diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
2277 index 11853e90b649..3f34cfb66a85 100644
2278 --- a/kernel/trace/trace_events_hist.c
2279 +++ b/kernel/trace/trace_events_hist.c
2280 @@ -1632,6 +1632,9 @@ static u64 hist_field_var_ref(struct hist_field *hist_field,
2281 struct hist_elt_data *elt_data;
2282 u64 var_val = 0;
2283
2284 + if (WARN_ON_ONCE(!elt))
2285 + return var_val;
2286 +
2287 elt_data = elt->private_data;
2288 var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];
2289
2290 diff --git a/mm/list_lru.c b/mm/list_lru.c
2291 index f0a15d32b959..758653dd1443 100644
2292 --- a/mm/list_lru.c
2293 +++ b/mm/list_lru.c
2294 @@ -353,7 +353,7 @@ static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
2295 }
2296 return 0;
2297 fail:
2298 - __memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1);
2299 + __memcg_destroy_list_lru_node(memcg_lrus, begin, i);
2300 return -ENOMEM;
2301 }
2302
2303 diff --git a/mm/vmscan.c b/mm/vmscan.c
2304 index ee545d1e9894..dec88fcf8876 100644
2305 --- a/mm/vmscan.c
2306 +++ b/mm/vmscan.c
2307 @@ -1510,7 +1510,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
2308
2309 list_for_each_entry_safe(page, next, page_list, lru) {
2310 if (page_is_file_cache(page) && !PageDirty(page) &&
2311 - !__PageMovable(page)) {
2312 + !__PageMovable(page) && !PageUnevictable(page)) {
2313 ClearPageActive(page);
2314 list_move(&page->lru, &clean_pages);
2315 }
2316 diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
2317 index b55cb96d1fed..f59e13c1d84a 100644
2318 --- a/sound/core/seq/seq_clientmgr.c
2319 +++ b/sound/core/seq/seq_clientmgr.c
2320 @@ -1900,20 +1900,14 @@ static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
2321 int result;
2322 struct snd_seq_client *sender = NULL;
2323 struct snd_seq_client_port *sport = NULL;
2324 - struct snd_seq_subscribers *p;
2325
2326 result = -EINVAL;
2327 if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
2328 goto __end;
2329 if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
2330 goto __end;
2331 - p = snd_seq_port_get_subscription(&sport->c_src, &subs->dest);
2332 - if (p) {
2333 - result = 0;
2334 - *subs = p->info;
2335 - } else
2336 - result = -ENOENT;
2337 -
2338 + result = snd_seq_port_get_subscription(&sport->c_src, &subs->dest,
2339 + subs);
2340 __end:
2341 if (sport)
2342 snd_seq_port_unlock(sport);
2343 diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
2344 index da31aa8e216e..16289aefb443 100644
2345 --- a/sound/core/seq/seq_ports.c
2346 +++ b/sound/core/seq/seq_ports.c
2347 @@ -635,20 +635,23 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector,
2348
2349
2350 /* get matched subscriber */
2351 -struct snd_seq_subscribers *snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
2352 - struct snd_seq_addr *dest_addr)
2353 +int snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
2354 + struct snd_seq_addr *dest_addr,
2355 + struct snd_seq_port_subscribe *subs)
2356 {
2357 - struct snd_seq_subscribers *s, *found = NULL;
2358 + struct snd_seq_subscribers *s;
2359 + int err = -ENOENT;
2360
2361 down_read(&src_grp->list_mutex);
2362 list_for_each_entry(s, &src_grp->list_head, src_list) {
2363 if (addr_match(dest_addr, &s->info.dest)) {
2364 - found = s;
2365 + *subs = s->info;
2366 + err = 0;
2367 break;
2368 }
2369 }
2370 up_read(&src_grp->list_mutex);
2371 - return found;
2372 + return err;
2373 }
2374
2375 /*
2376 diff --git a/sound/core/seq/seq_ports.h b/sound/core/seq/seq_ports.h
2377 index 26bd71f36c41..06003b36652e 100644
2378 --- a/sound/core/seq/seq_ports.h
2379 +++ b/sound/core/seq/seq_ports.h
2380 @@ -135,7 +135,8 @@ int snd_seq_port_subscribe(struct snd_seq_client_port *port,
2381 struct snd_seq_port_subscribe *info);
2382
2383 /* get matched subscriber */
2384 -struct snd_seq_subscribers *snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
2385 - struct snd_seq_addr *dest_addr);
2386 +int snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
2387 + struct snd_seq_addr *dest_addr,
2388 + struct snd_seq_port_subscribe *subs);
2389
2390 #endif
2391 diff --git a/sound/firewire/motu/motu-stream.c b/sound/firewire/motu/motu-stream.c
2392 index 73e7a5e527fc..483a8771d502 100644
2393 --- a/sound/firewire/motu/motu-stream.c
2394 +++ b/sound/firewire/motu/motu-stream.c
2395 @@ -345,7 +345,7 @@ static void destroy_stream(struct snd_motu *motu,
2396 }
2397
2398 amdtp_stream_destroy(stream);
2399 - fw_iso_resources_free(resources);
2400 + fw_iso_resources_destroy(resources);
2401 }
2402
2403 int snd_motu_stream_init_duplex(struct snd_motu *motu)
2404 diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
2405 index 5f82a375725a..4ecaf69569dc 100644
2406 --- a/sound/firewire/oxfw/oxfw.c
2407 +++ b/sound/firewire/oxfw/oxfw.c
2408 @@ -170,9 +170,6 @@ static int detect_quirks(struct snd_oxfw *oxfw)
2409 oxfw->midi_input_ports = 0;
2410 oxfw->midi_output_ports = 0;
2411
2412 - /* Output stream exists but no data channels are useful. */
2413 - oxfw->has_output = false;
2414 -
2415 return snd_oxfw_scs1x_add(oxfw);
2416 }
2417
2418 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2419 index 5e6cb625db83..e154506a66cb 100644
2420 --- a/sound/pci/hda/patch_realtek.c
2421 +++ b/sound/pci/hda/patch_realtek.c
2422 @@ -4082,18 +4082,19 @@ static struct coef_fw alc225_pre_hsmode[] = {
2423 static void alc_headset_mode_unplugged(struct hda_codec *codec)
2424 {
2425 static struct coef_fw coef0255[] = {
2426 + WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
2427 WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */
2428 UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
2429 WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
2430 WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
2431 {}
2432 };
2433 - static struct coef_fw coef0255_1[] = {
2434 - WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
2435 - {}
2436 - };
2437 static struct coef_fw coef0256[] = {
2438 WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
2439 + WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */
2440 + WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
2441 + WRITE_COEFEX(0x57, 0x03, 0x09a3), /* Direct Drive HP Amp control */
2442 + UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
2443 {}
2444 };
2445 static struct coef_fw coef0233[] = {
2446 @@ -4156,13 +4157,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
2447
2448 switch (codec->core.vendor_id) {
2449 case 0x10ec0255:
2450 - alc_process_coef_fw(codec, coef0255_1);
2451 alc_process_coef_fw(codec, coef0255);
2452 break;
2453 case 0x10ec0236:
2454 case 0x10ec0256:
2455 alc_process_coef_fw(codec, coef0256);
2456 - alc_process_coef_fw(codec, coef0255);
2457 break;
2458 case 0x10ec0234:
2459 case 0x10ec0274:
2460 @@ -4215,6 +4214,12 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
2461 WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */
2462 {}
2463 };
2464 + static struct coef_fw coef0256[] = {
2465 + UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14), /* Direct Drive HP Amp control(Set to verb control)*/
2466 + WRITE_COEFEX(0x57, 0x03, 0x09a3),
2467 + WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */
2468 + {}
2469 + };
2470 static struct coef_fw coef0233[] = {
2471 UPDATE_COEF(0x35, 0, 1<<14),
2472 WRITE_COEF(0x06, 0x2100),
2473 @@ -4262,14 +4267,19 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
2474 };
2475
2476 switch (codec->core.vendor_id) {
2477 - case 0x10ec0236:
2478 case 0x10ec0255:
2479 - case 0x10ec0256:
2480 alc_write_coef_idx(codec, 0x45, 0xc489);
2481 snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
2482 alc_process_coef_fw(codec, coef0255);
2483 snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
2484 break;
2485 + case 0x10ec0236:
2486 + case 0x10ec0256:
2487 + alc_write_coef_idx(codec, 0x45, 0xc489);
2488 + snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
2489 + alc_process_coef_fw(codec, coef0256);
2490 + snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
2491 + break;
2492 case 0x10ec0234:
2493 case 0x10ec0274:
2494 case 0x10ec0294:
2495 @@ -4351,6 +4361,14 @@ static void alc_headset_mode_default(struct hda_codec *codec)
2496 WRITE_COEF(0x49, 0x0049),
2497 {}
2498 };
2499 + static struct coef_fw coef0256[] = {
2500 + WRITE_COEF(0x45, 0xc489),
2501 + WRITE_COEFEX(0x57, 0x03, 0x0da3),
2502 + WRITE_COEF(0x49, 0x0049),
2503 + UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
2504 + WRITE_COEF(0x06, 0x6100),
2505 + {}
2506 + };
2507 static struct coef_fw coef0233[] = {
2508 WRITE_COEF(0x06, 0x2100),
2509 WRITE_COEF(0x32, 0x4ea3),
2510 @@ -4401,11 +4419,16 @@ static void alc_headset_mode_default(struct hda_codec *codec)
2511 alc_process_coef_fw(codec, alc225_pre_hsmode);
2512 alc_process_coef_fw(codec, coef0225);
2513 break;
2514 - case 0x10ec0236:
2515 case 0x10ec0255:
2516 - case 0x10ec0256:
2517 alc_process_coef_fw(codec, coef0255);
2518 break;
2519 + case 0x10ec0236:
2520 + case 0x10ec0256:
2521 + alc_write_coef_idx(codec, 0x1b, 0x0e4b);
2522 + alc_write_coef_idx(codec, 0x45, 0xc089);
2523 + msleep(50);
2524 + alc_process_coef_fw(codec, coef0256);
2525 + break;
2526 case 0x10ec0234:
2527 case 0x10ec0274:
2528 case 0x10ec0294:
2529 @@ -4449,8 +4472,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
2530 };
2531 static struct coef_fw coef0256[] = {
2532 WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
2533 - WRITE_COEF(0x1b, 0x0c6b),
2534 - WRITE_COEFEX(0x57, 0x03, 0x8ea6),
2535 + WRITE_COEF(0x1b, 0x0e6b),
2536 {}
2537 };
2538 static struct coef_fw coef0233[] = {
2539 @@ -4568,8 +4590,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
2540 };
2541 static struct coef_fw coef0256[] = {
2542 WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
2543 - WRITE_COEF(0x1b, 0x0c6b),
2544 - WRITE_COEFEX(0x57, 0x03, 0x8ea6),
2545 + WRITE_COEF(0x1b, 0x0e6b),
2546 {}
2547 };
2548 static struct coef_fw coef0233[] = {
2549 @@ -4701,13 +4722,37 @@ static void alc_determine_headset_type(struct hda_codec *codec)
2550 };
2551
2552 switch (codec->core.vendor_id) {
2553 - case 0x10ec0236:
2554 case 0x10ec0255:
2555 + alc_process_coef_fw(codec, coef0255);
2556 + msleep(300);
2557 + val = alc_read_coef_idx(codec, 0x46);
2558 + is_ctia = (val & 0x0070) == 0x0070;
2559 + break;
2560 + case 0x10ec0236:
2561 case 0x10ec0256:
2562 + alc_write_coef_idx(codec, 0x1b, 0x0e4b);
2563 + alc_write_coef_idx(codec, 0x06, 0x6104);
2564 + alc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3);
2565 +
2566 + snd_hda_codec_write(codec, 0x21, 0,
2567 + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
2568 + msleep(80);
2569 + snd_hda_codec_write(codec, 0x21, 0,
2570 + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
2571 +
2572 alc_process_coef_fw(codec, coef0255);
2573 msleep(300);
2574 val = alc_read_coef_idx(codec, 0x46);
2575 is_ctia = (val & 0x0070) == 0x0070;
2576 +
2577 + alc_write_coefex_idx(codec, 0x57, 0x3, 0x0da3);
2578 + alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);
2579 +
2580 + snd_hda_codec_write(codec, 0x21, 0,
2581 + AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
2582 + msleep(80);
2583 + snd_hda_codec_write(codec, 0x21, 0,
2584 + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
2585 break;
2586 case 0x10ec0234:
2587 case 0x10ec0274:
2588 @@ -6084,15 +6129,13 @@ static const struct hda_fixup alc269_fixups[] = {
2589 .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
2590 },
2591 [ALC255_FIXUP_ACER_MIC_NO_PRESENCE] = {
2592 - .type = HDA_FIXUP_VERBS,
2593 - .v.verbs = (const struct hda_verb[]) {
2594 - /* Enable the Mic */
2595 - { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
2596 - { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
2597 - {}
2598 + .type = HDA_FIXUP_PINS,
2599 + .v.pins = (const struct hda_pintbl[]) {
2600 + { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
2601 + { }
2602 },
2603 .chained = true,
2604 - .chain_id = ALC269_FIXUP_LIFEBOOK_EXTMIC
2605 + .chain_id = ALC255_FIXUP_HEADSET_MODE
2606 },
2607 [ALC255_FIXUP_ASUS_MIC_NO_PRESENCE] = {
2608 .type = HDA_FIXUP_PINS,
2609 @@ -7123,10 +7166,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
2610 {0x18, 0x02a11030},
2611 {0x19, 0x0181303F},
2612 {0x21, 0x0221102f}),
2613 - SND_HDA_PIN_QUIRK(0x10ec0255, 0x1025, "Acer", ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
2614 - {0x12, 0x90a60140},
2615 - {0x14, 0x90170120},
2616 - {0x21, 0x02211030}),
2617 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1025, "Acer", ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
2618 {0x12, 0x90a601c0},
2619 {0x14, 0x90171120},
2620 diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c
2621 index ebb9e0cf8364..28a4ac36c4f8 100644
2622 --- a/sound/soc/codecs/cs42xx8.c
2623 +++ b/sound/soc/codecs/cs42xx8.c
2624 @@ -558,6 +558,7 @@ static int cs42xx8_runtime_resume(struct device *dev)
2625 msleep(5);
2626
2627 regcache_cache_only(cs42xx8->regmap, false);
2628 + regcache_mark_dirty(cs42xx8->regmap);
2629
2630 ret = regcache_sync(cs42xx8->regmap);
2631 if (ret) {
2632 diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
2633 index 528e8b108422..09e03b884a82 100644
2634 --- a/sound/soc/fsl/fsl_asrc.c
2635 +++ b/sound/soc/fsl/fsl_asrc.c
2636 @@ -282,8 +282,8 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair)
2637 return -EINVAL;
2638 }
2639
2640 - if ((outrate > 8000 && outrate < 30000) &&
2641 - (outrate/inrate > 24 || inrate/outrate > 8)) {
2642 + if ((outrate >= 8000 && outrate <= 30000) &&
2643 + (outrate > 24 * inrate || inrate > 8 * outrate)) {
2644 pair_err("exceed supported ratio range [1/24, 8] for \
2645 inrate/outrate: %d/%d\n", inrate, outrate);
2646 return -EINVAL;
2647 diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
2648 index 195ba486640f..ba7ee74ee533 100755
2649 --- a/tools/kvm/kvm_stat/kvm_stat
2650 +++ b/tools/kvm/kvm_stat/kvm_stat
2651 @@ -575,8 +575,12 @@ class TracepointProvider(Provider):
2652 def update_fields(self, fields_filter):
2653 """Refresh fields, applying fields_filter"""
2654 self.fields = [field for field in self._get_available_fields()
2655 - if self.is_field_wanted(fields_filter, field) or
2656 - ARCH.tracepoint_is_child(field)]
2657 + if self.is_field_wanted(fields_filter, field)]
2658 + # add parents for child fields - otherwise we won't see any output!
2659 + for field in self._fields:
2660 + parent = ARCH.tracepoint_is_child(field)
2661 + if (parent and parent not in self._fields):
2662 + self.fields.append(parent)
2663
2664 @staticmethod
2665 def _get_online_cpus():
2666 @@ -735,8 +739,12 @@ class DebugfsProvider(Provider):
2667 def update_fields(self, fields_filter):
2668 """Refresh fields, applying fields_filter"""
2669 self._fields = [field for field in self._get_available_fields()
2670 - if self.is_field_wanted(fields_filter, field) or
2671 - ARCH.debugfs_is_child(field)]
2672 + if self.is_field_wanted(fields_filter, field)]
2673 + # add parents for child fields - otherwise we won't see any output!
2674 + for field in self._fields:
2675 + parent = ARCH.debugfs_is_child(field)
2676 + if (parent and parent not in self._fields):
2677 + self.fields.append(parent)
2678
2679 @property
2680 def fields(self):
2681 diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt
2682 index 0811d860fe75..c057ba52364e 100644
2683 --- a/tools/kvm/kvm_stat/kvm_stat.txt
2684 +++ b/tools/kvm/kvm_stat/kvm_stat.txt
2685 @@ -34,6 +34,8 @@ INTERACTIVE COMMANDS
2686 *c*:: clear filter
2687
2688 *f*:: filter by regular expression
2689 + :: *Note*: Child events pull in their parents, and parents' stats summarize
2690 + all child events, not just the filtered ones
2691
2692 *g*:: filter by guest name/PID
2693
2694 diff --git a/tools/testing/selftests/net/fib_rule_tests.sh b/tools/testing/selftests/net/fib_rule_tests.sh
2695 index d84193bdc307..dbd90ca73e44 100755
2696 --- a/tools/testing/selftests/net/fib_rule_tests.sh
2697 +++ b/tools/testing/selftests/net/fib_rule_tests.sh
2698 @@ -55,7 +55,7 @@ setup()
2699
2700 $IP link add dummy0 type dummy
2701 $IP link set dev dummy0 up
2702 - $IP address add 198.51.100.1/24 dev dummy0
2703 + $IP address add 192.51.100.1/24 dev dummy0
2704 $IP -6 address add 2001:db8:1::1/64 dev dummy0
2705
2706 set +e
2707 diff --git a/tools/testing/selftests/timers/adjtick.c b/tools/testing/selftests/timers/adjtick.c
2708 index 0caca3a06bd2..54d8d87f36b3 100644
2709 --- a/tools/testing/selftests/timers/adjtick.c
2710 +++ b/tools/testing/selftests/timers/adjtick.c
2711 @@ -136,6 +136,7 @@ int check_tick_adj(long tickval)
2712
2713 eppm = get_ppm_drift();
2714 printf("%lld usec, %lld ppm", systick + (systick * eppm / MILLION), eppm);
2715 + fflush(stdout);
2716
2717 tx1.modes = 0;
2718 adjtimex(&tx1);
2719 diff --git a/tools/testing/selftests/timers/leapcrash.c b/tools/testing/selftests/timers/leapcrash.c
2720 index 830c462f605d..dc80728ed191 100644
2721 --- a/tools/testing/selftests/timers/leapcrash.c
2722 +++ b/tools/testing/selftests/timers/leapcrash.c
2723 @@ -101,6 +101,7 @@ int main(void)
2724 }
2725 clear_time_state();
2726 printf(".");
2727 + fflush(stdout);
2728 }
2729 printf("[OK]\n");
2730 return ksft_exit_pass();
2731 diff --git a/tools/testing/selftests/timers/mqueue-lat.c b/tools/testing/selftests/timers/mqueue-lat.c
2732 index 1867db5d6f5e..7916cf5cc6ff 100644
2733 --- a/tools/testing/selftests/timers/mqueue-lat.c
2734 +++ b/tools/testing/selftests/timers/mqueue-lat.c
2735 @@ -102,6 +102,7 @@ int main(int argc, char **argv)
2736 int ret;
2737
2738 printf("Mqueue latency : ");
2739 + fflush(stdout);
2740
2741 ret = mqueue_lat_test();
2742 if (ret < 0) {
2743 diff --git a/tools/testing/selftests/timers/nanosleep.c b/tools/testing/selftests/timers/nanosleep.c
2744 index 8adb0bb51d4d..71b5441c2fd9 100644
2745 --- a/tools/testing/selftests/timers/nanosleep.c
2746 +++ b/tools/testing/selftests/timers/nanosleep.c
2747 @@ -142,6 +142,7 @@ int main(int argc, char **argv)
2748 continue;
2749
2750 printf("Nanosleep %-31s ", clockstring(clockid));
2751 + fflush(stdout);
2752
2753 length = 10;
2754 while (length <= (NSEC_PER_SEC * 10)) {
2755 diff --git a/tools/testing/selftests/timers/nsleep-lat.c b/tools/testing/selftests/timers/nsleep-lat.c
2756 index c3c3dc10db17..eb3e79ed7b4a 100644
2757 --- a/tools/testing/selftests/timers/nsleep-lat.c
2758 +++ b/tools/testing/selftests/timers/nsleep-lat.c
2759 @@ -155,6 +155,7 @@ int main(int argc, char **argv)
2760 continue;
2761
2762 printf("nsleep latency %-26s ", clockstring(clockid));
2763 + fflush(stdout);
2764
2765 length = 10;
2766 while (length <= (NSEC_PER_SEC * 10)) {
2767 diff --git a/tools/testing/selftests/timers/raw_skew.c b/tools/testing/selftests/timers/raw_skew.c
2768 index dcf73c5dab6e..b41d8dd0c40c 100644
2769 --- a/tools/testing/selftests/timers/raw_skew.c
2770 +++ b/tools/testing/selftests/timers/raw_skew.c
2771 @@ -112,6 +112,7 @@ int main(int argv, char **argc)
2772 printf("WARNING: ADJ_OFFSET in progress, this will cause inaccurate results\n");
2773
2774 printf("Estimating clock drift: ");
2775 + fflush(stdout);
2776 sleep(120);
2777
2778 get_monotonic_and_raw(&mon, &raw);
2779 diff --git a/tools/testing/selftests/timers/set-tai.c b/tools/testing/selftests/timers/set-tai.c
2780 index 70fed27d8fd3..8c4179ee2ca2 100644
2781 --- a/tools/testing/selftests/timers/set-tai.c
2782 +++ b/tools/testing/selftests/timers/set-tai.c
2783 @@ -55,6 +55,7 @@ int main(int argc, char **argv)
2784 printf("tai offset started at %i\n", ret);
2785
2786 printf("Checking tai offsets can be properly set: ");
2787 + fflush(stdout);
2788 for (i = 1; i <= 60; i++) {
2789 ret = set_tai(i);
2790 ret = get_tai();
2791 diff --git a/tools/testing/selftests/timers/set-tz.c b/tools/testing/selftests/timers/set-tz.c
2792 index 877fd5532fee..62bd33eb16f0 100644
2793 --- a/tools/testing/selftests/timers/set-tz.c
2794 +++ b/tools/testing/selftests/timers/set-tz.c
2795 @@ -65,6 +65,7 @@ int main(int argc, char **argv)
2796 printf("tz_minuteswest started at %i, dst at %i\n", min, dst);
2797
2798 printf("Checking tz_minuteswest can be properly set: ");
2799 + fflush(stdout);
2800 for (i = -15*60; i < 15*60; i += 30) {
2801 ret = set_tz(i, dst);
2802 ret = get_tz_min();
2803 @@ -76,6 +77,7 @@ int main(int argc, char **argv)
2804 printf("[OK]\n");
2805
2806 printf("Checking invalid tz_minuteswest values are caught: ");
2807 + fflush(stdout);
2808
2809 if (!set_tz(-15*60-1, dst)) {
2810 printf("[FAILED] %i didn't return failure!\n", -15*60-1);
2811 diff --git a/tools/testing/selftests/timers/threadtest.c b/tools/testing/selftests/timers/threadtest.c
2812 index 759c9c06f1a0..cf3e48919874 100644
2813 --- a/tools/testing/selftests/timers/threadtest.c
2814 +++ b/tools/testing/selftests/timers/threadtest.c
2815 @@ -163,6 +163,7 @@ int main(int argc, char **argv)
2816 strftime(buf, 255, "%a, %d %b %Y %T %z", localtime(&start));
2817 printf("%s\n", buf);
2818 printf("Testing consistency with %i threads for %ld seconds: ", thread_count, runtime);
2819 + fflush(stdout);
2820
2821 /* spawn */
2822 for (i = 0; i < thread_count; i++)
2823 diff --git a/tools/testing/selftests/timers/valid-adjtimex.c b/tools/testing/selftests/timers/valid-adjtimex.c
2824 index d9d3ab93b31a..5397de708d3c 100644
2825 --- a/tools/testing/selftests/timers/valid-adjtimex.c
2826 +++ b/tools/testing/selftests/timers/valid-adjtimex.c
2827 @@ -123,6 +123,7 @@ int validate_freq(void)
2828 /* Set the leap second insert flag */
2829
2830 printf("Testing ADJ_FREQ... ");
2831 + fflush(stdout);
2832 for (i = 0; i < NUM_FREQ_VALID; i++) {
2833 tx.modes = ADJ_FREQUENCY;
2834 tx.freq = valid_freq[i];
2835 @@ -250,6 +251,7 @@ int set_bad_offset(long sec, long usec, int use_nano)
2836 int validate_set_offset(void)
2837 {
2838 printf("Testing ADJ_SETOFFSET... ");
2839 + fflush(stdout);
2840
2841 /* Test valid values */
2842 if (set_offset(NSEC_PER_SEC - 1, 1))
2843 diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
2844 index 5abbe9b3c652..6880236974b8 100644
2845 --- a/virt/kvm/arm/aarch32.c
2846 +++ b/virt/kvm/arm/aarch32.c
2847 @@ -25,127 +25,6 @@
2848 #include <asm/kvm_emulate.h>
2849 #include <asm/kvm_hyp.h>
2850
2851 -/*
2852 - * stolen from arch/arm/kernel/opcodes.c
2853 - *
2854 - * condition code lookup table
2855 - * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
2856 - *
2857 - * bit position in short is condition code: NZCV
2858 - */
2859 -static const unsigned short cc_map[16] = {
2860 - 0xF0F0, /* EQ == Z set */
2861 - 0x0F0F, /* NE */
2862 - 0xCCCC, /* CS == C set */
2863 - 0x3333, /* CC */
2864 - 0xFF00, /* MI == N set */
2865 - 0x00FF, /* PL */
2866 - 0xAAAA, /* VS == V set */
2867 - 0x5555, /* VC */
2868 - 0x0C0C, /* HI == C set && Z clear */
2869 - 0xF3F3, /* LS == C clear || Z set */
2870 - 0xAA55, /* GE == (N==V) */
2871 - 0x55AA, /* LT == (N!=V) */
2872 - 0x0A05, /* GT == (!Z && (N==V)) */
2873 - 0xF5FA, /* LE == (Z || (N!=V)) */
2874 - 0xFFFF, /* AL always */
2875 - 0 /* NV */
2876 -};
2877 -
2878 -/*
2879 - * Check if a trapped instruction should have been executed or not.
2880 - */
2881 -bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
2882 -{
2883 - unsigned long cpsr;
2884 - u32 cpsr_cond;
2885 - int cond;
2886 -
2887 - /* Top two bits non-zero? Unconditional. */
2888 - if (kvm_vcpu_get_hsr(vcpu) >> 30)
2889 - return true;
2890 -
2891 - /* Is condition field valid? */
2892 - cond = kvm_vcpu_get_condition(vcpu);
2893 - if (cond == 0xE)
2894 - return true;
2895 -
2896 - cpsr = *vcpu_cpsr(vcpu);
2897 -
2898 - if (cond < 0) {
2899 - /* This can happen in Thumb mode: examine IT state. */
2900 - unsigned long it;
2901 -
2902 - it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
2903 -
2904 - /* it == 0 => unconditional. */
2905 - if (it == 0)
2906 - return true;
2907 -
2908 - /* The cond for this insn works out as the top 4 bits. */
2909 - cond = (it >> 4);
2910 - }
2911 -
2912 - cpsr_cond = cpsr >> 28;
2913 -
2914 - if (!((cc_map[cond] >> cpsr_cond) & 1))
2915 - return false;
2916 -
2917 - return true;
2918 -}
2919 -
2920 -/**
2921 - * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
2922 - * @vcpu: The VCPU pointer
2923 - *
2924 - * When exceptions occur while instructions are executed in Thumb IF-THEN
2925 - * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
2926 - * to do this little bit of work manually. The fields map like this:
2927 - *
2928 - * IT[7:0] -> CPSR[26:25],CPSR[15:10]
2929 - */
2930 -static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
2931 -{
2932 - unsigned long itbits, cond;
2933 - unsigned long cpsr = *vcpu_cpsr(vcpu);
2934 - bool is_arm = !(cpsr & PSR_AA32_T_BIT);
2935 -
2936 - if (is_arm || !(cpsr & PSR_AA32_IT_MASK))
2937 - return;
2938 -
2939 - cond = (cpsr & 0xe000) >> 13;
2940 - itbits = (cpsr & 0x1c00) >> (10 - 2);
2941 - itbits |= (cpsr & (0x3 << 25)) >> 25;
2942 -
2943 - /* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
2944 - if ((itbits & 0x7) == 0)
2945 - itbits = cond = 0;
2946 - else
2947 - itbits = (itbits << 1) & 0x1f;
2948 -
2949 - cpsr &= ~PSR_AA32_IT_MASK;
2950 - cpsr |= cond << 13;
2951 - cpsr |= (itbits & 0x1c) << (10 - 2);
2952 - cpsr |= (itbits & 0x3) << 25;
2953 - *vcpu_cpsr(vcpu) = cpsr;
2954 -}
2955 -
2956 -/**
2957 - * kvm_skip_instr - skip a trapped instruction and proceed to the next
2958 - * @vcpu: The vcpu pointer
2959 - */
2960 -void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
2961 -{
2962 - bool is_thumb;
2963 -
2964 - is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
2965 - if (is_thumb && !is_wide_instr)
2966 - *vcpu_pc(vcpu) += 2;
2967 - else
2968 - *vcpu_pc(vcpu) += 4;
2969 - kvm_adjust_itstate(vcpu);
2970 -}
2971 -
2972 /*
2973 * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
2974 */
2975 diff --git a/virt/kvm/arm/hyp/aarch32.c b/virt/kvm/arm/hyp/aarch32.c
2976 new file mode 100644
2977 index 000000000000..d31f267961e7
2978 --- /dev/null
2979 +++ b/virt/kvm/arm/hyp/aarch32.c
2980 @@ -0,0 +1,136 @@
2981 +// SPDX-License-Identifier: GPL-2.0
2982 +/*
2983 + * Hyp portion of the (not much of an) Emulation layer for 32bit guests.
2984 + *
2985 + * Copyright (C) 2012,2013 - ARM Ltd
2986 + * Author: Marc Zyngier <marc.zyngier@arm.com>
2987 + *
2988 + * based on arch/arm/kvm/emulate.c
2989 + * Copyright (C) 2012 - Virtual Open Systems and Columbia University
2990 + * Author: Christoffer Dall <c.dall@virtualopensystems.com>
2991 + */
2992 +
2993 +#include <linux/kvm_host.h>
2994 +#include <asm/kvm_emulate.h>
2995 +#include <asm/kvm_hyp.h>
2996 +
2997 +/*
2998 + * stolen from arch/arm/kernel/opcodes.c
2999 + *
3000 + * condition code lookup table
3001 + * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
3002 + *
3003 + * bit position in short is condition code: NZCV
3004 + */
3005 +static const unsigned short cc_map[16] = {
3006 + 0xF0F0, /* EQ == Z set */
3007 + 0x0F0F, /* NE */
3008 + 0xCCCC, /* CS == C set */
3009 + 0x3333, /* CC */
3010 + 0xFF00, /* MI == N set */
3011 + 0x00FF, /* PL */
3012 + 0xAAAA, /* VS == V set */
3013 + 0x5555, /* VC */
3014 + 0x0C0C, /* HI == C set && Z clear */
3015 + 0xF3F3, /* LS == C clear || Z set */
3016 + 0xAA55, /* GE == (N==V) */
3017 + 0x55AA, /* LT == (N!=V) */
3018 + 0x0A05, /* GT == (!Z && (N==V)) */
3019 + 0xF5FA, /* LE == (Z || (N!=V)) */
3020 + 0xFFFF, /* AL always */
3021 + 0 /* NV */
3022 +};
3023 +
3024 +/*
3025 + * Check if a trapped instruction should have been executed or not.
3026 + */
3027 +bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
3028 +{
3029 + unsigned long cpsr;
3030 + u32 cpsr_cond;
3031 + int cond;
3032 +
3033 + /* Top two bits non-zero? Unconditional. */
3034 + if (kvm_vcpu_get_hsr(vcpu) >> 30)
3035 + return true;
3036 +
3037 + /* Is condition field valid? */
3038 + cond = kvm_vcpu_get_condition(vcpu);
3039 + if (cond == 0xE)
3040 + return true;
3041 +
3042 + cpsr = *vcpu_cpsr(vcpu);
3043 +
3044 + if (cond < 0) {
3045 + /* This can happen in Thumb mode: examine IT state. */
3046 + unsigned long it;
3047 +
3048 + it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
3049 +
3050 + /* it == 0 => unconditional. */
3051 + if (it == 0)
3052 + return true;
3053 +
3054 + /* The cond for this insn works out as the top 4 bits. */
3055 + cond = (it >> 4);
3056 + }
3057 +
3058 + cpsr_cond = cpsr >> 28;
3059 +
3060 + if (!((cc_map[cond] >> cpsr_cond) & 1))
3061 + return false;
3062 +
3063 + return true;
3064 +}
3065 +
3066 +/**
3067 + * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
3068 + * @vcpu: The VCPU pointer
3069 + *
3070 + * When exceptions occur while instructions are executed in Thumb IF-THEN
3071 + * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
3072 + * to do this little bit of work manually. The fields map like this:
3073 + *
3074 + * IT[7:0] -> CPSR[26:25],CPSR[15:10]
3075 + */
3076 +static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
3077 +{
3078 + unsigned long itbits, cond;
3079 + unsigned long cpsr = *vcpu_cpsr(vcpu);
3080 + bool is_arm = !(cpsr & PSR_AA32_T_BIT);
3081 +
3082 + if (is_arm || !(cpsr & PSR_AA32_IT_MASK))
3083 + return;
3084 +
3085 + cond = (cpsr & 0xe000) >> 13;
3086 + itbits = (cpsr & 0x1c00) >> (10 - 2);
3087 + itbits |= (cpsr & (0x3 << 25)) >> 25;
3088 +
3089 + /* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
3090 + if ((itbits & 0x7) == 0)
3091 + itbits = cond = 0;
3092 + else
3093 + itbits = (itbits << 1) & 0x1f;
3094 +
3095 + cpsr &= ~PSR_AA32_IT_MASK;
3096 + cpsr |= cond << 13;
3097 + cpsr |= (itbits & 0x1c) << (10 - 2);
3098 + cpsr |= (itbits & 0x3) << 25;
3099 + *vcpu_cpsr(vcpu) = cpsr;
3100 +}
3101 +
3102 +/**
3103 + * kvm_skip_instr - skip a trapped instruction and proceed to the next
3104 + * @vcpu: The vcpu pointer
3105 + */
3106 +void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
3107 +{
3108 + bool is_thumb;
3109 +
3110 + is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
3111 + if (is_thumb && !is_wide_instr)
3112 + *vcpu_pc(vcpu) += 2;
3113 + else
3114 + *vcpu_pc(vcpu) += 4;
3115 + kvm_adjust_itstate(vcpu);
3116 +}