Contents of /trunk/kernel-alx/patches-4.19/0126-4.19.27-all-fixes.patch
Parent Directory | Revision Log
Revision 3405 -
(show annotations)
(download)
Fri Aug 2 11:47:38 2019 UTC (5 years, 1 month ago) by niro
File size: 89447 byte(s)
-linux-4.19.27
1 | diff --git a/Makefile b/Makefile |
2 | index b71076cecba9c..70ed9a53558a5 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 4 |
8 | PATCHLEVEL = 19 |
9 | -SUBLEVEL = 26 |
10 | +SUBLEVEL = 27 |
11 | EXTRAVERSION = |
12 | NAME = "People's Front" |
13 | |
14 | diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h |
15 | index 8da87feec59aa..99e6d8948f4ac 100644 |
16 | --- a/arch/arc/include/asm/bitops.h |
17 | +++ b/arch/arc/include/asm/bitops.h |
18 | @@ -340,7 +340,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x) |
19 | /* |
20 | * __ffs: Similar to ffs, but zero based (0-31) |
21 | */ |
22 | -static inline __attribute__ ((const)) int __ffs(unsigned long word) |
23 | +static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word) |
24 | { |
25 | if (!word) |
26 | return word; |
27 | @@ -400,9 +400,9 @@ static inline __attribute__ ((const)) int ffs(unsigned long x) |
28 | /* |
29 | * __ffs: Similar to ffs, but zero based (0-31) |
30 | */ |
31 | -static inline __attribute__ ((const)) int __ffs(unsigned long x) |
32 | +static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x) |
33 | { |
34 | - int n; |
35 | + unsigned long n; |
36 | |
37 | asm volatile( |
38 | " ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */ |
39 | diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c |
40 | index e8d9fb4523462..5c6663321e873 100644 |
41 | --- a/arch/arc/kernel/troubleshoot.c |
42 | +++ b/arch/arc/kernel/troubleshoot.c |
43 | @@ -18,6 +18,8 @@ |
44 | #include <asm/arcregs.h> |
45 | #include <asm/irqflags.h> |
46 | |
47 | +#define ARC_PATH_MAX 256 |
48 | + |
49 | /* |
50 | * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25) |
51 | * -Prints 3 regs per line and a CR. |
52 | @@ -58,11 +60,12 @@ static void show_callee_regs(struct callee_regs *cregs) |
53 | print_reg_file(&(cregs->r13), 13); |
54 | } |
55 | |
56 | -static void print_task_path_n_nm(struct task_struct *tsk, char *buf) |
57 | +static void print_task_path_n_nm(struct task_struct *tsk) |
58 | { |
59 | char *path_nm = NULL; |
60 | struct mm_struct *mm; |
61 | struct file *exe_file; |
62 | + char buf[ARC_PATH_MAX]; |
63 | |
64 | mm = get_task_mm(tsk); |
65 | if (!mm) |
66 | @@ -72,7 +75,7 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf) |
67 | mmput(mm); |
68 | |
69 | if (exe_file) { |
70 | - path_nm = file_path(exe_file, buf, 255); |
71 | + path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1); |
72 | fput(exe_file); |
73 | } |
74 | |
75 | @@ -80,10 +83,9 @@ done: |
76 | pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?"); |
77 | } |
78 | |
79 | -static void show_faulting_vma(unsigned long address, char *buf) |
80 | +static void show_faulting_vma(unsigned long address) |
81 | { |
82 | struct vm_area_struct *vma; |
83 | - char *nm = buf; |
84 | struct mm_struct *active_mm = current->active_mm; |
85 | |
86 | /* can't use print_vma_addr() yet as it doesn't check for |
87 | @@ -96,8 +98,11 @@ static void show_faulting_vma(unsigned long address, char *buf) |
88 | * if the container VMA is not found |
89 | */ |
90 | if (vma && (vma->vm_start <= address)) { |
91 | + char buf[ARC_PATH_MAX]; |
92 | + char *nm = "?"; |
93 | + |
94 | if (vma->vm_file) { |
95 | - nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1); |
96 | + nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1); |
97 | if (IS_ERR(nm)) |
98 | nm = "?"; |
99 | } |
100 | @@ -173,13 +178,8 @@ void show_regs(struct pt_regs *regs) |
101 | { |
102 | struct task_struct *tsk = current; |
103 | struct callee_regs *cregs; |
104 | - char *buf; |
105 | |
106 | - buf = (char *)__get_free_page(GFP_KERNEL); |
107 | - if (!buf) |
108 | - return; |
109 | - |
110 | - print_task_path_n_nm(tsk, buf); |
111 | + print_task_path_n_nm(tsk); |
112 | show_regs_print_info(KERN_INFO); |
113 | |
114 | show_ecr_verbose(regs); |
115 | @@ -189,7 +189,7 @@ void show_regs(struct pt_regs *regs) |
116 | (void *)regs->blink, (void *)regs->ret); |
117 | |
118 | if (user_mode(regs)) |
119 | - show_faulting_vma(regs->ret, buf); /* faulting code, not data */ |
120 | + show_faulting_vma(regs->ret); /* faulting code, not data */ |
121 | |
122 | pr_info("[STAT32]: 0x%08lx", regs->status32); |
123 | |
124 | @@ -221,8 +221,6 @@ void show_regs(struct pt_regs *regs) |
125 | cregs = (struct callee_regs *)current->thread.callee_reg; |
126 | if (cregs) |
127 | show_callee_regs(cregs); |
128 | - |
129 | - free_page((unsigned long)buf); |
130 | } |
131 | |
132 | void show_kernel_fault_diag(const char *str, struct pt_regs *regs, |
133 | diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c |
134 | index 07b4c65a88a43..8e73d65f34806 100644 |
135 | --- a/arch/mips/bcm63xx/dev-enet.c |
136 | +++ b/arch/mips/bcm63xx/dev-enet.c |
137 | @@ -70,6 +70,8 @@ static struct platform_device bcm63xx_enet_shared_device = { |
138 | |
139 | static int shared_device_registered; |
140 | |
141 | +static u64 enet_dmamask = DMA_BIT_MASK(32); |
142 | + |
143 | static struct resource enet0_res[] = { |
144 | { |
145 | .start = -1, /* filled at runtime */ |
146 | @@ -99,6 +101,8 @@ static struct platform_device bcm63xx_enet0_device = { |
147 | .resource = enet0_res, |
148 | .dev = { |
149 | .platform_data = &enet0_pd, |
150 | + .dma_mask = &enet_dmamask, |
151 | + .coherent_dma_mask = DMA_BIT_MASK(32), |
152 | }, |
153 | }; |
154 | |
155 | @@ -131,6 +135,8 @@ static struct platform_device bcm63xx_enet1_device = { |
156 | .resource = enet1_res, |
157 | .dev = { |
158 | .platform_data = &enet1_pd, |
159 | + .dma_mask = &enet_dmamask, |
160 | + .coherent_dma_mask = DMA_BIT_MASK(32), |
161 | }, |
162 | }; |
163 | |
164 | @@ -157,6 +163,8 @@ static struct platform_device bcm63xx_enetsw_device = { |
165 | .resource = enetsw_res, |
166 | .dev = { |
167 | .platform_data = &enetsw_pd, |
168 | + .dma_mask = &enet_dmamask, |
169 | + .coherent_dma_mask = DMA_BIT_MASK(32), |
170 | }, |
171 | }; |
172 | |
173 | diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c |
174 | index 0b9535bc2c53d..6b2a4a902a981 100644 |
175 | --- a/arch/mips/kernel/cmpxchg.c |
176 | +++ b/arch/mips/kernel/cmpxchg.c |
177 | @@ -54,10 +54,9 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s |
178 | unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old, |
179 | unsigned long new, unsigned int size) |
180 | { |
181 | - u32 mask, old32, new32, load32; |
182 | + u32 mask, old32, new32, load32, load; |
183 | volatile u32 *ptr32; |
184 | unsigned int shift; |
185 | - u8 load; |
186 | |
187 | /* Check that ptr is naturally aligned */ |
188 | WARN_ON((unsigned long)ptr & (size - 1)); |
189 | diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c |
190 | index 252c00985c973..9bda82ed75eb7 100644 |
191 | --- a/arch/mips/net/ebpf_jit.c |
192 | +++ b/arch/mips/net/ebpf_jit.c |
193 | @@ -1818,7 +1818,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) |
194 | |
195 | /* Update the icache */ |
196 | flush_icache_range((unsigned long)ctx.target, |
197 | - (unsigned long)(ctx.target + ctx.idx * sizeof(u32))); |
198 | + (unsigned long)&ctx.target[ctx.idx]); |
199 | |
200 | if (bpf_jit_enable > 1) |
201 | /* Dump JIT code */ |
202 | diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h |
203 | index aae77eb8491c0..4111edb3188e2 100644 |
204 | --- a/arch/x86/include/asm/uaccess.h |
205 | +++ b/arch/x86/include/asm/uaccess.h |
206 | @@ -293,8 +293,7 @@ do { \ |
207 | __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \ |
208 | break; \ |
209 | case 8: \ |
210 | - __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \ |
211 | - errret); \ |
212 | + __put_user_asm_u64(x, ptr, retval, errret); \ |
213 | break; \ |
214 | default: \ |
215 | __put_user_bad(); \ |
216 | @@ -440,8 +439,10 @@ do { \ |
217 | #define __put_user_nocheck(x, ptr, size) \ |
218 | ({ \ |
219 | int __pu_err; \ |
220 | + __typeof__(*(ptr)) __pu_val; \ |
221 | + __pu_val = x; \ |
222 | __uaccess_begin(); \ |
223 | - __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \ |
224 | + __put_user_size(__pu_val, (ptr), (size), __pu_err, -EFAULT);\ |
225 | __uaccess_end(); \ |
226 | __builtin_expect(__pu_err, 0); \ |
227 | }) |
228 | diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c |
229 | index 7654febd51027..652e7ffa9b9de 100644 |
230 | --- a/arch/x86/kernel/apic/vector.c |
231 | +++ b/arch/x86/kernel/apic/vector.c |
232 | @@ -313,14 +313,13 @@ assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest) |
233 | struct apic_chip_data *apicd = apic_chip_data(irqd); |
234 | int vector, cpu; |
235 | |
236 | - cpumask_and(vector_searchmask, vector_searchmask, affmsk); |
237 | - cpu = cpumask_first(vector_searchmask); |
238 | - if (cpu >= nr_cpu_ids) |
239 | - return -EINVAL; |
240 | + cpumask_and(vector_searchmask, dest, affmsk); |
241 | + |
242 | /* set_affinity might call here for nothing */ |
243 | if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask)) |
244 | return 0; |
245 | - vector = irq_matrix_alloc_managed(vector_matrix, cpu); |
246 | + vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask, |
247 | + &cpu); |
248 | trace_vector_alloc_managed(irqd->irq, vector, vector); |
249 | if (vector < 0) |
250 | return vector; |
251 | diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c |
252 | index ee8f8d70b98a2..b475419620121 100644 |
253 | --- a/arch/x86/kvm/svm.c |
254 | +++ b/arch/x86/kvm/svm.c |
255 | @@ -3399,6 +3399,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) |
256 | kvm_mmu_reset_context(&svm->vcpu); |
257 | kvm_mmu_load(&svm->vcpu); |
258 | |
259 | + /* |
260 | + * Drop what we picked up for L2 via svm_complete_interrupts() so it |
261 | + * doesn't end up in L1. |
262 | + */ |
263 | + svm->vcpu.arch.nmi_injected = false; |
264 | + kvm_clear_exception_queue(&svm->vcpu); |
265 | + kvm_clear_interrupt_queue(&svm->vcpu); |
266 | + |
267 | return 0; |
268 | } |
269 | |
270 | @@ -4485,25 +4493,14 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm) |
271 | kvm_lapic_reg_write(apic, APIC_ICR, icrl); |
272 | break; |
273 | case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: { |
274 | - int i; |
275 | - struct kvm_vcpu *vcpu; |
276 | - struct kvm *kvm = svm->vcpu.kvm; |
277 | struct kvm_lapic *apic = svm->vcpu.arch.apic; |
278 | |
279 | /* |
280 | - * At this point, we expect that the AVIC HW has already |
281 | - * set the appropriate IRR bits on the valid target |
282 | - * vcpus. So, we just need to kick the appropriate vcpu. |
283 | + * Update ICR high and low, then emulate sending IPI, |
284 | + * which is handled when writing APIC_ICR. |
285 | */ |
286 | - kvm_for_each_vcpu(i, vcpu, kvm) { |
287 | - bool m = kvm_apic_match_dest(vcpu, apic, |
288 | - icrl & KVM_APIC_SHORT_MASK, |
289 | - GET_APIC_DEST_FIELD(icrh), |
290 | - icrl & KVM_APIC_DEST_MASK); |
291 | - |
292 | - if (m && !avic_vcpu_is_running(vcpu)) |
293 | - kvm_vcpu_wake_up(vcpu); |
294 | - } |
295 | + kvm_lapic_reg_write(apic, APIC_ICR2, icrh); |
296 | + kvm_lapic_reg_write(apic, APIC_ICR, icrl); |
297 | break; |
298 | } |
299 | case AVIC_IPI_FAILURE_INVALID_TARGET: |
300 | diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c |
301 | index 7ae36868aed25..c9faf34cbb62e 100644 |
302 | --- a/arch/x86/mm/mem_encrypt_identity.c |
303 | +++ b/arch/x86/mm/mem_encrypt_identity.c |
304 | @@ -157,8 +157,8 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd) |
305 | pmd = pmd_offset(pud, ppd->vaddr); |
306 | if (pmd_none(*pmd)) { |
307 | pte = ppd->pgtable_area; |
308 | - memset(pte, 0, sizeof(pte) * PTRS_PER_PTE); |
309 | - ppd->pgtable_area += sizeof(pte) * PTRS_PER_PTE; |
310 | + memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE); |
311 | + ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE; |
312 | set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte))); |
313 | } |
314 | |
315 | diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c |
316 | index decffb3826ece..a738af893532f 100644 |
317 | --- a/drivers/clk/clk-versaclock5.c |
318 | +++ b/drivers/clk/clk-versaclock5.c |
319 | @@ -262,8 +262,10 @@ static int vc5_mux_set_parent(struct clk_hw *hw, u8 index) |
320 | |
321 | if (vc5->clk_mux_ins == VC5_MUX_IN_XIN) |
322 | src = VC5_PRIM_SRC_SHDN_EN_XTAL; |
323 | - if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN) |
324 | + else if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN) |
325 | src = VC5_PRIM_SRC_SHDN_EN_CLKIN; |
326 | + else /* Invalid; should have been caught by vc5_probe() */ |
327 | + return -EINVAL; |
328 | } |
329 | |
330 | return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src); |
331 | diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c |
332 | index d31055ae6ec6f..5413ffaf02e23 100644 |
333 | --- a/drivers/clk/clk.c |
334 | +++ b/drivers/clk/clk.c |
335 | @@ -2687,7 +2687,7 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) |
336 | seq_printf(s, "\"protect_count\": %d,", c->protect_count); |
337 | seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); |
338 | seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); |
339 | - seq_printf(s, "\"phase\": %d", clk_core_get_phase(c)); |
340 | + seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c)); |
341 | seq_printf(s, "\"duty_cycle\": %u", |
342 | clk_core_get_scaled_duty_cycle(c, 100000)); |
343 | } |
344 | diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c |
345 | index 269d3595758be..edc31bb56674a 100644 |
346 | --- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c |
347 | +++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c |
348 | @@ -133,9 +133,11 @@ static int tegra124_dfll_fcpu_remove(struct platform_device *pdev) |
349 | struct tegra_dfll_soc_data *soc; |
350 | |
351 | soc = tegra_dfll_unregister(pdev); |
352 | - if (IS_ERR(soc)) |
353 | + if (IS_ERR(soc)) { |
354 | dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n", |
355 | PTR_ERR(soc)); |
356 | + return PTR_ERR(soc); |
357 | + } |
358 | |
359 | tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq); |
360 | |
361 | diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c |
362 | index 16b1a9cf6cf08..743d3c983082d 100644 |
363 | --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c |
364 | +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c |
365 | @@ -32,6 +32,7 @@ |
366 | #include "vega10_pptable.h" |
367 | |
368 | #define NUM_DSPCLK_LEVELS 8 |
369 | +#define VEGA10_ENGINECLOCK_HARDMAX 198000 |
370 | |
371 | static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, |
372 | enum phm_platform_caps cap) |
373 | @@ -258,7 +259,26 @@ static int init_over_drive_limits( |
374 | struct pp_hwmgr *hwmgr, |
375 | const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) |
376 | { |
377 | - hwmgr->platform_descriptor.overdriveLimit.engineClock = |
378 | + const ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table = |
379 | + (const ATOM_Vega10_GFXCLK_Dependency_Table *) |
380 | + (((unsigned long) powerplay_table) + |
381 | + le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset)); |
382 | + bool is_acg_enabled = false; |
383 | + ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2; |
384 | + |
385 | + if (gfxclk_dep_table->ucRevId == 1) { |
386 | + patom_record_v2 = |
387 | + (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries; |
388 | + is_acg_enabled = |
389 | + (bool)patom_record_v2[gfxclk_dep_table->ucNumEntries-1].ucACGEnable; |
390 | + } |
391 | + |
392 | + if (powerplay_table->ulMaxODEngineClock > VEGA10_ENGINECLOCK_HARDMAX && |
393 | + !is_acg_enabled) |
394 | + hwmgr->platform_descriptor.overdriveLimit.engineClock = |
395 | + VEGA10_ENGINECLOCK_HARDMAX; |
396 | + else |
397 | + hwmgr->platform_descriptor.overdriveLimit.engineClock = |
398 | le32_to_cpu(powerplay_table->ulMaxODEngineClock); |
399 | hwmgr->platform_descriptor.overdriveLimit.memoryClock = |
400 | le32_to_cpu(powerplay_table->ulMaxODMemoryClock); |
401 | diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c |
402 | index 23397c08be11c..1d74aed7e471f 100644 |
403 | --- a/drivers/gpu/drm/drm_atomic_helper.c |
404 | +++ b/drivers/gpu/drm/drm_atomic_helper.c |
405 | @@ -1564,6 +1564,15 @@ int drm_atomic_helper_async_check(struct drm_device *dev, |
406 | old_plane_state->crtc != new_plane_state->crtc) |
407 | return -EINVAL; |
408 | |
409 | + /* |
410 | + * FIXME: Since prepare_fb and cleanup_fb are always called on |
411 | + * the new_plane_state for async updates we need to block framebuffer |
412 | + * changes. This prevents use of a fb that's been cleaned up and |
413 | + * double cleanups from occuring. |
414 | + */ |
415 | + if (old_plane_state->fb != new_plane_state->fb) |
416 | + return -EINVAL; |
417 | + |
418 | funcs = plane->helper_private; |
419 | if (!funcs->atomic_async_update) |
420 | return -EINVAL; |
421 | diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c |
422 | index bbb8126ec5c57..9acb9dfaf57e6 100644 |
423 | --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c |
424 | +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c |
425 | @@ -896,7 +896,7 @@ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq) |
426 | np = dev_pm_opp_get_of_node(opp); |
427 | |
428 | if (np) { |
429 | - of_property_read_u32(np, "qcom,level", &val); |
430 | + of_property_read_u32(np, "opp-level", &val); |
431 | of_node_put(np); |
432 | } |
433 | |
434 | diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c |
435 | index f7a0edea4705b..d4cc5ceb22d01 100644 |
436 | --- a/drivers/gpu/drm/msm/msm_rd.c |
437 | +++ b/drivers/gpu/drm/msm/msm_rd.c |
438 | @@ -115,7 +115,9 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz) |
439 | char *fptr = &fifo->buf[fifo->head]; |
440 | int n; |
441 | |
442 | - wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); |
443 | + wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open); |
444 | + if (!rd->open) |
445 | + return; |
446 | |
447 | /* Note that smp_load_acquire() is not strictly required |
448 | * as CIRC_SPACE_TO_END() does not access the tail more |
449 | @@ -213,7 +215,10 @@ out: |
450 | static int rd_release(struct inode *inode, struct file *file) |
451 | { |
452 | struct msm_rd_state *rd = inode->i_private; |
453 | + |
454 | rd->open = false; |
455 | + wake_up_all(&rd->fifo_event); |
456 | + |
457 | return 0; |
458 | } |
459 | |
460 | diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c |
461 | index 061d2e0d9011e..416da53767018 100644 |
462 | --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c |
463 | +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c |
464 | @@ -92,6 +92,8 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder) |
465 | val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG); |
466 | val &= ~SUN4I_HDMI_VID_CTRL_ENABLE; |
467 | writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG); |
468 | + |
469 | + clk_disable_unprepare(hdmi->tmds_clk); |
470 | } |
471 | |
472 | static void sun4i_hdmi_enable(struct drm_encoder *encoder) |
473 | @@ -102,6 +104,8 @@ static void sun4i_hdmi_enable(struct drm_encoder *encoder) |
474 | |
475 | DRM_DEBUG_DRIVER("Enabling the HDMI Output\n"); |
476 | |
477 | + clk_prepare_enable(hdmi->tmds_clk); |
478 | + |
479 | sun4i_hdmi_setup_avi_infoframes(hdmi, mode); |
480 | val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI); |
481 | val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END); |
482 | diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c |
483 | index ad70e7c416e30..fbfa7ff6deb16 100644 |
484 | --- a/drivers/irqchip/irq-gic-v3-mbi.c |
485 | +++ b/drivers/irqchip/irq-gic-v3-mbi.c |
486 | @@ -24,7 +24,7 @@ struct mbi_range { |
487 | unsigned long *bm; |
488 | }; |
489 | |
490 | -static struct mutex mbi_lock; |
491 | +static DEFINE_MUTEX(mbi_lock); |
492 | static phys_addr_t mbi_phys_base; |
493 | static struct mbi_range *mbi_ranges; |
494 | static unsigned int mbi_range_nr; |
495 | diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c |
496 | index 50a5c340307b8..d4f9bfbaf0232 100644 |
497 | --- a/drivers/mmc/core/core.c |
498 | +++ b/drivers/mmc/core/core.c |
499 | @@ -95,7 +95,7 @@ static void mmc_should_fail_request(struct mmc_host *host, |
500 | if (!data) |
501 | return; |
502 | |
503 | - if (cmd->error || data->error || |
504 | + if ((cmd && cmd->error) || data->error || |
505 | !should_fail(&host->fail_mmc_request, data->blksz * data->blocks)) |
506 | return; |
507 | |
508 | diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c |
509 | index 159270e947cf6..a8af682a91821 100644 |
510 | --- a/drivers/mmc/host/cqhci.c |
511 | +++ b/drivers/mmc/host/cqhci.c |
512 | @@ -201,7 +201,7 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host) |
513 | cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots; |
514 | |
515 | cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs * |
516 | - (cq_host->num_slots - 1); |
517 | + cq_host->mmc->cqe_qdepth; |
518 | |
519 | pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n", |
520 | mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size, |
521 | @@ -217,12 +217,21 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host) |
522 | cq_host->desc_size, |
523 | &cq_host->desc_dma_base, |
524 | GFP_KERNEL); |
525 | + if (!cq_host->desc_base) |
526 | + return -ENOMEM; |
527 | + |
528 | cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc), |
529 | cq_host->data_size, |
530 | &cq_host->trans_desc_dma_base, |
531 | GFP_KERNEL); |
532 | - if (!cq_host->desc_base || !cq_host->trans_desc_base) |
533 | + if (!cq_host->trans_desc_base) { |
534 | + dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size, |
535 | + cq_host->desc_base, |
536 | + cq_host->desc_dma_base); |
537 | + cq_host->desc_base = NULL; |
538 | + cq_host->desc_dma_base = 0; |
539 | return -ENOMEM; |
540 | + } |
541 | |
542 | pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n", |
543 | mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base, |
544 | diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c |
545 | index 476e53d301283..67f6bd24a9d0c 100644 |
546 | --- a/drivers/mmc/host/mmc_spi.c |
547 | +++ b/drivers/mmc/host/mmc_spi.c |
548 | @@ -1447,6 +1447,7 @@ static int mmc_spi_probe(struct spi_device *spi) |
549 | mmc->caps &= ~MMC_CAP_NEEDS_POLL; |
550 | mmc_gpiod_request_cd_irq(mmc); |
551 | } |
552 | + mmc_detect_change(mmc, 0); |
553 | |
554 | if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) { |
555 | has_ro = true; |
556 | diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c |
557 | index 5389c48218820..c3d63edb545e3 100644 |
558 | --- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c |
559 | +++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c |
560 | @@ -68,6 +68,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = { |
561 | .scc_offset = 0x0300, |
562 | .taps = rcar_gen2_scc_taps, |
563 | .taps_num = ARRAY_SIZE(rcar_gen2_scc_taps), |
564 | + .max_blk_count = 0xffffffff, |
565 | }; |
566 | |
567 | /* Definitions for sampling clocks */ |
568 | diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c |
569 | index f44e49014a440..753973dc16556 100644 |
570 | --- a/drivers/mmc/host/sdhci-esdhc-imx.c |
571 | +++ b/drivers/mmc/host/sdhci-esdhc-imx.c |
572 | @@ -1097,11 +1097,12 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host) |
573 | writel(readl(host->ioaddr + SDHCI_HOST_CONTROL) |
574 | | ESDHC_BURST_LEN_EN_INCR, |
575 | host->ioaddr + SDHCI_HOST_CONTROL); |
576 | + |
577 | /* |
578 | - * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL |
579 | - * TO1.1, it's harmless for MX6SL |
580 | - */ |
581 | - writel(readl(host->ioaddr + 0x6c) | BIT(7), |
582 | + * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL |
583 | + * TO1.1, it's harmless for MX6SL |
584 | + */ |
585 | + writel(readl(host->ioaddr + 0x6c) & ~BIT(7), |
586 | host->ioaddr + 0x6c); |
587 | |
588 | /* disable DLL_CTRL delay line settings */ |
589 | diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h |
590 | index 5d141f79e175b..7c40a7e1fea1c 100644 |
591 | --- a/drivers/mmc/host/tmio_mmc.h |
592 | +++ b/drivers/mmc/host/tmio_mmc.h |
593 | @@ -279,6 +279,11 @@ static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, |
594 | iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); |
595 | } |
596 | |
597 | +static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) |
598 | +{ |
599 | + iowrite32(val, host->ctl + (addr << host->bus_shift)); |
600 | +} |
601 | + |
602 | static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr, |
603 | const u32 *buf, int count) |
604 | { |
605 | diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c |
606 | index 261b4d62d2b10..7d13ca9ea5347 100644 |
607 | --- a/drivers/mmc/host/tmio_mmc_core.c |
608 | +++ b/drivers/mmc/host/tmio_mmc_core.c |
609 | @@ -46,6 +46,7 @@ |
610 | #include <linux/regulator/consumer.h> |
611 | #include <linux/mmc/sdio.h> |
612 | #include <linux/scatterlist.h> |
613 | +#include <linux/sizes.h> |
614 | #include <linux/spinlock.h> |
615 | #include <linux/swiotlb.h> |
616 | #include <linux/workqueue.h> |
617 | @@ -703,7 +704,7 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg, |
618 | return false; |
619 | } |
620 | |
621 | -static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) |
622 | +static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) |
623 | { |
624 | struct mmc_host *mmc = host->mmc; |
625 | struct tmio_mmc_data *pdata = host->pdata; |
626 | @@ -711,7 +712,7 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) |
627 | unsigned int sdio_status; |
628 | |
629 | if (!(pdata->flags & TMIO_MMC_SDIO_IRQ)) |
630 | - return; |
631 | + return false; |
632 | |
633 | status = sd_ctrl_read16(host, CTL_SDIO_STATUS); |
634 | ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask; |
635 | @@ -724,6 +725,8 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) |
636 | |
637 | if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ) |
638 | mmc_signal_sdio_irq(mmc); |
639 | + |
640 | + return ireg; |
641 | } |
642 | |
643 | irqreturn_t tmio_mmc_irq(int irq, void *devid) |
644 | @@ -742,9 +745,10 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid) |
645 | if (__tmio_mmc_sdcard_irq(host, ireg, status)) |
646 | return IRQ_HANDLED; |
647 | |
648 | - __tmio_mmc_sdio_irq(host); |
649 | + if (__tmio_mmc_sdio_irq(host)) |
650 | + return IRQ_HANDLED; |
651 | |
652 | - return IRQ_HANDLED; |
653 | + return IRQ_NONE; |
654 | } |
655 | EXPORT_SYMBOL_GPL(tmio_mmc_irq); |
656 | |
657 | @@ -774,7 +778,10 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host, |
658 | |
659 | /* Set transfer length / blocksize */ |
660 | sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); |
661 | - sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); |
662 | + if (host->mmc->max_blk_count >= SZ_64K) |
663 | + sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks); |
664 | + else |
665 | + sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); |
666 | |
667 | tmio_mmc_start_dma(host, data); |
668 | |
669 | diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c |
670 | index baca8f704a459..c3c1195021a2b 100644 |
671 | --- a/drivers/net/ethernet/altera/altera_tse_main.c |
672 | +++ b/drivers/net/ethernet/altera/altera_tse_main.c |
673 | @@ -714,8 +714,10 @@ static struct phy_device *connect_local_phy(struct net_device *dev) |
674 | |
675 | phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link, |
676 | priv->phy_iface); |
677 | - if (IS_ERR(phydev)) |
678 | + if (IS_ERR(phydev)) { |
679 | netdev_err(dev, "Could not attach to PHY\n"); |
680 | + phydev = NULL; |
681 | + } |
682 | |
683 | } else { |
684 | int ret; |
685 | diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c |
686 | index 91f48c0780734..f70cb4d3c6846 100644 |
687 | --- a/drivers/net/ethernet/ibm/ibmveth.c |
688 | +++ b/drivers/net/ethernet/ibm/ibmveth.c |
689 | @@ -1314,7 +1314,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) |
690 | unsigned long lpar_rc; |
691 | u16 mss = 0; |
692 | |
693 | -restart_poll: |
694 | while (frames_processed < budget) { |
695 | if (!ibmveth_rxq_pending_buffer(adapter)) |
696 | break; |
697 | @@ -1402,7 +1401,6 @@ restart_poll: |
698 | napi_reschedule(napi)) { |
699 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, |
700 | VIO_IRQ_DISABLE); |
701 | - goto restart_poll; |
702 | } |
703 | } |
704 | |
705 | diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h |
706 | index a32ded5b4f416..42d284669b03a 100644 |
707 | --- a/drivers/net/hyperv/hyperv_net.h |
708 | +++ b/drivers/net/hyperv/hyperv_net.h |
709 | @@ -144,6 +144,8 @@ struct hv_netvsc_packet { |
710 | u32 total_data_buflen; |
711 | }; |
712 | |
713 | +#define NETVSC_HASH_KEYLEN 40 |
714 | + |
715 | struct netvsc_device_info { |
716 | unsigned char mac_adr[ETH_ALEN]; |
717 | u32 num_chn; |
718 | @@ -151,6 +153,8 @@ struct netvsc_device_info { |
719 | u32 recv_sections; |
720 | u32 send_section_size; |
721 | u32 recv_section_size; |
722 | + |
723 | + u8 rss_key[NETVSC_HASH_KEYLEN]; |
724 | }; |
725 | |
726 | enum rndis_device_state { |
727 | @@ -160,8 +164,6 @@ enum rndis_device_state { |
728 | RNDIS_DEV_DATAINITIALIZED, |
729 | }; |
730 | |
731 | -#define NETVSC_HASH_KEYLEN 40 |
732 | - |
733 | struct rndis_device { |
734 | struct net_device *ndev; |
735 | |
736 | @@ -210,7 +212,9 @@ int netvsc_recv_callback(struct net_device *net, |
737 | void netvsc_channel_cb(void *context); |
738 | int netvsc_poll(struct napi_struct *napi, int budget); |
739 | |
740 | -int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev); |
741 | +int rndis_set_subchannel(struct net_device *ndev, |
742 | + struct netvsc_device *nvdev, |
743 | + struct netvsc_device_info *dev_info); |
744 | int rndis_filter_open(struct netvsc_device *nvdev); |
745 | int rndis_filter_close(struct netvsc_device *nvdev); |
746 | struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, |
747 | diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c |
748 | index fe01e141c8f87..1a942feab9548 100644 |
749 | --- a/drivers/net/hyperv/netvsc.c |
750 | +++ b/drivers/net/hyperv/netvsc.c |
751 | @@ -84,7 +84,7 @@ static void netvsc_subchan_work(struct work_struct *w) |
752 | |
753 | rdev = nvdev->extension; |
754 | if (rdev) { |
755 | - ret = rndis_set_subchannel(rdev->ndev, nvdev); |
756 | + ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL); |
757 | if (ret == 0) { |
758 | netif_device_attach(rdev->ndev); |
759 | } else { |
760 | diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c |
761 | index 1c37a821895b7..c9e2a986ccb72 100644 |
762 | --- a/drivers/net/hyperv/netvsc_drv.c |
763 | +++ b/drivers/net/hyperv/netvsc_drv.c |
764 | @@ -856,6 +856,39 @@ static void netvsc_get_channels(struct net_device *net, |
765 | } |
766 | } |
767 | |
768 | +/* Alloc struct netvsc_device_info, and initialize it from either existing |
769 | + * struct netvsc_device, or from default values. |
770 | + */ |
771 | +static struct netvsc_device_info *netvsc_devinfo_get |
772 | + (struct netvsc_device *nvdev) |
773 | +{ |
774 | + struct netvsc_device_info *dev_info; |
775 | + |
776 | + dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC); |
777 | + |
778 | + if (!dev_info) |
779 | + return NULL; |
780 | + |
781 | + if (nvdev) { |
782 | + dev_info->num_chn = nvdev->num_chn; |
783 | + dev_info->send_sections = nvdev->send_section_cnt; |
784 | + dev_info->send_section_size = nvdev->send_section_size; |
785 | + dev_info->recv_sections = nvdev->recv_section_cnt; |
786 | + dev_info->recv_section_size = nvdev->recv_section_size; |
787 | + |
788 | + memcpy(dev_info->rss_key, nvdev->extension->rss_key, |
789 | + NETVSC_HASH_KEYLEN); |
790 | + } else { |
791 | + dev_info->num_chn = VRSS_CHANNEL_DEFAULT; |
792 | + dev_info->send_sections = NETVSC_DEFAULT_TX; |
793 | + dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE; |
794 | + dev_info->recv_sections = NETVSC_DEFAULT_RX; |
795 | + dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE; |
796 | + } |
797 | + |
798 | + return dev_info; |
799 | +} |
800 | + |
801 | static int netvsc_detach(struct net_device *ndev, |
802 | struct netvsc_device *nvdev) |
803 | { |
804 | @@ -907,7 +940,7 @@ static int netvsc_attach(struct net_device *ndev, |
805 | return PTR_ERR(nvdev); |
806 | |
807 | if (nvdev->num_chn > 1) { |
808 | - ret = rndis_set_subchannel(ndev, nvdev); |
809 | + ret = rndis_set_subchannel(ndev, nvdev, dev_info); |
810 | |
811 | /* if unavailable, just proceed with one queue */ |
812 | if (ret) { |
813 | @@ -941,7 +974,7 @@ static int netvsc_set_channels(struct net_device *net, |
814 | struct net_device_context *net_device_ctx = netdev_priv(net); |
815 | struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); |
816 | unsigned int orig, count = channels->combined_count; |
817 | - struct netvsc_device_info device_info; |
818 | + struct netvsc_device_info *device_info; |
819 | int ret; |
820 | |
821 | /* We do not support separate count for rx, tx, or other */ |
822 | @@ -960,24 +993,26 @@ static int netvsc_set_channels(struct net_device *net, |
823 | |
824 | orig = nvdev->num_chn; |
825 | |
826 | - memset(&device_info, 0, sizeof(device_info)); |
827 | - device_info.num_chn = count; |
828 | - device_info.send_sections = nvdev->send_section_cnt; |
829 | - device_info.send_section_size = nvdev->send_section_size; |
830 | - device_info.recv_sections = nvdev->recv_section_cnt; |
831 | - device_info.recv_section_size = nvdev->recv_section_size; |
832 | + device_info = netvsc_devinfo_get(nvdev); |
833 | + |
834 | + if (!device_info) |
835 | + return -ENOMEM; |
836 | + |
837 | + device_info->num_chn = count; |
838 | |
839 | ret = netvsc_detach(net, nvdev); |
840 | if (ret) |
841 | - return ret; |
842 | + goto out; |
843 | |
844 | - ret = netvsc_attach(net, &device_info); |
845 | + ret = netvsc_attach(net, device_info); |
846 | if (ret) { |
847 | - device_info.num_chn = orig; |
848 | - if (netvsc_attach(net, &device_info)) |
849 | + device_info->num_chn = orig; |
850 | + if (netvsc_attach(net, device_info)) |
851 | netdev_err(net, "restoring channel setting failed\n"); |
852 | } |
853 | |
854 | +out: |
855 | + kfree(device_info); |
856 | return ret; |
857 | } |
858 | |
859 | @@ -1044,48 +1079,45 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) |
860 | struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); |
861 | struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); |
862 | int orig_mtu = ndev->mtu; |
863 | - struct netvsc_device_info device_info; |
864 | + struct netvsc_device_info *device_info; |
865 | int ret = 0; |
866 | |
867 | if (!nvdev || nvdev->destroy) |
868 | return -ENODEV; |
869 | |
870 | + device_info = netvsc_devinfo_get(nvdev); |
871 | + |
872 | + if (!device_info) |
873 | + return -ENOMEM; |
874 | + |
875 | /* Change MTU of underlying VF netdev first. */ |
876 | if (vf_netdev) { |
877 | ret = dev_set_mtu(vf_netdev, mtu); |
878 | if (ret) |
879 | - return ret; |
880 | + goto out; |
881 | } |
882 | |
883 | - memset(&device_info, 0, sizeof(device_info)); |
884 | - device_info.num_chn = nvdev->num_chn; |
885 | - device_info.send_sections = nvdev->send_section_cnt; |
886 | - device_info.send_section_size = nvdev->send_section_size; |
887 | - device_info.recv_sections = nvdev->recv_section_cnt; |
888 | - device_info.recv_section_size = nvdev->recv_section_size; |
889 | - |
890 | ret = netvsc_detach(ndev, nvdev); |
891 | if (ret) |
892 | goto rollback_vf; |
893 | |
894 | ndev->mtu = mtu; |
895 | |
896 | - ret = netvsc_attach(ndev, &device_info); |
897 | - if (ret) |
898 | - goto rollback; |
899 | - |
900 | - return 0; |
901 | + ret = netvsc_attach(ndev, device_info); |
902 | + if (!ret) |
903 | + goto out; |
904 | |
905 | -rollback: |
906 | /* Attempt rollback to original MTU */ |
907 | ndev->mtu = orig_mtu; |
908 | |
909 | - if (netvsc_attach(ndev, &device_info)) |
910 | + if (netvsc_attach(ndev, device_info)) |
911 | netdev_err(ndev, "restoring mtu failed\n"); |
912 | rollback_vf: |
913 | if (vf_netdev) |
914 | dev_set_mtu(vf_netdev, orig_mtu); |
915 | |
916 | +out: |
917 | + kfree(device_info); |
918 | return ret; |
919 | } |
920 | |
921 | @@ -1690,7 +1722,7 @@ static int netvsc_set_ringparam(struct net_device *ndev, |
922 | { |
923 | struct net_device_context *ndevctx = netdev_priv(ndev); |
924 | struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); |
925 | - struct netvsc_device_info device_info; |
926 | + struct netvsc_device_info *device_info; |
927 | struct ethtool_ringparam orig; |
928 | u32 new_tx, new_rx; |
929 | int ret = 0; |
930 | @@ -1710,26 +1742,29 @@ static int netvsc_set_ringparam(struct net_device *ndev, |
931 | new_rx == orig.rx_pending) |
932 | return 0; /* no change */ |
933 | |
934 | - memset(&device_info, 0, sizeof(device_info)); |
935 | - device_info.num_chn = nvdev->num_chn; |
936 | - device_info.send_sections = new_tx; |
937 | - device_info.send_section_size = nvdev->send_section_size; |
938 | - device_info.recv_sections = new_rx; |
939 | - device_info.recv_section_size = nvdev->recv_section_size; |
940 | + device_info = netvsc_devinfo_get(nvdev); |
941 | + |
942 | + if (!device_info) |
943 | + return -ENOMEM; |
944 | + |
945 | + device_info->send_sections = new_tx; |
946 | + device_info->recv_sections = new_rx; |
947 | |
948 | ret = netvsc_detach(ndev, nvdev); |
949 | if (ret) |
950 | - return ret; |
951 | + goto out; |
952 | |
953 | - ret = netvsc_attach(ndev, &device_info); |
954 | + ret = netvsc_attach(ndev, device_info); |
955 | if (ret) { |
956 | - device_info.send_sections = orig.tx_pending; |
957 | - device_info.recv_sections = orig.rx_pending; |
958 | + device_info->send_sections = orig.tx_pending; |
959 | + device_info->recv_sections = orig.rx_pending; |
960 | |
961 | - if (netvsc_attach(ndev, &device_info)) |
962 | + if (netvsc_attach(ndev, device_info)) |
963 | netdev_err(ndev, "restoring ringparam failed"); |
964 | } |
965 | |
966 | +out: |
967 | + kfree(device_info); |
968 | return ret; |
969 | } |
970 | |
971 | @@ -2158,7 +2193,7 @@ static int netvsc_probe(struct hv_device *dev, |
972 | { |
973 | struct net_device *net = NULL; |
974 | struct net_device_context *net_device_ctx; |
975 | - struct netvsc_device_info device_info; |
976 | + struct netvsc_device_info *device_info = NULL; |
977 | struct netvsc_device *nvdev; |
978 | int ret = -ENOMEM; |
979 | |
980 | @@ -2205,21 +2240,21 @@ static int netvsc_probe(struct hv_device *dev, |
981 | netif_set_real_num_rx_queues(net, 1); |
982 | |
983 | /* Notify the netvsc driver of the new device */ |
984 | - memset(&device_info, 0, sizeof(device_info)); |
985 | - device_info.num_chn = VRSS_CHANNEL_DEFAULT; |
986 | - device_info.send_sections = NETVSC_DEFAULT_TX; |
987 | - device_info.send_section_size = NETVSC_SEND_SECTION_SIZE; |
988 | - device_info.recv_sections = NETVSC_DEFAULT_RX; |
989 | - device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE; |
990 | - |
991 | - nvdev = rndis_filter_device_add(dev, &device_info); |
992 | + device_info = netvsc_devinfo_get(NULL); |
993 | + |
994 | + if (!device_info) { |
995 | + ret = -ENOMEM; |
996 | + goto devinfo_failed; |
997 | + } |
998 | + |
999 | + nvdev = rndis_filter_device_add(dev, device_info); |
1000 | if (IS_ERR(nvdev)) { |
1001 | ret = PTR_ERR(nvdev); |
1002 | netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); |
1003 | goto rndis_failed; |
1004 | } |
1005 | |
1006 | - memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); |
1007 | + memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN); |
1008 | |
1009 | /* We must get rtnl lock before scheduling nvdev->subchan_work, |
1010 | * otherwise netvsc_subchan_work() can get rtnl lock first and wait |
1011 | @@ -2257,12 +2292,16 @@ static int netvsc_probe(struct hv_device *dev, |
1012 | |
1013 | list_add(&net_device_ctx->list, &netvsc_dev_list); |
1014 | rtnl_unlock(); |
1015 | + |
1016 | + kfree(device_info); |
1017 | return 0; |
1018 | |
1019 | register_failed: |
1020 | rtnl_unlock(); |
1021 | rndis_filter_device_remove(dev, nvdev); |
1022 | rndis_failed: |
1023 | + kfree(device_info); |
1024 | +devinfo_failed: |
1025 | free_percpu(net_device_ctx->vf_stats); |
1026 | no_stats: |
1027 | hv_set_drvdata(dev, NULL); |
1028 | diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c |
1029 | index 2a5209f23f296..53c6039bffb67 100644 |
1030 | --- a/drivers/net/hyperv/rndis_filter.c |
1031 | +++ b/drivers/net/hyperv/rndis_filter.c |
1032 | @@ -715,8 +715,8 @@ cleanup: |
1033 | return ret; |
1034 | } |
1035 | |
1036 | -int rndis_filter_set_rss_param(struct rndis_device *rdev, |
1037 | - const u8 *rss_key) |
1038 | +static int rndis_set_rss_param_msg(struct rndis_device *rdev, |
1039 | + const u8 *rss_key, u16 flag) |
1040 | { |
1041 | struct net_device *ndev = rdev->ndev; |
1042 | struct rndis_request *request; |
1043 | @@ -745,7 +745,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, |
1044 | rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS; |
1045 | rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2; |
1046 | rssp->hdr.size = sizeof(struct ndis_recv_scale_param); |
1047 | - rssp->flag = 0; |
1048 | + rssp->flag = flag; |
1049 | rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 | |
1050 | NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 | |
1051 | NDIS_HASH_TCP_IPV6; |
1052 | @@ -770,9 +770,12 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, |
1053 | |
1054 | wait_for_completion(&request->wait_event); |
1055 | set_complete = &request->response_msg.msg.set_complete; |
1056 | - if (set_complete->status == RNDIS_STATUS_SUCCESS) |
1057 | - memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN); |
1058 | - else { |
1059 | + if (set_complete->status == RNDIS_STATUS_SUCCESS) { |
1060 | + if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) && |
1061 | + !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED)) |
1062 | + memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN); |
1063 | + |
1064 | + } else { |
1065 | netdev_err(ndev, "Fail to set RSS parameters:0x%x\n", |
1066 | set_complete->status); |
1067 | ret = -EINVAL; |
1068 | @@ -783,6 +786,16 @@ cleanup: |
1069 | return ret; |
1070 | } |
1071 | |
1072 | +int rndis_filter_set_rss_param(struct rndis_device *rdev, |
1073 | + const u8 *rss_key) |
1074 | +{ |
1075 | + /* Disable RSS before change */ |
1076 | + rndis_set_rss_param_msg(rdev, rss_key, |
1077 | + NDIS_RSS_PARAM_FLAG_DISABLE_RSS); |
1078 | + |
1079 | + return rndis_set_rss_param_msg(rdev, rss_key, 0); |
1080 | +} |
1081 | + |
1082 | static int rndis_filter_query_device_link_status(struct rndis_device *dev, |
1083 | struct netvsc_device *net_device) |
1084 | { |
1085 | @@ -1062,7 +1075,9 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) |
1086 | * This breaks overlap of processing the host message for the |
1087 | * new primary channel with the initialization of sub-channels. |
1088 | */ |
1089 | -int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) |
1090 | +int rndis_set_subchannel(struct net_device *ndev, |
1091 | + struct netvsc_device *nvdev, |
1092 | + struct netvsc_device_info *dev_info) |
1093 | { |
1094 | struct nvsp_message *init_packet = &nvdev->channel_init_pkt; |
1095 | struct net_device_context *ndev_ctx = netdev_priv(ndev); |
1096 | @@ -1103,7 +1118,10 @@ int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) |
1097 | atomic_read(&nvdev->open_chn) == nvdev->num_chn); |
1098 | |
1099 | /* ignore failues from setting rss parameters, still have channels */ |
1100 | - rndis_filter_set_rss_param(rdev, netvsc_hash_key); |
1101 | + if (dev_info) |
1102 | + rndis_filter_set_rss_param(rdev, dev_info->rss_key); |
1103 | + else |
1104 | + rndis_filter_set_rss_param(rdev, netvsc_hash_key); |
1105 | |
1106 | netif_set_real_num_tx_queues(ndev, nvdev->num_chn); |
1107 | netif_set_real_num_rx_queues(ndev, nvdev->num_chn); |
1108 | diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c |
1109 | index b654f05b2ccd0..3d93993e74da0 100644 |
1110 | --- a/drivers/net/usb/asix_devices.c |
1111 | +++ b/drivers/net/usb/asix_devices.c |
1112 | @@ -739,8 +739,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) |
1113 | asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0); |
1114 | chipcode &= AX_CHIPCODE_MASK; |
1115 | |
1116 | - (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) : |
1117 | - ax88772a_hw_reset(dev, 0); |
1118 | + ret = (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) : |
1119 | + ax88772a_hw_reset(dev, 0); |
1120 | + |
1121 | + if (ret < 0) { |
1122 | + netdev_dbg(dev->net, "Failed to reset AX88772: %d\n", ret); |
1123 | + return ret; |
1124 | + } |
1125 | |
1126 | /* Read PHYID register *AFTER* the PHY was reset properly */ |
1127 | phyid = asix_get_phyid(dev); |
1128 | diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c |
1129 | index 815509dbed846..da8f5ad30c719 100644 |
1130 | --- a/drivers/nvme/host/multipath.c |
1131 | +++ b/drivers/nvme/host/multipath.c |
1132 | @@ -531,8 +531,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) |
1133 | timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0); |
1134 | ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) + |
1135 | ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc); |
1136 | - if (!(ctrl->anacap & (1 << 6))) |
1137 | - ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32); |
1138 | + ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32); |
1139 | |
1140 | if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) { |
1141 | dev_err(ctrl->device, |
1142 | diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c |
1143 | index b6a28de682e85..0939a4e178fb9 100644 |
1144 | --- a/drivers/nvme/host/rdma.c |
1145 | +++ b/drivers/nvme/host/rdma.c |
1146 | @@ -1672,18 +1672,28 @@ static enum blk_eh_timer_return |
1147 | nvme_rdma_timeout(struct request *rq, bool reserved) |
1148 | { |
1149 | struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); |
1150 | + struct nvme_rdma_queue *queue = req->queue; |
1151 | + struct nvme_rdma_ctrl *ctrl = queue->ctrl; |
1152 | |
1153 | - dev_warn(req->queue->ctrl->ctrl.device, |
1154 | - "I/O %d QID %d timeout, reset controller\n", |
1155 | - rq->tag, nvme_rdma_queue_idx(req->queue)); |
1156 | + dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n", |
1157 | + rq->tag, nvme_rdma_queue_idx(queue)); |
1158 | |
1159 | - /* queue error recovery */ |
1160 | - nvme_rdma_error_recovery(req->queue->ctrl); |
1161 | + if (ctrl->ctrl.state != NVME_CTRL_LIVE) { |
1162 | + /* |
1163 | + * Teardown immediately if controller times out while starting |
1164 | + * or we are already started error recovery. all outstanding |
1165 | + * requests are completed on shutdown, so we return BLK_EH_DONE. |
1166 | + */ |
1167 | + flush_work(&ctrl->err_work); |
1168 | + nvme_rdma_teardown_io_queues(ctrl, false); |
1169 | + nvme_rdma_teardown_admin_queue(ctrl, false); |
1170 | + return BLK_EH_DONE; |
1171 | + } |
1172 | |
1173 | - /* fail with DNR on cmd timeout */ |
1174 | - nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR; |
1175 | + dev_warn(ctrl->ctrl.device, "starting error recovery\n"); |
1176 | + nvme_rdma_error_recovery(ctrl); |
1177 | |
1178 | - return BLK_EH_DONE; |
1179 | + return BLK_EH_RESET_TIMER; |
1180 | } |
1181 | |
1182 | static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, |
1183 | diff --git a/drivers/phy/qualcomm/phy-ath79-usb.c b/drivers/phy/qualcomm/phy-ath79-usb.c |
1184 | index 6fd6e07ab345f..09a77e556eceb 100644 |
1185 | --- a/drivers/phy/qualcomm/phy-ath79-usb.c |
1186 | +++ b/drivers/phy/qualcomm/phy-ath79-usb.c |
1187 | @@ -31,7 +31,7 @@ static int ath79_usb_phy_power_on(struct phy *phy) |
1188 | |
1189 | err = reset_control_deassert(priv->reset); |
1190 | if (err && priv->no_suspend_override) |
1191 | - reset_control_assert(priv->no_suspend_override); |
1192 | + reset_control_deassert(priv->no_suspend_override); |
1193 | |
1194 | return err; |
1195 | } |
1196 | @@ -69,7 +69,7 @@ static int ath79_usb_phy_probe(struct platform_device *pdev) |
1197 | if (!priv) |
1198 | return -ENOMEM; |
1199 | |
1200 | - priv->reset = devm_reset_control_get(&pdev->dev, "usb-phy"); |
1201 | + priv->reset = devm_reset_control_get(&pdev->dev, "phy"); |
1202 | if (IS_ERR(priv->reset)) |
1203 | return PTR_ERR(priv->reset); |
1204 | |
1205 | diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c |
1206 | index 8a004036e3d72..9bd2bd8dc2be2 100644 |
1207 | --- a/drivers/scsi/csiostor/csio_attr.c |
1208 | +++ b/drivers/scsi/csiostor/csio_attr.c |
1209 | @@ -594,12 +594,12 @@ csio_vport_create(struct fc_vport *fc_vport, bool disable) |
1210 | } |
1211 | |
1212 | fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING); |
1213 | + ln->fc_vport = fc_vport; |
1214 | |
1215 | if (csio_fcoe_alloc_vnp(hw, ln)) |
1216 | goto error; |
1217 | |
1218 | *(struct csio_lnode **)fc_vport->dd_data = ln; |
1219 | - ln->fc_vport = fc_vport; |
1220 | if (!fc_vport->node_name) |
1221 | fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln)); |
1222 | if (!fc_vport->port_name) |
1223 | diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c |
1224 | index fadc99cb60df9..a1551ab336165 100644 |
1225 | --- a/drivers/scsi/libsas/sas_expander.c |
1226 | +++ b/drivers/scsi/libsas/sas_expander.c |
1227 | @@ -829,6 +829,7 @@ static struct domain_device *sas_ex_discover_end_dev( |
1228 | rphy = sas_end_device_alloc(phy->port); |
1229 | if (!rphy) |
1230 | goto out_free; |
1231 | + rphy->identify.phy_identifier = phy_id; |
1232 | |
1233 | child->rphy = rphy; |
1234 | get_device(&rphy->dev); |
1235 | @@ -856,6 +857,7 @@ static struct domain_device *sas_ex_discover_end_dev( |
1236 | |
1237 | child->rphy = rphy; |
1238 | get_device(&rphy->dev); |
1239 | + rphy->identify.phy_identifier = phy_id; |
1240 | sas_fill_in_rphy(child, rphy); |
1241 | |
1242 | list_add_tail(&child->disco_list_node, &parent->port->disco_list); |
1243 | diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c |
1244 | index 918ae18ef8a82..ca62117a2d131 100644 |
1245 | --- a/drivers/scsi/lpfc/lpfc_nvme.c |
1246 | +++ b/drivers/scsi/lpfc/lpfc_nvme.c |
1247 | @@ -297,7 +297,8 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport) |
1248 | lport); |
1249 | |
1250 | /* release any threads waiting for the unreg to complete */ |
1251 | - complete(&lport->lport_unreg_done); |
1252 | + if (lport->vport->localport) |
1253 | + complete(lport->lport_unreg_cmp); |
1254 | } |
1255 | |
1256 | /* lpfc_nvme_remoteport_delete |
1257 | @@ -2556,7 +2557,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) |
1258 | */ |
1259 | void |
1260 | lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, |
1261 | - struct lpfc_nvme_lport *lport) |
1262 | + struct lpfc_nvme_lport *lport, |
1263 | + struct completion *lport_unreg_cmp) |
1264 | { |
1265 | #if (IS_ENABLED(CONFIG_NVME_FC)) |
1266 | u32 wait_tmo; |
1267 | @@ -2568,8 +2570,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, |
1268 | */ |
1269 | wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000); |
1270 | while (true) { |
1271 | - ret = wait_for_completion_timeout(&lport->lport_unreg_done, |
1272 | - wait_tmo); |
1273 | + ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo); |
1274 | if (unlikely(!ret)) { |
1275 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, |
1276 | "6176 Lport %p Localport %p wait " |
1277 | @@ -2603,12 +2604,12 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport) |
1278 | struct lpfc_nvme_lport *lport; |
1279 | struct lpfc_nvme_ctrl_stat *cstat; |
1280 | int ret; |
1281 | + DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp); |
1282 | |
1283 | if (vport->nvmei_support == 0) |
1284 | return; |
1285 | |
1286 | localport = vport->localport; |
1287 | - vport->localport = NULL; |
1288 | lport = (struct lpfc_nvme_lport *)localport->private; |
1289 | cstat = lport->cstat; |
1290 | |
1291 | @@ -2619,13 +2620,14 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport) |
1292 | /* lport's rport list is clear. Unregister |
1293 | * lport and release resources. |
1294 | */ |
1295 | - init_completion(&lport->lport_unreg_done); |
1296 | + lport->lport_unreg_cmp = &lport_unreg_cmp; |
1297 | ret = nvme_fc_unregister_localport(localport); |
1298 | |
1299 | /* Wait for completion. This either blocks |
1300 | * indefinitely or succeeds |
1301 | */ |
1302 | - lpfc_nvme_lport_unreg_wait(vport, lport); |
1303 | + lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp); |
1304 | + vport->localport = NULL; |
1305 | kfree(cstat); |
1306 | |
1307 | /* Regardless of the unregister upcall response, clear |
1308 | diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h |
1309 | index cfd4719be25c3..b234d02989942 100644 |
1310 | --- a/drivers/scsi/lpfc/lpfc_nvme.h |
1311 | +++ b/drivers/scsi/lpfc/lpfc_nvme.h |
1312 | @@ -50,7 +50,7 @@ struct lpfc_nvme_ctrl_stat { |
1313 | /* Declare nvme-based local and remote port definitions. */ |
1314 | struct lpfc_nvme_lport { |
1315 | struct lpfc_vport *vport; |
1316 | - struct completion lport_unreg_done; |
1317 | + struct completion *lport_unreg_cmp; |
1318 | /* Add stats counters here */ |
1319 | struct lpfc_nvme_ctrl_stat *cstat; |
1320 | atomic_t fc4NvmeLsRequests; |
1321 | diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c |
1322 | index b766afe10d3d7..e2575c8ec93e8 100644 |
1323 | --- a/drivers/scsi/lpfc/lpfc_nvmet.c |
1324 | +++ b/drivers/scsi/lpfc/lpfc_nvmet.c |
1325 | @@ -1003,7 +1003,8 @@ lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport) |
1326 | struct lpfc_nvmet_tgtport *tport = targetport->private; |
1327 | |
1328 | /* release any threads waiting for the unreg to complete */ |
1329 | - complete(&tport->tport_unreg_done); |
1330 | + if (tport->phba->targetport) |
1331 | + complete(tport->tport_unreg_cmp); |
1332 | } |
1333 | |
1334 | static void |
1335 | @@ -1700,6 +1701,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) |
1336 | struct lpfc_nvmet_tgtport *tgtp; |
1337 | struct lpfc_queue *wq; |
1338 | uint32_t qidx; |
1339 | + DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp); |
1340 | |
1341 | if (phba->nvmet_support == 0) |
1342 | return; |
1343 | @@ -1709,9 +1711,9 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) |
1344 | wq = phba->sli4_hba.nvme_wq[qidx]; |
1345 | lpfc_nvmet_wqfull_flush(phba, wq, NULL); |
1346 | } |
1347 | - init_completion(&tgtp->tport_unreg_done); |
1348 | + tgtp->tport_unreg_cmp = &tport_unreg_cmp; |
1349 | nvmet_fc_unregister_targetport(phba->targetport); |
1350 | - wait_for_completion_timeout(&tgtp->tport_unreg_done, 5); |
1351 | + wait_for_completion_timeout(&tport_unreg_cmp, 5); |
1352 | lpfc_nvmet_cleanup_io_context(phba); |
1353 | } |
1354 | phba->targetport = NULL; |
1355 | diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h |
1356 | index 1aaff63f1f419..0ec1082ce7ef6 100644 |
1357 | --- a/drivers/scsi/lpfc/lpfc_nvmet.h |
1358 | +++ b/drivers/scsi/lpfc/lpfc_nvmet.h |
1359 | @@ -34,7 +34,7 @@ |
1360 | /* Used for NVME Target */ |
1361 | struct lpfc_nvmet_tgtport { |
1362 | struct lpfc_hba *phba; |
1363 | - struct completion tport_unreg_done; |
1364 | + struct completion *tport_unreg_cmp; |
1365 | |
1366 | /* Stats counters - lpfc_nvmet_unsol_ls_buffer */ |
1367 | atomic_t rcv_ls_req_in; |
1368 | diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c |
1369 | index 99073325b0c00..45c7f829e3872 100644 |
1370 | --- a/drivers/staging/android/ion/ion.c |
1371 | +++ b/drivers/staging/android/ion/ion.c |
1372 | @@ -237,10 +237,10 @@ static void ion_dma_buf_detatch(struct dma_buf *dmabuf, |
1373 | struct ion_dma_buf_attachment *a = attachment->priv; |
1374 | struct ion_buffer *buffer = dmabuf->priv; |
1375 | |
1376 | - free_duped_table(a->table); |
1377 | mutex_lock(&buffer->lock); |
1378 | list_del(&a->list); |
1379 | mutex_unlock(&buffer->lock); |
1380 | + free_duped_table(a->table); |
1381 | |
1382 | kfree(a); |
1383 | } |
1384 | diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h |
1385 | index bcc8dfa8e6728..9efb4dcb9d3a8 100644 |
1386 | --- a/drivers/staging/rtl8723bs/include/ieee80211.h |
1387 | +++ b/drivers/staging/rtl8723bs/include/ieee80211.h |
1388 | @@ -850,18 +850,18 @@ enum ieee80211_state { |
1389 | #define IP_FMT "%pI4" |
1390 | #define IP_ARG(x) (x) |
1391 | |
1392 | -extern __inline int is_multicast_mac_addr(const u8 *addr) |
1393 | +static inline int is_multicast_mac_addr(const u8 *addr) |
1394 | { |
1395 | return ((addr[0] != 0xff) && (0x01 & addr[0])); |
1396 | } |
1397 | |
1398 | -extern __inline int is_broadcast_mac_addr(const u8 *addr) |
1399 | +static inline int is_broadcast_mac_addr(const u8 *addr) |
1400 | { |
1401 | return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \ |
1402 | (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff)); |
1403 | } |
1404 | |
1405 | -extern __inline int is_zero_mac_addr(const u8 *addr) |
1406 | +static inline int is_zero_mac_addr(const u8 *addr) |
1407 | { |
1408 | return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) && \ |
1409 | (addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00)); |
1410 | diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c |
1411 | index 284cf2c5a8fd9..8e1cf4d789be1 100644 |
1412 | --- a/drivers/thermal/int340x_thermal/processor_thermal_device.c |
1413 | +++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c |
1414 | @@ -84,7 +84,12 @@ static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \ |
1415 | struct pci_dev *pci_dev; \ |
1416 | struct platform_device *pdev; \ |
1417 | struct proc_thermal_device *proc_dev; \ |
1418 | -\ |
1419 | + \ |
1420 | + if (proc_thermal_emum_mode == PROC_THERMAL_NONE) { \ |
1421 | + dev_warn(dev, "Attempted to get power limit before device was initialized!\n"); \ |
1422 | + return 0; \ |
1423 | + } \ |
1424 | + \ |
1425 | if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \ |
1426 | pdev = to_platform_device(dev); \ |
1427 | proc_dev = platform_get_drvdata(pdev); \ |
1428 | @@ -298,11 +303,6 @@ static int proc_thermal_add(struct device *dev, |
1429 | *priv = proc_priv; |
1430 | |
1431 | ret = proc_thermal_read_ppcc(proc_priv); |
1432 | - if (!ret) { |
1433 | - ret = sysfs_create_group(&dev->kobj, |
1434 | - &power_limit_attribute_group); |
1435 | - |
1436 | - } |
1437 | if (ret) |
1438 | return ret; |
1439 | |
1440 | @@ -316,8 +316,7 @@ static int proc_thermal_add(struct device *dev, |
1441 | |
1442 | proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops); |
1443 | if (IS_ERR(proc_priv->int340x_zone)) { |
1444 | - ret = PTR_ERR(proc_priv->int340x_zone); |
1445 | - goto remove_group; |
1446 | + return PTR_ERR(proc_priv->int340x_zone); |
1447 | } else |
1448 | ret = 0; |
1449 | |
1450 | @@ -331,9 +330,6 @@ static int proc_thermal_add(struct device *dev, |
1451 | |
1452 | remove_zone: |
1453 | int340x_thermal_zone_remove(proc_priv->int340x_zone); |
1454 | -remove_group: |
1455 | - sysfs_remove_group(&proc_priv->dev->kobj, |
1456 | - &power_limit_attribute_group); |
1457 | |
1458 | return ret; |
1459 | } |
1460 | @@ -364,7 +360,10 @@ static int int3401_add(struct platform_device *pdev) |
1461 | platform_set_drvdata(pdev, proc_priv); |
1462 | proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV; |
1463 | |
1464 | - return 0; |
1465 | + dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PLATFORM_DEV\n"); |
1466 | + |
1467 | + return sysfs_create_group(&pdev->dev.kobj, |
1468 | + &power_limit_attribute_group); |
1469 | } |
1470 | |
1471 | static int int3401_remove(struct platform_device *pdev) |
1472 | @@ -423,7 +422,7 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, |
1473 | proc_priv->soc_dts = intel_soc_dts_iosf_init( |
1474 | INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0); |
1475 | |
1476 | - if (proc_priv->soc_dts && pdev->irq) { |
1477 | + if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) { |
1478 | ret = pci_enable_msi(pdev); |
1479 | if (!ret) { |
1480 | ret = request_threaded_irq(pdev->irq, NULL, |
1481 | @@ -441,7 +440,10 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, |
1482 | dev_err(&pdev->dev, "No auxiliary DTSs enabled\n"); |
1483 | } |
1484 | |
1485 | - return 0; |
1486 | + dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PCI\n"); |
1487 | + |
1488 | + return sysfs_create_group(&pdev->dev.kobj, |
1489 | + &power_limit_attribute_group); |
1490 | } |
1491 | |
1492 | static void proc_thermal_pci_remove(struct pci_dev *pdev) |
1493 | diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c |
1494 | index 7d030c2e42ffd..50b6746a8b5d7 100644 |
1495 | --- a/drivers/tty/serial/fsl_lpuart.c |
1496 | +++ b/drivers/tty/serial/fsl_lpuart.c |
1497 | @@ -1695,7 +1695,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, |
1498 | } |
1499 | |
1500 | /* ask the core to calculate the divisor */ |
1501 | - baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16); |
1502 | + baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 4); |
1503 | |
1504 | spin_lock_irqsave(&sport->port.lock, flags); |
1505 | |
1506 | diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c |
1507 | index 1515074e18fb6..35d1f6fa0e3c3 100644 |
1508 | --- a/drivers/tty/serial/qcom_geni_serial.c |
1509 | +++ b/drivers/tty/serial/qcom_geni_serial.c |
1510 | @@ -221,7 +221,7 @@ static unsigned int qcom_geni_serial_get_mctrl(struct uart_port *uport) |
1511 | unsigned int mctrl = TIOCM_DSR | TIOCM_CAR; |
1512 | u32 geni_ios; |
1513 | |
1514 | - if (uart_console(uport) || !uart_cts_enabled(uport)) { |
1515 | + if (uart_console(uport)) { |
1516 | mctrl |= TIOCM_CTS; |
1517 | } else { |
1518 | geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS); |
1519 | @@ -237,7 +237,7 @@ static void qcom_geni_serial_set_mctrl(struct uart_port *uport, |
1520 | { |
1521 | u32 uart_manual_rfr = 0; |
1522 | |
1523 | - if (uart_console(uport) || !uart_cts_enabled(uport)) |
1524 | + if (uart_console(uport)) |
1525 | return; |
1526 | |
1527 | if (!(mctrl & TIOCM_RTS)) |
1528 | diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c |
1529 | index 0db90f6f4aa81..700fb626ad03b 100644 |
1530 | --- a/drivers/usb/dwc3/gadget.c |
1531 | +++ b/drivers/usb/dwc3/gadget.c |
1532 | @@ -1864,6 +1864,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc) |
1533 | |
1534 | /* begin to receive SETUP packets */ |
1535 | dwc->ep0state = EP0_SETUP_PHASE; |
1536 | + dwc->link_state = DWC3_LINK_STATE_SS_DIS; |
1537 | dwc3_ep0_out_start(dwc); |
1538 | |
1539 | dwc3_gadget_enable_irq(dwc); |
1540 | @@ -3274,6 +3275,8 @@ int dwc3_gadget_suspend(struct dwc3 *dwc) |
1541 | dwc3_disconnect_gadget(dwc); |
1542 | __dwc3_gadget_stop(dwc); |
1543 | |
1544 | + synchronize_irq(dwc->irq_gadget); |
1545 | + |
1546 | return 0; |
1547 | } |
1548 | |
1549 | diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c |
1550 | index 9cdef108fb1b3..ed68a4860b7d8 100644 |
1551 | --- a/drivers/usb/gadget/function/f_sourcesink.c |
1552 | +++ b/drivers/usb/gadget/function/f_sourcesink.c |
1553 | @@ -838,7 +838,7 @@ static struct usb_function *source_sink_alloc_func( |
1554 | |
1555 | ss = kzalloc(sizeof(*ss), GFP_KERNEL); |
1556 | if (!ss) |
1557 | - return NULL; |
1558 | + return ERR_PTR(-ENOMEM); |
1559 | |
1560 | ss_opts = container_of(fi, struct f_ss_opts, func_inst); |
1561 | |
1562 | diff --git a/fs/direct-io.c b/fs/direct-io.c |
1563 | index 1991460360930..1abb7634b2d58 100644 |
1564 | --- a/fs/direct-io.c |
1565 | +++ b/fs/direct-io.c |
1566 | @@ -679,6 +679,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio, |
1567 | unsigned long fs_count; /* Number of filesystem-sized blocks */ |
1568 | int create; |
1569 | unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor; |
1570 | + loff_t i_size; |
1571 | |
1572 | /* |
1573 | * If there was a memory error and we've overwritten all the |
1574 | @@ -708,8 +709,8 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio, |
1575 | */ |
1576 | create = dio->op == REQ_OP_WRITE; |
1577 | if (dio->flags & DIO_SKIP_HOLES) { |
1578 | - if (fs_startblk <= ((i_size_read(dio->inode) - 1) >> |
1579 | - i_blkbits)) |
1580 | + i_size = i_size_read(dio->inode); |
1581 | + if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits) |
1582 | create = 0; |
1583 | } |
1584 | |
1585 | diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c |
1586 | index 471d863958bc2..82ce6d4f7e314 100644 |
1587 | --- a/fs/fs-writeback.c |
1588 | +++ b/fs/fs-writeback.c |
1589 | @@ -331,11 +331,22 @@ struct inode_switch_wbs_context { |
1590 | struct work_struct work; |
1591 | }; |
1592 | |
1593 | +static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) |
1594 | +{ |
1595 | + down_write(&bdi->wb_switch_rwsem); |
1596 | +} |
1597 | + |
1598 | +static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) |
1599 | +{ |
1600 | + up_write(&bdi->wb_switch_rwsem); |
1601 | +} |
1602 | + |
1603 | static void inode_switch_wbs_work_fn(struct work_struct *work) |
1604 | { |
1605 | struct inode_switch_wbs_context *isw = |
1606 | container_of(work, struct inode_switch_wbs_context, work); |
1607 | struct inode *inode = isw->inode; |
1608 | + struct backing_dev_info *bdi = inode_to_bdi(inode); |
1609 | struct address_space *mapping = inode->i_mapping; |
1610 | struct bdi_writeback *old_wb = inode->i_wb; |
1611 | struct bdi_writeback *new_wb = isw->new_wb; |
1612 | @@ -343,6 +354,12 @@ static void inode_switch_wbs_work_fn(struct work_struct *work) |
1613 | bool switched = false; |
1614 | void **slot; |
1615 | |
1616 | + /* |
1617 | + * If @inode switches cgwb membership while sync_inodes_sb() is |
1618 | + * being issued, sync_inodes_sb() might miss it. Synchronize. |
1619 | + */ |
1620 | + down_read(&bdi->wb_switch_rwsem); |
1621 | + |
1622 | /* |
1623 | * By the time control reaches here, RCU grace period has passed |
1624 | * since I_WB_SWITCH assertion and all wb stat update transactions |
1625 | @@ -435,6 +452,8 @@ skip_switch: |
1626 | spin_unlock(&new_wb->list_lock); |
1627 | spin_unlock(&old_wb->list_lock); |
1628 | |
1629 | + up_read(&bdi->wb_switch_rwsem); |
1630 | + |
1631 | if (switched) { |
1632 | wb_wakeup(new_wb); |
1633 | wb_put(old_wb); |
1634 | @@ -475,9 +494,18 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id) |
1635 | if (inode->i_state & I_WB_SWITCH) |
1636 | return; |
1637 | |
1638 | + /* |
1639 | + * Avoid starting new switches while sync_inodes_sb() is in |
1640 | + * progress. Otherwise, if the down_write protected issue path |
1641 | + * blocks heavily, we might end up starting a large number of |
1642 | + * switches which will block on the rwsem. |
1643 | + */ |
1644 | + if (!down_read_trylock(&bdi->wb_switch_rwsem)) |
1645 | + return; |
1646 | + |
1647 | isw = kzalloc(sizeof(*isw), GFP_ATOMIC); |
1648 | if (!isw) |
1649 | - return; |
1650 | + goto out_unlock; |
1651 | |
1652 | /* find and pin the new wb */ |
1653 | rcu_read_lock(); |
1654 | @@ -511,12 +539,14 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id) |
1655 | * Let's continue after I_WB_SWITCH is guaranteed to be visible. |
1656 | */ |
1657 | call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn); |
1658 | - return; |
1659 | + goto out_unlock; |
1660 | |
1661 | out_free: |
1662 | if (isw->new_wb) |
1663 | wb_put(isw->new_wb); |
1664 | kfree(isw); |
1665 | +out_unlock: |
1666 | + up_read(&bdi->wb_switch_rwsem); |
1667 | } |
1668 | |
1669 | /** |
1670 | @@ -894,6 +924,9 @@ fs_initcall(cgroup_writeback_init); |
1671 | |
1672 | #else /* CONFIG_CGROUP_WRITEBACK */ |
1673 | |
1674 | +static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { } |
1675 | +static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { } |
1676 | + |
1677 | static struct bdi_writeback * |
1678 | locked_inode_to_wb_and_lock_list(struct inode *inode) |
1679 | __releases(&inode->i_lock) |
1680 | @@ -2420,8 +2453,11 @@ void sync_inodes_sb(struct super_block *sb) |
1681 | return; |
1682 | WARN_ON(!rwsem_is_locked(&sb->s_umount)); |
1683 | |
1684 | + /* protect against inode wb switch, see inode_switch_wbs_work_fn() */ |
1685 | + bdi_down_write_wb_switch_rwsem(bdi); |
1686 | bdi_split_work_to_wbs(bdi, &work, false); |
1687 | wb_wait_for_completion(bdi, &done); |
1688 | + bdi_up_write_wb_switch_rwsem(bdi); |
1689 | |
1690 | wait_sb_inodes(sb); |
1691 | } |
1692 | diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c |
1693 | index 32920a10100e2..a7fa037b876b7 100644 |
1694 | --- a/fs/hugetlbfs/inode.c |
1695 | +++ b/fs/hugetlbfs/inode.c |
1696 | @@ -859,6 +859,18 @@ static int hugetlbfs_migrate_page(struct address_space *mapping, |
1697 | rc = migrate_huge_page_move_mapping(mapping, newpage, page); |
1698 | if (rc != MIGRATEPAGE_SUCCESS) |
1699 | return rc; |
1700 | + |
1701 | + /* |
1702 | + * page_private is subpool pointer in hugetlb pages. Transfer to |
1703 | + * new page. PagePrivate is not associated with page_private for |
1704 | + * hugetlb pages and can not be set here as only page_huge_active |
1705 | + * pages can be migrated. |
1706 | + */ |
1707 | + if (page_private(page)) { |
1708 | + set_page_private(newpage, page_private(page)); |
1709 | + set_page_private(page, 0); |
1710 | + } |
1711 | + |
1712 | if (mode != MIGRATE_SYNC_NO_COPY) |
1713 | migrate_page_copy(newpage, page); |
1714 | else |
1715 | diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h |
1716 | index c311571355981..07e02d6df5ad9 100644 |
1717 | --- a/include/linux/backing-dev-defs.h |
1718 | +++ b/include/linux/backing-dev-defs.h |
1719 | @@ -190,6 +190,7 @@ struct backing_dev_info { |
1720 | struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ |
1721 | struct rb_root cgwb_congested_tree; /* their congested states */ |
1722 | struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */ |
1723 | + struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */ |
1724 | #else |
1725 | struct bdi_writeback_congested *wb_congested; |
1726 | #endif |
1727 | diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h |
1728 | index 6756fea18b69f..e44746de95cdf 100644 |
1729 | --- a/include/linux/if_arp.h |
1730 | +++ b/include/linux/if_arp.h |
1731 | @@ -54,6 +54,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev) |
1732 | case ARPHRD_IPGRE: |
1733 | case ARPHRD_VOID: |
1734 | case ARPHRD_NONE: |
1735 | + case ARPHRD_RAWIP: |
1736 | return false; |
1737 | default: |
1738 | return true; |
1739 | diff --git a/include/linux/irq.h b/include/linux/irq.h |
1740 | index 201de12a99571..c9bffda04a450 100644 |
1741 | --- a/include/linux/irq.h |
1742 | +++ b/include/linux/irq.h |
1743 | @@ -1151,7 +1151,8 @@ void irq_matrix_offline(struct irq_matrix *m); |
1744 | void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace); |
1745 | int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk); |
1746 | void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk); |
1747 | -int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu); |
1748 | +int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk, |
1749 | + unsigned int *mapped_cpu); |
1750 | void irq_matrix_reserve(struct irq_matrix *m); |
1751 | void irq_matrix_remove_reserved(struct irq_matrix *m); |
1752 | int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, |
1753 | diff --git a/kernel/exit.c b/kernel/exit.c |
1754 | index 55b4fa6d01ebd..d607e23fd0c3e 100644 |
1755 | --- a/kernel/exit.c |
1756 | +++ b/kernel/exit.c |
1757 | @@ -307,7 +307,7 @@ void rcuwait_wake_up(struct rcuwait *w) |
1758 | * MB (A) MB (B) |
1759 | * [L] cond [L] tsk |
1760 | */ |
1761 | - smp_rmb(); /* (B) */ |
1762 | + smp_mb(); /* (B) */ |
1763 | |
1764 | /* |
1765 | * Avoid using task_rcu_dereference() magic as long as we are careful, |
1766 | diff --git a/kernel/futex.c b/kernel/futex.c |
1767 | index d7c465fd687c6..c5fca746edc46 100644 |
1768 | --- a/kernel/futex.c |
1769 | +++ b/kernel/futex.c |
1770 | @@ -1444,11 +1444,7 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q) |
1771 | if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n")) |
1772 | return; |
1773 | |
1774 | - /* |
1775 | - * Queue the task for later wakeup for after we've released |
1776 | - * the hb->lock. wake_q_add() grabs reference to p. |
1777 | - */ |
1778 | - wake_q_add(wake_q, p); |
1779 | + get_task_struct(p); |
1780 | __unqueue_futex(q); |
1781 | /* |
1782 | * The waiting task can free the futex_q as soon as q->lock_ptr = NULL |
1783 | @@ -1458,6 +1454,13 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q) |
1784 | * plist_del in __unqueue_futex(). |
1785 | */ |
1786 | smp_store_release(&q->lock_ptr, NULL); |
1787 | + |
1788 | + /* |
1789 | + * Queue the task for later wakeup for after we've released |
1790 | + * the hb->lock. wake_q_add() grabs reference to p. |
1791 | + */ |
1792 | + wake_q_add(wake_q, p); |
1793 | + put_task_struct(p); |
1794 | } |
1795 | |
1796 | /* |
1797 | diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c |
1798 | index 9dbdccab3b6a3..5c0ba5ca59308 100644 |
1799 | --- a/kernel/irq/manage.c |
1800 | +++ b/kernel/irq/manage.c |
1801 | @@ -393,6 +393,9 @@ int irq_setup_affinity(struct irq_desc *desc) |
1802 | } |
1803 | |
1804 | cpumask_and(&mask, cpu_online_mask, set); |
1805 | + if (cpumask_empty(&mask)) |
1806 | + cpumask_copy(&mask, cpu_online_mask); |
1807 | + |
1808 | if (node != NUMA_NO_NODE) { |
1809 | const struct cpumask *nodemask = cpumask_of_node(node); |
1810 | |
1811 | diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c |
1812 | index 5092494bf2614..92337703ca9fd 100644 |
1813 | --- a/kernel/irq/matrix.c |
1814 | +++ b/kernel/irq/matrix.c |
1815 | @@ -14,6 +14,7 @@ struct cpumap { |
1816 | unsigned int available; |
1817 | unsigned int allocated; |
1818 | unsigned int managed; |
1819 | + unsigned int managed_allocated; |
1820 | bool initialized; |
1821 | bool online; |
1822 | unsigned long alloc_map[IRQ_MATRIX_SIZE]; |
1823 | @@ -124,6 +125,48 @@ static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm, |
1824 | return area; |
1825 | } |
1826 | |
1827 | +/* Find the best CPU which has the lowest vector allocation count */ |
1828 | +static unsigned int matrix_find_best_cpu(struct irq_matrix *m, |
1829 | + const struct cpumask *msk) |
1830 | +{ |
1831 | + unsigned int cpu, best_cpu, maxavl = 0; |
1832 | + struct cpumap *cm; |
1833 | + |
1834 | + best_cpu = UINT_MAX; |
1835 | + |
1836 | + for_each_cpu(cpu, msk) { |
1837 | + cm = per_cpu_ptr(m->maps, cpu); |
1838 | + |
1839 | + if (!cm->online || cm->available <= maxavl) |
1840 | + continue; |
1841 | + |
1842 | + best_cpu = cpu; |
1843 | + maxavl = cm->available; |
1844 | + } |
1845 | + return best_cpu; |
1846 | +} |
1847 | + |
1848 | +/* Find the best CPU which has the lowest number of managed IRQs allocated */ |
1849 | +static unsigned int matrix_find_best_cpu_managed(struct irq_matrix *m, |
1850 | + const struct cpumask *msk) |
1851 | +{ |
1852 | + unsigned int cpu, best_cpu, allocated = UINT_MAX; |
1853 | + struct cpumap *cm; |
1854 | + |
1855 | + best_cpu = UINT_MAX; |
1856 | + |
1857 | + for_each_cpu(cpu, msk) { |
1858 | + cm = per_cpu_ptr(m->maps, cpu); |
1859 | + |
1860 | + if (!cm->online || cm->managed_allocated > allocated) |
1861 | + continue; |
1862 | + |
1863 | + best_cpu = cpu; |
1864 | + allocated = cm->managed_allocated; |
1865 | + } |
1866 | + return best_cpu; |
1867 | +} |
1868 | + |
1869 | /** |
1870 | * irq_matrix_assign_system - Assign system wide entry in the matrix |
1871 | * @m: Matrix pointer |
1872 | @@ -239,11 +282,21 @@ void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk) |
1873 | * @m: Matrix pointer |
1874 | * @cpu: On which CPU the interrupt should be allocated |
1875 | */ |
1876 | -int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu) |
1877 | +int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk, |
1878 | + unsigned int *mapped_cpu) |
1879 | { |
1880 | - struct cpumap *cm = per_cpu_ptr(m->maps, cpu); |
1881 | - unsigned int bit, end = m->alloc_end; |
1882 | + unsigned int bit, cpu, end = m->alloc_end; |
1883 | + struct cpumap *cm; |
1884 | |
1885 | + if (cpumask_empty(msk)) |
1886 | + return -EINVAL; |
1887 | + |
1888 | + cpu = matrix_find_best_cpu_managed(m, msk); |
1889 | + if (cpu == UINT_MAX) |
1890 | + return -ENOSPC; |
1891 | + |
1892 | + cm = per_cpu_ptr(m->maps, cpu); |
1893 | + end = m->alloc_end; |
1894 | /* Get managed bit which are not allocated */ |
1895 | bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end); |
1896 | bit = find_first_bit(m->scratch_map, end); |
1897 | @@ -251,7 +304,9 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu) |
1898 | return -ENOSPC; |
1899 | set_bit(bit, cm->alloc_map); |
1900 | cm->allocated++; |
1901 | + cm->managed_allocated++; |
1902 | m->total_allocated++; |
1903 | + *mapped_cpu = cpu; |
1904 | trace_irq_matrix_alloc_managed(bit, cpu, m, cm); |
1905 | return bit; |
1906 | } |
1907 | @@ -322,37 +377,27 @@ void irq_matrix_remove_reserved(struct irq_matrix *m) |
1908 | int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, |
1909 | bool reserved, unsigned int *mapped_cpu) |
1910 | { |
1911 | - unsigned int cpu, best_cpu, maxavl = 0; |
1912 | + unsigned int cpu, bit; |
1913 | struct cpumap *cm; |
1914 | - unsigned int bit; |
1915 | |
1916 | - best_cpu = UINT_MAX; |
1917 | - for_each_cpu(cpu, msk) { |
1918 | - cm = per_cpu_ptr(m->maps, cpu); |
1919 | - |
1920 | - if (!cm->online || cm->available <= maxavl) |
1921 | - continue; |
1922 | + cpu = matrix_find_best_cpu(m, msk); |
1923 | + if (cpu == UINT_MAX) |
1924 | + return -ENOSPC; |
1925 | |
1926 | - best_cpu = cpu; |
1927 | - maxavl = cm->available; |
1928 | - } |
1929 | + cm = per_cpu_ptr(m->maps, cpu); |
1930 | + bit = matrix_alloc_area(m, cm, 1, false); |
1931 | + if (bit >= m->alloc_end) |
1932 | + return -ENOSPC; |
1933 | + cm->allocated++; |
1934 | + cm->available--; |
1935 | + m->total_allocated++; |
1936 | + m->global_available--; |
1937 | + if (reserved) |
1938 | + m->global_reserved--; |
1939 | + *mapped_cpu = cpu; |
1940 | + trace_irq_matrix_alloc(bit, cpu, m, cm); |
1941 | + return bit; |
1942 | |
1943 | - if (maxavl) { |
1944 | - cm = per_cpu_ptr(m->maps, best_cpu); |
1945 | - bit = matrix_alloc_area(m, cm, 1, false); |
1946 | - if (bit < m->alloc_end) { |
1947 | - cm->allocated++; |
1948 | - cm->available--; |
1949 | - m->total_allocated++; |
1950 | - m->global_available--; |
1951 | - if (reserved) |
1952 | - m->global_reserved--; |
1953 | - *mapped_cpu = best_cpu; |
1954 | - trace_irq_matrix_alloc(bit, best_cpu, m, cm); |
1955 | - return bit; |
1956 | - } |
1957 | - } |
1958 | - return -ENOSPC; |
1959 | } |
1960 | |
1961 | /** |
1962 | @@ -373,6 +418,8 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu, |
1963 | |
1964 | clear_bit(bit, cm->alloc_map); |
1965 | cm->allocated--; |
1966 | + if(managed) |
1967 | + cm->managed_allocated--; |
1968 | |
1969 | if (cm->online) |
1970 | m->total_allocated--; |
1971 | @@ -442,13 +489,14 @@ void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind) |
1972 | seq_printf(sf, "Total allocated: %6u\n", m->total_allocated); |
1973 | seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits, |
1974 | m->system_map); |
1975 | - seq_printf(sf, "%*s| CPU | avl | man | act | vectors\n", ind, " "); |
1976 | + seq_printf(sf, "%*s| CPU | avl | man | mac | act | vectors\n", ind, " "); |
1977 | cpus_read_lock(); |
1978 | for_each_online_cpu(cpu) { |
1979 | struct cpumap *cm = per_cpu_ptr(m->maps, cpu); |
1980 | |
1981 | - seq_printf(sf, "%*s %4d %4u %4u %4u %*pbl\n", ind, " ", |
1982 | - cpu, cm->available, cm->managed, cm->allocated, |
1983 | + seq_printf(sf, "%*s %4d %4u %4u %4u %4u %*pbl\n", ind, " ", |
1984 | + cpu, cm->available, cm->managed, |
1985 | + cm->managed_allocated, cm->allocated, |
1986 | m->matrix_bits, cm->alloc_map); |
1987 | } |
1988 | cpus_read_unlock(); |
1989 | diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c |
1990 | index 3064c50e181e1..ef909357b84e1 100644 |
1991 | --- a/kernel/locking/rwsem-xadd.c |
1992 | +++ b/kernel/locking/rwsem-xadd.c |
1993 | @@ -198,15 +198,22 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem, |
1994 | woken++; |
1995 | tsk = waiter->task; |
1996 | |
1997 | - wake_q_add(wake_q, tsk); |
1998 | + get_task_struct(tsk); |
1999 | list_del(&waiter->list); |
2000 | /* |
2001 | - * Ensure that the last operation is setting the reader |
2002 | + * Ensure calling get_task_struct() before setting the reader |
2003 | * waiter to nil such that rwsem_down_read_failed() cannot |
2004 | * race with do_exit() by always holding a reference count |
2005 | * to the task to wakeup. |
2006 | */ |
2007 | smp_store_release(&waiter->task, NULL); |
2008 | + /* |
2009 | + * Ensure issuing the wakeup (either by us or someone else) |
2010 | + * after setting the reader waiter to nil. |
2011 | + */ |
2012 | + wake_q_add(wake_q, tsk); |
2013 | + /* wake_q_add() already take the task ref */ |
2014 | + put_task_struct(tsk); |
2015 | } |
2016 | |
2017 | adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment; |
2018 | diff --git a/kernel/sched/core.c b/kernel/sched/core.c |
2019 | index 13ddfa46d741f..152a0b0c91bb6 100644 |
2020 | --- a/kernel/sched/core.c |
2021 | +++ b/kernel/sched/core.c |
2022 | @@ -405,10 +405,11 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task) |
2023 | * its already queued (either by us or someone else) and will get the |
2024 | * wakeup due to that. |
2025 | * |
2026 | - * This cmpxchg() executes a full barrier, which pairs with the full |
2027 | - * barrier executed by the wakeup in wake_up_q(). |
2028 | + * In order to ensure that a pending wakeup will observe our pending |
2029 | + * state, even in the failed case, an explicit smp_mb() must be used. |
2030 | */ |
2031 | - if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL)) |
2032 | + smp_mb__before_atomic(); |
2033 | + if (cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)) |
2034 | return; |
2035 | |
2036 | get_task_struct(task); |
2037 | diff --git a/mm/backing-dev.c b/mm/backing-dev.c |
2038 | index 8a8bb8796c6c4..72e6d0c55cfad 100644 |
2039 | --- a/mm/backing-dev.c |
2040 | +++ b/mm/backing-dev.c |
2041 | @@ -689,6 +689,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi) |
2042 | INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC); |
2043 | bdi->cgwb_congested_tree = RB_ROOT; |
2044 | mutex_init(&bdi->cgwb_release_mutex); |
2045 | + init_rwsem(&bdi->wb_switch_rwsem); |
2046 | |
2047 | ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL); |
2048 | if (!ret) { |
2049 | diff --git a/mm/hugetlb.c b/mm/hugetlb.c |
2050 | index 10e83672bfbec..9e5f66cbf711c 100644 |
2051 | --- a/mm/hugetlb.c |
2052 | +++ b/mm/hugetlb.c |
2053 | @@ -3624,7 +3624,6 @@ retry_avoidcopy: |
2054 | copy_user_huge_page(new_page, old_page, address, vma, |
2055 | pages_per_huge_page(h)); |
2056 | __SetPageUptodate(new_page); |
2057 | - set_page_huge_active(new_page); |
2058 | |
2059 | mmun_start = haddr; |
2060 | mmun_end = mmun_start + huge_page_size(h); |
2061 | @@ -3646,6 +3645,7 @@ retry_avoidcopy: |
2062 | make_huge_pte(vma, new_page, 1)); |
2063 | page_remove_rmap(old_page, true); |
2064 | hugepage_add_new_anon_rmap(new_page, vma, haddr); |
2065 | + set_page_huge_active(new_page); |
2066 | /* Make the old page be freed below */ |
2067 | new_page = old_page; |
2068 | } |
2069 | @@ -3730,6 +3730,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, |
2070 | pte_t new_pte; |
2071 | spinlock_t *ptl; |
2072 | unsigned long haddr = address & huge_page_mask(h); |
2073 | + bool new_page = false; |
2074 | |
2075 | /* |
2076 | * Currently, we are forced to kill the process in the event the |
2077 | @@ -3791,7 +3792,7 @@ retry: |
2078 | } |
2079 | clear_huge_page(page, address, pages_per_huge_page(h)); |
2080 | __SetPageUptodate(page); |
2081 | - set_page_huge_active(page); |
2082 | + new_page = true; |
2083 | |
2084 | if (vma->vm_flags & VM_MAYSHARE) { |
2085 | int err = huge_add_to_page_cache(page, mapping, idx); |
2086 | @@ -3862,6 +3863,15 @@ retry: |
2087 | } |
2088 | |
2089 | spin_unlock(ptl); |
2090 | + |
2091 | + /* |
2092 | + * Only make newly allocated pages active. Existing pages found |
2093 | + * in the pagecache could be !page_huge_active() if they have been |
2094 | + * isolated for migration. |
2095 | + */ |
2096 | + if (new_page) |
2097 | + set_page_huge_active(page); |
2098 | + |
2099 | unlock_page(page); |
2100 | out: |
2101 | return ret; |
2102 | @@ -4096,7 +4106,6 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, |
2103 | * the set_pte_at() write. |
2104 | */ |
2105 | __SetPageUptodate(page); |
2106 | - set_page_huge_active(page); |
2107 | |
2108 | mapping = dst_vma->vm_file->f_mapping; |
2109 | idx = vma_hugecache_offset(h, dst_vma, dst_addr); |
2110 | @@ -4164,6 +4173,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, |
2111 | update_mmu_cache(dst_vma, dst_addr, dst_pte); |
2112 | |
2113 | spin_unlock(ptl); |
2114 | + set_page_huge_active(page); |
2115 | if (vm_shared) |
2116 | unlock_page(page); |
2117 | ret = 0; |
2118 | diff --git a/mm/migrate.c b/mm/migrate.c |
2119 | index ab260260a6262..14779c4f9a60a 100644 |
2120 | --- a/mm/migrate.c |
2121 | +++ b/mm/migrate.c |
2122 | @@ -1303,6 +1303,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, |
2123 | lock_page(hpage); |
2124 | } |
2125 | |
2126 | + /* |
2127 | + * Check for pages which are in the process of being freed. Without |
2128 | + * page_mapping() set, hugetlbfs specific move page routine will not |
2129 | + * be called and we could leak usage counts for subpools. |
2130 | + */ |
2131 | + if (page_private(hpage) && !page_mapping(hpage)) { |
2132 | + rc = -EBUSY; |
2133 | + goto out_unlock; |
2134 | + } |
2135 | + |
2136 | if (PageAnon(hpage)) |
2137 | anon_vma = page_get_anon_vma(hpage); |
2138 | |
2139 | @@ -1333,6 +1343,7 @@ put_anon: |
2140 | put_new_page = NULL; |
2141 | } |
2142 | |
2143 | +out_unlock: |
2144 | unlock_page(hpage); |
2145 | out: |
2146 | if (rc != -EAGAIN) |
2147 | diff --git a/mm/mmap.c b/mm/mmap.c |
2148 | index f7cd9cb966c0f..43507f7e66b41 100644 |
2149 | --- a/mm/mmap.c |
2150 | +++ b/mm/mmap.c |
2151 | @@ -2391,12 +2391,11 @@ int expand_downwards(struct vm_area_struct *vma, |
2152 | { |
2153 | struct mm_struct *mm = vma->vm_mm; |
2154 | struct vm_area_struct *prev; |
2155 | - int error; |
2156 | + int error = 0; |
2157 | |
2158 | address &= PAGE_MASK; |
2159 | - error = security_mmap_addr(address); |
2160 | - if (error) |
2161 | - return error; |
2162 | + if (address < mmap_min_addr) |
2163 | + return -EPERM; |
2164 | |
2165 | /* Enforce stack_guard_gap */ |
2166 | prev = vma->vm_prev; |
2167 | diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c |
2168 | index c2abe9db1ea24..40c5102234679 100644 |
2169 | --- a/net/mac80211/cfg.c |
2170 | +++ b/net/mac80211/cfg.c |
2171 | @@ -1478,6 +1478,10 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, |
2172 | if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) |
2173 | sta->sta.tdls = true; |
2174 | |
2175 | + if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION && |
2176 | + !sdata->u.mgd.associated) |
2177 | + return -EINVAL; |
2178 | + |
2179 | err = sta_apply_parameters(local, sta, params); |
2180 | if (err) { |
2181 | sta_info_free(local, sta); |
2182 | diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c |
2183 | index 828348b2a504d..e946ee4f335bd 100644 |
2184 | --- a/net/mac80211/rx.c |
2185 | +++ b/net/mac80211/rx.c |
2186 | @@ -221,7 +221,7 @@ static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata, |
2187 | struct ieee80211_hdr_3addr hdr; |
2188 | u8 category; |
2189 | u8 action_code; |
2190 | - } __packed action; |
2191 | + } __packed __aligned(2) action; |
2192 | |
2193 | if (!sdata) |
2194 | return; |
2195 | @@ -2678,7 +2678,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) |
2196 | skb_set_queue_mapping(skb, q); |
2197 | |
2198 | if (!--mesh_hdr->ttl) { |
2199 | - IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); |
2200 | + if (!is_multicast_ether_addr(hdr->addr1)) |
2201 | + IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, |
2202 | + dropped_frames_ttl); |
2203 | goto out; |
2204 | } |
2205 | |
2206 | diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c |
2207 | index c7ccd7b71b151..743cde66aaf62 100644 |
2208 | --- a/net/mac80211/tx.c |
2209 | +++ b/net/mac80211/tx.c |
2210 | @@ -3614,10 +3614,10 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, |
2211 | /* We need a bit of data queued to build aggregates properly, so |
2212 | * instruct the TCP stack to allow more than a single ms of data |
2213 | * to be queued in the stack. The value is a bit-shift of 1 |
2214 | - * second, so 8 is ~4ms of queued data. Only affects local TCP |
2215 | + * second, so 7 is ~8ms of queued data. Only affects local TCP |
2216 | * sockets. |
2217 | */ |
2218 | - sk_pacing_shift_update(skb->sk, 8); |
2219 | + sk_pacing_shift_update(skb->sk, 7); |
2220 | |
2221 | fast_tx = rcu_dereference(sta->fast_tx); |
2222 | |
2223 | diff --git a/net/wireless/reg.c b/net/wireless/reg.c |
2224 | index 24cfa2776f50b..8002ace7c9f65 100644 |
2225 | --- a/net/wireless/reg.c |
2226 | +++ b/net/wireless/reg.c |
2227 | @@ -1249,7 +1249,7 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd) |
2228 | * definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"), |
2229 | * however it is safe for now to assume that a frequency rule should not be |
2230 | * part of a frequency's band if the start freq or end freq are off by more |
2231 | - * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 10 GHz for the |
2232 | + * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 20 GHz for the |
2233 | * 60 GHz band. |
2234 | * This resolution can be lowered and should be considered as we add |
2235 | * regulatory rule support for other "bands". |
2236 | @@ -1264,7 +1264,7 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range, |
2237 | * with the Channel starting frequency above 45 GHz. |
2238 | */ |
2239 | u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ? |
2240 | - 10 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ; |
2241 | + 20 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ; |
2242 | if (abs(freq_khz - freq_range->start_freq_khz) <= limit) |
2243 | return true; |
2244 | if (abs(freq_khz - freq_range->end_freq_khz) <= limit) |
2245 | diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c |
2246 | index 26b5e245b0747..8b78ddffa509a 100644 |
2247 | --- a/sound/core/compress_offload.c |
2248 | +++ b/sound/core/compress_offload.c |
2249 | @@ -529,7 +529,8 @@ static int snd_compress_check_input(struct snd_compr_params *params) |
2250 | { |
2251 | /* first let's check the buffer parameter's */ |
2252 | if (params->buffer.fragment_size == 0 || |
2253 | - params->buffer.fragments > INT_MAX / params->buffer.fragment_size) |
2254 | + params->buffer.fragments > INT_MAX / params->buffer.fragment_size || |
2255 | + params->buffer.fragments == 0) |
2256 | return -EINVAL; |
2257 | |
2258 | /* now codec parameters */ |
2259 | diff --git a/sound/soc/codecs/rt274.c b/sound/soc/codecs/rt274.c |
2260 | index d88e673410835..18a931c25ca58 100644 |
2261 | --- a/sound/soc/codecs/rt274.c |
2262 | +++ b/sound/soc/codecs/rt274.c |
2263 | @@ -1126,8 +1126,11 @@ static int rt274_i2c_probe(struct i2c_client *i2c, |
2264 | return ret; |
2265 | } |
2266 | |
2267 | - regmap_read(rt274->regmap, |
2268 | + ret = regmap_read(rt274->regmap, |
2269 | RT274_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val); |
2270 | + if (ret) |
2271 | + return ret; |
2272 | + |
2273 | if (val != RT274_VENDOR_ID) { |
2274 | dev_err(&i2c->dev, |
2275 | "Device with ID register %#x is not rt274\n", val); |
2276 | diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h |
2277 | index 8068140ebe3f1..cdd659f4df93d 100644 |
2278 | --- a/sound/soc/codecs/rt5682.h |
2279 | +++ b/sound/soc/codecs/rt5682.h |
2280 | @@ -849,18 +849,18 @@ |
2281 | #define RT5682_SCLK_SRC_PLL2 (0x2 << 13) |
2282 | #define RT5682_SCLK_SRC_SDW (0x3 << 13) |
2283 | #define RT5682_SCLK_SRC_RCCLK (0x4 << 13) |
2284 | -#define RT5682_PLL1_SRC_MASK (0x3 << 10) |
2285 | -#define RT5682_PLL1_SRC_SFT 10 |
2286 | -#define RT5682_PLL1_SRC_MCLK (0x0 << 10) |
2287 | -#define RT5682_PLL1_SRC_BCLK1 (0x1 << 10) |
2288 | -#define RT5682_PLL1_SRC_SDW (0x2 << 10) |
2289 | -#define RT5682_PLL1_SRC_RC (0x3 << 10) |
2290 | -#define RT5682_PLL2_SRC_MASK (0x3 << 8) |
2291 | -#define RT5682_PLL2_SRC_SFT 8 |
2292 | -#define RT5682_PLL2_SRC_MCLK (0x0 << 8) |
2293 | -#define RT5682_PLL2_SRC_BCLK1 (0x1 << 8) |
2294 | -#define RT5682_PLL2_SRC_SDW (0x2 << 8) |
2295 | -#define RT5682_PLL2_SRC_RC (0x3 << 8) |
2296 | +#define RT5682_PLL2_SRC_MASK (0x3 << 10) |
2297 | +#define RT5682_PLL2_SRC_SFT 10 |
2298 | +#define RT5682_PLL2_SRC_MCLK (0x0 << 10) |
2299 | +#define RT5682_PLL2_SRC_BCLK1 (0x1 << 10) |
2300 | +#define RT5682_PLL2_SRC_SDW (0x2 << 10) |
2301 | +#define RT5682_PLL2_SRC_RC (0x3 << 10) |
2302 | +#define RT5682_PLL1_SRC_MASK (0x3 << 8) |
2303 | +#define RT5682_PLL1_SRC_SFT 8 |
2304 | +#define RT5682_PLL1_SRC_MCLK (0x0 << 8) |
2305 | +#define RT5682_PLL1_SRC_BCLK1 (0x1 << 8) |
2306 | +#define RT5682_PLL1_SRC_SDW (0x2 << 8) |
2307 | +#define RT5682_PLL1_SRC_RC (0x3 << 8) |
2308 | |
2309 | |
2310 | |
2311 | diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c |
2312 | index 392d5eef356d3..99e07b01a2ce9 100644 |
2313 | --- a/sound/soc/fsl/imx-audmux.c |
2314 | +++ b/sound/soc/fsl/imx-audmux.c |
2315 | @@ -86,49 +86,49 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf, |
2316 | if (!buf) |
2317 | return -ENOMEM; |
2318 | |
2319 | - ret = snprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n", |
2320 | + ret = scnprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n", |
2321 | pdcr, ptcr); |
2322 | |
2323 | if (ptcr & IMX_AUDMUX_V2_PTCR_TFSDIR) |
2324 | - ret += snprintf(buf + ret, PAGE_SIZE - ret, |
2325 | + ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
2326 | "TxFS output from %s, ", |
2327 | audmux_port_string((ptcr >> 27) & 0x7)); |
2328 | else |
2329 | - ret += snprintf(buf + ret, PAGE_SIZE - ret, |
2330 | + ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
2331 | "TxFS input, "); |
2332 | |
2333 | if (ptcr & IMX_AUDMUX_V2_PTCR_TCLKDIR) |
2334 | - ret += snprintf(buf + ret, PAGE_SIZE - ret, |
2335 | + ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
2336 | "TxClk output from %s", |
2337 | audmux_port_string((ptcr >> 22) & 0x7)); |
2338 | else |
2339 | - ret += snprintf(buf + ret, PAGE_SIZE - ret, |
2340 | + ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
2341 | "TxClk input"); |
2342 | |
2343 | - ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n"); |
2344 | + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); |
2345 | |
2346 | if (ptcr & IMX_AUDMUX_V2_PTCR_SYN) { |
2347 | - ret += snprintf(buf + ret, PAGE_SIZE - ret, |
2348 | + ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
2349 | "Port is symmetric"); |
2350 | } else { |
2351 | if (ptcr & IMX_AUDMUX_V2_PTCR_RFSDIR) |
2352 | - ret += snprintf(buf + ret, PAGE_SIZE - ret, |
2353 | + ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
2354 | "RxFS output from %s, ", |
2355 | audmux_port_string((ptcr >> 17) & 0x7)); |
2356 | else |
2357 | - ret += snprintf(buf + ret, PAGE_SIZE - ret, |
2358 | + ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
2359 | "RxFS input, "); |
2360 | |
2361 | if (ptcr & IMX_AUDMUX_V2_PTCR_RCLKDIR) |
2362 | - ret += snprintf(buf + ret, PAGE_SIZE - ret, |
2363 | + ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
2364 | "RxClk output from %s", |
2365 | audmux_port_string((ptcr >> 12) & 0x7)); |
2366 | else |
2367 | - ret += snprintf(buf + ret, PAGE_SIZE - ret, |
2368 | + ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
2369 | "RxClk input"); |
2370 | } |
2371 | |
2372 | - ret += snprintf(buf + ret, PAGE_SIZE - ret, |
2373 | + ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
2374 | "\nData received from %s\n", |
2375 | audmux_port_string((pdcr >> 13) & 0x7)); |
2376 | |
2377 | diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c |
2378 | index 7b0ee67b4fc8b..78ec97b53f50e 100644 |
2379 | --- a/sound/soc/intel/boards/broadwell.c |
2380 | +++ b/sound/soc/intel/boards/broadwell.c |
2381 | @@ -192,7 +192,7 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = { |
2382 | .stream_name = "Loopback", |
2383 | .cpu_dai_name = "Loopback Pin", |
2384 | .platform_name = "haswell-pcm-audio", |
2385 | - .dynamic = 0, |
2386 | + .dynamic = 1, |
2387 | .codec_name = "snd-soc-dummy", |
2388 | .codec_dai_name = "snd-soc-dummy-dai", |
2389 | .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, |
2390 | diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c |
2391 | index eab1f439dd3f1..a4022983a7ce0 100644 |
2392 | --- a/sound/soc/intel/boards/haswell.c |
2393 | +++ b/sound/soc/intel/boards/haswell.c |
2394 | @@ -146,7 +146,7 @@ static struct snd_soc_dai_link haswell_rt5640_dais[] = { |
2395 | .stream_name = "Loopback", |
2396 | .cpu_dai_name = "Loopback Pin", |
2397 | .platform_name = "haswell-pcm-audio", |
2398 | - .dynamic = 0, |
2399 | + .dynamic = 1, |
2400 | .codec_name = "snd-soc-dummy", |
2401 | .codec_dai_name = "snd-soc-dummy-dai", |
2402 | .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, |
2403 | diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c |
2404 | index 461d951917c05..6537069452226 100644 |
2405 | --- a/sound/soc/soc-dapm.c |
2406 | +++ b/sound/soc/soc-dapm.c |
2407 | @@ -2028,19 +2028,19 @@ static ssize_t dapm_widget_power_read_file(struct file *file, |
2408 | out = is_connected_output_ep(w, NULL, NULL); |
2409 | } |
2410 | |
2411 | - ret = snprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d", |
2412 | + ret = scnprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d", |
2413 | w->name, w->power ? "On" : "Off", |
2414 | w->force ? " (forced)" : "", in, out); |
2415 | |
2416 | if (w->reg >= 0) |
2417 | - ret += snprintf(buf + ret, PAGE_SIZE - ret, |
2418 | + ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
2419 | " - R%d(0x%x) mask 0x%x", |
2420 | w->reg, w->reg, w->mask << w->shift); |
2421 | |
2422 | - ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n"); |
2423 | + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); |
2424 | |
2425 | if (w->sname) |
2426 | - ret += snprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n", |
2427 | + ret += scnprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n", |
2428 | w->sname, |
2429 | w->active ? "active" : "inactive"); |
2430 | |
2431 | @@ -2053,7 +2053,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file, |
2432 | if (!p->connect) |
2433 | continue; |
2434 | |
2435 | - ret += snprintf(buf + ret, PAGE_SIZE - ret, |
2436 | + ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
2437 | " %s \"%s\" \"%s\"\n", |
2438 | (rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out", |
2439 | p->name ? p->name : "static", |
2440 | diff --git a/tools/testing/selftests/gpio/gpio-mockup-chardev.c b/tools/testing/selftests/gpio/gpio-mockup-chardev.c |
2441 | index f8d468f54e986..aaa1e9f083c37 100644 |
2442 | --- a/tools/testing/selftests/gpio/gpio-mockup-chardev.c |
2443 | +++ b/tools/testing/selftests/gpio/gpio-mockup-chardev.c |
2444 | @@ -37,7 +37,7 @@ static int get_debugfs(char **path) |
2445 | struct libmnt_table *tb; |
2446 | struct libmnt_iter *itr = NULL; |
2447 | struct libmnt_fs *fs; |
2448 | - int found = 0; |
2449 | + int found = 0, ret; |
2450 | |
2451 | cxt = mnt_new_context(); |
2452 | if (!cxt) |
2453 | @@ -58,8 +58,11 @@ static int get_debugfs(char **path) |
2454 | break; |
2455 | } |
2456 | } |
2457 | - if (found) |
2458 | - asprintf(path, "%s/gpio", mnt_fs_get_target(fs)); |
2459 | + if (found) { |
2460 | + ret = asprintf(path, "%s/gpio", mnt_fs_get_target(fs)); |
2461 | + if (ret < 0) |
2462 | + err(EXIT_FAILURE, "failed to format string"); |
2463 | + } |
2464 | |
2465 | mnt_free_iter(itr); |
2466 | mnt_free_context(cxt); |
2467 | diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c |
2468 | index 6fd8c089cafcd..fb5d2d1e0c048 100644 |
2469 | --- a/tools/testing/selftests/kvm/lib/kvm_util.c |
2470 | +++ b/tools/testing/selftests/kvm/lib/kvm_util.c |
2471 | @@ -590,7 +590,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, |
2472 | * already exist. |
2473 | */ |
2474 | region = (struct userspace_mem_region *) userspace_mem_region_find( |
2475 | - vm, guest_paddr, guest_paddr + npages * vm->page_size); |
2476 | + vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); |
2477 | if (region != NULL) |
2478 | TEST_ASSERT(false, "overlapping userspace_mem_region already " |
2479 | "exists\n" |
2480 | @@ -606,15 +606,10 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, |
2481 | region = region->next) { |
2482 | if (region->region.slot == slot) |
2483 | break; |
2484 | - if ((guest_paddr <= (region->region.guest_phys_addr |
2485 | - + region->region.memory_size)) |
2486 | - && ((guest_paddr + npages * vm->page_size) |
2487 | - >= region->region.guest_phys_addr)) |
2488 | - break; |
2489 | } |
2490 | if (region != NULL) |
2491 | TEST_ASSERT(false, "A mem region with the requested slot " |
2492 | - "or overlapping physical memory range already exists.\n" |
2493 | + "already exists.\n" |
2494 | " requested slot: %u paddr: 0x%lx npages: 0x%lx\n" |
2495 | " existing slot: %u paddr: 0x%lx size: 0x%lx", |
2496 | slot, guest_paddr, npages, |
2497 | diff --git a/tools/testing/selftests/rtc/rtctest.c b/tools/testing/selftests/rtc/rtctest.c |
2498 | index e20b017e70731..b2065536d4075 100644 |
2499 | --- a/tools/testing/selftests/rtc/rtctest.c |
2500 | +++ b/tools/testing/selftests/rtc/rtctest.c |
2501 | @@ -145,15 +145,12 @@ TEST_F(rtc, alarm_alm_set) { |
2502 | |
2503 | rc = select(self->fd + 1, &readfds, NULL, NULL, &tv); |
2504 | ASSERT_NE(-1, rc); |
2505 | - EXPECT_NE(0, rc); |
2506 | + ASSERT_NE(0, rc); |
2507 | |
2508 | /* Disable alarm interrupts */ |
2509 | rc = ioctl(self->fd, RTC_AIE_OFF, 0); |
2510 | ASSERT_NE(-1, rc); |
2511 | |
2512 | - if (rc == 0) |
2513 | - return; |
2514 | - |
2515 | rc = read(self->fd, &data, sizeof(unsigned long)); |
2516 | ASSERT_NE(-1, rc); |
2517 | TH_LOG("data: %lx", data); |
2518 | @@ -202,7 +199,109 @@ TEST_F(rtc, alarm_wkalm_set) { |
2519 | |
2520 | rc = select(self->fd + 1, &readfds, NULL, NULL, &tv); |
2521 | ASSERT_NE(-1, rc); |
2522 | - EXPECT_NE(0, rc); |
2523 | + ASSERT_NE(0, rc); |
2524 | + |
2525 | + rc = read(self->fd, &data, sizeof(unsigned long)); |
2526 | + ASSERT_NE(-1, rc); |
2527 | + |
2528 | + rc = ioctl(self->fd, RTC_RD_TIME, &tm); |
2529 | + ASSERT_NE(-1, rc); |
2530 | + |
2531 | + new = timegm((struct tm *)&tm); |
2532 | + ASSERT_EQ(new, secs); |
2533 | +} |
2534 | + |
2535 | +TEST_F(rtc, alarm_alm_set_minute) { |
2536 | + struct timeval tv = { .tv_sec = 62 }; |
2537 | + unsigned long data; |
2538 | + struct rtc_time tm; |
2539 | + fd_set readfds; |
2540 | + time_t secs, new; |
2541 | + int rc; |
2542 | + |
2543 | + rc = ioctl(self->fd, RTC_RD_TIME, &tm); |
2544 | + ASSERT_NE(-1, rc); |
2545 | + |
2546 | + secs = timegm((struct tm *)&tm) + 60 - tm.tm_sec; |
2547 | + gmtime_r(&secs, (struct tm *)&tm); |
2548 | + |
2549 | + rc = ioctl(self->fd, RTC_ALM_SET, &tm); |
2550 | + if (rc == -1) { |
2551 | + ASSERT_EQ(EINVAL, errno); |
2552 | + TH_LOG("skip alarms are not supported."); |
2553 | + return; |
2554 | + } |
2555 | + |
2556 | + rc = ioctl(self->fd, RTC_ALM_READ, &tm); |
2557 | + ASSERT_NE(-1, rc); |
2558 | + |
2559 | + TH_LOG("Alarm time now set to %02d:%02d:%02d.", |
2560 | + tm.tm_hour, tm.tm_min, tm.tm_sec); |
2561 | + |
2562 | + /* Enable alarm interrupts */ |
2563 | + rc = ioctl(self->fd, RTC_AIE_ON, 0); |
2564 | + ASSERT_NE(-1, rc); |
2565 | + |
2566 | + FD_ZERO(&readfds); |
2567 | + FD_SET(self->fd, &readfds); |
2568 | + |
2569 | + rc = select(self->fd + 1, &readfds, NULL, NULL, &tv); |
2570 | + ASSERT_NE(-1, rc); |
2571 | + ASSERT_NE(0, rc); |
2572 | + |
2573 | + /* Disable alarm interrupts */ |
2574 | + rc = ioctl(self->fd, RTC_AIE_OFF, 0); |
2575 | + ASSERT_NE(-1, rc); |
2576 | + |
2577 | + rc = read(self->fd, &data, sizeof(unsigned long)); |
2578 | + ASSERT_NE(-1, rc); |
2579 | + TH_LOG("data: %lx", data); |
2580 | + |
2581 | + rc = ioctl(self->fd, RTC_RD_TIME, &tm); |
2582 | + ASSERT_NE(-1, rc); |
2583 | + |
2584 | + new = timegm((struct tm *)&tm); |
2585 | + ASSERT_EQ(new, secs); |
2586 | +} |
2587 | + |
2588 | +TEST_F(rtc, alarm_wkalm_set_minute) { |
2589 | + struct timeval tv = { .tv_sec = 62 }; |
2590 | + struct rtc_wkalrm alarm = { 0 }; |
2591 | + struct rtc_time tm; |
2592 | + unsigned long data; |
2593 | + fd_set readfds; |
2594 | + time_t secs, new; |
2595 | + int rc; |
2596 | + |
2597 | + rc = ioctl(self->fd, RTC_RD_TIME, &alarm.time); |
2598 | + ASSERT_NE(-1, rc); |
2599 | + |
2600 | + secs = timegm((struct tm *)&alarm.time) + 60 - alarm.time.tm_sec; |
2601 | + gmtime_r(&secs, (struct tm *)&alarm.time); |
2602 | + |
2603 | + alarm.enabled = 1; |
2604 | + |
2605 | + rc = ioctl(self->fd, RTC_WKALM_SET, &alarm); |
2606 | + if (rc == -1) { |
2607 | + ASSERT_EQ(EINVAL, errno); |
2608 | + TH_LOG("skip alarms are not supported."); |
2609 | + return; |
2610 | + } |
2611 | + |
2612 | + rc = ioctl(self->fd, RTC_WKALM_RD, &alarm); |
2613 | + ASSERT_NE(-1, rc); |
2614 | + |
2615 | + TH_LOG("Alarm time now set to %02d/%02d/%02d %02d:%02d:%02d.", |
2616 | + alarm.time.tm_mday, alarm.time.tm_mon + 1, |
2617 | + alarm.time.tm_year + 1900, alarm.time.tm_hour, |
2618 | + alarm.time.tm_min, alarm.time.tm_sec); |
2619 | + |
2620 | + FD_ZERO(&readfds); |
2621 | + FD_SET(self->fd, &readfds); |
2622 | + |
2623 | + rc = select(self->fd + 1, &readfds, NULL, NULL, &tv); |
2624 | + ASSERT_NE(-1, rc); |
2625 | + ASSERT_NE(0, rc); |
2626 | |
2627 | rc = read(self->fd, &data, sizeof(unsigned long)); |
2628 | ASSERT_NE(-1, rc); |
2629 | diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile |
2630 | index fce7f4ce06925..1760b3e397306 100644 |
2631 | --- a/tools/testing/selftests/seccomp/Makefile |
2632 | +++ b/tools/testing/selftests/seccomp/Makefile |
2633 | @@ -9,7 +9,7 @@ BINARIES := seccomp_bpf seccomp_benchmark |
2634 | CFLAGS += -Wl,-no-as-needed -Wall |
2635 | |
2636 | seccomp_bpf: seccomp_bpf.c ../kselftest_harness.h |
2637 | - $(CC) $(CFLAGS) $(LDFLAGS) -lpthread $< -o $@ |
2638 | + $(CC) $(CFLAGS) $(LDFLAGS) $< -lpthread -o $@ |
2639 | |
2640 | TEST_PROGS += $(BINARIES) |
2641 | EXTRA_CLEAN := $(BINARIES) |
2642 | diff --git a/tools/testing/selftests/vm/gup_benchmark.c b/tools/testing/selftests/vm/gup_benchmark.c |
2643 | index 36df55132036f..9601bc24454d9 100644 |
2644 | --- a/tools/testing/selftests/vm/gup_benchmark.c |
2645 | +++ b/tools/testing/selftests/vm/gup_benchmark.c |
2646 | @@ -22,6 +22,7 @@ struct gup_benchmark { |
2647 | __u64 size; |
2648 | __u32 nr_pages_per_call; |
2649 | __u32 flags; |
2650 | + __u64 expansion[10]; /* For future use */ |
2651 | }; |
2652 | |
2653 | int main(int argc, char **argv) |