Contents of /trunk/kernel-magellan/patches-4.13/0112-4.13.13-all-fixes.patch
Parent Directory | Revision Log
Revision 3023 -
(show annotations)
(download)
Thu Nov 16 11:09:13 2017 UTC (6 years, 10 months ago) by niro
File size: 49155 byte(s)
-linux-4.13.13
1 | diff --git a/Makefile b/Makefile |
2 | index a7c847f495b0..1608a9b71381 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 4 |
7 | PATCHLEVEL = 13 |
8 | -SUBLEVEL = 12 |
9 | +SUBLEVEL = 13 |
10 | EXTRAVERSION = |
11 | NAME = Fearless Coyote |
12 | |
13 | diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c |
14 | index 948c648fea00..0fcd82f01388 100644 |
15 | --- a/arch/arm/kernel/traps.c |
16 | +++ b/arch/arm/kernel/traps.c |
17 | @@ -154,30 +154,26 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom, |
18 | set_fs(fs); |
19 | } |
20 | |
21 | -static void dump_instr(const char *lvl, struct pt_regs *regs) |
22 | +static void __dump_instr(const char *lvl, struct pt_regs *regs) |
23 | { |
24 | unsigned long addr = instruction_pointer(regs); |
25 | const int thumb = thumb_mode(regs); |
26 | const int width = thumb ? 4 : 8; |
27 | - mm_segment_t fs; |
28 | char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; |
29 | int i; |
30 | |
31 | /* |
32 | - * We need to switch to kernel mode so that we can use __get_user |
33 | - * to safely read from kernel space. Note that we now dump the |
34 | - * code first, just in case the backtrace kills us. |
35 | + * Note that we now dump the code first, just in case the backtrace |
36 | + * kills us. |
37 | */ |
38 | - fs = get_fs(); |
39 | - set_fs(KERNEL_DS); |
40 | |
41 | for (i = -4; i < 1 + !!thumb; i++) { |
42 | unsigned int val, bad; |
43 | |
44 | if (thumb) |
45 | - bad = __get_user(val, &((u16 *)addr)[i]); |
46 | + bad = get_user(val, &((u16 *)addr)[i]); |
47 | else |
48 | - bad = __get_user(val, &((u32 *)addr)[i]); |
49 | + bad = get_user(val, &((u32 *)addr)[i]); |
50 | |
51 | if (!bad) |
52 | p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ", |
53 | @@ -188,8 +184,20 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) |
54 | } |
55 | } |
56 | printk("%sCode: %s\n", lvl, str); |
57 | +} |
58 | |
59 | - set_fs(fs); |
60 | +static void dump_instr(const char *lvl, struct pt_regs *regs) |
61 | +{ |
62 | + mm_segment_t fs; |
63 | + |
64 | + if (!user_mode(regs)) { |
65 | + fs = get_fs(); |
66 | + set_fs(KERNEL_DS); |
67 | + __dump_instr(lvl, regs); |
68 | + set_fs(fs); |
69 | + } else { |
70 | + __dump_instr(lvl, regs); |
71 | + } |
72 | } |
73 | |
74 | #ifdef CONFIG_ARM_UNWIND |
75 | diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c |
76 | index df7acea3747a..4674f1efbe7a 100644 |
77 | --- a/arch/mips/ar7/platform.c |
78 | +++ b/arch/mips/ar7/platform.c |
79 | @@ -575,6 +575,7 @@ static int __init ar7_register_uarts(void) |
80 | uart_port.type = PORT_AR7; |
81 | uart_port.uartclk = clk_get_rate(bus_clk) / 2; |
82 | uart_port.iotype = UPIO_MEM32; |
83 | + uart_port.flags = UPF_FIXED_TYPE; |
84 | uart_port.regshift = 2; |
85 | |
86 | uart_port.line = 0; |
87 | @@ -653,6 +654,10 @@ static int __init ar7_register_devices(void) |
88 | u32 val; |
89 | int res; |
90 | |
91 | + res = ar7_gpio_init(); |
92 | + if (res) |
93 | + pr_warn("unable to register gpios: %d\n", res); |
94 | + |
95 | res = ar7_register_uarts(); |
96 | if (res) |
97 | pr_err("unable to setup uart(s): %d\n", res); |
98 | diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c |
99 | index 4fd83336131a..dd53987a690f 100644 |
100 | --- a/arch/mips/ar7/prom.c |
101 | +++ b/arch/mips/ar7/prom.c |
102 | @@ -246,8 +246,6 @@ void __init prom_init(void) |
103 | ar7_init_cmdline(fw_arg0, (char **)fw_arg1); |
104 | ar7_init_env((struct env_var *)fw_arg2); |
105 | console_config(); |
106 | - |
107 | - ar7_gpio_init(); |
108 | } |
109 | |
110 | #define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4))) |
111 | diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h |
112 | index cfdbab015769..163317fd3d7e 100644 |
113 | --- a/arch/mips/include/asm/mips-cm.h |
114 | +++ b/arch/mips/include/asm/mips-cm.h |
115 | @@ -240,8 +240,8 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80) |
116 | #define CM_GCR_BASE_GCRBASE_MSK (_ULCAST_(0x1ffff) << 15) |
117 | #define CM_GCR_BASE_CMDEFTGT_SHF 0 |
118 | #define CM_GCR_BASE_CMDEFTGT_MSK (_ULCAST_(0x3) << 0) |
119 | -#define CM_GCR_BASE_CMDEFTGT_DISABLED 0 |
120 | -#define CM_GCR_BASE_CMDEFTGT_MEM 1 |
121 | +#define CM_GCR_BASE_CMDEFTGT_MEM 0 |
122 | +#define CM_GCR_BASE_CMDEFTGT_RESERVED 1 |
123 | #define CM_GCR_BASE_CMDEFTGT_IOCU0 2 |
124 | #define CM_GCR_BASE_CMDEFTGT_IOCU1 3 |
125 | |
126 | diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c |
127 | index 1b070a76fcdd..5e0d87f4958f 100644 |
128 | --- a/arch/mips/kernel/smp-bmips.c |
129 | +++ b/arch/mips/kernel/smp-bmips.c |
130 | @@ -589,11 +589,11 @@ void __init bmips_cpu_setup(void) |
131 | |
132 | /* Flush and enable RAC */ |
133 | cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG); |
134 | - __raw_writel(cfg | 0x100, BMIPS_RAC_CONFIG); |
135 | + __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG); |
136 | __raw_readl(cbr + BMIPS_RAC_CONFIG); |
137 | |
138 | cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG); |
139 | - __raw_writel(cfg | 0xf, BMIPS_RAC_CONFIG); |
140 | + __raw_writel(cfg | 0xf, cbr + BMIPS_RAC_CONFIG); |
141 | __raw_readl(cbr + BMIPS_RAC_CONFIG); |
142 | |
143 | cfg = __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE); |
144 | diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c |
145 | index b42812e014c0..1fcc30ff9569 100644 |
146 | --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c |
147 | +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c |
148 | @@ -645,6 +645,16 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, |
149 | hnow_v = hpte_new_to_old_v(hnow_v, hnow_r); |
150 | hnow_r = hpte_new_to_old_r(hnow_r); |
151 | } |
152 | + |
153 | + /* |
154 | + * If the HPT is being resized, don't update the HPTE, |
155 | + * instead let the guest retry after the resize operation is complete. |
156 | + * The synchronization for hpte_setup_done test vs. set is provided |
157 | + * by the HPTE lock. |
158 | + */ |
159 | + if (!kvm->arch.hpte_setup_done) |
160 | + goto out_unlock; |
161 | + |
162 | if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] || |
163 | rev->guest_rpte != hpte[2]) |
164 | /* HPTE has been changed under us; let the guest retry */ |
165 | diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c |
166 | index 9ecd9aea0b54..c059541743f0 100644 |
167 | --- a/arch/powerpc/kvm/book3s_hv.c |
168 | +++ b/arch/powerpc/kvm/book3s_hv.c |
169 | @@ -2688,11 +2688,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) |
170 | * Hard-disable interrupts, and check resched flag and signals. |
171 | * If we need to reschedule or deliver a signal, clean up |
172 | * and return without going into the guest(s). |
173 | + * If the hpte_setup_done flag has been cleared, don't go into the |
174 | + * guest because that means a HPT resize operation is in progress. |
175 | */ |
176 | local_irq_disable(); |
177 | hard_irq_disable(); |
178 | if (lazy_irq_pending() || need_resched() || |
179 | - recheck_signals(&core_info)) { |
180 | + recheck_signals(&core_info) || |
181 | + (!kvm_is_radix(vc->kvm) && !vc->kvm->arch.hpte_setup_done)) { |
182 | local_irq_enable(); |
183 | vc->vcore_state = VCORE_INACTIVE; |
184 | /* Unlock all except the primary vcore */ |
185 | @@ -3061,7 +3064,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) |
186 | |
187 | static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
188 | { |
189 | - int n_ceded, i; |
190 | + int n_ceded, i, r; |
191 | struct kvmppc_vcore *vc; |
192 | struct kvm_vcpu *v; |
193 | |
194 | @@ -3115,6 +3118,20 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
195 | |
196 | while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && |
197 | !signal_pending(current)) { |
198 | + /* See if the HPT and VRMA are ready to go */ |
199 | + if (!kvm_is_radix(vcpu->kvm) && |
200 | + !vcpu->kvm->arch.hpte_setup_done) { |
201 | + spin_unlock(&vc->lock); |
202 | + r = kvmppc_hv_setup_htab_rma(vcpu); |
203 | + spin_lock(&vc->lock); |
204 | + if (r) { |
205 | + kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; |
206 | + kvm_run->fail_entry.hardware_entry_failure_reason = 0; |
207 | + vcpu->arch.ret = r; |
208 | + break; |
209 | + } |
210 | + } |
211 | + |
212 | if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) |
213 | kvmppc_vcore_end_preempt(vc); |
214 | |
215 | @@ -3232,13 +3249,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) |
216 | /* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */ |
217 | smp_mb(); |
218 | |
219 | - /* On the first time here, set up HTAB and VRMA */ |
220 | - if (!kvm_is_radix(vcpu->kvm) && !vcpu->kvm->arch.hpte_setup_done) { |
221 | - r = kvmppc_hv_setup_htab_rma(vcpu); |
222 | - if (r) |
223 | - goto out; |
224 | - } |
225 | - |
226 | flush_all_to_thread(current); |
227 | |
228 | /* Save userspace EBB and other register values */ |
229 | @@ -3286,7 +3296,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) |
230 | } |
231 | mtspr(SPRN_VRSAVE, user_vrsave); |
232 | |
233 | - out: |
234 | vcpu->arch.state = KVMPPC_VCPU_NOTREADY; |
235 | atomic_dec(&vcpu->kvm->arch.vcpus_running); |
236 | return r; |
237 | diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S |
238 | index 93b945597ecf..7cfba738f104 100644 |
239 | --- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S |
240 | +++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S |
241 | @@ -157,8 +157,8 @@ LABEL skip_ %I |
242 | .endr |
243 | |
244 | # Find min length |
245 | - vmovdqa _lens+0*16(state), %xmm0 |
246 | - vmovdqa _lens+1*16(state), %xmm1 |
247 | + vmovdqu _lens+0*16(state), %xmm0 |
248 | + vmovdqu _lens+1*16(state), %xmm1 |
249 | |
250 | vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} |
251 | vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} |
252 | @@ -178,8 +178,8 @@ LABEL skip_ %I |
253 | vpsubd %xmm2, %xmm0, %xmm0 |
254 | vpsubd %xmm2, %xmm1, %xmm1 |
255 | |
256 | - vmovdqa %xmm0, _lens+0*16(state) |
257 | - vmovdqa %xmm1, _lens+1*16(state) |
258 | + vmovdqu %xmm0, _lens+0*16(state) |
259 | + vmovdqu %xmm1, _lens+1*16(state) |
260 | |
261 | # "state" and "args" are the same address, arg1 |
262 | # len is arg2 |
263 | @@ -235,8 +235,8 @@ ENTRY(sha1_mb_mgr_get_comp_job_avx2) |
264 | jc .return_null |
265 | |
266 | # Find min length |
267 | - vmovdqa _lens(state), %xmm0 |
268 | - vmovdqa _lens+1*16(state), %xmm1 |
269 | + vmovdqu _lens(state), %xmm0 |
270 | + vmovdqu _lens+1*16(state), %xmm1 |
271 | |
272 | vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} |
273 | vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} |
274 | diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S |
275 | index 8fe6338bcc84..16c4ccb1f154 100644 |
276 | --- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S |
277 | +++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S |
278 | @@ -155,8 +155,8 @@ LABEL skip_ %I |
279 | .endr |
280 | |
281 | # Find min length |
282 | - vmovdqa _lens+0*16(state), %xmm0 |
283 | - vmovdqa _lens+1*16(state), %xmm1 |
284 | + vmovdqu _lens+0*16(state), %xmm0 |
285 | + vmovdqu _lens+1*16(state), %xmm1 |
286 | |
287 | vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} |
288 | vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} |
289 | @@ -176,8 +176,8 @@ LABEL skip_ %I |
290 | vpsubd %xmm2, %xmm0, %xmm0 |
291 | vpsubd %xmm2, %xmm1, %xmm1 |
292 | |
293 | - vmovdqa %xmm0, _lens+0*16(state) |
294 | - vmovdqa %xmm1, _lens+1*16(state) |
295 | + vmovdqu %xmm0, _lens+0*16(state) |
296 | + vmovdqu %xmm1, _lens+1*16(state) |
297 | |
298 | # "state" and "args" are the same address, arg1 |
299 | # len is arg2 |
300 | @@ -234,8 +234,8 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2) |
301 | jc .return_null |
302 | |
303 | # Find min length |
304 | - vmovdqa _lens(state), %xmm0 |
305 | - vmovdqa _lens+1*16(state), %xmm1 |
306 | + vmovdqu _lens(state), %xmm0 |
307 | + vmovdqu _lens+1*16(state), %xmm1 |
308 | |
309 | vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} |
310 | vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} |
311 | diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile |
312 | index 836877e2da22..cdf82492b770 100644 |
313 | --- a/arch/x86/kernel/cpu/Makefile |
314 | +++ b/arch/x86/kernel/cpu/Makefile |
315 | @@ -21,7 +21,7 @@ obj-y += common.o |
316 | obj-y += rdrand.o |
317 | obj-y += match.o |
318 | obj-y += bugs.o |
319 | -obj-y += aperfmperf.o |
320 | +obj-$(CONFIG_CPU_FREQ) += aperfmperf.o |
321 | |
322 | obj-$(CONFIG_PROC_FS) += proc.o |
323 | obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o |
324 | diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c |
325 | index 957813e0180d..0ee83321a313 100644 |
326 | --- a/arch/x86/kernel/cpu/aperfmperf.c |
327 | +++ b/arch/x86/kernel/cpu/aperfmperf.c |
328 | @@ -42,6 +42,10 @@ static void aperfmperf_snapshot_khz(void *dummy) |
329 | s64 time_delta = ktime_ms_delta(now, s->time); |
330 | unsigned long flags; |
331 | |
332 | + /* Don't bother re-computing within the cache threshold time. */ |
333 | + if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS) |
334 | + return; |
335 | + |
336 | local_irq_save(flags); |
337 | rdmsrl(MSR_IA32_APERF, aperf); |
338 | rdmsrl(MSR_IA32_MPERF, mperf); |
339 | @@ -70,7 +74,6 @@ static void aperfmperf_snapshot_khz(void *dummy) |
340 | |
341 | unsigned int arch_freq_get_on_cpu(int cpu) |
342 | { |
343 | - s64 time_delta; |
344 | unsigned int khz; |
345 | |
346 | if (!cpu_khz) |
347 | @@ -79,12 +82,6 @@ unsigned int arch_freq_get_on_cpu(int cpu) |
348 | if (!static_cpu_has(X86_FEATURE_APERFMPERF)) |
349 | return 0; |
350 | |
351 | - /* Don't bother re-computing within the cache threshold time. */ |
352 | - time_delta = ktime_ms_delta(ktime_get(), per_cpu(samples.time, cpu)); |
353 | - khz = per_cpu(samples.khz, cpu); |
354 | - if (khz && time_delta < APERFMPERF_CACHE_THRESHOLD_MS) |
355 | - return khz; |
356 | - |
357 | smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1); |
358 | khz = per_cpu(samples.khz, cpu); |
359 | if (khz) |
360 | diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c |
361 | index 510e69596278..6df621ae62a7 100644 |
362 | --- a/arch/x86/kernel/cpu/proc.c |
363 | +++ b/arch/x86/kernel/cpu/proc.c |
364 | @@ -77,10 +77,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) |
365 | seq_printf(m, "microcode\t: 0x%x\n", c->microcode); |
366 | |
367 | if (cpu_has(c, X86_FEATURE_TSC)) { |
368 | - unsigned int freq = arch_freq_get_on_cpu(cpu); |
369 | + unsigned int freq = cpufreq_quick_get(cpu); |
370 | |
371 | - if (!freq) |
372 | - freq = cpufreq_quick_get(cpu); |
373 | if (!freq) |
374 | freq = cpu_khz; |
375 | seq_printf(m, "cpu MHz\t\t: %u.%03u\n", |
376 | diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c |
377 | index 54b9e89d4d6b..893fd8c849e2 100644 |
378 | --- a/arch/x86/kernel/smpboot.c |
379 | +++ b/arch/x86/kernel/smpboot.c |
380 | @@ -193,6 +193,12 @@ static void smp_callin(void) |
381 | */ |
382 | smp_store_cpu_info(cpuid); |
383 | |
384 | + /* |
385 | + * The topology information must be up to date before |
386 | + * calibrate_delay() and notify_cpu_starting(). |
387 | + */ |
388 | + set_cpu_sibling_map(raw_smp_processor_id()); |
389 | + |
390 | /* |
391 | * Get our bogomips. |
392 | * Update loops_per_jiffy in cpu_data. Previous call to |
393 | @@ -203,11 +209,6 @@ static void smp_callin(void) |
394 | cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy; |
395 | pr_debug("Stack at about %p\n", &cpuid); |
396 | |
397 | - /* |
398 | - * This must be done before setting cpu_online_mask |
399 | - * or calling notify_cpu_starting. |
400 | - */ |
401 | - set_cpu_sibling_map(raw_smp_processor_id()); |
402 | wmb(); |
403 | |
404 | notify_cpu_starting(cpuid); |
405 | diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c |
406 | index bf54309b85da..b2157d4a5338 100644 |
407 | --- a/arch/x86/kernel/traps.c |
408 | +++ b/arch/x86/kernel/traps.c |
409 | @@ -221,9 +221,6 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, |
410 | if (fixup_exception(regs, trapnr)) |
411 | return 0; |
412 | |
413 | - if (fixup_bug(regs, trapnr)) |
414 | - return 0; |
415 | - |
416 | tsk->thread.error_code = error_code; |
417 | tsk->thread.trap_nr = trapnr; |
418 | die(str, regs, error_code); |
419 | @@ -304,6 +301,13 @@ static void do_error_trap(struct pt_regs *regs, long error_code, char *str, |
420 | |
421 | RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); |
422 | |
423 | + /* |
424 | + * WARN*()s end up here; fix them up before we call the |
425 | + * notifier chain. |
426 | + */ |
427 | + if (!user_mode(regs) && fixup_bug(regs, trapnr)) |
428 | + return; |
429 | + |
430 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) != |
431 | NOTIFY_STOP) { |
432 | cond_local_irq_enable(regs); |
433 | diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c |
434 | index 796d96bb0821..ad2b925a808e 100644 |
435 | --- a/arch/x86/kernel/tsc.c |
436 | +++ b/arch/x86/kernel/tsc.c |
437 | @@ -1346,12 +1346,10 @@ void __init tsc_init(void) |
438 | unsigned long calibrate_delay_is_known(void) |
439 | { |
440 | int sibling, cpu = smp_processor_id(); |
441 | - struct cpumask *mask = topology_core_cpumask(cpu); |
442 | + int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC); |
443 | + const struct cpumask *mask = topology_core_cpumask(cpu); |
444 | |
445 | - if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC)) |
446 | - return 0; |
447 | - |
448 | - if (!mask) |
449 | + if (tsc_disabled || !constant_tsc || !mask) |
450 | return 0; |
451 | |
452 | sibling = cpumask_any_but(mask, cpu); |
453 | diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c |
454 | index 350f7096baac..7913b6921959 100644 |
455 | --- a/arch/x86/oprofile/op_model_ppro.c |
456 | +++ b/arch/x86/oprofile/op_model_ppro.c |
457 | @@ -212,8 +212,8 @@ static void arch_perfmon_setup_counters(void) |
458 | eax.full = cpuid_eax(0xa); |
459 | |
460 | /* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */ |
461 | - if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 && |
462 | - __this_cpu_read(cpu_info.x86_model) == 15) { |
463 | + if (eax.split.version_id == 0 && boot_cpu_data.x86 == 6 && |
464 | + boot_cpu_data.x86_model == 15) { |
465 | eax.split.version_id = 2; |
466 | eax.split.num_counters = 2; |
467 | eax.split.bit_width = 40; |
468 | diff --git a/crypto/ccm.c b/crypto/ccm.c |
469 | index 1ce37ae0ce56..0a083342ec8c 100644 |
470 | --- a/crypto/ccm.c |
471 | +++ b/crypto/ccm.c |
472 | @@ -363,7 +363,7 @@ static int crypto_ccm_decrypt(struct aead_request *req) |
473 | unsigned int cryptlen = req->cryptlen; |
474 | u8 *authtag = pctx->auth_tag; |
475 | u8 *odata = pctx->odata; |
476 | - u8 *iv = req->iv; |
477 | + u8 *iv = pctx->idata; |
478 | int err; |
479 | |
480 | cryptlen -= authsize; |
481 | @@ -379,6 +379,8 @@ static int crypto_ccm_decrypt(struct aead_request *req) |
482 | if (req->src != req->dst) |
483 | dst = pctx->dst; |
484 | |
485 | + memcpy(iv, req->iv, 16); |
486 | + |
487 | skcipher_request_set_tfm(skreq, ctx->ctr); |
488 | skcipher_request_set_callback(skreq, pctx->flags, |
489 | crypto_ccm_decrypt_done, req); |
490 | diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c |
491 | index 9c941947a063..3a3cb8624f41 100644 |
492 | --- a/drivers/acpi/acpica/evgpeblk.c |
493 | +++ b/drivers/acpi/acpica/evgpeblk.c |
494 | @@ -440,9 +440,11 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
495 | void *ignored) |
496 | { |
497 | acpi_status status; |
498 | + acpi_event_status event_status; |
499 | struct acpi_gpe_event_info *gpe_event_info; |
500 | u32 gpe_enabled_count; |
501 | u32 gpe_index; |
502 | + u32 gpe_number; |
503 | u32 i; |
504 | u32 j; |
505 | |
506 | @@ -470,30 +472,40 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
507 | |
508 | gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j; |
509 | gpe_event_info = &gpe_block->event_info[gpe_index]; |
510 | + gpe_number = gpe_block->block_base_number + gpe_index; |
511 | |
512 | /* |
513 | * Ignore GPEs that have no corresponding _Lxx/_Exx method |
514 | - * and GPEs that are used to wake the system |
515 | + * and GPEs that are used for wakeup |
516 | */ |
517 | - if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) == |
518 | - ACPI_GPE_DISPATCH_NONE) |
519 | - || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) == |
520 | - ACPI_GPE_DISPATCH_HANDLER) |
521 | - || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) == |
522 | - ACPI_GPE_DISPATCH_RAW_HANDLER) |
523 | + if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) != |
524 | + ACPI_GPE_DISPATCH_METHOD) |
525 | || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { |
526 | continue; |
527 | } |
528 | |
529 | + event_status = 0; |
530 | + (void)acpi_hw_get_gpe_status(gpe_event_info, |
531 | + &event_status); |
532 | + |
533 | status = acpi_ev_add_gpe_reference(gpe_event_info); |
534 | if (ACPI_FAILURE(status)) { |
535 | ACPI_EXCEPTION((AE_INFO, status, |
536 | "Could not enable GPE 0x%02X", |
537 | - gpe_index + |
538 | - gpe_block->block_base_number)); |
539 | + gpe_number)); |
540 | continue; |
541 | } |
542 | |
543 | + gpe_event_info->flags |= ACPI_GPE_AUTO_ENABLED; |
544 | + |
545 | + if (event_status & ACPI_EVENT_FLAG_STATUS_SET) { |
546 | + ACPI_INFO(("GPE 0x%02X active on init", |
547 | + gpe_number)); |
548 | + (void)acpi_ev_gpe_dispatch(gpe_block->node, |
549 | + gpe_event_info, |
550 | + gpe_number); |
551 | + } |
552 | + |
553 | gpe_enabled_count++; |
554 | } |
555 | } |
556 | diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c |
557 | index 57718a3e029a..67c7c4ce276c 100644 |
558 | --- a/drivers/acpi/acpica/evxfgpe.c |
559 | +++ b/drivers/acpi/acpica/evxfgpe.c |
560 | @@ -435,6 +435,14 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device, |
561 | */ |
562 | gpe_event_info->flags = |
563 | (ACPI_GPE_DISPATCH_NOTIFY | ACPI_GPE_LEVEL_TRIGGERED); |
564 | + } else if (gpe_event_info->flags & ACPI_GPE_AUTO_ENABLED) { |
565 | + /* |
566 | + * A reference to this GPE has been added during the GPE block |
567 | + * initialization, so drop it now to prevent the GPE from being |
568 | + * permanently enabled and clear its ACPI_GPE_AUTO_ENABLED flag. |
569 | + */ |
570 | + (void)acpi_ev_remove_gpe_reference(gpe_event_info); |
571 | + gpe_event_info->flags &= ~ACPI_GPE_AUTO_ENABLED; |
572 | } |
573 | |
574 | /* |
575 | diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c |
576 | index 70fd5502c284..b7bdf9d0f5c0 100644 |
577 | --- a/drivers/acpi/scan.c |
578 | +++ b/drivers/acpi/scan.c |
579 | @@ -2058,6 +2058,9 @@ int __init acpi_scan_init(void) |
580 | acpi_get_spcr_uart_addr(); |
581 | } |
582 | |
583 | + acpi_gpe_apply_masked_gpes(); |
584 | + acpi_update_all_gpes(); |
585 | + |
586 | mutex_lock(&acpi_scan_lock); |
587 | /* |
588 | * Enumerate devices in the ACPI namespace. |
589 | @@ -2082,9 +2085,6 @@ int __init acpi_scan_init(void) |
590 | } |
591 | } |
592 | |
593 | - acpi_gpe_apply_masked_gpes(); |
594 | - acpi_update_all_gpes(); |
595 | - |
596 | acpi_scan_initialized = true; |
597 | |
598 | out: |
599 | diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c |
600 | index fa8243c5c062..59b78e42a58b 100644 |
601 | --- a/drivers/acpi/sleep.c |
602 | +++ b/drivers/acpi/sleep.c |
603 | @@ -160,6 +160,14 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d) |
604 | return 0; |
605 | } |
606 | |
607 | +static bool acpi_sleep_no_lps0; |
608 | + |
609 | +static int __init init_no_lps0(const struct dmi_system_id *d) |
610 | +{ |
611 | + acpi_sleep_no_lps0 = true; |
612 | + return 0; |
613 | +} |
614 | + |
615 | static struct dmi_system_id acpisleep_dmi_table[] __initdata = { |
616 | { |
617 | .callback = init_old_suspend_ordering, |
618 | @@ -343,6 +351,19 @@ static struct dmi_system_id acpisleep_dmi_table[] __initdata = { |
619 | DMI_MATCH(DMI_PRODUCT_NAME, "80E3"), |
620 | }, |
621 | }, |
622 | + /* |
623 | + * https://bugzilla.kernel.org/show_bug.cgi?id=196907 |
624 | + * Some Dell XPS13 9360 cannot do suspend-to-idle using the Low Power |
625 | + * S0 Idle firmware interface. |
626 | + */ |
627 | + { |
628 | + .callback = init_no_lps0, |
629 | + .ident = "Dell XPS13 9360", |
630 | + .matches = { |
631 | + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
632 | + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"), |
633 | + }, |
634 | + }, |
635 | {}, |
636 | }; |
637 | |
638 | @@ -485,6 +506,7 @@ static void acpi_pm_end(void) |
639 | } |
640 | #else /* !CONFIG_ACPI_SLEEP */ |
641 | #define acpi_target_sleep_state ACPI_STATE_S0 |
642 | +#define acpi_sleep_no_lps0 (false) |
643 | static inline void acpi_sleep_dmi_check(void) {} |
644 | #endif /* CONFIG_ACPI_SLEEP */ |
645 | |
646 | @@ -702,6 +724,12 @@ static int lps0_device_attach(struct acpi_device *adev, |
647 | if (lps0_device_handle) |
648 | return 0; |
649 | |
650 | + if (acpi_sleep_no_lps0) { |
651 | + acpi_handle_info(adev->handle, |
652 | + "Low Power S0 Idle interface disabled\n"); |
653 | + return 0; |
654 | + } |
655 | + |
656 | if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) |
657 | return 0; |
658 | |
659 | diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c |
660 | index b008b6a98098..cf54a1cf8c55 100644 |
661 | --- a/drivers/block/rbd.c |
662 | +++ b/drivers/block/rbd.c |
663 | @@ -2692,7 +2692,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) |
664 | * from the parent. |
665 | */ |
666 | page_count = (u32)calc_pages_for(0, length); |
667 | - pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); |
668 | + pages = ceph_alloc_page_vector(page_count, GFP_NOIO); |
669 | if (IS_ERR(pages)) { |
670 | result = PTR_ERR(pages); |
671 | pages = NULL; |
672 | @@ -2827,7 +2827,7 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request) |
673 | */ |
674 | size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32); |
675 | page_count = (u32)calc_pages_for(0, size); |
676 | - pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); |
677 | + pages = ceph_alloc_page_vector(page_count, GFP_NOIO); |
678 | if (IS_ERR(pages)) { |
679 | ret = PTR_ERR(pages); |
680 | goto fail_stat_request; |
681 | diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c |
682 | index 4436d53ae16c..f160a66b7098 100644 |
683 | --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c |
684 | +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c |
685 | @@ -722,7 +722,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) |
686 | * allocation taken by fbdev |
687 | */ |
688 | if (!(dev_priv->capabilities & SVGA_CAP_3D)) |
689 | - mem_size *= 2; |
690 | + mem_size *= 3; |
691 | |
692 | dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; |
693 | dev_priv->prim_bb_mem = |
694 | diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c |
695 | index a6b762271a40..47a9696e7874 100644 |
696 | --- a/drivers/input/mouse/elan_i2c_core.c |
697 | +++ b/drivers/input/mouse/elan_i2c_core.c |
698 | @@ -1253,6 +1253,7 @@ static const struct acpi_device_id elan_acpi_id[] = { |
699 | { "ELAN0605", 0 }, |
700 | { "ELAN0609", 0 }, |
701 | { "ELAN060B", 0 }, |
702 | + { "ELAN060C", 0 }, |
703 | { "ELAN0611", 0 }, |
704 | { "ELAN1000", 0 }, |
705 | { } |
706 | diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c |
707 | index cf7c18947189..d065c0e2d18e 100644 |
708 | --- a/drivers/net/can/c_can/c_can_pci.c |
709 | +++ b/drivers/net/can/c_can/c_can_pci.c |
710 | @@ -178,7 +178,6 @@ static int c_can_pci_probe(struct pci_dev *pdev, |
711 | break; |
712 | case BOSCH_D_CAN: |
713 | priv->regs = reg_map_d_can; |
714 | - priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; |
715 | break; |
716 | default: |
717 | ret = -EINVAL; |
718 | diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c |
719 | index e36d10520e24..717530eac70c 100644 |
720 | --- a/drivers/net/can/c_can/c_can_platform.c |
721 | +++ b/drivers/net/can/c_can/c_can_platform.c |
722 | @@ -320,7 +320,6 @@ static int c_can_plat_probe(struct platform_device *pdev) |
723 | break; |
724 | case BOSCH_D_CAN: |
725 | priv->regs = reg_map_d_can; |
726 | - priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; |
727 | priv->read_reg = c_can_plat_read_reg_aligned_to_16bit; |
728 | priv->write_reg = c_can_plat_write_reg_aligned_to_16bit; |
729 | priv->read_reg32 = d_can_plat_read_reg32; |
730 | diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c |
731 | index 4d1fe8d95042..2772d05ff11c 100644 |
732 | --- a/drivers/net/can/ifi_canfd/ifi_canfd.c |
733 | +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c |
734 | @@ -670,9 +670,9 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev) |
735 | priv->base + IFI_CANFD_FTIME); |
736 | |
737 | /* Configure transmitter delay */ |
738 | - tdc = (dbt->brp * (dbt->phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK; |
739 | - writel(IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc, |
740 | - priv->base + IFI_CANFD_TDELAY); |
741 | + tdc = dbt->brp * (dbt->prop_seg + dbt->phase_seg1); |
742 | + tdc &= IFI_CANFD_TDELAY_MASK; |
743 | + writel(IFI_CANFD_TDELAY_EN | tdc, priv->base + IFI_CANFD_TDELAY); |
744 | } |
745 | |
746 | static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id, |
747 | diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c |
748 | index 51c2d182a33a..b4efd711f824 100644 |
749 | --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c |
750 | +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c |
751 | @@ -29,14 +29,19 @@ |
752 | #include "peak_canfd_user.h" |
753 | |
754 | MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); |
755 | -MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe FD family cards"); |
756 | -MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe FD CAN cards"); |
757 | +MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards"); |
758 | +MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe/M.2 FD CAN cards"); |
759 | MODULE_LICENSE("GPL v2"); |
760 | |
761 | #define PCIEFD_DRV_NAME "peak_pciefd" |
762 | |
763 | #define PEAK_PCI_VENDOR_ID 0x001c /* The PCI device and vendor IDs */ |
764 | #define PEAK_PCIEFD_ID 0x0013 /* for PCIe slot cards */ |
765 | +#define PCAN_CPCIEFD_ID 0x0014 /* for Compact-PCI Serial slot cards */ |
766 | +#define PCAN_PCIE104FD_ID 0x0017 /* for PCIe-104 Express slot cards */ |
767 | +#define PCAN_MINIPCIEFD_ID 0x0018 /* for mini-PCIe slot cards */ |
768 | +#define PCAN_PCIEFD_OEM_ID 0x0019 /* for PCIe slot OEM cards */ |
769 | +#define PCAN_M2_ID 0x001a /* for M2 slot cards */ |
770 | |
771 | /* PEAK PCIe board access description */ |
772 | #define PCIEFD_BAR0_SIZE (64 * 1024) |
773 | @@ -203,6 +208,11 @@ struct pciefd_board { |
774 | /* supported device ids. */ |
775 | static const struct pci_device_id peak_pciefd_tbl[] = { |
776 | {PEAK_PCI_VENDOR_ID, PEAK_PCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,}, |
777 | + {PEAK_PCI_VENDOR_ID, PCAN_CPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,}, |
778 | + {PEAK_PCI_VENDOR_ID, PCAN_PCIE104FD_ID, PCI_ANY_ID, PCI_ANY_ID,}, |
779 | + {PEAK_PCI_VENDOR_ID, PCAN_MINIPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,}, |
780 | + {PEAK_PCI_VENDOR_ID, PCAN_PCIEFD_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,}, |
781 | + {PEAK_PCI_VENDOR_ID, PCAN_M2_ID, PCI_ANY_ID, PCI_ANY_ID,}, |
782 | {0,} |
783 | }; |
784 | |
785 | diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c |
786 | index b0c80859f746..1ac2090a1721 100644 |
787 | --- a/drivers/net/can/sun4i_can.c |
788 | +++ b/drivers/net/can/sun4i_can.c |
789 | @@ -539,6 +539,13 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) |
790 | } |
791 | stats->rx_over_errors++; |
792 | stats->rx_errors++; |
793 | + |
794 | + /* reset the CAN IP by entering reset mode |
795 | + * ignoring timeout error |
796 | + */ |
797 | + set_reset_mode(dev); |
798 | + set_normal_mode(dev); |
799 | + |
800 | /* clear bit */ |
801 | sun4i_can_write_cmdreg(priv, SUN4I_CMD_CLEAR_OR_FLAG); |
802 | } |
803 | @@ -653,8 +660,9 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id) |
804 | netif_wake_queue(dev); |
805 | can_led_event(dev, CAN_LED_EVENT_TX); |
806 | } |
807 | - if (isrc & SUN4I_INT_RBUF_VLD) { |
808 | - /* receive interrupt */ |
809 | + if ((isrc & SUN4I_INT_RBUF_VLD) && |
810 | + !(isrc & SUN4I_INT_DATA_OR)) { |
811 | + /* receive interrupt - don't read if overrun occurred */ |
812 | while (status & SUN4I_STA_RBUF_RDY) { |
813 | /* RX buffer is not empty */ |
814 | sun4i_can_rx(dev); |
815 | diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h |
816 | index 2fcbaec8b368..71eddf645566 100644 |
817 | --- a/include/acpi/actypes.h |
818 | +++ b/include/acpi/actypes.h |
819 | @@ -775,7 +775,7 @@ typedef u32 acpi_event_status; |
820 | * | | | | +-- Type of dispatch:to method, handler, notify, or none |
821 | * | | | +----- Interrupt type: edge or level triggered |
822 | * | | +------- Is a Wake GPE |
823 | - * | +--------- Is GPE masked by the software GPE masking mechanism |
824 | + * | +--------- Has been enabled automatically at init time |
825 | * +------------ <Reserved> |
826 | */ |
827 | #define ACPI_GPE_DISPATCH_NONE (u8) 0x00 |
828 | @@ -791,6 +791,7 @@ typedef u32 acpi_event_status; |
829 | #define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x08 |
830 | |
831 | #define ACPI_GPE_CAN_WAKE (u8) 0x10 |
832 | +#define ACPI_GPE_AUTO_ENABLED (u8) 0x20 |
833 | |
834 | /* |
835 | * Flags for GPE and Lock interfaces |
836 | diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h |
837 | index 48407569585d..a7c2cee39570 100644 |
838 | --- a/include/net/netfilter/nf_conntrack.h |
839 | +++ b/include/net/netfilter/nf_conntrack.h |
840 | @@ -17,7 +17,6 @@ |
841 | #include <linux/bitops.h> |
842 | #include <linux/compiler.h> |
843 | #include <linux/atomic.h> |
844 | -#include <linux/rhashtable.h> |
845 | |
846 | #include <linux/netfilter/nf_conntrack_tcp.h> |
847 | #include <linux/netfilter/nf_conntrack_dccp.h> |
848 | @@ -83,7 +82,7 @@ struct nf_conn { |
849 | possible_net_t ct_net; |
850 | |
851 | #if IS_ENABLED(CONFIG_NF_NAT) |
852 | - struct rhlist_head nat_bysource; |
853 | + struct hlist_node nat_bysource; |
854 | #endif |
855 | /* all members below initialized via memset */ |
856 | u8 __nfct_init_offset[0]; |
857 | diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h |
858 | index 05c82a1a4267..b71701302e61 100644 |
859 | --- a/include/net/netfilter/nf_nat.h |
860 | +++ b/include/net/netfilter/nf_nat.h |
861 | @@ -1,6 +1,5 @@ |
862 | #ifndef _NF_NAT_H |
863 | #define _NF_NAT_H |
864 | -#include <linux/rhashtable.h> |
865 | #include <linux/netfilter_ipv4.h> |
866 | #include <linux/netfilter/nf_nat.h> |
867 | #include <net/netfilter/nf_conntrack_tuple.h> |
868 | diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h |
869 | index feb58d455560..4b9ee3009aa0 100644 |
870 | --- a/include/sound/seq_kernel.h |
871 | +++ b/include/sound/seq_kernel.h |
872 | @@ -49,7 +49,8 @@ typedef union snd_seq_timestamp snd_seq_timestamp_t; |
873 | #define SNDRV_SEQ_DEFAULT_CLIENT_EVENTS 200 |
874 | |
875 | /* max delivery path length */ |
876 | -#define SNDRV_SEQ_MAX_HOPS 10 |
877 | +/* NOTE: this shouldn't be greater than MAX_LOCKDEP_SUBCLASSES */ |
878 | +#define SNDRV_SEQ_MAX_HOPS 8 |
879 | |
880 | /* max size of event size */ |
881 | #define SNDRV_SEQ_MAX_EVENT_LEN 0x3fffffff |
882 | diff --git a/include/sound/timer.h b/include/sound/timer.h |
883 | index c4d76ff056c6..7ae226ab6990 100644 |
884 | --- a/include/sound/timer.h |
885 | +++ b/include/sound/timer.h |
886 | @@ -90,6 +90,8 @@ struct snd_timer { |
887 | struct list_head ack_list_head; |
888 | struct list_head sack_list_head; /* slow ack list head */ |
889 | struct tasklet_struct task_queue; |
890 | + int max_instances; /* upper limit of timer instances */ |
891 | + int num_instances; /* current number of timer instances */ |
892 | }; |
893 | |
894 | struct snd_timer_instance { |
895 | diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h |
896 | index 8635417c587b..29fa81f0f51a 100644 |
897 | --- a/kernel/workqueue_internal.h |
898 | +++ b/kernel/workqueue_internal.h |
899 | @@ -9,6 +9,7 @@ |
900 | |
901 | #include <linux/workqueue.h> |
902 | #include <linux/kthread.h> |
903 | +#include <linux/preempt.h> |
904 | |
905 | struct worker_pool; |
906 | |
907 | @@ -59,7 +60,7 @@ struct worker { |
908 | */ |
909 | static inline struct worker *current_wq_worker(void) |
910 | { |
911 | - if (current->flags & PF_WQ_WORKER) |
912 | + if (in_task() && (current->flags & PF_WQ_WORKER)) |
913 | return kthread_data(current); |
914 | return NULL; |
915 | } |
916 | diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c |
917 | index fef5d2e114be..1ef0cec38d78 100644 |
918 | --- a/lib/asn1_decoder.c |
919 | +++ b/lib/asn1_decoder.c |
920 | @@ -228,7 +228,7 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder, |
921 | hdr = 2; |
922 | |
923 | /* Extract a tag from the data */ |
924 | - if (unlikely(dp >= datalen - 1)) |
925 | + if (unlikely(datalen - dp < 2)) |
926 | goto data_overrun_error; |
927 | tag = data[dp++]; |
928 | if (unlikely((tag & 0x1f) == ASN1_LONG_TAG)) |
929 | @@ -274,7 +274,7 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder, |
930 | int n = len - 0x80; |
931 | if (unlikely(n > 2)) |
932 | goto length_too_long; |
933 | - if (unlikely(dp >= datalen - n)) |
934 | + if (unlikely(n > datalen - dp)) |
935 | goto data_overrun_error; |
936 | hdr += n; |
937 | for (len = 0; n > 0; n--) { |
938 | diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c |
939 | index b1d3740ae36a..2fb80a4bfb34 100644 |
940 | --- a/net/netfilter/nf_nat_core.c |
941 | +++ b/net/netfilter/nf_nat_core.c |
942 | @@ -30,19 +30,17 @@ |
943 | #include <net/netfilter/nf_conntrack_zones.h> |
944 | #include <linux/netfilter/nf_nat.h> |
945 | |
946 | +static DEFINE_SPINLOCK(nf_nat_lock); |
947 | + |
948 | static DEFINE_MUTEX(nf_nat_proto_mutex); |
949 | static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO] |
950 | __read_mostly; |
951 | static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO] |
952 | __read_mostly; |
953 | |
954 | -struct nf_nat_conn_key { |
955 | - const struct net *net; |
956 | - const struct nf_conntrack_tuple *tuple; |
957 | - const struct nf_conntrack_zone *zone; |
958 | -}; |
959 | - |
960 | -static struct rhltable nf_nat_bysource_table; |
961 | +static struct hlist_head *nf_nat_bysource __read_mostly; |
962 | +static unsigned int nf_nat_htable_size __read_mostly; |
963 | +static unsigned int nf_nat_hash_rnd __read_mostly; |
964 | |
965 | inline const struct nf_nat_l3proto * |
966 | __nf_nat_l3proto_find(u8 family) |
967 | @@ -118,17 +116,19 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family) |
968 | EXPORT_SYMBOL(nf_xfrm_me_harder); |
969 | #endif /* CONFIG_XFRM */ |
970 | |
971 | -static u32 nf_nat_bysource_hash(const void *data, u32 len, u32 seed) |
972 | +/* We keep an extra hash for each conntrack, for fast searching. */ |
973 | +static unsigned int |
974 | +hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple) |
975 | { |
976 | - const struct nf_conntrack_tuple *t; |
977 | - const struct nf_conn *ct = data; |
978 | + unsigned int hash; |
979 | + |
980 | + get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd)); |
981 | |
982 | - t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; |
983 | /* Original src, to ensure we map it consistently if poss. */ |
984 | + hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32), |
985 | + tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n)); |
986 | |
987 | - seed ^= net_hash_mix(nf_ct_net(ct)); |
988 | - return jhash2((const u32 *)&t->src, sizeof(t->src) / sizeof(u32), |
989 | - t->dst.protonum ^ seed); |
990 | + return reciprocal_scale(hash, nf_nat_htable_size); |
991 | } |
992 | |
993 | /* Is this tuple already taken? (not by us) */ |
994 | @@ -184,28 +184,6 @@ same_src(const struct nf_conn *ct, |
995 | t->src.u.all == tuple->src.u.all); |
996 | } |
997 | |
998 | -static int nf_nat_bysource_cmp(struct rhashtable_compare_arg *arg, |
999 | - const void *obj) |
1000 | -{ |
1001 | - const struct nf_nat_conn_key *key = arg->key; |
1002 | - const struct nf_conn *ct = obj; |
1003 | - |
1004 | - if (!same_src(ct, key->tuple) || |
1005 | - !net_eq(nf_ct_net(ct), key->net) || |
1006 | - !nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL)) |
1007 | - return 1; |
1008 | - |
1009 | - return 0; |
1010 | -} |
1011 | - |
1012 | -static struct rhashtable_params nf_nat_bysource_params = { |
1013 | - .head_offset = offsetof(struct nf_conn, nat_bysource), |
1014 | - .obj_hashfn = nf_nat_bysource_hash, |
1015 | - .obj_cmpfn = nf_nat_bysource_cmp, |
1016 | - .nelem_hint = 256, |
1017 | - .min_size = 1024, |
1018 | -}; |
1019 | - |
1020 | /* Only called for SRC manip */ |
1021 | static int |
1022 | find_appropriate_src(struct net *net, |
1023 | @@ -216,26 +194,22 @@ find_appropriate_src(struct net *net, |
1024 | struct nf_conntrack_tuple *result, |
1025 | const struct nf_nat_range *range) |
1026 | { |
1027 | + unsigned int h = hash_by_src(net, tuple); |
1028 | const struct nf_conn *ct; |
1029 | - struct nf_nat_conn_key key = { |
1030 | - .net = net, |
1031 | - .tuple = tuple, |
1032 | - .zone = zone |
1033 | - }; |
1034 | - struct rhlist_head *hl, *h; |
1035 | - |
1036 | - hl = rhltable_lookup(&nf_nat_bysource_table, &key, |
1037 | - nf_nat_bysource_params); |
1038 | |
1039 | - rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) { |
1040 | - nf_ct_invert_tuplepr(result, |
1041 | - &ct->tuplehash[IP_CT_DIR_REPLY].tuple); |
1042 | - result->dst = tuple->dst; |
1043 | - |
1044 | - if (in_range(l3proto, l4proto, result, range)) |
1045 | - return 1; |
1046 | + hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) { |
1047 | + if (same_src(ct, tuple) && |
1048 | + net_eq(net, nf_ct_net(ct)) && |
1049 | + nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) { |
1050 | + /* Copy source part from reply tuple. */ |
1051 | + nf_ct_invert_tuplepr(result, |
1052 | + &ct->tuplehash[IP_CT_DIR_REPLY].tuple); |
1053 | + result->dst = tuple->dst; |
1054 | + |
1055 | + if (in_range(l3proto, l4proto, result, range)) |
1056 | + return 1; |
1057 | + } |
1058 | } |
1059 | - |
1060 | return 0; |
1061 | } |
1062 | |
1063 | @@ -408,6 +382,7 @@ nf_nat_setup_info(struct nf_conn *ct, |
1064 | const struct nf_nat_range *range, |
1065 | enum nf_nat_manip_type maniptype) |
1066 | { |
1067 | + struct net *net = nf_ct_net(ct); |
1068 | struct nf_conntrack_tuple curr_tuple, new_tuple; |
1069 | |
1070 | /* Can't setup nat info for confirmed ct. */ |
1071 | @@ -447,19 +422,14 @@ nf_nat_setup_info(struct nf_conn *ct, |
1072 | } |
1073 | |
1074 | if (maniptype == NF_NAT_MANIP_SRC) { |
1075 | - struct nf_nat_conn_key key = { |
1076 | - .net = nf_ct_net(ct), |
1077 | - .tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, |
1078 | - .zone = nf_ct_zone(ct), |
1079 | - }; |
1080 | - int err; |
1081 | - |
1082 | - err = rhltable_insert_key(&nf_nat_bysource_table, |
1083 | - &key, |
1084 | - &ct->nat_bysource, |
1085 | - nf_nat_bysource_params); |
1086 | - if (err) |
1087 | - return NF_DROP; |
1088 | + unsigned int srchash; |
1089 | + |
1090 | + srchash = hash_by_src(net, |
1091 | + &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); |
1092 | + spin_lock_bh(&nf_nat_lock); |
1093 | + hlist_add_head_rcu(&ct->nat_bysource, |
1094 | + &nf_nat_bysource[srchash]); |
1095 | + spin_unlock_bh(&nf_nat_lock); |
1096 | } |
1097 | |
1098 | /* It's done. */ |
1099 | @@ -568,8 +538,9 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data) |
1100 | * will delete entry from already-freed table. |
1101 | */ |
1102 | clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status); |
1103 | - rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource, |
1104 | - nf_nat_bysource_params); |
1105 | + spin_lock_bh(&nf_nat_lock); |
1106 | + hlist_del_rcu(&ct->nat_bysource); |
1107 | + spin_unlock_bh(&nf_nat_lock); |
1108 | |
1109 | /* don't delete conntrack. Although that would make things a lot |
1110 | * simpler, we'd end up flushing all conntracks on nat rmmod. |
1111 | @@ -697,9 +668,11 @@ EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister); |
1112 | /* No one using conntrack by the time this called. */ |
1113 | static void nf_nat_cleanup_conntrack(struct nf_conn *ct) |
1114 | { |
1115 | - if (ct->status & IPS_SRC_NAT_DONE) |
1116 | - rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource, |
1117 | - nf_nat_bysource_params); |
1118 | + if (ct->status & IPS_SRC_NAT_DONE) { |
1119 | + spin_lock_bh(&nf_nat_lock); |
1120 | + hlist_del_rcu(&ct->nat_bysource); |
1121 | + spin_unlock_bh(&nf_nat_lock); |
1122 | + } |
1123 | } |
1124 | |
1125 | static struct nf_ct_ext_type nat_extend __read_mostly = { |
1126 | @@ -823,13 +796,16 @@ static int __init nf_nat_init(void) |
1127 | { |
1128 | int ret; |
1129 | |
1130 | - ret = rhltable_init(&nf_nat_bysource_table, &nf_nat_bysource_params); |
1131 | - if (ret) |
1132 | - return ret; |
1133 | + /* Leave them the same for the moment. */ |
1134 | + nf_nat_htable_size = nf_conntrack_htable_size; |
1135 | + |
1136 | + nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0); |
1137 | + if (!nf_nat_bysource) |
1138 | + return -ENOMEM; |
1139 | |
1140 | ret = nf_ct_extend_register(&nat_extend); |
1141 | if (ret < 0) { |
1142 | - rhltable_destroy(&nf_nat_bysource_table); |
1143 | + nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size); |
1144 | printk(KERN_ERR "nf_nat_core: Unable to register extension\n"); |
1145 | return ret; |
1146 | } |
1147 | @@ -863,8 +839,8 @@ static void __exit nf_nat_cleanup(void) |
1148 | |
1149 | for (i = 0; i < NFPROTO_NUMPROTO; i++) |
1150 | kfree(nf_nat_l4protos[i]); |
1151 | - |
1152 | - rhltable_destroy(&nf_nat_bysource_table); |
1153 | + synchronize_net(); |
1154 | + nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size); |
1155 | } |
1156 | |
1157 | MODULE_LICENSE("GPL"); |
1158 | diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c |
1159 | index 0fa01d772c5e..9c0d5a7ce5f9 100644 |
1160 | --- a/net/netfilter/nft_set_hash.c |
1161 | +++ b/net/netfilter/nft_set_hash.c |
1162 | @@ -643,7 +643,6 @@ nft_hash_select_ops(const struct nft_ctx *ctx, const struct nft_set_desc *desc, |
1163 | { |
1164 | if (desc->size) { |
1165 | switch (desc->klen) { |
1166 | - case 2: |
1167 | case 4: |
1168 | return &nft_hash_fast_ops; |
1169 | default: |
1170 | diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c |
1171 | index 1ac0c423903e..6e47b823bcaa 100644 |
1172 | --- a/sound/core/hrtimer.c |
1173 | +++ b/sound/core/hrtimer.c |
1174 | @@ -159,6 +159,7 @@ static int __init snd_hrtimer_init(void) |
1175 | timer->hw = hrtimer_hw; |
1176 | timer->hw.resolution = resolution; |
1177 | timer->hw.ticks = NANO_SEC / resolution; |
1178 | + timer->max_instances = 100; /* lower the limit */ |
1179 | |
1180 | err = snd_timer_global_register(timer); |
1181 | if (err < 0) { |
1182 | diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c |
1183 | index aaff9ee32695..b30b2139e3f0 100644 |
1184 | --- a/sound/core/seq/oss/seq_oss_midi.c |
1185 | +++ b/sound/core/seq/oss/seq_oss_midi.c |
1186 | @@ -612,9 +612,7 @@ send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq |
1187 | if (!dp->timer->running) |
1188 | len = snd_seq_oss_timer_start(dp->timer); |
1189 | if (ev->type == SNDRV_SEQ_EVENT_SYSEX) { |
1190 | - if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) == SNDRV_SEQ_EVENT_LENGTH_VARIABLE) |
1191 | - snd_seq_oss_readq_puts(dp->readq, mdev->seq_device, |
1192 | - ev->data.ext.ptr, ev->data.ext.len); |
1193 | + snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev); |
1194 | } else { |
1195 | len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev); |
1196 | if (len > 0) |
1197 | diff --git a/sound/core/seq/oss/seq_oss_readq.c b/sound/core/seq/oss/seq_oss_readq.c |
1198 | index 046cb586fb2f..06b21226b4e7 100644 |
1199 | --- a/sound/core/seq/oss/seq_oss_readq.c |
1200 | +++ b/sound/core/seq/oss/seq_oss_readq.c |
1201 | @@ -117,6 +117,35 @@ snd_seq_oss_readq_puts(struct seq_oss_readq *q, int dev, unsigned char *data, in |
1202 | return 0; |
1203 | } |
1204 | |
1205 | +/* |
1206 | + * put MIDI sysex bytes; the event buffer may be chained, thus it has |
1207 | + * to be expanded via snd_seq_dump_var_event(). |
1208 | + */ |
1209 | +struct readq_sysex_ctx { |
1210 | + struct seq_oss_readq *readq; |
1211 | + int dev; |
1212 | +}; |
1213 | + |
1214 | +static int readq_dump_sysex(void *ptr, void *buf, int count) |
1215 | +{ |
1216 | + struct readq_sysex_ctx *ctx = ptr; |
1217 | + |
1218 | + return snd_seq_oss_readq_puts(ctx->readq, ctx->dev, buf, count); |
1219 | +} |
1220 | + |
1221 | +int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev, |
1222 | + struct snd_seq_event *ev) |
1223 | +{ |
1224 | + struct readq_sysex_ctx ctx = { |
1225 | + .readq = q, |
1226 | + .dev = dev |
1227 | + }; |
1228 | + |
1229 | + if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE) |
1230 | + return 0; |
1231 | + return snd_seq_dump_var_event(ev, readq_dump_sysex, &ctx); |
1232 | +} |
1233 | + |
1234 | /* |
1235 | * copy an event to input queue: |
1236 | * return zero if enqueued |
1237 | diff --git a/sound/core/seq/oss/seq_oss_readq.h b/sound/core/seq/oss/seq_oss_readq.h |
1238 | index f1463f1f449e..8d033ca2d23f 100644 |
1239 | --- a/sound/core/seq/oss/seq_oss_readq.h |
1240 | +++ b/sound/core/seq/oss/seq_oss_readq.h |
1241 | @@ -44,6 +44,8 @@ void snd_seq_oss_readq_delete(struct seq_oss_readq *q); |
1242 | void snd_seq_oss_readq_clear(struct seq_oss_readq *readq); |
1243 | unsigned int snd_seq_oss_readq_poll(struct seq_oss_readq *readq, struct file *file, poll_table *wait); |
1244 | int snd_seq_oss_readq_puts(struct seq_oss_readq *readq, int dev, unsigned char *data, int len); |
1245 | +int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev, |
1246 | + struct snd_seq_event *ev); |
1247 | int snd_seq_oss_readq_put_event(struct seq_oss_readq *readq, union evrec *ev); |
1248 | int snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *readq, unsigned long curt, int seq_mode); |
1249 | int snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec); |
1250 | diff --git a/sound/core/timer.c b/sound/core/timer.c |
1251 | index a9b9a277e00c..c8e4d0da13b4 100644 |
1252 | --- a/sound/core/timer.c |
1253 | +++ b/sound/core/timer.c |
1254 | @@ -180,7 +180,7 @@ static void snd_timer_request(struct snd_timer_id *tid) |
1255 | * |
1256 | * call this with register_mutex down. |
1257 | */ |
1258 | -static void snd_timer_check_slave(struct snd_timer_instance *slave) |
1259 | +static int snd_timer_check_slave(struct snd_timer_instance *slave) |
1260 | { |
1261 | struct snd_timer *timer; |
1262 | struct snd_timer_instance *master; |
1263 | @@ -190,16 +190,21 @@ static void snd_timer_check_slave(struct snd_timer_instance *slave) |
1264 | list_for_each_entry(master, &timer->open_list_head, open_list) { |
1265 | if (slave->slave_class == master->slave_class && |
1266 | slave->slave_id == master->slave_id) { |
1267 | + if (master->timer->num_instances >= |
1268 | + master->timer->max_instances) |
1269 | + return -EBUSY; |
1270 | list_move_tail(&slave->open_list, |
1271 | &master->slave_list_head); |
1272 | + master->timer->num_instances++; |
1273 | spin_lock_irq(&slave_active_lock); |
1274 | slave->master = master; |
1275 | slave->timer = master->timer; |
1276 | spin_unlock_irq(&slave_active_lock); |
1277 | - return; |
1278 | + return 0; |
1279 | } |
1280 | } |
1281 | } |
1282 | + return 0; |
1283 | } |
1284 | |
1285 | /* |
1286 | @@ -208,7 +213,7 @@ static void snd_timer_check_slave(struct snd_timer_instance *slave) |
1287 | * |
1288 | * call this with register_mutex down. |
1289 | */ |
1290 | -static void snd_timer_check_master(struct snd_timer_instance *master) |
1291 | +static int snd_timer_check_master(struct snd_timer_instance *master) |
1292 | { |
1293 | struct snd_timer_instance *slave, *tmp; |
1294 | |
1295 | @@ -216,7 +221,11 @@ static void snd_timer_check_master(struct snd_timer_instance *master) |
1296 | list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) { |
1297 | if (slave->slave_class == master->slave_class && |
1298 | slave->slave_id == master->slave_id) { |
1299 | + if (master->timer->num_instances >= |
1300 | + master->timer->max_instances) |
1301 | + return -EBUSY; |
1302 | list_move_tail(&slave->open_list, &master->slave_list_head); |
1303 | + master->timer->num_instances++; |
1304 | spin_lock_irq(&slave_active_lock); |
1305 | spin_lock(&master->timer->lock); |
1306 | slave->master = master; |
1307 | @@ -228,8 +237,11 @@ static void snd_timer_check_master(struct snd_timer_instance *master) |
1308 | spin_unlock_irq(&slave_active_lock); |
1309 | } |
1310 | } |
1311 | + return 0; |
1312 | } |
1313 | |
1314 | +static int snd_timer_close_locked(struct snd_timer_instance *timeri); |
1315 | + |
1316 | /* |
1317 | * open a timer instance |
1318 | * when opening a master, the slave id must be here given. |
1319 | @@ -240,6 +252,7 @@ int snd_timer_open(struct snd_timer_instance **ti, |
1320 | { |
1321 | struct snd_timer *timer; |
1322 | struct snd_timer_instance *timeri = NULL; |
1323 | + int err; |
1324 | |
1325 | if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) { |
1326 | /* open a slave instance */ |
1327 | @@ -259,10 +272,14 @@ int snd_timer_open(struct snd_timer_instance **ti, |
1328 | timeri->slave_id = tid->device; |
1329 | timeri->flags |= SNDRV_TIMER_IFLG_SLAVE; |
1330 | list_add_tail(&timeri->open_list, &snd_timer_slave_list); |
1331 | - snd_timer_check_slave(timeri); |
1332 | + err = snd_timer_check_slave(timeri); |
1333 | + if (err < 0) { |
1334 | + snd_timer_close_locked(timeri); |
1335 | + timeri = NULL; |
1336 | + } |
1337 | mutex_unlock(®ister_mutex); |
1338 | *ti = timeri; |
1339 | - return 0; |
1340 | + return err; |
1341 | } |
1342 | |
1343 | /* open a master instance */ |
1344 | @@ -288,6 +305,10 @@ int snd_timer_open(struct snd_timer_instance **ti, |
1345 | return -EBUSY; |
1346 | } |
1347 | } |
1348 | + if (timer->num_instances >= timer->max_instances) { |
1349 | + mutex_unlock(®ister_mutex); |
1350 | + return -EBUSY; |
1351 | + } |
1352 | timeri = snd_timer_instance_new(owner, timer); |
1353 | if (!timeri) { |
1354 | mutex_unlock(®ister_mutex); |
1355 | @@ -314,25 +335,27 @@ int snd_timer_open(struct snd_timer_instance **ti, |
1356 | } |
1357 | |
1358 | list_add_tail(&timeri->open_list, &timer->open_list_head); |
1359 | - snd_timer_check_master(timeri); |
1360 | + timer->num_instances++; |
1361 | + err = snd_timer_check_master(timeri); |
1362 | + if (err < 0) { |
1363 | + snd_timer_close_locked(timeri); |
1364 | + timeri = NULL; |
1365 | + } |
1366 | mutex_unlock(®ister_mutex); |
1367 | *ti = timeri; |
1368 | - return 0; |
1369 | + return err; |
1370 | } |
1371 | EXPORT_SYMBOL(snd_timer_open); |
1372 | |
1373 | /* |
1374 | * close a timer instance |
1375 | + * call this with register_mutex down. |
1376 | */ |
1377 | -int snd_timer_close(struct snd_timer_instance *timeri) |
1378 | +static int snd_timer_close_locked(struct snd_timer_instance *timeri) |
1379 | { |
1380 | struct snd_timer *timer = NULL; |
1381 | struct snd_timer_instance *slave, *tmp; |
1382 | |
1383 | - if (snd_BUG_ON(!timeri)) |
1384 | - return -ENXIO; |
1385 | - |
1386 | - mutex_lock(®ister_mutex); |
1387 | list_del(&timeri->open_list); |
1388 | |
1389 | /* force to stop the timer */ |
1390 | @@ -340,6 +363,7 @@ int snd_timer_close(struct snd_timer_instance *timeri) |
1391 | |
1392 | timer = timeri->timer; |
1393 | if (timer) { |
1394 | + timer->num_instances--; |
1395 | /* wait, until the active callback is finished */ |
1396 | spin_lock_irq(&timer->lock); |
1397 | while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) { |
1398 | @@ -355,6 +379,7 @@ int snd_timer_close(struct snd_timer_instance *timeri) |
1399 | list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head, |
1400 | open_list) { |
1401 | list_move_tail(&slave->open_list, &snd_timer_slave_list); |
1402 | + timer->num_instances--; |
1403 | slave->master = NULL; |
1404 | slave->timer = NULL; |
1405 | list_del_init(&slave->ack_list); |
1406 | @@ -382,9 +407,24 @@ int snd_timer_close(struct snd_timer_instance *timeri) |
1407 | module_put(timer->module); |
1408 | } |
1409 | |
1410 | - mutex_unlock(®ister_mutex); |
1411 | return 0; |
1412 | } |
1413 | + |
1414 | +/* |
1415 | + * close a timer instance |
1416 | + */ |
1417 | +int snd_timer_close(struct snd_timer_instance *timeri) |
1418 | +{ |
1419 | + int err; |
1420 | + |
1421 | + if (snd_BUG_ON(!timeri)) |
1422 | + return -ENXIO; |
1423 | + |
1424 | + mutex_lock(®ister_mutex); |
1425 | + err = snd_timer_close_locked(timeri); |
1426 | + mutex_unlock(®ister_mutex); |
1427 | + return err; |
1428 | +} |
1429 | EXPORT_SYMBOL(snd_timer_close); |
1430 | |
1431 | unsigned long snd_timer_resolution(struct snd_timer_instance *timeri) |
1432 | @@ -855,6 +895,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid, |
1433 | spin_lock_init(&timer->lock); |
1434 | tasklet_init(&timer->task_queue, snd_timer_tasklet, |
1435 | (unsigned long)timer); |
1436 | + timer->max_instances = 1000; /* default limit per timer */ |
1437 | if (card != NULL) { |
1438 | timer->module = card->module; |
1439 | err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops); |
1440 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
1441 | index fe4d06398fc3..c5f0e8d42d22 100644 |
1442 | --- a/sound/pci/hda/patch_realtek.c |
1443 | +++ b/sound/pci/hda/patch_realtek.c |
1444 | @@ -6527,6 +6527,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { |
1445 | {0x14, 0x90170110}, |
1446 | {0x1b, 0x90a70130}, |
1447 | {0x21, 0x03211020}), |
1448 | + SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, |
1449 | + {0x12, 0xb7a60130}, |
1450 | + {0x13, 0xb8a61140}, |
1451 | + {0x16, 0x90170110}, |
1452 | + {0x21, 0x04211020}), |
1453 | SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, |
1454 | {0x12, 0x90a60130}, |
1455 | {0x14, 0x90170110}, |
1456 | diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c |
1457 | index 0e54fe490458..f910c4fd932b 100644 |
1458 | --- a/sound/usb/quirks.c |
1459 | +++ b/sound/usb/quirks.c |
1460 | @@ -1373,6 +1373,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, |
1461 | case 0x199: |
1462 | return SNDRV_PCM_FMTBIT_DSD_U32_LE; |
1463 | case 0x19b: |
1464 | + case 0x203: |
1465 | return SNDRV_PCM_FMTBIT_DSD_U32_BE; |
1466 | default: |
1467 | break; |