Contents of /trunk/kernel-magellan/patches-4.11/0104-4.11.5-all-fixes.patch
Revision 2944
Mon Jun 19 08:32:05 2017 UTC by niro
File size: 173614 byte(s)
-linux-4.11.5
1 | diff --git a/Makefile b/Makefile |
2 | index 741814dca844..5b3a81d3262e 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 4 |
7 | PATCHLEVEL = 11 |
8 | -SUBLEVEL = 4 |
9 | +SUBLEVEL = 5 |
10 | EXTRAVERSION = |
11 | NAME = Fearless Coyote |
12 | |
13 | diff --git a/arch/arm/boot/dts/keystone-k2l-netcp.dtsi b/arch/arm/boot/dts/keystone-k2l-netcp.dtsi |
14 | index b6f26824e83a..66f615a74118 100644 |
15 | --- a/arch/arm/boot/dts/keystone-k2l-netcp.dtsi |
16 | +++ b/arch/arm/boot/dts/keystone-k2l-netcp.dtsi |
17 | @@ -137,8 +137,8 @@ netcp: netcp@26000000 { |
18 | /* NetCP address range */ |
19 | ranges = <0 0x26000000 0x1000000>; |
20 | |
21 | - clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>, <&clkosr>; |
22 | - clock-names = "pa_clk", "ethss_clk", "cpts", "osr_clk"; |
23 | + clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>; |
24 | + clock-names = "pa_clk", "ethss_clk", "cpts"; |
25 | dma-coherent; |
26 | |
27 | ti,navigator-dmas = <&dma_gbe 0>, |
28 | diff --git a/arch/arm/boot/dts/keystone-k2l.dtsi b/arch/arm/boot/dts/keystone-k2l.dtsi |
29 | index b58e7ebc0919..148650406cf7 100644 |
30 | --- a/arch/arm/boot/dts/keystone-k2l.dtsi |
31 | +++ b/arch/arm/boot/dts/keystone-k2l.dtsi |
32 | @@ -232,6 +232,14 @@ |
33 | }; |
34 | }; |
35 | |
36 | + osr: sram@70000000 { |
37 | + compatible = "mmio-sram"; |
38 | + reg = <0x70000000 0x10000>; |
39 | + #address-cells = <1>; |
40 | + #size-cells = <1>; |
41 | + clocks = <&clkosr>; |
42 | + }; |
43 | + |
44 | dspgpio0: keystone_dsp_gpio@02620240 { |
45 | compatible = "ti,keystone-dsp-gpio"; |
46 | gpio-controller; |
47 | diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S |
48 | index bf89c919efc1..bd0ee7fc304c 100644 |
49 | --- a/arch/arm/kvm/init.S |
50 | +++ b/arch/arm/kvm/init.S |
51 | @@ -95,7 +95,6 @@ __do_hyp_init: |
52 | @ - Write permission implies XN: disabled |
53 | @ - Instruction cache: enabled |
54 | @ - Data/Unified cache: enabled |
55 | - @ - Memory alignment checks: enabled |
56 | @ - MMU: enabled (this code must be run from an identity mapping) |
57 | mrc p15, 4, r0, c1, c0, 0 @ HSCR |
58 | ldr r2, =HSCTLR_MASK |
59 | @@ -103,8 +102,8 @@ __do_hyp_init: |
60 | mrc p15, 0, r1, c1, c0, 0 @ SCTLR |
61 | ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C) |
62 | and r1, r1, r2 |
63 | - ARM( ldr r2, =(HSCTLR_M | HSCTLR_A) ) |
64 | - THUMB( ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) ) |
65 | + ARM( ldr r2, =(HSCTLR_M) ) |
66 | + THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) ) |
67 | orr r1, r1, r2 |
68 | orr r0, r0, r1 |
69 | mcr p15, 4, r0, c1, c0, 0 @ HSCR |
70 | diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c |
71 | index 3837b096e1a6..b97bc12812ab 100644 |
72 | --- a/arch/arm/kvm/mmu.c |
73 | +++ b/arch/arm/kvm/mmu.c |
74 | @@ -879,6 +879,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache |
75 | pmd_t *pmd; |
76 | |
77 | pud = stage2_get_pud(kvm, cache, addr); |
78 | + if (!pud) |
79 | + return NULL; |
80 | + |
81 | if (stage2_pud_none(*pud)) { |
82 | if (!cache) |
83 | return NULL; |
84 | diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h |
85 | index ac24b6e798b1..2d3e155b185f 100644 |
86 | --- a/arch/arm64/include/asm/sysreg.h |
87 | +++ b/arch/arm64/include/asm/sysreg.h |
88 | @@ -138,6 +138,10 @@ |
89 | #define SCTLR_ELx_A (1 << 1) |
90 | #define SCTLR_ELx_M 1 |
91 | |
92 | +#define SCTLR_EL2_RES1 ((1 << 4) | (1 << 5) | (1 << 11) | (1 << 16) | \ |
93 | + (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \ |
94 | + (1 << 28) | (1 << 29)) |
95 | + |
96 | #define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \ |
97 | SCTLR_ELx_SA | SCTLR_ELx_I) |
98 | |
99 | diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S |
100 | index 6b29d3d9e1f2..4bbff904169d 100644 |
101 | --- a/arch/arm64/kvm/hyp-init.S |
102 | +++ b/arch/arm64/kvm/hyp-init.S |
103 | @@ -102,10 +102,13 @@ __do_hyp_init: |
104 | tlbi alle2 |
105 | dsb sy |
106 | |
107 | - mrs x4, sctlr_el2 |
108 | - and x4, x4, #SCTLR_ELx_EE // preserve endianness of EL2 |
109 | - ldr x5, =SCTLR_ELx_FLAGS |
110 | - orr x4, x4, x5 |
111 | + /* |
112 | + * Preserve all the RES1 bits while setting the default flags, |
113 | + * as well as the EE bit on BE. Drop the A flag since the compiler |
114 | + * is allowed to generate unaligned accesses. |
115 | + */ |
116 | + ldr x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A)) |
117 | +CPU_BE( orr x4, x4, #SCTLR_ELx_EE) |
118 | msr sctlr_el2, x4 |
119 | isb |
120 | |
121 | diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c |
122 | index b68e10fc453d..0f88015f3bfa 100644 |
123 | --- a/arch/mips/kernel/process.c |
124 | +++ b/arch/mips/kernel/process.c |
125 | @@ -120,7 +120,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, |
126 | struct thread_info *ti = task_thread_info(p); |
127 | struct pt_regs *childregs, *regs = current_pt_regs(); |
128 | unsigned long childksp; |
129 | - p->set_child_tid = p->clear_child_tid = NULL; |
130 | |
131 | childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32; |
132 | |
133 | diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c |
134 | index f8da545854f9..106859ae27ff 100644 |
135 | --- a/arch/openrisc/kernel/process.c |
136 | +++ b/arch/openrisc/kernel/process.c |
137 | @@ -167,8 +167,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp, |
138 | |
139 | top_of_kernel_stack = sp; |
140 | |
141 | - p->set_child_tid = p->clear_child_tid = NULL; |
142 | - |
143 | /* Locate userspace context on stack... */ |
144 | sp -= STACK_FRAME_OVERHEAD; /* redzone */ |
145 | sp -= sizeof(struct pt_regs); |
146 | diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h |
147 | index 8b3b46b7b0f2..329771559cbb 100644 |
148 | --- a/arch/powerpc/include/asm/topology.h |
149 | +++ b/arch/powerpc/include/asm/topology.h |
150 | @@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void); |
151 | extern int sysfs_add_device_to_node(struct device *dev, int nid); |
152 | extern void sysfs_remove_device_from_node(struct device *dev, int nid); |
153 | |
154 | +static inline int early_cpu_to_node(int cpu) |
155 | +{ |
156 | + int nid; |
157 | + |
158 | + nid = numa_cpu_lookup_table[cpu]; |
159 | + |
160 | + /* |
161 | + * Fall back to node 0 if nid is unset (it should be, except bugs). |
162 | + * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)). |
163 | + */ |
164 | + return (nid < 0) ? 0 : nid; |
165 | +} |
166 | #else |
167 | |
168 | +static inline int early_cpu_to_node(int cpu) { return 0; } |
169 | + |
170 | static inline void dump_numa_cpu_topology(void) {} |
171 | |
172 | static inline int sysfs_add_device_to_node(struct device *dev, int nid) |
173 | diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c |
174 | index baae104b16c7..2ad725ef4368 100644 |
175 | --- a/arch/powerpc/kernel/process.c |
176 | +++ b/arch/powerpc/kernel/process.c |
177 | @@ -1666,6 +1666,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) |
178 | #ifdef CONFIG_VSX |
179 | current->thread.used_vsr = 0; |
180 | #endif |
181 | + current->thread.load_fp = 0; |
182 | memset(¤t->thread.fp_state, 0, sizeof(current->thread.fp_state)); |
183 | current->thread.fp_save_area = NULL; |
184 | #ifdef CONFIG_ALTIVEC |
185 | @@ -1674,6 +1675,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) |
186 | current->thread.vr_save_area = NULL; |
187 | current->thread.vrsave = 0; |
188 | current->thread.used_vr = 0; |
189 | + current->thread.load_vec = 0; |
190 | #endif /* CONFIG_ALTIVEC */ |
191 | #ifdef CONFIG_SPE |
192 | memset(current->thread.evr, 0, sizeof(current->thread.evr)); |
193 | @@ -1685,6 +1687,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) |
194 | current->thread.tm_tfhar = 0; |
195 | current->thread.tm_texasr = 0; |
196 | current->thread.tm_tfiar = 0; |
197 | + current->thread.load_tm = 0; |
198 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ |
199 | } |
200 | EXPORT_SYMBOL(start_thread); |
201 | diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c |
202 | index f997154dfc41..7183c43d4e81 100644 |
203 | --- a/arch/powerpc/kernel/setup_64.c |
204 | +++ b/arch/powerpc/kernel/setup_64.c |
205 | @@ -650,7 +650,7 @@ void __init emergency_stack_init(void) |
206 | |
207 | static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) |
208 | { |
209 | - return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align, |
210 | + return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align, |
211 | __pa(MAX_DMA_ADDRESS)); |
212 | } |
213 | |
214 | @@ -661,7 +661,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size) |
215 | |
216 | static int pcpu_cpu_distance(unsigned int from, unsigned int to) |
217 | { |
218 | - if (cpu_to_node(from) == cpu_to_node(to)) |
219 | + if (early_cpu_to_node(from) == early_cpu_to_node(to)) |
220 | return LOCAL_DISTANCE; |
221 | else |
222 | return REMOTE_DISTANCE; |
223 | diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c |
224 | index e104c71ea44a..1fb162ba9d1c 100644 |
225 | --- a/arch/powerpc/platforms/pseries/hotplug-memory.c |
226 | +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c |
227 | @@ -124,6 +124,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn) |
228 | for (i = 0; i < num_lmbs; i++) { |
229 | lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr); |
230 | lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index); |
231 | + lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index); |
232 | lmbs[i].flags = be32_to_cpu(lmbs[i].flags); |
233 | } |
234 | |
235 | @@ -147,6 +148,7 @@ static void dlpar_update_drconf_property(struct device_node *dn, |
236 | for (i = 0; i < num_lmbs; i++) { |
237 | lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr); |
238 | lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index); |
239 | + lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index); |
240 | lmbs[i].flags = cpu_to_be32(lmbs[i].flags); |
241 | } |
242 | |
243 | diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c |
244 | index ef470b470b04..6afddae2fb47 100644 |
245 | --- a/arch/powerpc/sysdev/simple_gpio.c |
246 | +++ b/arch/powerpc/sysdev/simple_gpio.c |
247 | @@ -75,7 +75,8 @@ static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) |
248 | |
249 | static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc) |
250 | { |
251 | - struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc); |
252 | + struct u8_gpio_chip *u8_gc = |
253 | + container_of(mm_gc, struct u8_gpio_chip, mm_gc); |
254 | |
255 | u8_gc->data = in_8(mm_gc->regs); |
256 | } |
257 | diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig |
258 | index 3db2543733a5..1384d4c9764b 100644 |
259 | --- a/arch/sparc/Kconfig |
260 | +++ b/arch/sparc/Kconfig |
261 | @@ -192,9 +192,9 @@ config NR_CPUS |
262 | int "Maximum number of CPUs" |
263 | depends on SMP |
264 | range 2 32 if SPARC32 |
265 | - range 2 1024 if SPARC64 |
266 | + range 2 4096 if SPARC64 |
267 | default 32 if SPARC32 |
268 | - default 64 if SPARC64 |
269 | + default 4096 if SPARC64 |
270 | |
271 | source kernel/Kconfig.hz |
272 | |
273 | diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h |
274 | index f7de0dbc38af..83b36a5371ff 100644 |
275 | --- a/arch/sparc/include/asm/mmu_64.h |
276 | +++ b/arch/sparc/include/asm/mmu_64.h |
277 | @@ -52,7 +52,7 @@ |
278 | #define CTX_NR_MASK TAG_CONTEXT_BITS |
279 | #define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK) |
280 | |
281 | -#define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL)) |
282 | +#define CTX_FIRST_VERSION BIT(CTX_VERSION_SHIFT) |
283 | #define CTX_VALID(__ctx) \ |
284 | (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK)) |
285 | #define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK) |
286 | diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h |
287 | index 22fede6eba11..2cddcda4f85f 100644 |
288 | --- a/arch/sparc/include/asm/mmu_context_64.h |
289 | +++ b/arch/sparc/include/asm/mmu_context_64.h |
290 | @@ -19,13 +19,8 @@ extern spinlock_t ctx_alloc_lock; |
291 | extern unsigned long tlb_context_cache; |
292 | extern unsigned long mmu_context_bmap[]; |
293 | |
294 | +DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm); |
295 | void get_new_mmu_context(struct mm_struct *mm); |
296 | -#ifdef CONFIG_SMP |
297 | -void smp_new_mmu_context_version(void); |
298 | -#else |
299 | -#define smp_new_mmu_context_version() do { } while (0) |
300 | -#endif |
301 | - |
302 | int init_new_context(struct task_struct *tsk, struct mm_struct *mm); |
303 | void destroy_context(struct mm_struct *mm); |
304 | |
305 | @@ -76,8 +71,9 @@ void __flush_tlb_mm(unsigned long, unsigned long); |
306 | static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) |
307 | { |
308 | unsigned long ctx_valid, flags; |
309 | - int cpu; |
310 | + int cpu = smp_processor_id(); |
311 | |
312 | + per_cpu(per_cpu_secondary_mm, cpu) = mm; |
313 | if (unlikely(mm == &init_mm)) |
314 | return; |
315 | |
316 | @@ -123,7 +119,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str |
317 | * for the first time, we must flush that context out of the |
318 | * local TLB. |
319 | */ |
320 | - cpu = smp_processor_id(); |
321 | if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) { |
322 | cpumask_set_cpu(cpu, mm_cpumask(mm)); |
323 | __flush_tlb_mm(CTX_HWBITS(mm->context), |
324 | @@ -133,26 +128,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str |
325 | } |
326 | |
327 | #define deactivate_mm(tsk,mm) do { } while (0) |
328 | - |
329 | -/* Activate a new MM instance for the current task. */ |
330 | -static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) |
331 | -{ |
332 | - unsigned long flags; |
333 | - int cpu; |
334 | - |
335 | - spin_lock_irqsave(&mm->context.lock, flags); |
336 | - if (!CTX_VALID(mm->context)) |
337 | - get_new_mmu_context(mm); |
338 | - cpu = smp_processor_id(); |
339 | - if (!cpumask_test_cpu(cpu, mm_cpumask(mm))) |
340 | - cpumask_set_cpu(cpu, mm_cpumask(mm)); |
341 | - |
342 | - load_secondary_context(mm); |
343 | - __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); |
344 | - tsb_context_switch(mm); |
345 | - spin_unlock_irqrestore(&mm->context.lock, flags); |
346 | -} |
347 | - |
348 | +#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL) |
349 | #endif /* !(__ASSEMBLY__) */ |
350 | |
351 | #endif /* !(__SPARC64_MMU_CONTEXT_H) */ |
352 | diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h |
353 | index 266937030546..522b43db2ed3 100644 |
354 | --- a/arch/sparc/include/asm/pil.h |
355 | +++ b/arch/sparc/include/asm/pil.h |
356 | @@ -20,7 +20,6 @@ |
357 | #define PIL_SMP_CALL_FUNC 1 |
358 | #define PIL_SMP_RECEIVE_SIGNAL 2 |
359 | #define PIL_SMP_CAPTURE 3 |
360 | -#define PIL_SMP_CTX_NEW_VERSION 4 |
361 | #define PIL_DEVICE_IRQ 5 |
362 | #define PIL_SMP_CALL_FUNC_SNGL 6 |
363 | #define PIL_DEFERRED_PCR_WORK 7 |
364 | diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h |
365 | index 8174f6cdbbbb..9dca7a892978 100644 |
366 | --- a/arch/sparc/include/asm/vio.h |
367 | +++ b/arch/sparc/include/asm/vio.h |
368 | @@ -327,6 +327,7 @@ struct vio_dev { |
369 | int compat_len; |
370 | |
371 | u64 dev_no; |
372 | + u64 id; |
373 | |
374 | unsigned long channel_id; |
375 | |
376 | diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c |
377 | index 4d0248aa0928..99dd133a029f 100644 |
378 | --- a/arch/sparc/kernel/irq_64.c |
379 | +++ b/arch/sparc/kernel/irq_64.c |
380 | @@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb) |
381 | { |
382 | #ifdef CONFIG_SMP |
383 | unsigned long page; |
384 | + void *mondo, *p; |
385 | |
386 | - BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64)); |
387 | + BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE); |
388 | + |
389 | + /* Make sure mondo block is 64byte aligned */ |
390 | + p = kzalloc(127, GFP_KERNEL); |
391 | + if (!p) { |
392 | + prom_printf("SUN4V: Error, cannot allocate mondo block.\n"); |
393 | + prom_halt(); |
394 | + } |
395 | + mondo = (void *)(((unsigned long)p + 63) & ~0x3f); |
396 | + tb->cpu_mondo_block_pa = __pa(mondo); |
397 | |
398 | page = get_zeroed_page(GFP_KERNEL); |
399 | if (!page) { |
400 | - prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n"); |
401 | + prom_printf("SUN4V: Error, cannot allocate cpu list page.\n"); |
402 | prom_halt(); |
403 | } |
404 | |
405 | - tb->cpu_mondo_block_pa = __pa(page); |
406 | - tb->cpu_list_pa = __pa(page + 64); |
407 | + tb->cpu_list_pa = __pa(page); |
408 | #endif |
409 | } |
410 | |
411 | diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h |
412 | index c9804551262c..6ae1e77be0bf 100644 |
413 | --- a/arch/sparc/kernel/kernel.h |
414 | +++ b/arch/sparc/kernel/kernel.h |
415 | @@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr |
416 | /* smp_64.c */ |
417 | void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs); |
418 | void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs); |
419 | -void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs); |
420 | void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs); |
421 | void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs); |
422 | |
423 | diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c |
424 | index b3bc0ac757cc..fdf31040a7dc 100644 |
425 | --- a/arch/sparc/kernel/smp_64.c |
426 | +++ b/arch/sparc/kernel/smp_64.c |
427 | @@ -964,37 +964,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) |
428 | preempt_enable(); |
429 | } |
430 | |
431 | -void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) |
432 | -{ |
433 | - struct mm_struct *mm; |
434 | - unsigned long flags; |
435 | - |
436 | - clear_softint(1 << irq); |
437 | - |
438 | - /* See if we need to allocate a new TLB context because |
439 | - * the version of the one we are using is now out of date. |
440 | - */ |
441 | - mm = current->active_mm; |
442 | - if (unlikely(!mm || (mm == &init_mm))) |
443 | - return; |
444 | - |
445 | - spin_lock_irqsave(&mm->context.lock, flags); |
446 | - |
447 | - if (unlikely(!CTX_VALID(mm->context))) |
448 | - get_new_mmu_context(mm); |
449 | - |
450 | - spin_unlock_irqrestore(&mm->context.lock, flags); |
451 | - |
452 | - load_secondary_context(mm); |
453 | - __flush_tlb_mm(CTX_HWBITS(mm->context), |
454 | - SECONDARY_CONTEXT); |
455 | -} |
456 | - |
457 | -void smp_new_mmu_context_version(void) |
458 | -{ |
459 | - smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0); |
460 | -} |
461 | - |
462 | #ifdef CONFIG_KGDB |
463 | void kgdb_roundup_cpus(unsigned long flags) |
464 | { |
465 | diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S |
466 | index 10689cfd0ad4..07c0df924960 100644 |
467 | --- a/arch/sparc/kernel/tsb.S |
468 | +++ b/arch/sparc/kernel/tsb.S |
469 | @@ -455,13 +455,16 @@ __tsb_context_switch: |
470 | .type copy_tsb,#function |
471 | copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size |
472 | * %o2=new_tsb_base, %o3=new_tsb_size |
473 | + * %o4=page_size_shift |
474 | */ |
475 | sethi %uhi(TSB_PASS_BITS), %g7 |
476 | srlx %o3, 4, %o3 |
477 | - add %o0, %o1, %g1 /* end of old tsb */ |
478 | + add %o0, %o1, %o1 /* end of old tsb */ |
479 | sllx %g7, 32, %g7 |
480 | sub %o3, 1, %o3 /* %o3 == new tsb hash mask */ |
481 | |
482 | + mov %o4, %g1 /* page_size_shift */ |
483 | + |
484 | 661: prefetcha [%o0] ASI_N, #one_read |
485 | .section .tsb_phys_patch, "ax" |
486 | .word 661b |
487 | @@ -486,9 +489,9 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size |
488 | /* This can definitely be computed faster... */ |
489 | srlx %o0, 4, %o5 /* Build index */ |
490 | and %o5, 511, %o5 /* Mask index */ |
491 | - sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */ |
492 | + sllx %o5, %g1, %o5 /* Put into vaddr position */ |
493 | or %o4, %o5, %o4 /* Full VADDR. */ |
494 | - srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */ |
495 | + srlx %o4, %g1, %o4 /* Shift down to create index */ |
496 | and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */ |
497 | sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */ |
498 | TSB_STORE(%o2 + %o4, %g2) /* Store TAG */ |
499 | @@ -496,7 +499,7 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size |
500 | TSB_STORE(%o2 + %o4, %g3) /* Store TTE */ |
501 | |
502 | 80: add %o0, 16, %o0 |
503 | - cmp %o0, %g1 |
504 | + cmp %o0, %o1 |
505 | bne,pt %xcc, 90b |
506 | nop |
507 | |
508 | diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S |
509 | index 7bd8f6556352..efe93ab4a9c0 100644 |
510 | --- a/arch/sparc/kernel/ttable_64.S |
511 | +++ b/arch/sparc/kernel/ttable_64.S |
512 | @@ -50,7 +50,7 @@ tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40) |
513 | tl0_irq1: TRAP_IRQ(smp_call_function_client, 1) |
514 | tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2) |
515 | tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3) |
516 | -tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4) |
517 | +tl0_irq4: BTRAP(0x44) |
518 | #else |
519 | tl0_irq1: BTRAP(0x41) |
520 | tl0_irq2: BTRAP(0x42) |
521 | diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c |
522 | index f6bb857254fc..075d38980dee 100644 |
523 | --- a/arch/sparc/kernel/vio.c |
524 | +++ b/arch/sparc/kernel/vio.c |
525 | @@ -302,13 +302,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp, |
526 | if (!id) { |
527 | dev_set_name(&vdev->dev, "%s", bus_id_name); |
528 | vdev->dev_no = ~(u64)0; |
529 | + vdev->id = ~(u64)0; |
530 | } else if (!cfg_handle) { |
531 | dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id); |
532 | vdev->dev_no = *id; |
533 | + vdev->id = ~(u64)0; |
534 | } else { |
535 | dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name, |
536 | *cfg_handle, *id); |
537 | vdev->dev_no = *cfg_handle; |
538 | + vdev->id = *id; |
539 | } |
540 | |
541 | vdev->dev.parent = parent; |
542 | @@ -351,27 +354,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node) |
543 | (void) vio_create_one(hp, node, &root_vdev->dev); |
544 | } |
545 | |
546 | +struct vio_md_node_query { |
547 | + const char *type; |
548 | + u64 dev_no; |
549 | + u64 id; |
550 | +}; |
551 | + |
552 | static int vio_md_node_match(struct device *dev, void *arg) |
553 | { |
554 | + struct vio_md_node_query *query = (struct vio_md_node_query *) arg; |
555 | struct vio_dev *vdev = to_vio_dev(dev); |
556 | |
557 | - if (vdev->mp == (u64) arg) |
558 | - return 1; |
559 | + if (vdev->dev_no != query->dev_no) |
560 | + return 0; |
561 | + if (vdev->id != query->id) |
562 | + return 0; |
563 | + if (strcmp(vdev->type, query->type)) |
564 | + return 0; |
565 | |
566 | - return 0; |
567 | + return 1; |
568 | } |
569 | |
570 | static void vio_remove(struct mdesc_handle *hp, u64 node) |
571 | { |
572 | + const char *type; |
573 | + const u64 *id, *cfg_handle; |
574 | + u64 a; |
575 | + struct vio_md_node_query query; |
576 | struct device *dev; |
577 | |
578 | - dev = device_find_child(&root_vdev->dev, (void *) node, |
579 | + type = mdesc_get_property(hp, node, "device-type", NULL); |
580 | + if (!type) { |
581 | + type = mdesc_get_property(hp, node, "name", NULL); |
582 | + if (!type) |
583 | + type = mdesc_node_name(hp, node); |
584 | + } |
585 | + |
586 | + query.type = type; |
587 | + |
588 | + id = mdesc_get_property(hp, node, "id", NULL); |
589 | + cfg_handle = NULL; |
590 | + mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) { |
591 | + u64 target; |
592 | + |
593 | + target = mdesc_arc_target(hp, a); |
594 | + cfg_handle = mdesc_get_property(hp, target, |
595 | + "cfg-handle", NULL); |
596 | + if (cfg_handle) |
597 | + break; |
598 | + } |
599 | + |
600 | + if (!id) { |
601 | + query.dev_no = ~(u64)0; |
602 | + query.id = ~(u64)0; |
603 | + } else if (!cfg_handle) { |
604 | + query.dev_no = *id; |
605 | + query.id = ~(u64)0; |
606 | + } else { |
607 | + query.dev_no = *cfg_handle; |
608 | + query.id = *id; |
609 | + } |
610 | + |
611 | + dev = device_find_child(&root_vdev->dev, &query, |
612 | vio_md_node_match); |
613 | if (dev) { |
614 | printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev)); |
615 | |
616 | device_unregister(dev); |
617 | put_device(dev); |
618 | + } else { |
619 | + if (!id) |
620 | + printk(KERN_ERR "VIO: Removed unknown %s node.\n", |
621 | + type); |
622 | + else if (!cfg_handle) |
623 | + printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n", |
624 | + type, *id); |
625 | + else |
626 | + printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n", |
627 | + type, *cfg_handle, *id); |
628 | } |
629 | } |
630 | |
631 | diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile |
632 | index 69912d2f8b54..07c03e72d812 100644 |
633 | --- a/arch/sparc/lib/Makefile |
634 | +++ b/arch/sparc/lib/Makefile |
635 | @@ -15,6 +15,7 @@ lib-$(CONFIG_SPARC32) += copy_user.o locks.o |
636 | lib-$(CONFIG_SPARC64) += atomic_64.o |
637 | lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o |
638 | lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o |
639 | +lib-$(CONFIG_SPARC64) += multi3.o |
640 | |
641 | lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o |
642 | lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o |
643 | diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S |
644 | new file mode 100644 |
645 | index 000000000000..d6b6c97fe3c7 |
646 | --- /dev/null |
647 | +++ b/arch/sparc/lib/multi3.S |
648 | @@ -0,0 +1,35 @@ |
649 | +#include <linux/linkage.h> |
650 | +#include <asm/export.h> |
651 | + |
652 | + .text |
653 | + .align 4 |
654 | +ENTRY(__multi3) /* %o0 = u, %o1 = v */ |
655 | + mov %o1, %g1 |
656 | + srl %o3, 0, %g4 |
657 | + mulx %g4, %g1, %o1 |
658 | + srlx %g1, 0x20, %g3 |
659 | + mulx %g3, %g4, %g5 |
660 | + sllx %g5, 0x20, %o5 |
661 | + srl %g1, 0, %g4 |
662 | + sub %o1, %o5, %o5 |
663 | + srlx %o5, 0x20, %o5 |
664 | + addcc %g5, %o5, %g5 |
665 | + srlx %o3, 0x20, %o5 |
666 | + mulx %g4, %o5, %g4 |
667 | + mulx %g3, %o5, %o5 |
668 | + sethi %hi(0x80000000), %g3 |
669 | + addcc %g5, %g4, %g5 |
670 | + srlx %g5, 0x20, %g5 |
671 | + add %g3, %g3, %g3 |
672 | + movcc %xcc, %g0, %g3 |
673 | + addcc %o5, %g5, %o5 |
674 | + sllx %g4, 0x20, %g4 |
675 | + add %o1, %g4, %o1 |
676 | + add %o5, %g3, %g2 |
677 | + mulx %g1, %o2, %g1 |
678 | + add %g1, %g2, %g1 |
679 | + mulx %o0, %o3, %o0 |
680 | + retl |
681 | + add %g1, %o0, %o0 |
682 | +ENDPROC(__multi3) |
683 | +EXPORT_SYMBOL(__multi3) |
684 | diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c |
685 | index 0cda653ae007..3c40ebd50f92 100644 |
686 | --- a/arch/sparc/mm/init_64.c |
687 | +++ b/arch/sparc/mm/init_64.c |
688 | @@ -358,7 +358,8 @@ static int __init setup_hugepagesz(char *string) |
689 | } |
690 | |
691 | if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) { |
692 | - pr_warn("hugepagesz=%llu not supported by MMU.\n", |
693 | + hugetlb_bad_size(); |
694 | + pr_err("hugepagesz=%llu not supported by MMU.\n", |
695 | hugepage_size); |
696 | goto out; |
697 | } |
698 | @@ -706,10 +707,58 @@ EXPORT_SYMBOL(__flush_dcache_range); |
699 | |
700 | /* get_new_mmu_context() uses "cache + 1". */ |
701 | DEFINE_SPINLOCK(ctx_alloc_lock); |
702 | -unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; |
703 | +unsigned long tlb_context_cache = CTX_FIRST_VERSION; |
704 | #define MAX_CTX_NR (1UL << CTX_NR_BITS) |
705 | #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) |
706 | DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR); |
707 | +DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0}; |
708 | + |
709 | +static void mmu_context_wrap(void) |
710 | +{ |
711 | + unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK; |
712 | + unsigned long new_ver, new_ctx, old_ctx; |
713 | + struct mm_struct *mm; |
714 | + int cpu; |
715 | + |
716 | + bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS); |
717 | + |
718 | + /* Reserve kernel context */ |
719 | + set_bit(0, mmu_context_bmap); |
720 | + |
721 | + new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION; |
722 | + if (unlikely(new_ver == 0)) |
723 | + new_ver = CTX_FIRST_VERSION; |
724 | + tlb_context_cache = new_ver; |
725 | + |
726 | + /* |
727 | + * Make sure that any new mm that are added into per_cpu_secondary_mm, |
728 | + * are going to go through get_new_mmu_context() path. |
729 | + */ |
730 | + mb(); |
731 | + |
732 | + /* |
733 | + * Updated versions to current on those CPUs that had valid secondary |
734 | + * contexts |
735 | + */ |
736 | + for_each_online_cpu(cpu) { |
737 | + /* |
738 | + * If a new mm is stored after we took this mm from the array, |
739 | + * it will go into get_new_mmu_context() path, because we |
740 | + * already bumped the version in tlb_context_cache. |
741 | + */ |
742 | + mm = per_cpu(per_cpu_secondary_mm, cpu); |
743 | + |
744 | + if (unlikely(!mm || mm == &init_mm)) |
745 | + continue; |
746 | + |
747 | + old_ctx = mm->context.sparc64_ctx_val; |
748 | + if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) { |
749 | + new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver; |
750 | + set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap); |
751 | + mm->context.sparc64_ctx_val = new_ctx; |
752 | + } |
753 | + } |
754 | +} |
755 | |
756 | /* Caller does TLB context flushing on local CPU if necessary. |
757 | * The caller also ensures that CTX_VALID(mm->context) is false. |
758 | @@ -725,48 +774,30 @@ void get_new_mmu_context(struct mm_struct *mm) |
759 | { |
760 | unsigned long ctx, new_ctx; |
761 | unsigned long orig_pgsz_bits; |
762 | - int new_version; |
763 | |
764 | spin_lock(&ctx_alloc_lock); |
765 | +retry: |
766 | + /* wrap might have happened, test again if our context became valid */ |
767 | + if (unlikely(CTX_VALID(mm->context))) |
768 | + goto out; |
769 | orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); |
770 | ctx = (tlb_context_cache + 1) & CTX_NR_MASK; |
771 | new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); |
772 | - new_version = 0; |
773 | if (new_ctx >= (1 << CTX_NR_BITS)) { |
774 | new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); |
775 | if (new_ctx >= ctx) { |
776 | - int i; |
777 | - new_ctx = (tlb_context_cache & CTX_VERSION_MASK) + |
778 | - CTX_FIRST_VERSION; |
779 | - if (new_ctx == 1) |
780 | - new_ctx = CTX_FIRST_VERSION; |
781 | - |
782 | - /* Don't call memset, for 16 entries that's just |
783 | - * plain silly... |
784 | - */ |
785 | - mmu_context_bmap[0] = 3; |
786 | - mmu_context_bmap[1] = 0; |
787 | - mmu_context_bmap[2] = 0; |
788 | - mmu_context_bmap[3] = 0; |
789 | - for (i = 4; i < CTX_BMAP_SLOTS; i += 4) { |
790 | - mmu_context_bmap[i + 0] = 0; |
791 | - mmu_context_bmap[i + 1] = 0; |
792 | - mmu_context_bmap[i + 2] = 0; |
793 | - mmu_context_bmap[i + 3] = 0; |
794 | - } |
795 | - new_version = 1; |
796 | - goto out; |
797 | + mmu_context_wrap(); |
798 | + goto retry; |
799 | } |
800 | } |
801 | + if (mm->context.sparc64_ctx_val) |
802 | + cpumask_clear(mm_cpumask(mm)); |
803 | mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); |
804 | new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); |
805 | -out: |
806 | tlb_context_cache = new_ctx; |
807 | mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; |
808 | +out: |
809 | spin_unlock(&ctx_alloc_lock); |
810 | - |
811 | - if (unlikely(new_version)) |
812 | - smp_new_mmu_context_version(); |
813 | } |
814 | |
815 | static int numa_enabled = 1; |
816 | diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c |
817 | index bedf08b22a47..0d4b998c7d7b 100644 |
818 | --- a/arch/sparc/mm/tsb.c |
819 | +++ b/arch/sparc/mm/tsb.c |
820 | @@ -496,7 +496,8 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss) |
821 | extern void copy_tsb(unsigned long old_tsb_base, |
822 | unsigned long old_tsb_size, |
823 | unsigned long new_tsb_base, |
824 | - unsigned long new_tsb_size); |
825 | + unsigned long new_tsb_size, |
826 | + unsigned long page_size_shift); |
827 | unsigned long old_tsb_base = (unsigned long) old_tsb; |
828 | unsigned long new_tsb_base = (unsigned long) new_tsb; |
829 | |
830 | @@ -504,7 +505,9 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss) |
831 | old_tsb_base = __pa(old_tsb_base); |
832 | new_tsb_base = __pa(new_tsb_base); |
833 | } |
834 | - copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size); |
835 | + copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size, |
836 | + tsb_index == MM_TSB_BASE ? |
837 | + PAGE_SHIFT : REAL_HPAGE_SHIFT); |
838 | } |
839 | |
840 | mm->context.tsb_block[tsb_index].tsb = new_tsb; |
841 | diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S |
842 | index 5d2fd6cd3189..fcf4d27a38fb 100644 |
843 | --- a/arch/sparc/mm/ultra.S |
844 | +++ b/arch/sparc/mm/ultra.S |
845 | @@ -971,11 +971,6 @@ xcall_capture: |
846 | wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint |
847 | retry |
848 | |
849 | - .globl xcall_new_mmu_context_version |
850 | -xcall_new_mmu_context_version: |
851 | - wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint |
852 | - retry |
853 | - |
854 | #ifdef CONFIG_KGDB |
855 | .globl xcall_kgdb_capture |
856 | xcall_kgdb_capture: |
857 | diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c |
858 | index 8325d8a09ab0..91eb813e8917 100644 |
859 | --- a/arch/x86/kernel/cpu/microcode/intel.c |
860 | +++ b/arch/x86/kernel/cpu/microcode/intel.c |
861 | @@ -619,6 +619,9 @@ int __init save_microcode_in_initrd_intel(void) |
862 | |
863 | show_saved_mc(); |
864 | |
865 | + /* initrd is going away, clear patch ptr. */ |
866 | + intel_ucode_patch = NULL; |
867 | + |
868 | return 0; |
869 | } |
870 | |
871 | diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c |
872 | index 14f65a5f938e..2a7835932b71 100644 |
873 | --- a/arch/x86/kernel/kvm.c |
874 | +++ b/arch/x86/kernel/kvm.c |
875 | @@ -161,8 +161,8 @@ void kvm_async_pf_task_wait(u32 token) |
876 | */ |
877 | rcu_irq_exit(); |
878 | native_safe_halt(); |
879 | - rcu_irq_enter(); |
880 | local_irq_disable(); |
881 | + rcu_irq_enter(); |
882 | } |
883 | } |
884 | if (!n.halted) |
885 | diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c |
886 | index efde6cc50875..3665f755baa3 100644 |
887 | --- a/arch/x86/kvm/cpuid.c |
888 | +++ b/arch/x86/kvm/cpuid.c |
889 | @@ -780,18 +780,20 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, |
890 | static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i) |
891 | { |
892 | struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i]; |
893 | - int j, nent = vcpu->arch.cpuid_nent; |
894 | + struct kvm_cpuid_entry2 *ej; |
895 | + int j = i; |
896 | + int nent = vcpu->arch.cpuid_nent; |
897 | |
898 | e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT; |
899 | /* when no next entry is found, the current entry[i] is reselected */ |
900 | - for (j = i + 1; ; j = (j + 1) % nent) { |
901 | - struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j]; |
902 | - if (ej->function == e->function) { |
903 | - ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; |
904 | - return j; |
905 | - } |
906 | - } |
907 | - return 0; /* silence gcc, even though control never reaches here */ |
908 | + do { |
909 | + j = (j + 1) % nent; |
910 | + ej = &vcpu->arch.cpuid_entries[j]; |
911 | + } while (ej->function != e->function); |
912 | + |
913 | + ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; |
914 | + |
915 | + return j; |
916 | } |
917 | |
918 | /* find an entry with matching function, matching index (if needed), and that |
919 | diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c |
920 | index ac7810513d0e..732c0270a489 100644 |
921 | --- a/arch/x86/kvm/mmu.c |
922 | +++ b/arch/x86/kvm/mmu.c |
923 | @@ -3683,12 +3683,15 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) |
924 | return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); |
925 | } |
926 | |
927 | -static bool can_do_async_pf(struct kvm_vcpu *vcpu) |
928 | +bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) |
929 | { |
930 | if (unlikely(!lapic_in_kernel(vcpu) || |
931 | kvm_event_needs_reinjection(vcpu))) |
932 | return false; |
933 | |
934 | + if (is_guest_mode(vcpu)) |
935 | + return false; |
936 | + |
937 | return kvm_x86_ops->interrupt_allowed(vcpu); |
938 | } |
939 | |
940 | @@ -3704,7 +3707,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, |
941 | if (!async) |
942 | return false; /* *pfn has correct page already */ |
943 | |
944 | - if (!prefault && can_do_async_pf(vcpu)) { |
945 | + if (!prefault && kvm_can_do_async_pf(vcpu)) { |
946 | trace_kvm_try_async_get_page(gva, gfn); |
947 | if (kvm_find_async_pf_gfn(vcpu, gfn)) { |
948 | trace_kvm_async_pf_doublefault(gva, gfn); |
949 | diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h |
950 | index ddc56e91f2e4..c92834c55c59 100644 |
951 | --- a/arch/x86/kvm/mmu.h |
952 | +++ b/arch/x86/kvm/mmu.h |
953 | @@ -75,6 +75,7 @@ enum { |
954 | int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct); |
955 | void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu); |
956 | void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly); |
957 | +bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu); |
958 | |
959 | static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) |
960 | { |
961 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
962 | index a4a2bae7c274..6557c790c8c1 100644 |
963 | --- a/arch/x86/kvm/x86.c |
964 | +++ b/arch/x86/kvm/x86.c |
965 | @@ -8623,8 +8623,7 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) |
966 | if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) |
967 | return true; |
968 | else |
969 | - return !kvm_event_needs_reinjection(vcpu) && |
970 | - kvm_x86_ops->interrupt_allowed(vcpu); |
971 | + return kvm_can_do_async_pf(vcpu); |
972 | } |
973 | |
974 | void kvm_arch_start_assignment(struct kvm *kvm) |
975 | diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c |
976 | index 04ca8764f0c0..8bf27323f7a3 100644 |
977 | --- a/arch/x86/platform/efi/efi-bgrt.c |
978 | +++ b/arch/x86/platform/efi/efi-bgrt.c |
979 | @@ -36,6 +36,9 @@ void __init efi_bgrt_init(struct acpi_table_header *table) |
980 | if (acpi_disabled) |
981 | return; |
982 | |
983 | + if (!efi_enabled(EFI_BOOT)) |
984 | + return; |
985 | + |
986 | if (table->length < sizeof(bgrt_tab)) { |
987 | pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n", |
988 | table->length, sizeof(bgrt_tab)); |
989 | diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c |
990 | index cdfe8c628959..393a0c0288d1 100644 |
991 | --- a/arch/x86/platform/efi/quirks.c |
992 | +++ b/arch/x86/platform/efi/quirks.c |
993 | @@ -358,6 +358,9 @@ void __init efi_free_boot_services(void) |
994 | free_bootmem_late(start, size); |
995 | } |
996 | |
997 | + if (!num_entries) |
998 | + return; |
999 | + |
1000 | new_size = efi.memmap.desc_size * num_entries; |
1001 | new_phys = efi_memmap_alloc(num_entries); |
1002 | if (!new_phys) { |
1003 | diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c |
1004 | index bbe7ee00bd3d..a981cc916a13 100644 |
1005 | --- a/block/blk-cgroup.c |
1006 | +++ b/block/blk-cgroup.c |
1007 | @@ -74,7 +74,7 @@ static void blkg_free(struct blkcg_gq *blkg) |
1008 | blkcg_policy[i]->pd_free_fn(blkg->pd[i]); |
1009 | |
1010 | if (blkg->blkcg != &blkcg_root) |
1011 | - blk_exit_rl(&blkg->rl); |
1012 | + blk_exit_rl(blkg->q, &blkg->rl); |
1013 | |
1014 | blkg_rwstat_exit(&blkg->stat_ios); |
1015 | blkg_rwstat_exit(&blkg->stat_bytes); |
1016 | diff --git a/block/blk-core.c b/block/blk-core.c |
1017 | index d772c221cc17..1fb277501017 100644 |
1018 | --- a/block/blk-core.c |
1019 | +++ b/block/blk-core.c |
1020 | @@ -643,13 +643,19 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q, |
1021 | if (!rl->rq_pool) |
1022 | return -ENOMEM; |
1023 | |
1024 | + if (rl != &q->root_rl) |
1025 | + WARN_ON_ONCE(!blk_get_queue(q)); |
1026 | + |
1027 | return 0; |
1028 | } |
1029 | |
1030 | -void blk_exit_rl(struct request_list *rl) |
1031 | +void blk_exit_rl(struct request_queue *q, struct request_list *rl) |
1032 | { |
1033 | - if (rl->rq_pool) |
1034 | + if (rl->rq_pool) { |
1035 | mempool_destroy(rl->rq_pool); |
1036 | + if (rl != &q->root_rl) |
1037 | + blk_put_queue(q); |
1038 | + } |
1039 | } |
1040 | |
1041 | struct request_queue *blk_alloc_queue(gfp_t gfp_mask) |
1042 | diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c |
1043 | index 37f0b3ad635e..6a13d0924a66 100644 |
1044 | --- a/block/blk-sysfs.c |
1045 | +++ b/block/blk-sysfs.c |
1046 | @@ -819,7 +819,7 @@ static void blk_release_queue(struct kobject *kobj) |
1047 | elevator_exit(q, q->elevator); |
1048 | } |
1049 | |
1050 | - blk_exit_rl(&q->root_rl); |
1051 | + blk_exit_rl(q, &q->root_rl); |
1052 | |
1053 | if (q->queue_tags) |
1054 | __blk_queue_free_tags(q); |
1055 | diff --git a/block/blk.h b/block/blk.h |
1056 | index d1ea4bd9b9a3..8701d0a74eb1 100644 |
1057 | --- a/block/blk.h |
1058 | +++ b/block/blk.h |
1059 | @@ -59,7 +59,7 @@ void blk_free_flush_queue(struct blk_flush_queue *q); |
1060 | |
1061 | int blk_init_rl(struct request_list *rl, struct request_queue *q, |
1062 | gfp_t gfp_mask); |
1063 | -void blk_exit_rl(struct request_list *rl); |
1064 | +void blk_exit_rl(struct request_queue *q, struct request_list *rl); |
1065 | void init_request_from_bio(struct request *req, struct bio *bio); |
1066 | void blk_rq_bio_prep(struct request_queue *q, struct request *rq, |
1067 | struct bio *bio); |
1068 | diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c |
1069 | index 440b95ee593c..2762505664a6 100644 |
1070 | --- a/block/cfq-iosched.c |
1071 | +++ b/block/cfq-iosched.c |
1072 | @@ -38,9 +38,13 @@ static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */ |
1073 | static const int cfq_hist_divisor = 4; |
1074 | |
1075 | /* |
1076 | - * offset from end of service tree |
1077 | + * offset from end of queue service tree for idle class |
1078 | */ |
1079 | #define CFQ_IDLE_DELAY (NSEC_PER_SEC / 5) |
1080 | +/* offset from end of group service tree under time slice mode */ |
1081 | +#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5) |
1082 | +/* offset from end of group service under IOPS mode */ |
1083 | +#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5) |
1084 | |
1085 | /* |
1086 | * below this threshold, we consider thinktime immediate |
1087 | @@ -1362,6 +1366,14 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg) |
1088 | cfqg->vfraction = max_t(unsigned, vfr, 1); |
1089 | } |
1090 | |
1091 | +static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd) |
1092 | +{ |
1093 | + if (!iops_mode(cfqd)) |
1094 | + return CFQ_SLICE_MODE_GROUP_DELAY; |
1095 | + else |
1096 | + return CFQ_IOPS_MODE_GROUP_DELAY; |
1097 | +} |
1098 | + |
1099 | static void |
1100 | cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg) |
1101 | { |
1102 | @@ -1381,7 +1393,8 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg) |
1103 | n = rb_last(&st->rb); |
1104 | if (n) { |
1105 | __cfqg = rb_entry_cfqg(n); |
1106 | - cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY; |
1107 | + cfqg->vdisktime = __cfqg->vdisktime + |
1108 | + cfq_get_cfqg_vdisktime_delay(cfqd); |
1109 | } else |
1110 | cfqg->vdisktime = st->min_vdisktime; |
1111 | cfq_group_service_tree_add(st, cfqg); |
1112 | diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c |
1113 | index d3a989e718f5..3cd6e12cfc46 100644 |
1114 | --- a/crypto/asymmetric_keys/public_key.c |
1115 | +++ b/crypto/asymmetric_keys/public_key.c |
1116 | @@ -141,7 +141,7 @@ int public_key_verify_signature(const struct public_key *pkey, |
1117 | * signature and returns that to us. |
1118 | */ |
1119 | ret = crypto_akcipher_verify(req); |
1120 | - if (ret == -EINPROGRESS) { |
1121 | + if ((ret == -EINPROGRESS) || (ret == -EBUSY)) { |
1122 | wait_for_completion(&compl.completion); |
1123 | ret = compl.err; |
1124 | } |
1125 | diff --git a/crypto/drbg.c b/crypto/drbg.c |
1126 | index 8a4d98b4adba..5efc2b22a831 100644 |
1127 | --- a/crypto/drbg.c |
1128 | +++ b/crypto/drbg.c |
1129 | @@ -1768,9 +1768,8 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, |
1130 | break; |
1131 | case -EINPROGRESS: |
1132 | case -EBUSY: |
1133 | - ret = wait_for_completion_interruptible( |
1134 | - &drbg->ctr_completion); |
1135 | - if (!ret && !drbg->ctr_async_err) { |
1136 | + wait_for_completion(&drbg->ctr_completion); |
1137 | + if (!drbg->ctr_async_err) { |
1138 | reinit_completion(&drbg->ctr_completion); |
1139 | break; |
1140 | } |
1141 | diff --git a/crypto/gcm.c b/crypto/gcm.c |
1142 | index b7ad808be3d4..3841b5eafa7e 100644 |
1143 | --- a/crypto/gcm.c |
1144 | +++ b/crypto/gcm.c |
1145 | @@ -152,10 +152,8 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, |
1146 | |
1147 | err = crypto_skcipher_encrypt(&data->req); |
1148 | if (err == -EINPROGRESS || err == -EBUSY) { |
1149 | - err = wait_for_completion_interruptible( |
1150 | - &data->result.completion); |
1151 | - if (!err) |
1152 | - err = data->result.err; |
1153 | + wait_for_completion(&data->result.completion); |
1154 | + err = data->result.err; |
1155 | } |
1156 | |
1157 | if (err) |
1158 | diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c |
1159 | index 2fc52407306c..c69954023c2e 100644 |
1160 | --- a/drivers/ata/ahci.c |
1161 | +++ b/drivers/ata/ahci.c |
1162 | @@ -1364,6 +1364,40 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host) |
1163 | {} |
1164 | #endif |
1165 | |
1166 | +/* |
1167 | + * On the Acer Aspire Switch Alpha 12, sometimes all SATA ports are detected |
1168 | + * as DUMMY, or detected but eventually get a "link down" and never get up |
1169 | + * again. When this happens, CAP.NP may hold a value of 0x00 or 0x01, and the |
1170 | + * port_map may hold a value of 0x00. |
1171 | + * |
1172 | + * Overriding CAP.NP to 0x02 and the port_map to 0x7 will reveal all 3 ports |
1173 | + * and can significantly reduce the occurrence of the problem. |
1174 | + * |
1175 | + * https://bugzilla.kernel.org/show_bug.cgi?id=189471 |
1176 | + */ |
1177 | +static void acer_sa5_271_workaround(struct ahci_host_priv *hpriv, |
1178 | + struct pci_dev *pdev) |
1179 | +{ |
1180 | + static const struct dmi_system_id sysids[] = { |
1181 | + { |
1182 | + .ident = "Acer Switch Alpha 12", |
1183 | + .matches = { |
1184 | + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), |
1185 | + DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271") |
1186 | + }, |
1187 | + }, |
1188 | + { } |
1189 | + }; |
1190 | + |
1191 | + if (dmi_check_system(sysids)) { |
1192 | + dev_info(&pdev->dev, "enabling Acer Switch Alpha 12 workaround\n"); |
1193 | + if ((hpriv->saved_cap & 0xC734FF00) == 0xC734FF00) { |
1194 | + hpriv->port_map = 0x7; |
1195 | + hpriv->cap = 0xC734FF02; |
1196 | + } |
1197 | + } |
1198 | +} |
1199 | + |
1200 | #ifdef CONFIG_ARM64 |
1201 | /* |
1202 | * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently. |
1203 | @@ -1636,6 +1670,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
1204 | "online status unreliable, applying workaround\n"); |
1205 | } |
1206 | |
1207 | + |
1208 | + /* Acer SA5-271 workaround modifies private_data */ |
1209 | + acer_sa5_271_workaround(hpriv, pdev); |
1210 | + |
1211 | /* CAP.NP sometimes indicate the index of the last enabled |
1212 | * port, at other times, that of the last possible port, so |
1213 | * determining the maximum port number requires looking at |
1214 | diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c |
1215 | index 00ce26d0c047..6eed4a72d328 100644 |
1216 | --- a/drivers/ata/sata_mv.c |
1217 | +++ b/drivers/ata/sata_mv.c |
1218 | @@ -4067,7 +4067,6 @@ static int mv_platform_probe(struct platform_device *pdev) |
1219 | struct ata_host *host; |
1220 | struct mv_host_priv *hpriv; |
1221 | struct resource *res; |
1222 | - void __iomem *mmio; |
1223 | int n_ports = 0, irq = 0; |
1224 | int rc; |
1225 | int port; |
1226 | @@ -4086,9 +4085,8 @@ static int mv_platform_probe(struct platform_device *pdev) |
1227 | * Get the register base first |
1228 | */ |
1229 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1230 | - mmio = devm_ioremap_resource(&pdev->dev, res); |
1231 | - if (IS_ERR(mmio)) |
1232 | - return PTR_ERR(mmio); |
1233 | + if (res == NULL) |
1234 | + return -EINVAL; |
1235 | |
1236 | /* allocate host */ |
1237 | if (pdev->dev.of_node) { |
1238 | @@ -4132,7 +4130,12 @@ static int mv_platform_probe(struct platform_device *pdev) |
1239 | hpriv->board_idx = chip_soc; |
1240 | |
1241 | host->iomap = NULL; |
1242 | - hpriv->base = mmio - SATAHC0_REG_BASE; |
1243 | + hpriv->base = devm_ioremap(&pdev->dev, res->start, |
1244 | + resource_size(res)); |
1245 | + if (!hpriv->base) |
1246 | + return -ENOMEM; |
1247 | + |
1248 | + hpriv->base -= SATAHC0_REG_BASE; |
1249 | |
1250 | hpriv->clk = clk_get(&pdev->dev, NULL); |
1251 | if (IS_ERR(hpriv->clk)) |
1252 | diff --git a/drivers/char/mem.c b/drivers/char/mem.c |
1253 | index 6e0cbe092220..593a8818aca9 100644 |
1254 | --- a/drivers/char/mem.c |
1255 | +++ b/drivers/char/mem.c |
1256 | @@ -343,7 +343,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma) |
1257 | phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; |
1258 | |
1259 | /* It's illegal to wrap around the end of the physical address space. */ |
1260 | - if (offset + (phys_addr_t)size < offset) |
1261 | + if (offset + (phys_addr_t)size - 1 < offset) |
1262 | return -EINVAL; |
1263 | |
1264 | if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) |
1265 | diff --git a/drivers/char/random.c b/drivers/char/random.c |
1266 | index 0ab024918907..2291e6224ed3 100644 |
1267 | --- a/drivers/char/random.c |
1268 | +++ b/drivers/char/random.c |
1269 | @@ -1,6 +1,9 @@ |
1270 | /* |
1271 | * random.c -- A strong random number generator |
1272 | * |
1273 | + * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All |
1274 | + * Rights Reserved. |
1275 | + * |
1276 | * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005 |
1277 | * |
1278 | * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All |
1279 | @@ -762,6 +765,8 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait); |
1280 | static struct crng_state **crng_node_pool __read_mostly; |
1281 | #endif |
1282 | |
1283 | +static void invalidate_batched_entropy(void); |
1284 | + |
1285 | static void crng_initialize(struct crng_state *crng) |
1286 | { |
1287 | int i; |
1288 | @@ -799,6 +804,7 @@ static int crng_fast_load(const char *cp, size_t len) |
1289 | cp++; crng_init_cnt++; len--; |
1290 | } |
1291 | if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) { |
1292 | + invalidate_batched_entropy(); |
1293 | crng_init = 1; |
1294 | wake_up_interruptible(&crng_init_wait); |
1295 | pr_notice("random: fast init done\n"); |
1296 | @@ -836,6 +842,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) |
1297 | memzero_explicit(&buf, sizeof(buf)); |
1298 | crng->init_time = jiffies; |
1299 | if (crng == &primary_crng && crng_init < 2) { |
1300 | + invalidate_batched_entropy(); |
1301 | crng_init = 2; |
1302 | process_random_ready_list(); |
1303 | wake_up_interruptible(&crng_init_wait); |
1304 | @@ -2019,6 +2026,7 @@ struct batched_entropy { |
1305 | }; |
1306 | unsigned int position; |
1307 | }; |
1308 | +static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock); |
1309 | |
1310 | /* |
1311 | * Get a random word for internal kernel use only. The quality of the random |
1312 | @@ -2029,6 +2037,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64); |
1313 | u64 get_random_u64(void) |
1314 | { |
1315 | u64 ret; |
1316 | + bool use_lock = crng_init < 2; |
1317 | + unsigned long flags; |
1318 | struct batched_entropy *batch; |
1319 | |
1320 | #if BITS_PER_LONG == 64 |
1321 | @@ -2041,11 +2051,15 @@ u64 get_random_u64(void) |
1322 | #endif |
1323 | |
1324 | batch = &get_cpu_var(batched_entropy_u64); |
1325 | + if (use_lock) |
1326 | + read_lock_irqsave(&batched_entropy_reset_lock, flags); |
1327 | if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { |
1328 | extract_crng((u8 *)batch->entropy_u64); |
1329 | batch->position = 0; |
1330 | } |
1331 | ret = batch->entropy_u64[batch->position++]; |
1332 | + if (use_lock) |
1333 | + read_unlock_irqrestore(&batched_entropy_reset_lock, flags); |
1334 | put_cpu_var(batched_entropy_u64); |
1335 | return ret; |
1336 | } |
1337 | @@ -2055,22 +2069,45 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32); |
1338 | u32 get_random_u32(void) |
1339 | { |
1340 | u32 ret; |
1341 | + bool use_lock = crng_init < 2; |
1342 | + unsigned long flags; |
1343 | struct batched_entropy *batch; |
1344 | |
1345 | if (arch_get_random_int(&ret)) |
1346 | return ret; |
1347 | |
1348 | batch = &get_cpu_var(batched_entropy_u32); |
1349 | + if (use_lock) |
1350 | + read_lock_irqsave(&batched_entropy_reset_lock, flags); |
1351 | if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { |
1352 | extract_crng((u8 *)batch->entropy_u32); |
1353 | batch->position = 0; |
1354 | } |
1355 | ret = batch->entropy_u32[batch->position++]; |
1356 | + if (use_lock) |
1357 | + read_unlock_irqrestore(&batched_entropy_reset_lock, flags); |
1358 | put_cpu_var(batched_entropy_u32); |
1359 | return ret; |
1360 | } |
1361 | EXPORT_SYMBOL(get_random_u32); |
1362 | |
1363 | +/* It's important to invalidate all potential batched entropy that might |
1364 | + * be stored before the crng is initialized, which we can do lazily by |
1365 | + * simply resetting the counter to zero so that it's re-extracted on the |
1366 | + * next usage. */ |
1367 | +static void invalidate_batched_entropy(void) |
1368 | +{ |
1369 | + int cpu; |
1370 | + unsigned long flags; |
1371 | + |
1372 | + write_lock_irqsave(&batched_entropy_reset_lock, flags); |
1373 | + for_each_possible_cpu (cpu) { |
1374 | + per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0; |
1375 | + per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0; |
1376 | + } |
1377 | + write_unlock_irqrestore(&batched_entropy_reset_lock, flags); |
1378 | +} |
1379 | + |
1380 | /** |
1381 | * randomize_page - Generate a random, page aligned address |
1382 | * @start: The smallest acceptable address the caller will take. |
1383 | diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c |
1384 | index 0e3f6496524d..26b643d57847 100644 |
1385 | --- a/drivers/cpufreq/cpufreq.c |
1386 | +++ b/drivers/cpufreq/cpufreq.c |
1387 | @@ -2468,6 +2468,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) |
1388 | if (!(cpufreq_driver->flags & CPUFREQ_STICKY) && |
1389 | list_empty(&cpufreq_policy_list)) { |
1390 | /* if all ->init() calls failed, unregister */ |
1391 | + ret = -ENODEV; |
1392 | pr_debug("%s: No CPU initialized for driver %s\n", __func__, |
1393 | driver_data->name); |
1394 | goto err_if_unreg; |
1395 | diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c |
1396 | index d37e8dda8079..ec240592f5c8 100644 |
1397 | --- a/drivers/dma/ep93xx_dma.c |
1398 | +++ b/drivers/dma/ep93xx_dma.c |
1399 | @@ -201,6 +201,7 @@ struct ep93xx_dma_engine { |
1400 | struct dma_device dma_dev; |
1401 | bool m2m; |
1402 | int (*hw_setup)(struct ep93xx_dma_chan *); |
1403 | + void (*hw_synchronize)(struct ep93xx_dma_chan *); |
1404 | void (*hw_shutdown)(struct ep93xx_dma_chan *); |
1405 | void (*hw_submit)(struct ep93xx_dma_chan *); |
1406 | int (*hw_interrupt)(struct ep93xx_dma_chan *); |
1407 | @@ -323,6 +324,8 @@ static int m2p_hw_setup(struct ep93xx_dma_chan *edmac) |
1408 | | M2P_CONTROL_ENABLE; |
1409 | m2p_set_control(edmac, control); |
1410 | |
1411 | + edmac->buffer = 0; |
1412 | + |
1413 | return 0; |
1414 | } |
1415 | |
1416 | @@ -331,21 +334,27 @@ static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac) |
1417 | return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3; |
1418 | } |
1419 | |
1420 | -static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac) |
1421 | +static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac) |
1422 | { |
1423 | + unsigned long flags; |
1424 | u32 control; |
1425 | |
1426 | + spin_lock_irqsave(&edmac->lock, flags); |
1427 | control = readl(edmac->regs + M2P_CONTROL); |
1428 | control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT); |
1429 | m2p_set_control(edmac, control); |
1430 | + spin_unlock_irqrestore(&edmac->lock, flags); |
1431 | |
1432 | while (m2p_channel_state(edmac) >= M2P_STATE_ON) |
1433 | - cpu_relax(); |
1434 | + schedule(); |
1435 | +} |
1436 | |
1437 | +static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac) |
1438 | +{ |
1439 | m2p_set_control(edmac, 0); |
1440 | |
1441 | - while (m2p_channel_state(edmac) == M2P_STATE_STALL) |
1442 | - cpu_relax(); |
1443 | + while (m2p_channel_state(edmac) != M2P_STATE_IDLE) |
1444 | + dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n"); |
1445 | } |
1446 | |
1447 | static void m2p_fill_desc(struct ep93xx_dma_chan *edmac) |
1448 | @@ -1161,6 +1170,26 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, |
1449 | } |
1450 | |
1451 | /** |
1452 | + * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the |
1453 | + * current context. |
1454 | + * @chan: channel |
1455 | + * |
1456 | + * Synchronizes the DMA channel termination to the current context. When this |
1457 | + * function returns it is guaranteed that all transfers for previously issued |
1458 | + * descriptors have stopped and it is safe to free the memory associated |
1459 | + * with them. Furthermore it is guaranteed that all complete callback functions |
1460 | + * for a previously submitted descriptor have finished running and it is safe to |
1461 | + * free resources accessed from within the complete callbacks. |
1462 | + */ |
1463 | +static void ep93xx_dma_synchronize(struct dma_chan *chan) |
1464 | +{ |
1465 | + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); |
1466 | + |
1467 | + if (edmac->edma->hw_synchronize) |
1468 | + edmac->edma->hw_synchronize(edmac); |
1469 | +} |
1470 | + |
1471 | +/** |
1472 | * ep93xx_dma_terminate_all - terminate all transactions |
1473 | * @chan: channel |
1474 | * |
1475 | @@ -1323,6 +1352,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev) |
1476 | dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg; |
1477 | dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic; |
1478 | dma_dev->device_config = ep93xx_dma_slave_config; |
1479 | + dma_dev->device_synchronize = ep93xx_dma_synchronize; |
1480 | dma_dev->device_terminate_all = ep93xx_dma_terminate_all; |
1481 | dma_dev->device_issue_pending = ep93xx_dma_issue_pending; |
1482 | dma_dev->device_tx_status = ep93xx_dma_tx_status; |
1483 | @@ -1340,6 +1370,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev) |
1484 | } else { |
1485 | dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); |
1486 | |
1487 | + edma->hw_synchronize = m2p_hw_synchronize; |
1488 | edma->hw_setup = m2p_hw_setup; |
1489 | edma->hw_shutdown = m2p_hw_shutdown; |
1490 | edma->hw_submit = m2p_hw_submit; |
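The ep93xx hunks split teardown in two: hw_synchronize may sleep (schedule()) until the channel leaves the ON state, and hw_shutdown then only disables the engine; device_synchronize exposes the wait to consumers. A rough pthreads sketch of the same terminate-then-synchronize contract (hypothetical names, not the driver):

    #include <pthread.h>
    #include <stdbool.h>

    /* hypothetical channel, loosely mirroring the M2P state machine */
    struct chan {
        pthread_mutex_t lock;
        pthread_cond_t  idle;
        bool running;    /* a transfer is in flight */
        bool stop;       /* terminate_all was requested */
    };

    /* terminate_all: callable from atomic context, so it only flags */
    static void chan_terminate_all(struct chan *c)
    {
        pthread_mutex_lock(&c->lock);
        c->stop = true;
        pthread_mutex_unlock(&c->lock);
    }

    /* synchronize: may sleep until the engine is idle and callbacks done */
    static void chan_synchronize(struct chan *c)
    {
        pthread_mutex_lock(&c->lock);
        while (c->running)
            pthread_cond_wait(&c->idle, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    int main(void)
    {
        struct chan c = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .idle = PTHREAD_COND_INITIALIZER,
        };

        chan_terminate_all(&c);
        chan_synchronize(&c);   /* returns at once: nothing is running */
        return 0;
    }

Freeing descriptor memory is only safe after the synchronize step, which is exactly the guarantee the new kernel-doc above spells out.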
1491 | diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c |
1492 | index a28a01fcba67..f3e211f8f6c5 100644 |
1493 | --- a/drivers/dma/mv_xor_v2.c |
1494 | +++ b/drivers/dma/mv_xor_v2.c |
1495 | @@ -161,6 +161,7 @@ struct mv_xor_v2_device { |
1496 | struct mv_xor_v2_sw_desc *sw_desq; |
1497 | int desc_size; |
1498 | unsigned int npendings; |
1499 | + unsigned int hw_queue_idx; |
1500 | }; |
1501 | |
1502 | /** |
1503 | @@ -214,18 +215,6 @@ static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev, |
1504 | } |
1505 | |
1506 | /* |
1507 | - * Return the next available index in the DESQ. |
1508 | - */ |
1509 | -static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev) |
1510 | -{ |
1511 | - /* read the index for the next available descriptor in the DESQ */ |
1512 | - u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF); |
1513 | - |
1514 | - return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT) |
1515 | - & MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK); |
1516 | -} |
1517 | - |
1518 | -/* |
1519 | * notify the engine of new descriptors, and update the available index. |
1520 | */ |
1521 | static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev, |
1522 | @@ -257,22 +246,6 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev) |
1523 | return MV_XOR_V2_EXT_DESC_SIZE; |
1524 | } |
1525 | |
1526 | -/* |
1527 | - * Set the IMSG threshold |
1528 | - */ |
1529 | -static inline |
1530 | -void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val) |
1531 | -{ |
1532 | - u32 reg; |
1533 | - |
1534 | - reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF); |
1535 | - |
1536 | - reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT); |
1537 | - reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT); |
1538 | - |
1539 | - writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF); |
1540 | -} |
1541 | - |
1542 | static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data) |
1543 | { |
1544 | struct mv_xor_v2_device *xor_dev = data; |
1545 | @@ -288,12 +261,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data) |
1546 | if (!ndescs) |
1547 | return IRQ_NONE; |
1548 | |
1549 | - /* |
1550 | - * Update IMSG threshold, to disable new IMSG interrupts until |
1551 | - * end of the tasklet |
1552 | - */ |
1553 | - mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM); |
1554 | - |
1555 | /* schedule a tasklet to handle descriptors callbacks */ |
1556 | tasklet_schedule(&xor_dev->irq_tasklet); |
1557 | |
1558 | @@ -306,7 +273,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data) |
1559 | static dma_cookie_t |
1560 | mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx) |
1561 | { |
1562 | - int desq_ptr; |
1563 | void *dest_hw_desc; |
1564 | dma_cookie_t cookie; |
1565 | struct mv_xor_v2_sw_desc *sw_desc = |
1566 | @@ -322,15 +288,15 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx) |
1567 | spin_lock_bh(&xor_dev->lock); |
1568 | cookie = dma_cookie_assign(tx); |
1569 | |
1570 | - /* get the next available slot in the DESQ */ |
1571 | - desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev); |
1572 | - |
1573 | /* copy the HW descriptor from the SW descriptor to the DESQ */ |
1574 | - dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr; |
1575 | + dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx; |
1576 | |
1577 | memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size); |
1578 | |
1579 | xor_dev->npendings++; |
1580 | + xor_dev->hw_queue_idx++; |
1581 | + if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM) |
1582 | + xor_dev->hw_queue_idx = 0; |
1583 | |
1584 | spin_unlock_bh(&xor_dev->lock); |
1585 | |
1586 | @@ -344,6 +310,7 @@ static struct mv_xor_v2_sw_desc * |
1587 | mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev) |
1588 | { |
1589 | struct mv_xor_v2_sw_desc *sw_desc; |
1590 | + bool found = false; |
1591 | |
1592 | /* Lock the channel */ |
1593 | spin_lock_bh(&xor_dev->lock); |
1594 | @@ -355,19 +322,23 @@ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev) |
1595 | return NULL; |
1596 | } |
1597 | |
1598 | - /* get a free SW descriptor from the SW DESQ */ |
1599 | - sw_desc = list_first_entry(&xor_dev->free_sw_desc, |
1600 | - struct mv_xor_v2_sw_desc, free_list); |
1601 | + list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) { |
1602 | + if (async_tx_test_ack(&sw_desc->async_tx)) { |
1603 | + found = true; |
1604 | + break; |
1605 | + } |
1606 | + } |
1607 | + |
1608 | + if (!found) { |
1609 | + spin_unlock_bh(&xor_dev->lock); |
1610 | + return NULL; |
1611 | + } |
1612 | + |
1613 | list_del(&sw_desc->free_list); |
1614 | |
1615 | /* Release the channel */ |
1616 | spin_unlock_bh(&xor_dev->lock); |
1617 | |
1618 | - /* set the async tx descriptor */ |
1619 | - dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan); |
1620 | - sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit; |
1621 | - async_tx_ack(&sw_desc->async_tx); |
1622 | - |
1623 | return sw_desc; |
1624 | } |
1625 | |
1626 | @@ -389,6 +360,8 @@ mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, |
1627 | __func__, len, &src, &dest, flags); |
1628 | |
1629 | sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); |
1630 | + if (!sw_desc) |
1631 | + return NULL; |
1632 | |
1633 | sw_desc->async_tx.flags = flags; |
1634 | |
1635 | @@ -443,6 +416,8 @@ mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, |
1636 | __func__, src_cnt, len, &dest, flags); |
1637 | |
1638 | sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); |
1639 | + if (!sw_desc) |
1640 | + return NULL; |
1641 | |
1642 | sw_desc->async_tx.flags = flags; |
1643 | |
1644 | @@ -491,6 +466,8 @@ mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) |
1645 | container_of(chan, struct mv_xor_v2_device, dmachan); |
1646 | |
1647 | sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); |
1648 | + if (!sw_desc) |
1649 | + return NULL; |
1650 | |
1651 | /* set the HW descriptor */ |
1652 | hw_descriptor = &sw_desc->hw_desc; |
1653 | @@ -554,7 +531,6 @@ static void mv_xor_v2_tasklet(unsigned long data) |
1654 | { |
1655 | struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data; |
1656 | int pending_ptr, num_of_pending, i; |
1657 | - struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL; |
1658 | struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL; |
1659 | |
1660 | dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__); |
1661 | @@ -562,17 +538,10 @@ static void mv_xor_v2_tasklet(unsigned long data) |
1662 | /* get the pending descriptors parameters */ |
1663 | num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr); |
1664 | |
1665 | - /* next HW descriptor */ |
1666 | - next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr; |
1667 | - |
1668 | /* loop over free descriptors */ |
1669 | for (i = 0; i < num_of_pending; i++) { |
1670 | - |
1671 | - if (pending_ptr > MV_XOR_V2_DESC_NUM) |
1672 | - pending_ptr = 0; |
1673 | - |
1674 | - if (next_pending_sw_desc != NULL) |
1675 | - next_pending_hw_desc++; |
1676 | + struct mv_xor_v2_descriptor *next_pending_hw_desc = |
1677 | + xor_dev->hw_desq_virt + pending_ptr; |
1678 | |
1679 | /* get the SW descriptor related to the HW descriptor */ |
1680 | next_pending_sw_desc = |
1681 | @@ -608,15 +577,14 @@ static void mv_xor_v2_tasklet(unsigned long data) |
1682 | |
1683 | /* increment the next descriptor */ |
1684 | pending_ptr++; |
1685 | + if (pending_ptr >= MV_XOR_V2_DESC_NUM) |
1686 | + pending_ptr = 0; |
1687 | } |
1688 | |
1689 | if (num_of_pending != 0) { |
1690 | 		/* free the descriptors */ |
1691 | mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending); |
1692 | } |
1693 | - |
1694 | - /* Update IMSG threshold, to enable new IMSG interrupts */ |
1695 | - mv_xor_v2_set_imsg_thrd(xor_dev, 0); |
1696 | } |
1697 | |
1698 | /* |
1699 | @@ -648,9 +616,6 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev) |
1700 | writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32, |
1701 | xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF); |
1702 | |
1703 | - /* enable the DMA engine */ |
1704 | - writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF); |
1705 | - |
1706 | /* |
1707 | * This is a temporary solution, until we activate the |
1708 | * SMMU. Set the attributes for reading & writing data buffers |
1709 | @@ -694,6 +659,9 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev) |
1710 | reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL; |
1711 | writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE); |
1712 | |
1713 | + /* enable the DMA engine */ |
1714 | + writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF); |
1715 | + |
1716 | return 0; |
1717 | } |
1718 | |
1719 | @@ -725,6 +693,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev) |
1720 | |
1721 | platform_set_drvdata(pdev, xor_dev); |
1722 | |
1723 | + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); |
1724 | + if (ret) |
1725 | + return ret; |
1726 | + |
1727 | xor_dev->clk = devm_clk_get(&pdev->dev, NULL); |
1728 | if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) |
1729 | return -EPROBE_DEFER; |
1730 | @@ -785,8 +757,15 @@ static int mv_xor_v2_probe(struct platform_device *pdev) |
1731 | |
1732 | /* add all SW descriptors to the free list */ |
1733 | for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) { |
1734 | - xor_dev->sw_desq[i].idx = i; |
1735 | - list_add(&xor_dev->sw_desq[i].free_list, |
1736 | + struct mv_xor_v2_sw_desc *sw_desc = |
1737 | + xor_dev->sw_desq + i; |
1738 | + sw_desc->idx = i; |
1739 | + dma_async_tx_descriptor_init(&sw_desc->async_tx, |
1740 | + &xor_dev->dmachan); |
1741 | + sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit; |
1742 | + async_tx_ack(&sw_desc->async_tx); |
1743 | + |
1744 | + list_add(&sw_desc->free_list, |
1745 | &xor_dev->free_sw_desc); |
1746 | } |
1747 | |
1748 | diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c |
1749 | index 72c649713ace..31a145154e9f 100644 |
1750 | --- a/drivers/dma/sh/usb-dmac.c |
1751 | +++ b/drivers/dma/sh/usb-dmac.c |
1752 | @@ -117,7 +117,7 @@ struct usb_dmac { |
1753 | #define USB_DMASWR 0x0008 |
1754 | #define USB_DMASWR_SWR (1 << 0) |
1755 | #define USB_DMAOR 0x0060 |
1756 | -#define USB_DMAOR_AE (1 << 2) |
1757 | +#define USB_DMAOR_AE (1 << 1) |
1758 | #define USB_DMAOR_DME (1 << 0) |
1759 | |
1760 | #define USB_DMASAR 0x0000 |
1761 | diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c |
1762 | index f97ecb49972e..167f029f5fad 100644 |
1763 | --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c |
1764 | +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c |
1765 | @@ -906,6 +906,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev) |
1766 | u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); |
1767 | u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300; |
1768 | |
1769 | + /* disable mclk switching if the refresh is >120Hz, even if the |
1770 | + * blanking period would allow it |
1771 | + */ |
1772 | + if (amdgpu_dpm_get_vrefresh(adev) > 120) |
1773 | + return true; |
1774 | + |
1775 | if (vblank_time < switch_limit) |
1776 | return true; |
1777 | else |
1778 | diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c |
1779 | index b5c6bb46a425..37b8ad3e30d8 100644 |
1780 | --- a/drivers/gpu/drm/drm_drv.c |
1781 | +++ b/drivers/gpu/drm/drm_drv.c |
1782 | @@ -358,7 +358,12 @@ EXPORT_SYMBOL(drm_put_dev); |
1783 | void drm_unplug_dev(struct drm_device *dev) |
1784 | { |
1785 | /* for a USB device */ |
1786 | - drm_dev_unregister(dev); |
1787 | + if (drm_core_check_feature(dev, DRIVER_MODESET)) |
1788 | + drm_modeset_unregister_all(dev); |
1789 | + |
1790 | + drm_minor_unregister(dev, DRM_MINOR_PRIMARY); |
1791 | + drm_minor_unregister(dev, DRM_MINOR_RENDER); |
1792 | + drm_minor_unregister(dev, DRM_MINOR_CONTROL); |
1793 | |
1794 | mutex_lock(&drm_global_mutex); |
1795 | |
1796 | diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c |
1797 | index 5c089b3c2a7e..66dbb3c4c6d8 100644 |
1798 | --- a/drivers/gpu/drm/i915/i915_drv.c |
1799 | +++ b/drivers/gpu/drm/i915/i915_drv.c |
1800 | @@ -565,9 +565,7 @@ static int i915_load_modeset_init(struct drm_device *dev) |
1801 | if (i915_inject_load_failure()) |
1802 | return -ENODEV; |
1803 | |
1804 | - ret = intel_bios_init(dev_priv); |
1805 | - if (ret) |
1806 | - DRM_INFO("failed to find VBIOS tables\n"); |
1807 | + intel_bios_init(dev_priv); |
1808 | |
1809 | /* If we have > 1 VGA cards, then we need to arbitrate access |
1810 | * to the common VGA resources. |
1811 | diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h |
1812 | index 46fcd8b7080a..959e22dc94ba 100644 |
1813 | --- a/drivers/gpu/drm/i915/i915_drv.h |
1814 | +++ b/drivers/gpu/drm/i915/i915_drv.h |
1815 | @@ -3629,7 +3629,7 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) |
1816 | extern void intel_i2c_reset(struct drm_i915_private *dev_priv); |
1817 | |
1818 | /* intel_bios.c */ |
1819 | -int intel_bios_init(struct drm_i915_private *dev_priv); |
1820 | +void intel_bios_init(struct drm_i915_private *dev_priv); |
1821 | bool intel_bios_is_valid_vbt(const void *buf, size_t size); |
1822 | bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); |
1823 | bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); |
1824 | diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c |
1825 | index e144f033f4b5..639d45c1dd2e 100644 |
1826 | --- a/drivers/gpu/drm/i915/intel_bios.c |
1827 | +++ b/drivers/gpu/drm/i915/intel_bios.c |
1828 | @@ -1341,6 +1341,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv, |
1829 | return; |
1830 | } |
1831 | |
1832 | +/* Common defaults which may be overridden by VBT. */ |
1833 | static void |
1834 | init_vbt_defaults(struct drm_i915_private *dev_priv) |
1835 | { |
1836 | @@ -1377,6 +1378,18 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) |
1837 | &dev_priv->vbt.ddi_port_info[port]; |
1838 | |
1839 | info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN; |
1840 | + } |
1841 | +} |
1842 | + |
1843 | +/* Defaults to initialize only if there is no VBT. */ |
1844 | +static void |
1845 | +init_vbt_missing_defaults(struct drm_i915_private *dev_priv) |
1846 | +{ |
1847 | + enum port port; |
1848 | + |
1849 | + for (port = PORT_A; port < I915_MAX_PORTS; port++) { |
1850 | + struct ddi_vbt_port_info *info = |
1851 | + &dev_priv->vbt.ddi_port_info[port]; |
1852 | |
1853 | info->supports_dvi = (port != PORT_A && port != PORT_E); |
1854 | info->supports_hdmi = info->supports_dvi; |
1855 | @@ -1462,36 +1475,35 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size) |
1856 | * intel_bios_init - find VBT and initialize settings from the BIOS |
1857 | * @dev_priv: i915 device instance |
1858 | * |
1859 | - * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers |
1860 | - * to appropriate values. |
1861 | - * |
1862 | - * Returns 0 on success, nonzero on failure. |
1863 | + * Parse and initialize settings from the Video BIOS Tables (VBT). If the VBT |
1864 | + * was not found in ACPI OpRegion, try to find it in PCI ROM first. Also |
1865 | + * initialize some defaults if the VBT is not present at all. |
1866 | */ |
1867 | -int |
1868 | -intel_bios_init(struct drm_i915_private *dev_priv) |
1869 | +void intel_bios_init(struct drm_i915_private *dev_priv) |
1870 | { |
1871 | struct pci_dev *pdev = dev_priv->drm.pdev; |
1872 | const struct vbt_header *vbt = dev_priv->opregion.vbt; |
1873 | const struct bdb_header *bdb; |
1874 | u8 __iomem *bios = NULL; |
1875 | |
1876 | - if (HAS_PCH_NOP(dev_priv)) |
1877 | - return -ENODEV; |
1878 | + if (HAS_PCH_NOP(dev_priv)) { |
1879 | + DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n"); |
1880 | + return; |
1881 | + } |
1882 | |
1883 | init_vbt_defaults(dev_priv); |
1884 | |
1885 | + /* If the OpRegion does not have VBT, look in PCI ROM. */ |
1886 | if (!vbt) { |
1887 | size_t size; |
1888 | |
1889 | bios = pci_map_rom(pdev, &size); |
1890 | if (!bios) |
1891 | - return -1; |
1892 | + goto out; |
1893 | |
1894 | vbt = find_vbt(bios, size); |
1895 | - if (!vbt) { |
1896 | - pci_unmap_rom(pdev, bios); |
1897 | - return -1; |
1898 | - } |
1899 | + if (!vbt) |
1900 | + goto out; |
1901 | |
1902 | DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n"); |
1903 | } |
1904 | @@ -1516,10 +1528,14 @@ intel_bios_init(struct drm_i915_private *dev_priv) |
1905 | parse_mipi_sequence(dev_priv, bdb); |
1906 | parse_ddi_ports(dev_priv, bdb); |
1907 | |
1908 | +out: |
1909 | + if (!vbt) { |
1910 | + DRM_INFO("Failed to find VBIOS tables (VBT)\n"); |
1911 | + init_vbt_missing_defaults(dev_priv); |
1912 | + } |
1913 | + |
1914 | if (bios) |
1915 | pci_unmap_rom(pdev, bios); |
1916 | - |
1917 | - return 0; |
1918 | } |
1919 | |
1920 | /** |
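After this hunk, intel_bios_init() cannot fail: common defaults are always applied, the VBT is taken from the ACPI OpRegion or, failing that, the PCI ROM, and a separate set of defaults fills in only when no VBT exists at all. A compact sketch of that fallback flow (stub lookups, all names hypothetical):

    #include <stdio.h>

    /* hypothetical stand-ins for the OpRegion/ROM lookups */
    static const char *find_in_opregion(void) { return NULL; }
    static const char *find_in_rom(void)      { return NULL; }

    static void init_defaults(void)         { puts("common defaults"); }
    static void init_missing_defaults(void) { puts("no-VBT defaults"); }
    static void parse(const char *vbt)      { printf("parse %s\n", vbt); }

    static void bios_init(void)
    {
        const char *vbt;

        init_defaults();             /* always applied, VBT may override */

        vbt = find_in_opregion();
        if (!vbt)
            vbt = find_in_rom();     /* fallback location */

        if (vbt)
            parse(vbt);
        else
            init_missing_defaults(); /* only when no VBT exists at all */
    }

    int main(void) { bios_init(); return 0; }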
1921 | diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c |
1922 | index 7a5b41b1c024..999cb31ba63d 100644 |
1923 | --- a/drivers/gpu/drm/i915/intel_lpe_audio.c |
1924 | +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c |
1925 | @@ -63,6 +63,7 @@ |
1926 | #include <linux/acpi.h> |
1927 | #include <linux/device.h> |
1928 | #include <linux/pci.h> |
1929 | +#include <linux/pm_runtime.h> |
1930 | |
1931 | #include "i915_drv.h" |
1932 | #include <linux/delay.h> |
1933 | @@ -121,6 +122,10 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv) |
1934 | |
1935 | kfree(rsc); |
1936 | |
1937 | + pm_runtime_forbid(&platdev->dev); |
1938 | + pm_runtime_set_active(&platdev->dev); |
1939 | + pm_runtime_enable(&platdev->dev); |
1940 | + |
1941 | return platdev; |
1942 | |
1943 | err: |
1944 | diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c |
1945 | index 0ffb8affef35..4a81d67b0d69 100644 |
1946 | --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c |
1947 | +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c |
1948 | @@ -220,9 +220,10 @@ mdp5_plane_duplicate_state(struct drm_plane *plane) |
1949 | |
1950 | mdp5_state = kmemdup(to_mdp5_plane_state(plane->state), |
1951 | sizeof(*mdp5_state), GFP_KERNEL); |
1952 | + if (!mdp5_state) |
1953 | + return NULL; |
1954 | |
1955 | - if (mdp5_state && mdp5_state->base.fb) |
1956 | - drm_framebuffer_reference(mdp5_state->base.fb); |
1957 | + __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base); |
1958 | |
1959 | return &mdp5_state->base; |
1960 | } |
1961 | diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c |
1962 | index 70226eaa5cac..eba4c3e8e156 100644 |
1963 | --- a/drivers/gpu/drm/msm/msm_drv.c |
1964 | +++ b/drivers/gpu/drm/msm/msm_drv.c |
1965 | @@ -828,6 +828,7 @@ static struct drm_driver msm_driver = { |
1966 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, |
1967 | .gem_prime_export = drm_gem_prime_export, |
1968 | .gem_prime_import = drm_gem_prime_import, |
1969 | + .gem_prime_res_obj = msm_gem_prime_res_obj, |
1970 | .gem_prime_pin = msm_gem_prime_pin, |
1971 | .gem_prime_unpin = msm_gem_prime_unpin, |
1972 | .gem_prime_get_sg_table = msm_gem_prime_get_sg_table, |
1973 | diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h |
1974 | index c3b14876edaa..0e56a8bb7b59 100644 |
1975 | --- a/drivers/gpu/drm/msm/msm_drv.h |
1976 | +++ b/drivers/gpu/drm/msm/msm_drv.h |
1977 | @@ -223,6 +223,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); |
1978 | void *msm_gem_prime_vmap(struct drm_gem_object *obj); |
1979 | void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); |
1980 | int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); |
1981 | +struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj); |
1982 | struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, |
1983 | struct dma_buf_attachment *attach, struct sg_table *sg); |
1984 | int msm_gem_prime_pin(struct drm_gem_object *obj); |
1985 | diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c |
1986 | index 60bb290700ce..13403c6da6c7 100644 |
1987 | --- a/drivers/gpu/drm/msm/msm_gem_prime.c |
1988 | +++ b/drivers/gpu/drm/msm/msm_gem_prime.c |
1989 | @@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj) |
1990 | if (!obj->import_attach) |
1991 | msm_gem_put_pages(obj); |
1992 | } |
1993 | + |
1994 | +struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj) |
1995 | +{ |
1996 | + struct msm_gem_object *msm_obj = to_msm_bo(obj); |
1997 | + |
1998 | + return msm_obj->resv; |
1999 | +} |
2000 | diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h |
2001 | index 6a567fe347b3..820a4805916f 100644 |
2002 | --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h |
2003 | +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h |
2004 | @@ -4,6 +4,7 @@ |
2005 | |
2006 | struct nvkm_alarm { |
2007 | struct list_head head; |
2008 | + struct list_head exec; |
2009 | u64 timestamp; |
2010 | void (*func)(struct nvkm_alarm *); |
2011 | }; |
2012 | diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c |
2013 | index f2a86eae0a0d..2437f7d41ca2 100644 |
2014 | --- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c |
2015 | +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c |
2016 | @@ -50,7 +50,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr) |
2017 | /* Move to completed list. We'll drop the lock before |
2018 | * executing the callback so it can reschedule itself. |
2019 | */ |
2020 | - list_move_tail(&alarm->head, &exec); |
2021 | + list_del_init(&alarm->head); |
2022 | + list_add(&alarm->exec, &exec); |
2023 | } |
2024 | |
2025 | /* Shut down interrupt if no more pending alarms. */ |
2026 | @@ -59,8 +60,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr) |
2027 | spin_unlock_irqrestore(&tmr->lock, flags); |
2028 | |
2029 | /* Execute completed callbacks. */ |
2030 | - list_for_each_entry_safe(alarm, atemp, &exec, head) { |
2031 | - list_del_init(&alarm->head); |
2032 | + list_for_each_entry_safe(alarm, atemp, &exec, exec) { |
2033 | + list_del(&alarm->exec); |
2034 | alarm->func(alarm); |
2035 | } |
2036 | } |
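The nouveau fix gives each alarm a second list link, so the trigger path can move expired alarms onto a private completed list and run their callbacks after dropping the lock; a callback that re-arms the alarm touches only the pending link and cannot corrupt the traversal. A hand-rolled singly-linked sketch of the two-link idea (hypothetical, far simpler than the kernel's list_head machinery):

    #include <stddef.h>
    #include <stdio.h>

    /* two independent links so an alarm can sit on the pending list
     * (via 'next') and the completed list (via 'exec_next') at once */
    struct alarm {
        struct alarm *next;        /* pending-list linkage */
        struct alarm *exec_next;   /* completed-list linkage */
        void (*func)(struct alarm *);
    };

    static struct alarm *pending;

    static void trigger(void)
    {
        struct alarm *exec = NULL;

        /* under the lock: unlink expired alarms from 'pending' and
         * collect them on a private list through the second link */
        while (pending) {
            struct alarm *a = pending;

            pending = a->next;
            a->next = NULL;
            a->exec_next = exec;
            exec = a;
        }

        /* lock dropped: run callbacks; func() may safely re-add the
         * alarm to 'pending' because that uses the other link */
        while (exec) {
            struct alarm *a = exec;

            exec = a->exec_next;
            a->func(a);
        }
    }

    static void say(struct alarm *a) { (void)a; puts("fired"); }

    int main(void)
    {
        struct alarm a = { .func = say };

        a.next = pending;
        pending = &a;
        trigger();
        return 0;
    }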
2037 | diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c |
2038 | index b6a0806b06bf..a1c68e6a689e 100644 |
2039 | --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c |
2040 | +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c |
2041 | @@ -368,6 +368,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv, |
2042 | return fifo_state->static_buffer; |
2043 | else { |
2044 | fifo_state->dynamic_buffer = vmalloc(bytes); |
2045 | + if (!fifo_state->dynamic_buffer) |
2046 | + goto out_err; |
2047 | return fifo_state->dynamic_buffer; |
2048 | } |
2049 | } |
2050 | diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c |
2051 | index 05fa092c942b..56b803384ea2 100644 |
2052 | --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c |
2053 | +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c |
2054 | @@ -1275,11 +1275,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, |
2055 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
2056 | int ret; |
2057 | uint32_t size; |
2058 | - uint32_t backup_handle; |
2059 | + uint32_t backup_handle = 0; |
2060 | |
2061 | if (req->multisample_count != 0) |
2062 | return -EINVAL; |
2063 | |
2064 | + if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS) |
2065 | + return -EINVAL; |
2066 | + |
2067 | if (unlikely(vmw_user_surface_size == 0)) |
2068 | vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + |
2069 | 128; |
2070 | @@ -1315,12 +1318,16 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, |
2071 | ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, |
2072 | &res->backup, |
2073 | &user_srf->backup_base); |
2074 | - if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < |
2075 | - res->backup_size) { |
2076 | - DRM_ERROR("Surface backup buffer is too small.\n"); |
2077 | - vmw_dmabuf_unreference(&res->backup); |
2078 | - ret = -EINVAL; |
2079 | - goto out_unlock; |
2080 | + if (ret == 0) { |
2081 | + if (res->backup->base.num_pages * PAGE_SIZE < |
2082 | + res->backup_size) { |
2083 | + DRM_ERROR("Surface backup buffer is too small.\n"); |
2084 | + vmw_dmabuf_unreference(&res->backup); |
2085 | + ret = -EINVAL; |
2086 | + goto out_unlock; |
2087 | + } else { |
2088 | + backup_handle = req->buffer_handle; |
2089 | + } |
2090 | } |
2091 | } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer) |
2092 | ret = vmw_user_dmabuf_alloc(dev_priv, tfile, |
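The vmwgfx hunk shows two common ioctl hygiene fixes: bound-check untrusted request fields up front (mip_levels), and initialize an output value (backup_handle) that some paths never assign. A bare-bones sketch of that validation shape (the bound and struct layout are made up):

    #include <errno.h>
    #include <stdint.h>

    #define MAX_MIP_LEVELS 15   /* hypothetical bound, in the spirit of
                                   DRM_VMW_MAX_MIP_LEVELS */

    struct req { uint32_t mip_levels; uint32_t multisample_count; };

    /* validate untrusted ioctl arguments before anything is sized by them */
    static int surface_define(const struct req *req)
    {
        uint32_t backup_handle = 0; /* initialized: may be reported even on
                                       paths that never assign it */

        if (req->multisample_count != 0)
            return -EINVAL;
        if (req->mip_levels > MAX_MIP_LEVELS)
            return -EINVAL;

        (void)backup_handle;
        return 0;
    }

    int main(void)
    {
        struct req bad = { .mip_levels = 99, .multisample_count = 0 };

        return surface_define(&bad) == -EINVAL ? 0 : 1;
    }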
2093 | diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c |
2094 | index 3ac4c03ba77b..c13a4fd86b3c 100644 |
2095 | --- a/drivers/hwmon/coretemp.c |
2096 | +++ b/drivers/hwmon/coretemp.c |
2097 | @@ -605,6 +605,13 @@ static int coretemp_cpu_online(unsigned int cpu) |
2098 | struct platform_data *pdata; |
2099 | |
2100 | /* |
2101 | + * Don't execute this on resume as the offline callback did |
2102 | + * not get executed on suspend. |
2103 | + */ |
2104 | + if (cpuhp_tasks_frozen) |
2105 | + return 0; |
2106 | + |
2107 | + /* |
2108 | * CPUID.06H.EAX[0] indicates whether the CPU has thermal |
2109 | * sensors. We check this bit only, all the early CPUs |
2110 | * without thermal sensors will be filtered out. |
2111 | @@ -654,6 +661,13 @@ static int coretemp_cpu_offline(unsigned int cpu) |
2112 | struct temp_data *tdata; |
2113 | int indx, target; |
2114 | |
2115 | + /* |
2116 | + * Don't execute this on suspend as the device remove locks |
2117 | + * up the machine. |
2118 | + */ |
2119 | + if (cpuhp_tasks_frozen) |
2120 | + return 0; |
2121 | + |
2122 | /* If the physical CPU device does not exist, just return */ |
2123 | if (!pdev) |
2124 | return 0; |
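The coretemp fix skips both hotplug callbacks when the transition happens with tasks frozen: suspend must not tear the device down (the remove path locks up), and resume must not re-create state for an offline that never ran. A trivial sketch of that guard (cpuhp_tasks_frozen replaced by a plain flag):

    #include <stdbool.h>
    #include <stdio.h>

    static bool tasks_frozen;   /* stand-in for cpuhp_tasks_frozen */

    static int cpu_online_cb(unsigned int cpu)
    {
        if (tasks_frozen)       /* resume path: offline never ran, skip */
            return 0;
        printf("create sensor device for cpu %u\n", cpu);
        return 0;
    }

    static int cpu_offline_cb(unsigned int cpu)
    {
        if (tasks_frozen)       /* suspend path: keep the device around */
            return 0;
        printf("remove sensor device for cpu %u\n", cpu);
        return 0;
    }

    int main(void)
    {
        tasks_frozen = true;    /* as during suspend/resume */
        cpu_offline_cb(0);
        cpu_online_cb(0);
        return 0;
    }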
2125 | diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c |
2126 | index 21d38c8af21e..7f4f9c4150e3 100644 |
2127 | --- a/drivers/iio/adc/bcm_iproc_adc.c |
2128 | +++ b/drivers/iio/adc/bcm_iproc_adc.c |
2129 | @@ -143,7 +143,7 @@ static void iproc_adc_reg_dump(struct iio_dev *indio_dev) |
2130 | iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA); |
2131 | } |
2132 | |
2133 | -static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data) |
2134 | +static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data) |
2135 | { |
2136 | u32 channel_intr_status; |
2137 | u32 intr_status; |
2138 | @@ -167,7 +167,7 @@ static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data) |
2139 | return IRQ_NONE; |
2140 | } |
2141 | |
2142 | -static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data) |
2143 | +static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data) |
2144 | { |
2145 | irqreturn_t retval = IRQ_NONE; |
2146 | struct iproc_adc_priv *adc_priv; |
2147 | @@ -181,7 +181,7 @@ static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data) |
2148 | adc_priv = iio_priv(indio_dev); |
2149 | |
2150 | regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status); |
2151 | - dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_thread(),INTRPT_STS:%x\n", |
2152 | + dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_handler(),INTRPT_STS:%x\n", |
2153 | intr_status); |
2154 | |
2155 | intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR; |
2156 | @@ -566,8 +566,8 @@ static int iproc_adc_probe(struct platform_device *pdev) |
2157 | } |
2158 | |
2159 | ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno, |
2160 | - iproc_adc_interrupt_thread, |
2161 | iproc_adc_interrupt_handler, |
2162 | + iproc_adc_interrupt_thread, |
2163 | IRQF_SHARED, "iproc-adc", indio_dev); |
2164 | if (ret) { |
2165 | dev_err(&pdev->dev, "request_irq error %d\n", ret); |
2166 | diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c |
2167 | index 978e1592c2a3..4061fed93f1f 100644 |
2168 | --- a/drivers/iio/industrialio-trigger.c |
2169 | +++ b/drivers/iio/industrialio-trigger.c |
2170 | @@ -451,7 +451,8 @@ static ssize_t iio_trigger_write_current(struct device *dev, |
2171 | return len; |
2172 | |
2173 | out_trigger_put: |
2174 | - iio_trigger_put(trig); |
2175 | + if (trig) |
2176 | + iio_trigger_put(trig); |
2177 | return ret; |
2178 | } |
2179 | |
2180 | diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c |
2181 | index b30e0c1c6cc4..67838edd8b37 100644 |
2182 | --- a/drivers/iio/light/ltr501.c |
2183 | +++ b/drivers/iio/light/ltr501.c |
2184 | @@ -74,9 +74,9 @@ static const int int_time_mapping[] = {100000, 50000, 200000, 400000}; |
2185 | static const struct reg_field reg_field_it = |
2186 | REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4); |
2187 | static const struct reg_field reg_field_als_intr = |
2188 | - REG_FIELD(LTR501_INTR, 0, 0); |
2189 | -static const struct reg_field reg_field_ps_intr = |
2190 | REG_FIELD(LTR501_INTR, 1, 1); |
2191 | +static const struct reg_field reg_field_ps_intr = |
2192 | + REG_FIELD(LTR501_INTR, 0, 0); |
2193 | static const struct reg_field reg_field_als_rate = |
2194 | REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2); |
2195 | static const struct reg_field reg_field_ps_rate = |
2196 | diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c |
2197 | index 020459513384..268210ea4990 100644 |
2198 | --- a/drivers/iio/proximity/as3935.c |
2199 | +++ b/drivers/iio/proximity/as3935.c |
2200 | @@ -40,9 +40,9 @@ |
2201 | #define AS3935_AFE_PWR_BIT BIT(0) |
2202 | |
2203 | #define AS3935_INT 0x03 |
2204 | -#define AS3935_INT_MASK 0x07 |
2205 | +#define AS3935_INT_MASK 0x0f |
2206 | #define AS3935_EVENT_INT BIT(3) |
2207 | -#define AS3935_NOISE_INT BIT(1) |
2208 | +#define AS3935_NOISE_INT BIT(0) |
2209 | |
2210 | #define AS3935_DATA 0x07 |
2211 | #define AS3935_DATA_MASK 0x3F |
2212 | @@ -215,7 +215,7 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private) |
2213 | |
2214 | st->buffer[0] = val & AS3935_DATA_MASK; |
2215 | iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer, |
2216 | - pf->timestamp); |
2217 | + iio_get_time_ns(indio_dev)); |
2218 | err_read: |
2219 | iio_trigger_notify_done(indio_dev->trig); |
2220 | |
2221 | @@ -244,7 +244,7 @@ static void as3935_event_work(struct work_struct *work) |
2222 | |
2223 | switch (val) { |
2224 | case AS3935_EVENT_INT: |
2225 | - iio_trigger_poll(st->trig); |
2226 | + iio_trigger_poll_chained(st->trig); |
2227 | break; |
2228 | case AS3935_NOISE_INT: |
2229 | dev_warn(&st->spi->dev, "noise level is too high\n"); |
2230 | diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c |
2231 | index e73d968023f7..f1fa1f172107 100644 |
2232 | --- a/drivers/input/mouse/elantech.c |
2233 | +++ b/drivers/input/mouse/elantech.c |
2234 | @@ -1118,8 +1118,10 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, |
2235 | * Asus UX32VD 0x361f02 00, 15, 0e clickpad |
2236 | * Avatar AVIU-145A2 0x361f00 ? clickpad |
2237 | * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons |
2238 | + * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons |
2239 | * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons |
2240 | * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons |
2241 | + * Fujitsu LIFEBOOK E557 0x570f01 40, 14, 0c 2 hw buttons |
2242 | * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons |
2243 | * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) |
2244 | * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons |
2245 | @@ -1525,6 +1527,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { |
2246 | }, |
2247 | }, |
2248 | { |
2249 | + /* Fujitsu LIFEBOOK E546 does not work with crc_enabled == 0 */ |
2250 | + .matches = { |
2251 | + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), |
2252 | + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"), |
2253 | + }, |
2254 | + }, |
2255 | + { |
2256 | /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */ |
2257 | .matches = { |
2258 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), |
2259 | @@ -1546,6 +1555,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { |
2260 | }, |
2261 | }, |
2262 | { |
2263 | + /* Fujitsu LIFEBOOK E557 does not work with crc_enabled == 0 */ |
2264 | + .matches = { |
2265 | + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), |
2266 | + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E557"), |
2267 | + }, |
2268 | + }, |
2269 | + { |
2270 | /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */ |
2271 | .matches = { |
2272 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), |
2273 | diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c |
2274 | index 7fa84b64a2ae..0bdc161c76cd 100644 |
2275 | --- a/drivers/media/rc/rc-ir-raw.c |
2276 | +++ b/drivers/media/rc/rc-ir-raw.c |
2277 | @@ -211,7 +211,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_set_idle); |
2278 | */ |
2279 | void ir_raw_event_handle(struct rc_dev *dev) |
2280 | { |
2281 | - if (!dev->raw) |
2282 | + if (!dev->raw || !dev->raw->thread) |
2283 | return; |
2284 | |
2285 | wake_up_process(dev->raw->thread); |
2286 | @@ -490,6 +490,7 @@ int ir_raw_event_register(struct rc_dev *dev) |
2287 | { |
2288 | int rc; |
2289 | struct ir_raw_handler *handler; |
2290 | + struct task_struct *thread; |
2291 | |
2292 | if (!dev) |
2293 | return -EINVAL; |
2294 | @@ -507,13 +508,15 @@ int ir_raw_event_register(struct rc_dev *dev) |
2295 | * because the event is coming from userspace |
2296 | */ |
2297 | if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { |
2298 | - dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw, |
2299 | - "rc%u", dev->minor); |
2300 | + thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u", |
2301 | + dev->minor); |
2302 | |
2303 | - if (IS_ERR(dev->raw->thread)) { |
2304 | - rc = PTR_ERR(dev->raw->thread); |
2305 | + if (IS_ERR(thread)) { |
2306 | + rc = PTR_ERR(thread); |
2307 | goto out; |
2308 | } |
2309 | + |
2310 | + dev->raw->thread = thread; |
2311 | } |
2312 | |
2313 | mutex_lock(&ir_raw_handler_lock); |
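The rc-ir-raw change builds the kthread result in a local variable and publishes dev->raw->thread only after the IS_ERR() check, so a concurrent ir_raw_event_handle() can never observe an ERR_PTR value. A small sketch of that publish-after-validate pattern, with minimal ERR_PTR-style helpers mirroring the kernel idiom (all hypothetical):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* minimal ERR_PTR-style helpers, mirroring the kernel idiom */
    static void *ERR_PTR(intptr_t err)      { return (void *)err; }
    static intptr_t PTR_ERR(const void *p)  { return (intptr_t)p; }
    static int IS_ERR(const void *p) { return (uintptr_t)p >= (uintptr_t)-4095; }

    struct dev { void *thread; /* readers treat non-NULL as "thread exists" */ };

    /* simulate a thread-start helper that fails */
    static void *start_thread(void) { return ERR_PTR(-ENOMEM); }

    static int dev_register(struct dev *d)
    {
        void *thread = start_thread();   /* keep the result local first */

        if (IS_ERR(thread))
            return (int)PTR_ERR(thread); /* d->thread never sees an ERR_PTR */

        d->thread = thread;              /* publish only a valid pointer */
        return 0;
    }

    int main(void)
    {
        struct dev d = { .thread = NULL };

        if (dev_register(&d))
            printf("register failed, d.thread still %p\n", d.thread);
        return 0;
    }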
2314 | diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c |
2315 | index e7139c76f961..072064220707 100644 |
2316 | --- a/drivers/misc/cxl/file.c |
2317 | +++ b/drivers/misc/cxl/file.c |
2318 | @@ -158,11 +158,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx, |
2319 | |
2320 | /* Do this outside the status_mutex to avoid a circular dependency with |
2321 | * the locking in cxl_mmap_fault() */ |
2322 | - if (copy_from_user(&work, uwork, |
2323 | - sizeof(struct cxl_ioctl_start_work))) { |
2324 | - rc = -EFAULT; |
2325 | - goto out; |
2326 | - } |
2327 | + if (copy_from_user(&work, uwork, sizeof(work))) |
2328 | + return -EFAULT; |
2329 | |
2330 | mutex_lock(&ctx->status_mutex); |
2331 | if (ctx->status != OPENED) { |
2332 | diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c |
2333 | index 7ae710585267..47b777234c54 100644 |
2334 | --- a/drivers/misc/cxl/native.c |
2335 | +++ b/drivers/misc/cxl/native.c |
2336 | @@ -1075,13 +1075,16 @@ int cxl_native_register_psl_err_irq(struct cxl *adapter) |
2337 | |
2338 | void cxl_native_release_psl_err_irq(struct cxl *adapter) |
2339 | { |
2340 | - if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq)) |
2341 | + if (adapter->native->err_virq == 0 || |
2342 | + adapter->native->err_virq != |
2343 | + irq_find_mapping(NULL, adapter->native->err_hwirq)) |
2344 | return; |
2345 | |
2346 | cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000); |
2347 | cxl_unmap_irq(adapter->native->err_virq, adapter); |
2348 | cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq); |
2349 | kfree(adapter->irq_name); |
2350 | + adapter->native->err_virq = 0; |
2351 | } |
2352 | |
2353 | int cxl_native_register_serr_irq(struct cxl_afu *afu) |
2354 | @@ -1111,13 +1114,15 @@ int cxl_native_register_serr_irq(struct cxl_afu *afu) |
2355 | |
2356 | void cxl_native_release_serr_irq(struct cxl_afu *afu) |
2357 | { |
2358 | - if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq)) |
2359 | + if (afu->serr_virq == 0 || |
2360 | + afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq)) |
2361 | return; |
2362 | |
2363 | cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000); |
2364 | cxl_unmap_irq(afu->serr_virq, afu); |
2365 | cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); |
2366 | kfree(afu->err_irq_name); |
2367 | + afu->serr_virq = 0; |
2368 | } |
2369 | |
2370 | int cxl_native_register_psl_irq(struct cxl_afu *afu) |
2371 | @@ -1140,12 +1145,15 @@ int cxl_native_register_psl_irq(struct cxl_afu *afu) |
2372 | |
2373 | void cxl_native_release_psl_irq(struct cxl_afu *afu) |
2374 | { |
2375 | - if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq)) |
2376 | + if (afu->native->psl_virq == 0 || |
2377 | + afu->native->psl_virq != |
2378 | + irq_find_mapping(NULL, afu->native->psl_hwirq)) |
2379 | return; |
2380 | |
2381 | cxl_unmap_irq(afu->native->psl_virq, afu); |
2382 | cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq); |
2383 | kfree(afu->psl_irq_name); |
2384 | + afu->native->psl_virq = 0; |
2385 | } |
2386 | |
2387 | static void recover_psl_err(struct cxl_afu *afu, u64 errstat) |
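Each cxl release path now bails out when the virq is already zero and zeroes it after a successful release, making teardown idempotent across error and unwind paths. The shape of that guard, reduced to a sketch (types hypothetical):

    #include <stdio.h>

    struct afu { unsigned int virq; };

    static void free_irq_once(struct afu *afu)
    {
        if (afu->virq == 0)     /* already released: make this a no-op */
            return;

        printf("unmap irq %u\n", afu->virq);
        afu->virq = 0;          /* record the release for later callers */
    }

    int main(void)
    {
        struct afu a = { .virq = 42 };

        free_irq_once(&a);
        free_irq_once(&a);      /* second call is safely ignored */
        return 0;
    }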
2388 | diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c |
2389 | index df5f78ae3d25..8b96c074799c 100644 |
2390 | --- a/drivers/misc/mei/bus.c |
2391 | +++ b/drivers/misc/mei/bus.c |
2392 | @@ -763,8 +763,10 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a, |
2393 | { |
2394 | struct mei_cl_device *cldev = to_mei_cl_device(dev); |
2395 | const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); |
2396 | + u8 version = mei_me_cl_ver(cldev->me_cl); |
2397 | |
2398 | - return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:", cldev->name, uuid); |
2399 | + return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:", |
2400 | + cldev->name, uuid, version); |
2401 | } |
2402 | static DEVICE_ATTR_RO(modalias); |
2403 | |
2404 | diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c |
2405 | index 4a5e948c62df..25b7b930e02a 100644 |
2406 | --- a/drivers/mtd/nand/tango_nand.c |
2407 | +++ b/drivers/mtd/nand/tango_nand.c |
2408 | @@ -55,10 +55,10 @@ |
2409 | * byte 1 for other packets in the page (PKT_N, for N > 0) |
2410 | * ERR_COUNT_PKT_N is the max error count over all but the first packet. |
2411 | */ |
2412 | -#define DECODE_OK_PKT_0(v) ((v) & BIT(7)) |
2413 | -#define DECODE_OK_PKT_N(v) ((v) & BIT(15)) |
2414 | #define ERR_COUNT_PKT_0(v) (((v) >> 0) & 0x3f) |
2415 | #define ERR_COUNT_PKT_N(v) (((v) >> 8) & 0x3f) |
2416 | +#define DECODE_FAIL_PKT_0(v) (((v) & BIT(7)) == 0) |
2417 | +#define DECODE_FAIL_PKT_N(v) (((v) & BIT(15)) == 0) |
2418 | |
2419 | /* Offsets relative to pbus_base */ |
2420 | #define PBUS_CS_CTRL 0x83c |
2421 | @@ -193,6 +193,8 @@ static int check_erased_page(struct nand_chip *chip, u8 *buf) |
2422 | chip->ecc.strength); |
2423 | if (res < 0) |
2424 | mtd->ecc_stats.failed++; |
2425 | + else |
2426 | + mtd->ecc_stats.corrected += res; |
2427 | |
2428 | bitflips = max(res, bitflips); |
2429 | buf += pkt_size; |
2430 | @@ -202,9 +204,11 @@ static int check_erased_page(struct nand_chip *chip, u8 *buf) |
2431 | return bitflips; |
2432 | } |
2433 | |
2434 | -static int decode_error_report(struct tango_nfc *nfc) |
2435 | +static int decode_error_report(struct nand_chip *chip) |
2436 | { |
2437 | u32 status, res; |
2438 | + struct mtd_info *mtd = nand_to_mtd(chip); |
2439 | + struct tango_nfc *nfc = to_tango_nfc(chip->controller); |
2440 | |
2441 | status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS); |
2442 | if (status & PAGE_IS_EMPTY) |
2443 | @@ -212,10 +216,14 @@ static int decode_error_report(struct tango_nfc *nfc) |
2444 | |
2445 | res = readl_relaxed(nfc->mem_base + ERROR_REPORT); |
2446 | |
2447 | - if (DECODE_OK_PKT_0(res) && DECODE_OK_PKT_N(res)) |
2448 | - return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res)); |
2449 | + if (DECODE_FAIL_PKT_0(res) || DECODE_FAIL_PKT_N(res)) |
2450 | + return -EBADMSG; |
2451 | + |
2452 | + /* ERR_COUNT_PKT_N is max, not sum, but that's all we have */ |
2453 | + mtd->ecc_stats.corrected += |
2454 | + ERR_COUNT_PKT_0(res) + ERR_COUNT_PKT_N(res); |
2455 | |
2456 | - return -EBADMSG; |
2457 | + return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res)); |
2458 | } |
2459 | |
2460 | static void tango_dma_callback(void *arg) |
2461 | @@ -280,7 +288,7 @@ static int tango_read_page(struct mtd_info *mtd, struct nand_chip *chip, |
2462 | if (err) |
2463 | return err; |
2464 | |
2465 | - res = decode_error_report(nfc); |
2466 | + res = decode_error_report(chip); |
2467 | if (res < 0) { |
2468 | chip->ecc.read_oob_raw(mtd, chip, page); |
2469 | res = check_erased_page(chip, buf); |
2470 | @@ -661,6 +669,7 @@ static const struct of_device_id tango_nand_ids[] = { |
2471 | { .compatible = "sigma,smp8758-nand" }, |
2472 | { /* sentinel */ } |
2473 | }; |
2474 | +MODULE_DEVICE_TABLE(of, tango_nand_ids); |
2475 | |
2476 | static struct platform_driver tango_nand_driver = { |
2477 | .probe = tango_nand_probe, |
2478 | diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c |
2479 | index a68d4889f5db..a96916a63fa3 100644 |
2480 | --- a/drivers/net/ethernet/broadcom/bcmsysport.c |
2481 | +++ b/drivers/net/ethernet/broadcom/bcmsysport.c |
2482 | @@ -1968,9 +1968,12 @@ static int bcm_sysport_probe(struct platform_device *pdev) |
2483 | priv->num_rx_desc_words = params->num_rx_desc_words; |
2484 | |
2485 | priv->irq0 = platform_get_irq(pdev, 0); |
2486 | - if (!priv->is_lite) |
2487 | + if (!priv->is_lite) { |
2488 | priv->irq1 = platform_get_irq(pdev, 1); |
2489 | - priv->wol_irq = platform_get_irq(pdev, 2); |
2490 | + priv->wol_irq = platform_get_irq(pdev, 2); |
2491 | + } else { |
2492 | + priv->wol_irq = platform_get_irq(pdev, 1); |
2493 | + } |
2494 | if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) { |
2495 | dev_err(&pdev->dev, "invalid interrupts\n"); |
2496 | ret = -EINVAL; |
2497 | diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c |
2498 | index 9e8c06130c09..c2f9a1f93c70 100644 |
2499 | --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c |
2500 | +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c |
2501 | @@ -1926,7 +1926,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, |
2502 | } |
2503 | |
2504 | /* select a non-FCoE queue */ |
2505 | - return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); |
2506 | + return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); |
2507 | } |
2508 | |
2509 | void bnx2x_set_num_queues(struct bnx2x *bp) |
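The bnx2x change widens the queue-selection modulus from the ethernet queue count alone to queues times max_cos, so hashed traffic can land in every traffic-class band of queues rather than only the first. A toy version of that mapping (sizes hypothetical):

    #include <stdio.h>

    /* hypothetical sizes: per-cos copies of the ethernet queues */
    #define NUM_ETH_QUEUES 4
    #define MAX_COS        3

    /* map an arbitrary hash to a valid tx queue across all cos bands */
    static unsigned int select_queue(unsigned int hash)
    {
        return hash % (NUM_ETH_QUEUES * MAX_COS);
    }

    int main(void)
    {
        /* with a modulus of NUM_ETH_QUEUES alone, queues 4..11 were
         * never selected; the wider modulus reaches all of them */
        for (unsigned int h = 0; h < 14; h++)
            printf("hash %2u -> queue %u\n", h, select_queue(h));
        return 0;
    }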
2510 | diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |
2511 | index afb0967d2ce6..012194bc92d3 100644 |
2512 | --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |
2513 | +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |
2514 | @@ -2217,10 +2217,14 @@ static int cxgb_up(struct adapter *adap) |
2515 | if (err) |
2516 | goto irq_err; |
2517 | } |
2518 | + |
2519 | + mutex_lock(&uld_mutex); |
2520 | enable_rx(adap); |
2521 | t4_sge_start(adap); |
2522 | t4_intr_enable(adap); |
2523 | adap->flags |= FULL_INIT_DONE; |
2524 | + mutex_unlock(&uld_mutex); |
2525 | + |
2526 | notify_ulds(adap, CXGB4_STATE_UP); |
2527 | #if IS_ENABLED(CONFIG_IPV6) |
2528 | update_clip(adap); |
2529 | diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c |
2530 | index 23d82748f52b..4f33660134b8 100644 |
2531 | --- a/drivers/net/ethernet/ethoc.c |
2532 | +++ b/drivers/net/ethernet/ethoc.c |
2533 | @@ -739,6 +739,8 @@ static int ethoc_open(struct net_device *dev) |
2534 | if (ret) |
2535 | return ret; |
2536 | |
2537 | + napi_enable(&priv->napi); |
2538 | + |
2539 | ethoc_init_ring(priv, dev->mem_start); |
2540 | ethoc_reset(priv); |
2541 | |
2542 | @@ -754,7 +756,6 @@ static int ethoc_open(struct net_device *dev) |
2543 | priv->old_duplex = -1; |
2544 | |
2545 | phy_start(dev->phydev); |
2546 | - napi_enable(&priv->napi); |
2547 | |
2548 | if (netif_msg_ifup(priv)) { |
2549 | dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n", |
2550 | diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c |
2551 | index cc065ffbe4b5..bcd4708b3745 100644 |
2552 | --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c |
2553 | +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c |
2554 | @@ -931,7 +931,7 @@ int emac_mac_up(struct emac_adapter *adpt) |
2555 | emac_mac_config(adpt); |
2556 | emac_mac_rx_descs_refill(adpt, &adpt->rx_q); |
2557 | |
2558 | - adpt->phydev->irq = PHY_IGNORE_INTERRUPT; |
2559 | + adpt->phydev->irq = PHY_POLL; |
2560 | ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link, |
2561 | PHY_INTERFACE_MODE_SGMII); |
2562 | if (ret) { |
2563 | diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c |
2564 | index 441c19366489..18461fcb9815 100644 |
2565 | --- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c |
2566 | +++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c |
2567 | @@ -13,15 +13,11 @@ |
2568 | /* Qualcomm Technologies, Inc. EMAC PHY Controller driver. |
2569 | */ |
2570 | |
2571 | -#include <linux/module.h> |
2572 | -#include <linux/of.h> |
2573 | -#include <linux/of_net.h> |
2574 | #include <linux/of_mdio.h> |
2575 | #include <linux/phy.h> |
2576 | #include <linux/iopoll.h> |
2577 | #include <linux/acpi.h> |
2578 | #include "emac.h" |
2579 | -#include "emac-mac.h" |
2580 | |
2581 | /* EMAC base register offsets */ |
2582 | #define EMAC_MDIO_CTRL 0x001414 |
2583 | @@ -52,62 +48,10 @@ |
2584 | |
2585 | #define MDIO_WAIT_TIMES 1000 |
2586 | |
2587 | -#define EMAC_LINK_SPEED_DEFAULT (\ |
2588 | - EMAC_LINK_SPEED_10_HALF |\ |
2589 | - EMAC_LINK_SPEED_10_FULL |\ |
2590 | - EMAC_LINK_SPEED_100_HALF |\ |
2591 | - EMAC_LINK_SPEED_100_FULL |\ |
2592 | - EMAC_LINK_SPEED_1GB_FULL) |
2593 | - |
2594 | -/** |
2595 | - * emac_phy_mdio_autopoll_disable() - disable mdio autopoll |
2596 | - * @adpt: the emac adapter |
2597 | - * |
2598 | - * The autopoll feature takes over the MDIO bus. In order for |
2599 | - * the PHY driver to be able to talk to the PHY over the MDIO |
2600 | - * bus, we need to temporarily disable the autopoll feature. |
2601 | - */ |
2602 | -static int emac_phy_mdio_autopoll_disable(struct emac_adapter *adpt) |
2603 | -{ |
2604 | - u32 val; |
2605 | - |
2606 | - /* disable autopoll */ |
2607 | - emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, MDIO_AP_EN, 0); |
2608 | - |
2609 | - /* wait for any mdio polling to complete */ |
2610 | - if (!readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, val, |
2611 | - !(val & MDIO_BUSY), 100, MDIO_WAIT_TIMES * 100)) |
2612 | - return 0; |
2613 | - |
2614 | - /* failed to disable; ensure it is enabled before returning */ |
2615 | - emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN); |
2616 | - |
2617 | - return -EBUSY; |
2618 | -} |
2619 | - |
2620 | -/** |
2621 | - * emac_phy_mdio_autopoll_disable() - disable mdio autopoll |
2622 | - * @adpt: the emac adapter |
2623 | - * |
2624 | - * The EMAC has the ability to poll the external PHY on the MDIO |
2625 | - * bus for link state changes. This eliminates the need for the |
2626 | - * driver to poll the phy. If if the link state does change, |
2627 | - * the EMAC issues an interrupt on behalf of the PHY. |
2628 | - */ |
2629 | -static void emac_phy_mdio_autopoll_enable(struct emac_adapter *adpt) |
2630 | -{ |
2631 | - emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN); |
2632 | -} |
2633 | - |
2634 | static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum) |
2635 | { |
2636 | struct emac_adapter *adpt = bus->priv; |
2637 | u32 reg; |
2638 | - int ret; |
2639 | - |
2640 | - ret = emac_phy_mdio_autopoll_disable(adpt); |
2641 | - if (ret) |
2642 | - return ret; |
2643 | |
2644 | emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK, |
2645 | (addr << PHY_ADDR_SHFT)); |
2646 | @@ -122,24 +66,15 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum) |
2647 | if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, |
2648 | !(reg & (MDIO_START | MDIO_BUSY)), |
2649 | 100, MDIO_WAIT_TIMES * 100)) |
2650 | - ret = -EIO; |
2651 | - else |
2652 | - ret = (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK; |
2653 | + return -EIO; |
2654 | |
2655 | - emac_phy_mdio_autopoll_enable(adpt); |
2656 | - |
2657 | - return ret; |
2658 | + return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK; |
2659 | } |
2660 | |
2661 | static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) |
2662 | { |
2663 | struct emac_adapter *adpt = bus->priv; |
2664 | u32 reg; |
2665 | - int ret; |
2666 | - |
2667 | - ret = emac_phy_mdio_autopoll_disable(adpt); |
2668 | - if (ret) |
2669 | - return ret; |
2670 | |
2671 | emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK, |
2672 | (addr << PHY_ADDR_SHFT)); |
2673 | @@ -155,11 +90,9 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) |
2674 | if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, |
2675 | !(reg & (MDIO_START | MDIO_BUSY)), 100, |
2676 | MDIO_WAIT_TIMES * 100)) |
2677 | - ret = -EIO; |
2678 | + return -EIO; |
2679 | |
2680 | - emac_phy_mdio_autopoll_enable(adpt); |
2681 | - |
2682 | - return ret; |
2683 | + return 0; |
2684 | } |
2685 | |
2686 | /* Configure the MDIO bus and connect the external PHY */ |
2687 | diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c |
2688 | index 28a8cdc36485..98a326faea29 100644 |
2689 | --- a/drivers/net/ethernet/qualcomm/emac/emac.c |
2690 | +++ b/drivers/net/ethernet/qualcomm/emac/emac.c |
2691 | @@ -50,19 +50,7 @@ |
2692 | #define DMAR_DLY_CNT_DEF 15 |
2693 | #define DMAW_DLY_CNT_DEF 4 |
2694 | |
2695 | -#define IMR_NORMAL_MASK (\ |
2696 | - ISR_ERROR |\ |
2697 | - ISR_GPHY_LINK |\ |
2698 | - ISR_TX_PKT |\ |
2699 | - GPHY_WAKEUP_INT) |
2700 | - |
2701 | -#define IMR_EXTENDED_MASK (\ |
2702 | - SW_MAN_INT |\ |
2703 | - ISR_OVER |\ |
2704 | - ISR_ERROR |\ |
2705 | - ISR_GPHY_LINK |\ |
2706 | - ISR_TX_PKT |\ |
2707 | - GPHY_WAKEUP_INT) |
2708 | +#define IMR_NORMAL_MASK (ISR_ERROR | ISR_OVER | ISR_TX_PKT) |
2709 | |
2710 | #define ISR_TX_PKT (\ |
2711 | TX_PKT_INT |\ |
2712 | @@ -70,10 +58,6 @@ |
2713 | TX_PKT_INT2 |\ |
2714 | TX_PKT_INT3) |
2715 | |
2716 | -#define ISR_GPHY_LINK (\ |
2717 | - GPHY_LINK_UP_INT |\ |
2718 | - GPHY_LINK_DOWN_INT) |
2719 | - |
2720 | #define ISR_OVER (\ |
2721 | RFD0_UR_INT |\ |
2722 | RFD1_UR_INT |\ |
2723 | @@ -187,10 +171,6 @@ irqreturn_t emac_isr(int _irq, void *data) |
2724 | if (status & ISR_OVER) |
2725 | net_warn_ratelimited("warning: TX/RX overflow\n"); |
2726 | |
2727 | - /* link event */ |
2728 | - if (status & ISR_GPHY_LINK) |
2729 | - phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT)); |
2730 | - |
2731 | exit: |
2732 | /* enable the interrupt */ |
2733 | writel(irq->mask, adpt->base + EMAC_INT_MASK); |
2734 | diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c |
2735 | index 3cd7989c007d..784782da3a85 100644 |
2736 | --- a/drivers/net/ethernet/renesas/ravb_main.c |
2737 | +++ b/drivers/net/ethernet/renesas/ravb_main.c |
2738 | @@ -230,18 +230,6 @@ static void ravb_ring_free(struct net_device *ndev, int q) |
2739 | int ring_size; |
2740 | int i; |
2741 | |
2742 | - /* Free RX skb ringbuffer */ |
2743 | - if (priv->rx_skb[q]) { |
2744 | - for (i = 0; i < priv->num_rx_ring[q]; i++) |
2745 | - dev_kfree_skb(priv->rx_skb[q][i]); |
2746 | - } |
2747 | - kfree(priv->rx_skb[q]); |
2748 | - priv->rx_skb[q] = NULL; |
2749 | - |
2750 | - /* Free aligned TX buffers */ |
2751 | - kfree(priv->tx_align[q]); |
2752 | - priv->tx_align[q] = NULL; |
2753 | - |
2754 | if (priv->rx_ring[q]) { |
2755 | for (i = 0; i < priv->num_rx_ring[q]; i++) { |
2756 | struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; |
2757 | @@ -270,6 +258,18 @@ static void ravb_ring_free(struct net_device *ndev, int q) |
2758 | priv->tx_ring[q] = NULL; |
2759 | } |
2760 | |
2761 | + /* Free RX skb ringbuffer */ |
2762 | + if (priv->rx_skb[q]) { |
2763 | + for (i = 0; i < priv->num_rx_ring[q]; i++) |
2764 | + dev_kfree_skb(priv->rx_skb[q][i]); |
2765 | + } |
2766 | + kfree(priv->rx_skb[q]); |
2767 | + priv->rx_skb[q] = NULL; |
2768 | + |
2769 | + /* Free aligned TX buffers */ |
2770 | + kfree(priv->tx_align[q]); |
2771 | + priv->tx_align[q] = NULL; |
2772 | + |
2773 | /* Free TX skb ringbuffer. |
2774 | * SKBs are freed by ravb_tx_free() call above. |
2775 | */ |
2776 | diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
2777 | index 4498a3861aa3..67e86ace5d92 100644 |
2778 | --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
2779 | +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
2780 | @@ -1950,7 +1950,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des, |
2781 | |
2782 | priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, |
2783 | 0, 1, |
2784 | - (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE), |
2785 | + (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), |
2786 | 0, 0); |
2787 | |
2788 | tmp_len -= TSO_MAX_BUFF_SIZE; |
2789 | diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c |
2790 | index 959fd12d2e67..6ebb0f559a42 100644 |
2791 | --- a/drivers/net/geneve.c |
2792 | +++ b/drivers/net/geneve.c |
2793 | @@ -1133,7 +1133,7 @@ static int geneve_configure(struct net *net, struct net_device *dev, |
2794 | |
2795 | /* make enough headroom for basic scenario */ |
2796 | encap_len = GENEVE_BASE_HLEN + ETH_HLEN; |
2797 | - if (ip_tunnel_info_af(info) == AF_INET) { |
2798 | + if (!metadata && ip_tunnel_info_af(info) == AF_INET) { |
2799 | encap_len += sizeof(struct iphdr); |
2800 | dev->max_mtu -= sizeof(struct iphdr); |
2801 | } else { |
2802 | diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c |
2803 | index bdb6ae16d4a8..70dbd5a48b6b 100644 |
2804 | --- a/drivers/net/vxlan.c |
2805 | +++ b/drivers/net/vxlan.c |
2806 | @@ -59,6 +59,8 @@ static const u8 all_zeros_mac[ETH_ALEN + 2]; |
2807 | |
2808 | static int vxlan_sock_add(struct vxlan_dev *vxlan); |
2809 | |
2810 | +static void vxlan_vs_del_dev(struct vxlan_dev *vxlan); |
2811 | + |
2812 | /* per-network namespace private data for this module */ |
2813 | struct vxlan_net { |
2814 | struct list_head vxlan_list; |
2815 | @@ -740,6 +742,22 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) |
2816 | call_rcu(&f->rcu, vxlan_fdb_free); |
2817 | } |
2818 | |
2819 | +static void vxlan_dst_free(struct rcu_head *head) |
2820 | +{ |
2821 | + struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu); |
2822 | + |
2823 | + dst_cache_destroy(&rd->dst_cache); |
2824 | + kfree(rd); |
2825 | +} |
2826 | + |
2827 | +static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, |
2828 | + struct vxlan_rdst *rd) |
2829 | +{ |
2830 | + list_del_rcu(&rd->list); |
2831 | + vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH); |
2832 | + call_rcu(&rd->rcu, vxlan_dst_free); |
2833 | +} |
2834 | + |
2835 | static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, |
2836 | union vxlan_addr *ip, __be16 *port, __be32 *src_vni, |
2837 | __be32 *vni, u32 *ifindex) |
2838 | @@ -864,9 +882,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, |
2839 | * otherwise destroy the fdb entry |
2840 | */ |
2841 | if (rd && !list_is_singular(&f->remotes)) { |
2842 | - list_del_rcu(&rd->list); |
2843 | - vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH); |
2844 | - kfree_rcu(rd, rcu); |
2845 | + vxlan_fdb_dst_destroy(vxlan, f, rd); |
2846 | goto out; |
2847 | } |
2848 | |
2849 | @@ -1067,6 +1083,8 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan) |
2850 | rcu_assign_pointer(vxlan->vn4_sock, NULL); |
2851 | synchronize_net(); |
2852 | |
2853 | + vxlan_vs_del_dev(vxlan); |
2854 | + |
2855 | if (__vxlan_sock_release_prep(sock4)) { |
2856 | udp_tunnel_sock_release(sock4->sock); |
2857 | kfree(sock4); |
2858 | @@ -2338,6 +2356,15 @@ static void vxlan_cleanup(unsigned long arg) |
2859 | mod_timer(&vxlan->age_timer, next_timer); |
2860 | } |
2861 | |
2862 | +static void vxlan_vs_del_dev(struct vxlan_dev *vxlan) |
2863 | +{ |
2864 | + struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); |
2865 | + |
2866 | + spin_lock(&vn->sock_lock); |
2867 | + hlist_del_init_rcu(&vxlan->hlist); |
2868 | + spin_unlock(&vn->sock_lock); |
2869 | +} |
2870 | + |
2871 | static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan) |
2872 | { |
2873 | struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); |
2874 | @@ -3275,15 +3302,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], |
2875 | static void vxlan_dellink(struct net_device *dev, struct list_head *head) |
2876 | { |
2877 | struct vxlan_dev *vxlan = netdev_priv(dev); |
2878 | - struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); |
2879 | |
2880 | vxlan_flush(vxlan, true); |
2881 | |
2882 | - spin_lock(&vn->sock_lock); |
2883 | - if (!hlist_unhashed(&vxlan->hlist)) |
2884 | - hlist_del_rcu(&vxlan->hlist); |
2885 | - spin_unlock(&vn->sock_lock); |
2886 | - |
2887 | gro_cells_destroy(&vxlan->gro_cells); |
2888 | list_del(&vxlan->next); |
2889 | unregister_netdevice_queue(dev, head); |
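
[Editor's note] vxlan_dst_free() recovers the enclosing vxlan_rdst from the embedded rcu_head via container_of, so the deferred destructor can run per-object cleanup (dst_cache_destroy) that a plain kfree_rcu could not. A userspace sketch of the container_of recovery, with a direct call standing in for the RCU grace period (all names hypothetical):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct cb_head { void (*func)(struct cb_head *); };

    struct remote {
        int cache;              /* stands in for rd->dst_cache */
        struct cb_head rcu;     /* embedded callback head */
    };

    static void remote_free(struct cb_head *head)
    {
        struct remote *rd = container_of(head, struct remote, rcu);

        printf("destroying cache %d, then freeing\n", rd->cache);
        free(rd);
    }

    int main(void)
    {
        struct remote *rd = malloc(sizeof(*rd));

        rd->cache = 42;
        rd->rcu.func = remote_free;
        rd->rcu.func(&rd->rcu);   /* kernel: runs after a grace period */
        return 0;
    }
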
2890 | diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c |
2891 | index 9ff790174906..1d09097dec88 100644 |
2892 | --- a/drivers/pinctrl/intel/pinctrl-cherryview.c |
2893 | +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c |
2894 | @@ -1542,7 +1542,8 @@ static const struct dmi_system_id chv_no_valid_mask[] = { |
2895 | DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"), |
2896 | DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"), |
2897 | }, |
2898 | - } |
2899 | + }, |
2900 | + {} |
2901 | }; |
2902 | |
2903 | static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) |
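
[Editor's note] The cherryview fix appends an empty `{}` entry because the DMI matcher walks the array until it hits an all-zero record; without the sentinel the walk runs off the end of the table. A small sketch of that sentinel-terminated-table convention:

    #include <stdio.h>
    #include <string.h>

    struct match { const char *name; };

    static const struct match table[] = {
        { "Edgar" },
        { "Setzer" },
        { NULL }     /* sentinel: terminates the walk */
    };

    static const struct match *find(const char *want)
    {
        const struct match *m;

        for (m = table; m->name; m++)   /* stops at the sentinel */
            if (strcmp(m->name, want) == 0)
                return m;
        return NULL;
    }

    int main(void)
    {
        printf("%s\n", find("Setzer") ? "hit" : "miss");
        return 0;
    }
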
2904 | diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c |
2905 | index 84c9098cc089..aea10682e0fc 100644 |
2906 | --- a/drivers/scsi/qla2xxx/qla_bsg.c |
2907 | +++ b/drivers/scsi/qla2xxx/qla_bsg.c |
2908 | @@ -730,6 +730,8 @@ qla2x00_process_loopback(struct bsg_job *bsg_job) |
2909 | return -EIO; |
2910 | } |
2911 | |
2912 | + memset(&elreq, 0, sizeof(elreq)); |
2913 | + |
2914 | elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev, |
2915 | bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, |
2916 | DMA_TO_DEVICE); |
2917 | @@ -795,10 +797,9 @@ qla2x00_process_loopback(struct bsg_job *bsg_job) |
2918 | |
2919 | if (atomic_read(&vha->loop_state) == LOOP_READY && |
2920 | (ha->current_topology == ISP_CFG_F || |
2921 | - ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) && |
2922 | - le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE |
2923 | - && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && |
2924 | - elreq.options == EXTERNAL_LOOPBACK) { |
2925 | + (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE && |
2926 | + req_data_len == MAX_ELS_FRAME_PAYLOAD)) && |
2927 | + elreq.options == EXTERNAL_LOOPBACK) { |
2928 | type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; |
2929 | ql_dbg(ql_dbg_user, vha, 0x701e, |
2930 | "BSG request type: %s.\n", type); |
2931 | diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c |
2932 | index 51b4179469d1..88748a6ab73f 100644 |
2933 | --- a/drivers/scsi/qla2xxx/qla_dbg.c |
2934 | +++ b/drivers/scsi/qla2xxx/qla_dbg.c |
2935 | @@ -1131,7 +1131,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) |
2936 | |
2937 | /* Mailbox registers. */ |
2938 | mbx_reg = &reg->mailbox0; |
2939 | - for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++) |
2940 | + for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++) |
2941 | fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); |
2942 | |
2943 | /* Transfer sequence registers. */ |
2944 | @@ -2090,7 +2090,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) |
2945 | |
2946 | /* Mailbox registers. */ |
2947 | mbx_reg = &reg->mailbox0; |
2948 | - for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++) |
2949 | + for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++) |
2950 | fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); |
2951 | |
2952 | /* Transfer sequence registers. */ |
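
[Editor's note] Both qla_dbg hunks fix a copy loop that advanced a stale cursor (dmp_reg) while dereferencing another (mbx_reg), so every iteration re-read the same register. A sketch of the corrected idiom, advancing the pointer that is actually read through:

    #include <stdio.h>

    int main(void)
    {
        unsigned short regs[4] = { 1, 2, 3, 4 };
        unsigned short out[4];
        unsigned short *mbx_reg = regs;
        unsigned cnt;

        /* advance the pointer we read through, not some other cursor */
        for (cnt = 0; cnt < 4; cnt++, mbx_reg++)
            out[cnt] = *mbx_reg;

        printf("%u %u %u %u\n", out[0], out[1], out[2], out[3]);
        return 0;
    }
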
2953 | diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h |
2954 | index ae119018dfaa..eddbc1218a39 100644 |
2955 | --- a/drivers/scsi/qla2xxx/qla_def.h |
2956 | +++ b/drivers/scsi/qla2xxx/qla_def.h |
2957 | @@ -3425,6 +3425,7 @@ struct qla_hw_data { |
2958 | uint8_t max_req_queues; |
2959 | uint8_t max_rsp_queues; |
2960 | uint8_t max_qpairs; |
2961 | + uint8_t num_qpairs; |
2962 | struct qla_qpair *base_qpair; |
2963 | struct qla_npiv_entry *npiv_info; |
2964 | uint16_t nvram_npiv_size; |
2965 | diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c |
2966 | index f9d2fe7b1ade..98a2ca4fe03c 100644 |
2967 | --- a/drivers/scsi/qla2xxx/qla_init.c |
2968 | +++ b/drivers/scsi/qla2xxx/qla_init.c |
2969 | @@ -7543,12 +7543,13 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v |
2970 | /* Assign available que pair id */ |
2971 | mutex_lock(&ha->mq_lock); |
2972 | qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs); |
2973 | - if (qpair_id >= ha->max_qpairs) { |
2974 | + if (ha->num_qpairs >= ha->max_qpairs) { |
2975 | mutex_unlock(&ha->mq_lock); |
2976 | ql_log(ql_log_warn, vha, 0x0183, |
2977 | "No resources to create additional q pair.\n"); |
2978 | goto fail_qid_map; |
2979 | } |
2980 | + ha->num_qpairs++; |
2981 | set_bit(qpair_id, ha->qpair_qid_map); |
2982 | ha->queue_pair_map[qpair_id] = qpair; |
2983 | qpair->id = qpair_id; |
2984 | @@ -7635,6 +7636,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v |
2985 | fail_msix: |
2986 | ha->queue_pair_map[qpair_id] = NULL; |
2987 | clear_bit(qpair_id, ha->qpair_qid_map); |
2988 | + ha->num_qpairs--; |
2989 | mutex_unlock(&ha->mq_lock); |
2990 | fail_qid_map: |
2991 | kfree(qpair); |
2992 | @@ -7660,6 +7662,7 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) |
2993 | mutex_lock(&ha->mq_lock); |
2994 | ha->queue_pair_map[qpair->id] = NULL; |
2995 | clear_bit(qpair->id, ha->qpair_qid_map); |
2996 | + ha->num_qpairs--; |
2997 | list_del(&qpair->qp_list_elem); |
2998 | if (list_empty(&vha->qp_list)) |
2999 | vha->flags.qpairs_available = 0; |
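
[Editor's note] The qla_init change pairs the qid bitmap with an explicit in-use counter (num_qpairs), checks it against the capacity before allocating, and decrements it on both the error and delete paths. A minimal sketch of bitmap-style ID allocation guarded by a balanced counter (sizes hypothetical):

    #include <stdio.h>

    #define MAX_IDS 8

    static unsigned char used[MAX_IDS];
    static int num_used;

    static int alloc_id(void)
    {
        int id;

        if (num_used >= MAX_IDS)    /* capacity check before searching */
            return -1;
        for (id = 0; id < MAX_IDS; id++) {
            if (!used[id]) {
                used[id] = 1;
                num_used++;         /* keep the counter in step */
                return id;
            }
        }
        return -1;
    }

    static void free_id(int id)
    {
        used[id] = 0;
        num_used--;                 /* balanced on the release path too */
    }

    int main(void)
    {
        int a = alloc_id(), b = alloc_id();

        free_id(a);
        printf("a=%d b=%d next=%d in-use=%d\n", a, b, alloc_id(), num_used);
        return 0;
    }
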
3000 | diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c |
3001 | index 3203367a4f42..189f72d5aa4f 100644 |
3002 | --- a/drivers/scsi/qla2xxx/qla_isr.c |
3003 | +++ b/drivers/scsi/qla2xxx/qla_isr.c |
3004 | @@ -3282,7 +3282,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) |
3005 | } |
3006 | |
3007 | /* Enable MSI-X vector for response queue update for queue 0 */ |
3008 | - if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { |
3009 | + if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { |
3010 | if (ha->msixbase && ha->mqiobase && |
3011 | (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || |
3012 | ql2xmqsupport)) |
3013 | diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c |
3014 | index a113ab3592a7..cba1fc5e8be9 100644 |
3015 | --- a/drivers/scsi/qla2xxx/qla_mbx.c |
3016 | +++ b/drivers/scsi/qla2xxx/qla_mbx.c |
3017 | @@ -3676,15 +3676,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, |
3018 | qlt_update_host_map(vha, id); |
3019 | } |
3020 | |
3021 | - fc_host_port_name(vha->host) = |
3022 | - wwn_to_u64(vha->port_name); |
3023 | - |
3024 | - if (qla_ini_mode_enabled(vha)) |
3025 | - ql_dbg(ql_dbg_mbx, vha, 0x1018, |
3026 | - "FA-WWN portname %016llx (%x)\n", |
3027 | - fc_host_port_name(vha->host), |
3028 | - rptid_entry->vp_status); |
3029 | - |
3030 | set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); |
3031 | set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); |
3032 | } else { |
3033 | @@ -4821,9 +4812,9 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, |
3034 | |
3035 | memset(mcp->mb, 0 , sizeof(mcp->mb)); |
3036 | mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; |
3037 | - mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */ |
3038 | + /* BIT_6 specifies 64bit address */ |
3039 | + mcp->mb[1] = mreq->options | BIT_15 | BIT_6; |
3040 | if (IS_CNA_CAPABLE(ha)) { |
3041 | - mcp->mb[1] |= BIT_15; |
3042 | mcp->mb[2] = vha->fcoe_fcf_idx; |
3043 | } |
3044 | mcp->mb[16] = LSW(mreq->rcv_dma); |
3045 | diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c |
3046 | index 83d61d2142e9..190f609317af 100644 |
3047 | --- a/drivers/scsi/qla2xxx/qla_os.c |
3048 | +++ b/drivers/scsi/qla2xxx/qla_os.c |
3049 | @@ -2626,10 +2626,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) |
3050 | |
3051 | if (mem_only) { |
3052 | if (pci_enable_device_mem(pdev)) |
3053 | - goto probe_out; |
3054 | + return ret; |
3055 | } else { |
3056 | if (pci_enable_device(pdev)) |
3057 | - goto probe_out; |
3058 | + return ret; |
3059 | } |
3060 | |
3061 | /* This may fail but that's ok */ |
3062 | @@ -2639,7 +2639,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) |
3063 | if (!ha) { |
3064 | ql_log_pci(ql_log_fatal, pdev, 0x0009, |
3065 | "Unable to allocate memory for ha.\n"); |
3066 | - goto probe_out; |
3067 | + goto disable_device; |
3068 | } |
3069 | ql_dbg_pci(ql_dbg_init, pdev, 0x000a, |
3070 | "Memory allocated for ha=%p.\n", ha); |
3071 | @@ -3258,7 +3258,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) |
3072 | kfree(ha); |
3073 | ha = NULL; |
3074 | |
3075 | -probe_out: |
3076 | +disable_device: |
3077 | pci_disable_device(pdev); |
3078 | return ret; |
3079 | } |
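
[Editor's note] The qla_os hunk renames probe_out to disable_device and stops jumping there before the PCI device was enabled, so each error label undoes only what has already succeeded. A sketch of that staged-cleanup goto convention (function names hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    static int enable_device(void)   { return 0; }
    static void disable_device(void) { puts("disabled"); }

    static int probe(void)
    {
        void *ha;
        int ret = -1;

        if (enable_device())
            return ret;            /* nothing to undo yet: plain return */

        ha = malloc(32);
        if (!ha)
            goto disable;          /* undo only the completed step */

        free(ha);
        return 0;

    disable:
        disable_device();
        return ret;
    }

    int main(void)
    {
        return probe();
    }
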
3080 | diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c |
3081 | index 8a58ef3adab4..c197972a3e2d 100644 |
3082 | --- a/drivers/scsi/qla2xxx/qla_tmpl.c |
3083 | +++ b/drivers/scsi/qla2xxx/qla_tmpl.c |
3084 | @@ -371,7 +371,7 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, |
3085 | goto done; |
3086 | } |
3087 | |
3088 | - if (end <= start || start == 0 || end == 0) { |
3089 | + if (end < start || start == 0 || end == 0) { |
3090 | ql_dbg(ql_dbg_misc, vha, 0xd023, |
3091 | "%s: unusable range (start=%x end=%x)\n", __func__, |
3092 | ent->t262.end_addr, ent->t262.start_addr); |
3093 | diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c |
3094 | index 2e1bd47337fd..e6727cefde05 100644 |
3095 | --- a/drivers/staging/lustre/lustre/lov/lov_pack.c |
3096 | +++ b/drivers/staging/lustre/lustre/lov/lov_pack.c |
3097 | @@ -293,18 +293,10 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm, |
3098 | size_t lmmk_size; |
3099 | size_t lum_size; |
3100 | int rc; |
3101 | - mm_segment_t seg; |
3102 | |
3103 | if (!lsm) |
3104 | return -ENODATA; |
3105 | |
3106 | - /* |
3107 | - * "Switch to kernel segment" to allow copying from kernel space by |
3108 | - * copy_{to,from}_user(). |
3109 | - */ |
3110 | - seg = get_fs(); |
3111 | - set_fs(KERNEL_DS); |
3112 | - |
3113 | if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) { |
3114 | CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n", |
3115 | lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3); |
3116 | @@ -406,6 +398,5 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm, |
3117 | out_free: |
3118 | kvfree(lmmk); |
3119 | out: |
3120 | - set_fs(seg); |
3121 | return rc; |
3122 | } |
3123 | diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c |
3124 | index a0cd56ee5fe9..ff26626d94ef 100644 |
3125 | --- a/drivers/target/target_core_transport.c |
3126 | +++ b/drivers/target/target_core_transport.c |
3127 | @@ -1160,15 +1160,28 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size) |
3128 | if (cmd->unknown_data_length) { |
3129 | cmd->data_length = size; |
3130 | } else if (size != cmd->data_length) { |
3131 | - pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" |
3132 | + pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:" |
3133 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" |
3134 | " 0x%02x\n", cmd->se_tfo->get_fabric_name(), |
3135 | cmd->data_length, size, cmd->t_task_cdb[0]); |
3136 | |
3137 | - if (cmd->data_direction == DMA_TO_DEVICE && |
3138 | - cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { |
3139 | - pr_err("Rejecting underflow/overflow WRITE data\n"); |
3140 | - return TCM_INVALID_CDB_FIELD; |
3141 | + if (cmd->data_direction == DMA_TO_DEVICE) { |
3142 | + if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { |
3143 | + pr_err_ratelimited("Rejecting underflow/overflow" |
3144 | + " for WRITE data CDB\n"); |
3145 | + return TCM_INVALID_CDB_FIELD; |
3146 | + } |
3147 | + /* |
3148 | + * Some fabric drivers like iscsi-target still expect to |
3149 | + * always reject overflow writes. Reject this case until |
3150 | + * full fabric driver level support for overflow writes |
3151 | + * is introduced tree-wide. |
3152 | + */ |
3153 | + if (size > cmd->data_length) { |
3154 | + pr_err_ratelimited("Rejecting overflow for" |
3155 | + " WRITE control CDB\n"); |
3156 | + return TCM_INVALID_CDB_FIELD; |
3157 | + } |
3158 | } |
3159 | /* |
3160 | * Reject READ_* or WRITE_* with overflow/underflow for |
3161 | diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c |
3162 | index 4c26d15ad7d9..579706d36f5c 100644 |
3163 | --- a/drivers/tty/serial/8250/8250_port.c |
3164 | +++ b/drivers/tty/serial/8250/8250_port.c |
3165 | @@ -47,6 +47,7 @@ |
3166 | /* |
3167 | * These are definitions for the Exar XR17V35X and XR17(C|D)15X |
3168 | */ |
3169 | +#define UART_EXAR_INT0 0x80 |
3170 | #define UART_EXAR_SLEEP 0x8b /* Sleep mode */ |
3171 | #define UART_EXAR_DVID 0x8d /* Device identification */ |
3172 | |
3173 | @@ -1869,17 +1870,13 @@ static int serial8250_default_handle_irq(struct uart_port *port) |
3174 | static int exar_handle_irq(struct uart_port *port) |
3175 | { |
3176 | unsigned int iir = serial_port_in(port, UART_IIR); |
3177 | - int ret; |
3178 | + int ret = 0; |
3179 | |
3180 | - ret = serial8250_handle_irq(port, iir); |
3181 | + if (((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) && |
3182 | + serial_port_in(port, UART_EXAR_INT0) != 0) |
3183 | + ret = 1; |
3184 | |
3185 | - if ((port->type == PORT_XR17V35X) || |
3186 | - (port->type == PORT_XR17D15X)) { |
3187 | - serial_port_in(port, 0x80); |
3188 | - serial_port_in(port, 0x81); |
3189 | - serial_port_in(port, 0x82); |
3190 | - serial_port_in(port, 0x83); |
3191 | - } |
3192 | + ret |= serial8250_handle_irq(port, iir); |
3193 | |
3194 | return ret; |
3195 | } |
3196 | @@ -2177,6 +2174,8 @@ int serial8250_do_startup(struct uart_port *port) |
3197 | serial_port_in(port, UART_RX); |
3198 | serial_port_in(port, UART_IIR); |
3199 | serial_port_in(port, UART_MSR); |
3200 | + if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) |
3201 | + serial_port_in(port, UART_EXAR_INT0); |
3202 | |
3203 | /* |
3204 | * At this point, there's no way the LSR could still be 0xff; |
3205 | @@ -2335,6 +2334,8 @@ int serial8250_do_startup(struct uart_port *port) |
3206 | serial_port_in(port, UART_RX); |
3207 | serial_port_in(port, UART_IIR); |
3208 | serial_port_in(port, UART_MSR); |
3209 | + if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) |
3210 | + serial_port_in(port, UART_EXAR_INT0); |
3211 | up->lsr_saved_flags = 0; |
3212 | up->msr_saved_flags = 0; |
3213 | |
3214 | diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c |
3215 | index 157883653256..f190a84a0246 100644 |
3216 | --- a/drivers/tty/serial/ifx6x60.c |
3217 | +++ b/drivers/tty/serial/ifx6x60.c |
3218 | @@ -1382,9 +1382,9 @@ static struct spi_driver ifx_spi_driver = { |
3219 | static void __exit ifx_spi_exit(void) |
3220 | { |
3221 | /* unregister */ |
3222 | + spi_unregister_driver(&ifx_spi_driver); |
3223 | tty_unregister_driver(tty_drv); |
3224 | put_tty_driver(tty_drv); |
3225 | - spi_unregister_driver(&ifx_spi_driver); |
3226 | unregister_reboot_notifier(&ifx_modem_reboot_notifier_block); |
3227 | } |
3228 | |
3229 | diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c |
3230 | index 3fe56894974a..7f9139445f2a 100644 |
3231 | --- a/drivers/tty/serial/serial_core.c |
3232 | +++ b/drivers/tty/serial/serial_core.c |
3233 | @@ -2083,7 +2083,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) |
3234 | mutex_lock(&port->mutex); |
3235 | |
3236 | tty_dev = device_find_child(uport->dev, &match, serial_match_port); |
3237 | - if (device_may_wakeup(tty_dev)) { |
3238 | + if (tty_dev && device_may_wakeup(tty_dev)) { |
3239 | if (!enable_irq_wake(uport->irq)) |
3240 | uport->irq_wake = 1; |
3241 | put_device(tty_dev); |
3242 | diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c |
3243 | index 9a47cc4f16a2..1df57461ece4 100644 |
3244 | --- a/drivers/tty/serial/sh-sci.c |
3245 | +++ b/drivers/tty/serial/sh-sci.c |
3246 | @@ -1985,11 +1985,13 @@ static int sci_startup(struct uart_port *port) |
3247 | |
3248 | dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); |
3249 | |
3250 | + sci_request_dma(port); |
3251 | + |
3252 | ret = sci_request_irq(s); |
3253 | - if (unlikely(ret < 0)) |
3254 | + if (unlikely(ret < 0)) { |
3255 | + sci_free_dma(port); |
3256 | return ret; |
3257 | - |
3258 | - sci_request_dma(port); |
3259 | + } |
3260 | |
3261 | return 0; |
3262 | } |
3263 | @@ -2021,8 +2023,8 @@ static void sci_shutdown(struct uart_port *port) |
3264 | } |
3265 | #endif |
3266 | |
3267 | - sci_free_dma(port); |
3268 | sci_free_irq(s); |
3269 | + sci_free_dma(port); |
3270 | } |
3271 | |
3272 | static int sci_sck_calc(struct sci_port *s, unsigned int bps, |
3273 | diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c |
3274 | index 6d23eede4d8c..1c31e8a08810 100644 |
3275 | --- a/drivers/usb/chipidea/debug.c |
3276 | +++ b/drivers/usb/chipidea/debug.c |
3277 | @@ -294,7 +294,8 @@ static int ci_role_show(struct seq_file *s, void *data) |
3278 | { |
3279 | struct ci_hdrc *ci = s->private; |
3280 | |
3281 | - seq_printf(s, "%s\n", ci_role(ci)->name); |
3282 | + if (ci->role != CI_ROLE_END) |
3283 | + seq_printf(s, "%s\n", ci_role(ci)->name); |
3284 | |
3285 | return 0; |
3286 | } |
3287 | diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c |
3288 | index f88e9157fad0..60a786c87c06 100644 |
3289 | --- a/drivers/usb/chipidea/udc.c |
3290 | +++ b/drivers/usb/chipidea/udc.c |
3291 | @@ -1984,6 +1984,7 @@ static void udc_id_switch_for_host(struct ci_hdrc *ci) |
3292 | int ci_hdrc_gadget_init(struct ci_hdrc *ci) |
3293 | { |
3294 | struct ci_role_driver *rdrv; |
3295 | + int ret; |
3296 | |
3297 | if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC)) |
3298 | return -ENXIO; |
3299 | @@ -1996,7 +1997,10 @@ int ci_hdrc_gadget_init(struct ci_hdrc *ci) |
3300 | rdrv->stop = udc_id_switch_for_host; |
3301 | rdrv->irq = udc_irq; |
3302 | rdrv->name = "gadget"; |
3303 | - ci->roles[CI_ROLE_GADGET] = rdrv; |
3304 | |
3305 | - return udc_start(ci); |
3306 | + ret = udc_start(ci); |
3307 | + if (!ret) |
3308 | + ci->roles[CI_ROLE_GADGET] = rdrv; |
3309 | + |
3310 | + return ret; |
3311 | } |
3312 | diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c |
3313 | index e77a4ed4f021..9f4a0185dd60 100644 |
3314 | --- a/drivers/usb/chipidea/usbmisc_imx.c |
3315 | +++ b/drivers/usb/chipidea/usbmisc_imx.c |
3316 | @@ -108,6 +108,8 @@ struct imx_usbmisc { |
3317 | const struct usbmisc_ops *ops; |
3318 | }; |
3319 | |
3320 | +static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data); |
3321 | + |
3322 | static int usbmisc_imx25_init(struct imx_usbmisc_data *data) |
3323 | { |
3324 | struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev); |
3325 | @@ -242,10 +244,15 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data) |
3326 | val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN |
3327 | | MX53_USB_UHx_CTRL_ULPI_INT_EN; |
3328 | writel(val, reg); |
3329 | - /* Disable internal 60Mhz clock */ |
3330 | - reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET; |
3331 | - val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF; |
3332 | - writel(val, reg); |
3333 | + if (is_imx53_usbmisc(data)) { |
3334 | + /* Disable internal 60Mhz clock */ |
3335 | + reg = usbmisc->base + |
3336 | + MX53_USB_CLKONOFF_CTRL_OFFSET; |
3337 | + val = readl(reg) | |
3338 | + MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF; |
3339 | + writel(val, reg); |
3340 | + } |
3341 | + |
3342 | } |
3343 | if (data->disable_oc) { |
3344 | reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET; |
3345 | @@ -267,10 +274,15 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data) |
3346 | val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN |
3347 | | MX53_USB_UHx_CTRL_ULPI_INT_EN; |
3348 | writel(val, reg); |
3349 | - /* Disable internal 60Mhz clock */ |
3350 | - reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET; |
3351 | - val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF; |
3352 | - writel(val, reg); |
3353 | + |
3354 | + if (is_imx53_usbmisc(data)) { |
3355 | + /* Disable internal 60Mhz clock */ |
3356 | + reg = usbmisc->base + |
3357 | + MX53_USB_CLKONOFF_CTRL_OFFSET; |
3358 | + val = readl(reg) | |
3359 | + MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF; |
3360 | + writel(val, reg); |
3361 | + } |
3362 | } |
3363 | if (data->disable_oc) { |
3364 | reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET; |
3365 | @@ -456,6 +468,10 @@ static const struct usbmisc_ops imx27_usbmisc_ops = { |
3366 | .init = usbmisc_imx27_init, |
3367 | }; |
3368 | |
3369 | +static const struct usbmisc_ops imx51_usbmisc_ops = { |
3370 | + .init = usbmisc_imx53_init, |
3371 | +}; |
3372 | + |
3373 | static const struct usbmisc_ops imx53_usbmisc_ops = { |
3374 | .init = usbmisc_imx53_init, |
3375 | }; |
3376 | @@ -479,6 +495,13 @@ static const struct usbmisc_ops imx7d_usbmisc_ops = { |
3377 | .set_wakeup = usbmisc_imx7d_set_wakeup, |
3378 | }; |
3379 | |
3380 | +static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data) |
3381 | +{ |
3382 | + struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev); |
3383 | + |
3384 | + return usbmisc->ops == &imx53_usbmisc_ops; |
3385 | +} |
3386 | + |
3387 | int imx_usbmisc_init(struct imx_usbmisc_data *data) |
3388 | { |
3389 | struct imx_usbmisc *usbmisc; |
3390 | @@ -536,7 +559,7 @@ static const struct of_device_id usbmisc_imx_dt_ids[] = { |
3391 | }, |
3392 | { |
3393 | .compatible = "fsl,imx51-usbmisc", |
3394 | - .data = &imx53_usbmisc_ops, |
3395 | + .data = &imx51_usbmisc_ops, |
3396 | }, |
3397 | { |
3398 | .compatible = "fsl,imx53-usbmisc", |
3399 | diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c |
3400 | index 4c8aacc232c0..74d57d6994da 100644 |
3401 | --- a/drivers/usb/gadget/function/f_mass_storage.c |
3402 | +++ b/drivers/usb/gadget/function/f_mass_storage.c |
3403 | @@ -396,7 +396,11 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep) |
3404 | /* Caller must hold fsg->lock */ |
3405 | static void wakeup_thread(struct fsg_common *common) |
3406 | { |
3407 | - smp_wmb(); /* ensure the write of bh->state is complete */ |
3408 | + /* |
3409 | + * Ensure the reading of thread_wakeup_needed |
3410 | + * and the writing of bh->state are completed |
3411 | + */ |
3412 | + smp_mb(); |
3413 | /* Tell the main thread that something has happened */ |
3414 | common->thread_wakeup_needed = 1; |
3415 | if (common->thread_task) |
3416 | @@ -627,7 +631,12 @@ static int sleep_thread(struct fsg_common *common, bool can_freeze) |
3417 | } |
3418 | __set_current_state(TASK_RUNNING); |
3419 | common->thread_wakeup_needed = 0; |
3420 | - smp_rmb(); /* ensure the latest bh->state is visible */ |
3421 | + |
3422 | + /* |
3423 | + * Ensure the writing of thread_wakeup_needed |
3424 | + * and the reading of bh->state are completed |
3425 | + */ |
3426 | + smp_mb(); |
3427 | return rc; |
3428 | } |
3429 | |
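
[Editor's note] The f_mass_storage change upgrades the one-sided smp_wmb()/smp_rmb() pair to full smp_mb() barriers: each side both stores one flag (thread_wakeup_needed or bh->state) and loads the other, a store-then-load pattern that only a full barrier orders. A rough C11 analogue of the paired full fences, simplified to two flags and a single thread of execution:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int state;           /* stands in for bh->state */
    static atomic_int wakeup_needed;   /* stands in for thread_wakeup_needed */

    static void waker(void)
    {
        atomic_store_explicit(&state, 1, memory_order_relaxed);
        /* full fence: order the store above against the store below */
        atomic_thread_fence(memory_order_seq_cst);
        atomic_store_explicit(&wakeup_needed, 1, memory_order_relaxed);
    }

    static void sleeper(void)
    {
        atomic_store_explicit(&wakeup_needed, 0, memory_order_relaxed);
        /* full fence: order the store above against the load below */
        atomic_thread_fence(memory_order_seq_cst);
        printf("state=%d\n", atomic_load_explicit(&state, memory_order_relaxed));
    }

    int main(void)
    {
        waker();
        sleeper();
        return 0;
    }
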
3430 | diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c |
3431 | index 9c7ee26ef388..bc6a9be2ccc5 100644 |
3432 | --- a/drivers/usb/musb/musb_dsps.c |
3433 | +++ b/drivers/usb/musb/musb_dsps.c |
3434 | @@ -245,6 +245,11 @@ static int dsps_check_status(struct musb *musb, void *unused) |
3435 | dsps_mod_timer_optional(glue); |
3436 | break; |
3437 | case OTG_STATE_A_WAIT_BCON: |
3438 | + /* keep VBUS on for host-only mode */ |
3439 | + if (musb->port_mode == MUSB_PORT_MODE_HOST) { |
3440 | + dsps_mod_timer_optional(glue); |
3441 | + break; |
3442 | + } |
3443 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); |
3444 | skip_session = 1; |
3445 | /* fall */ |
3446 | diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c |
3447 | index 7a92a5e1d40c..feca75b07fdd 100644 |
3448 | --- a/drivers/xen/privcmd.c |
3449 | +++ b/drivers/xen/privcmd.c |
3450 | @@ -362,8 +362,8 @@ static int mmap_batch_fn(void *data, int nr, void *state) |
3451 | st->global_error = 1; |
3452 | } |
3453 | } |
3454 | - st->va += PAGE_SIZE * nr; |
3455 | - st->index += nr; |
3456 | + st->va += XEN_PAGE_SIZE * nr; |
3457 | + st->index += nr / XEN_PFN_PER_PAGE; |
3458 | |
3459 | return 0; |
3460 | } |
3461 | diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h |
3462 | index c4115901d906..7a945a1f076b 100644 |
3463 | --- a/fs/btrfs/ctree.h |
3464 | +++ b/fs/btrfs/ctree.h |
3465 | @@ -2547,7 +2547,7 @@ u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes); |
3466 | static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info, |
3467 | unsigned num_items) |
3468 | { |
3469 | - return fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items; |
3470 | + return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items; |
3471 | } |
3472 | |
3473 | /* |
3474 | @@ -2557,7 +2557,7 @@ static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info, |
3475 | static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info, |
3476 | unsigned num_items) |
3477 | { |
3478 | - return fs_info->nodesize * BTRFS_MAX_LEVEL * num_items; |
3479 | + return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items; |
3480 | } |
3481 | |
3482 | int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans, |
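
[Editor's note] Both btrfs helpers cast nodesize to u64 before multiplying: `nodesize * BTRFS_MAX_LEVEL * 2 * num_items` is otherwise evaluated in 32 bits and can wrap before being widened to the u64 return type. A short demonstration of the wrap and the fix (values chosen to overflow, BTRFS_MAX_LEVEL is 8):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t nodesize = 65536;      /* 64K nodes */
        unsigned num_items = 70000;

        /* 32-bit multiply wraps, then the wrapped value is widened */
        uint64_t bad  = nodesize * 8 * 2 * num_items;
        /* widen first, multiply entirely in 64 bits */
        uint64_t good = (uint64_t)nodesize * 8 * 2 * num_items;

        printf("bad=%llu good=%llu\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
    }
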
3483 | diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c |
3484 | index be5477676cc8..ed3fefc9e5e7 100644 |
3485 | --- a/fs/btrfs/extent-tree.c |
3486 | +++ b/fs/btrfs/extent-tree.c |
3487 | @@ -3983,6 +3983,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, |
3488 | info->space_info_kobj, "%s", |
3489 | alloc_name(found->flags)); |
3490 | if (ret) { |
3491 | + percpu_counter_destroy(&found->total_bytes_pinned); |
3492 | kfree(found); |
3493 | return ret; |
3494 | } |
3495 | @@ -4834,7 +4835,7 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info, |
3496 | spin_unlock(&delayed_rsv->lock); |
3497 | |
3498 | commit: |
3499 | - trans = btrfs_join_transaction(fs_info->fs_root); |
3500 | + trans = btrfs_join_transaction(fs_info->extent_root); |
3501 | if (IS_ERR(trans)) |
3502 | return -ENOSPC; |
3503 | |
3504 | @@ -4852,7 +4853,7 @@ static int flush_space(struct btrfs_fs_info *fs_info, |
3505 | struct btrfs_space_info *space_info, u64 num_bytes, |
3506 | u64 orig_bytes, int state) |
3507 | { |
3508 | - struct btrfs_root *root = fs_info->fs_root; |
3509 | + struct btrfs_root *root = fs_info->extent_root; |
3510 | struct btrfs_trans_handle *trans; |
3511 | int nr; |
3512 | int ret = 0; |
3513 | @@ -5052,7 +5053,7 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info, |
3514 | int flush_state = FLUSH_DELAYED_ITEMS_NR; |
3515 | |
3516 | spin_lock(&space_info->lock); |
3517 | - to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root, |
3518 | + to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->extent_root, |
3519 | space_info); |
3520 | if (!to_reclaim) { |
3521 | spin_unlock(&space_info->lock); |
3522 | diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c |
3523 | index 5e71f1ea3391..65fc76a47094 100644 |
3524 | --- a/fs/btrfs/inode.c |
3525 | +++ b/fs/btrfs/inode.c |
3526 | @@ -7359,8 +7359,8 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end) |
3527 | int found = false; |
3528 | void **pagep = NULL; |
3529 | struct page *page = NULL; |
3530 | - int start_idx; |
3531 | - int end_idx; |
3532 | + unsigned long start_idx; |
3533 | + unsigned long end_idx; |
3534 | |
3535 | start_idx = start >> PAGE_SHIFT; |
3536 | |
3537 | diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c |
3538 | index 2a97dff87b96..3e36508610b7 100644 |
3539 | --- a/fs/ext4/extents.c |
3540 | +++ b/fs/ext4/extents.c |
3541 | @@ -3413,13 +3413,13 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, |
3542 | struct ext4_sb_info *sbi; |
3543 | struct ext4_extent_header *eh; |
3544 | struct ext4_map_blocks split_map; |
3545 | - struct ext4_extent zero_ex; |
3546 | + struct ext4_extent zero_ex1, zero_ex2; |
3547 | struct ext4_extent *ex, *abut_ex; |
3548 | ext4_lblk_t ee_block, eof_block; |
3549 | unsigned int ee_len, depth, map_len = map->m_len; |
3550 | int allocated = 0, max_zeroout = 0; |
3551 | int err = 0; |
3552 | - int split_flag = 0; |
3553 | + int split_flag = EXT4_EXT_DATA_VALID2; |
3554 | |
3555 | ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" |
3556 | "block %llu, max_blocks %u\n", inode->i_ino, |
3557 | @@ -3436,7 +3436,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, |
3558 | ex = path[depth].p_ext; |
3559 | ee_block = le32_to_cpu(ex->ee_block); |
3560 | ee_len = ext4_ext_get_actual_len(ex); |
3561 | - zero_ex.ee_len = 0; |
3562 | + zero_ex1.ee_len = 0; |
3563 | + zero_ex2.ee_len = 0; |
3564 | |
3565 | trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); |
3566 | |
3567 | @@ -3576,62 +3577,52 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, |
3568 | if (ext4_encrypted_inode(inode)) |
3569 | max_zeroout = 0; |
3570 | |
3571 | - /* If extent is less than s_max_zeroout_kb, zeroout directly */ |
3572 | - if (max_zeroout && (ee_len <= max_zeroout)) { |
3573 | - err = ext4_ext_zeroout(inode, ex); |
3574 | - if (err) |
3575 | - goto out; |
3576 | - zero_ex.ee_block = ex->ee_block; |
3577 | - zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)); |
3578 | - ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex)); |
3579 | - |
3580 | - err = ext4_ext_get_access(handle, inode, path + depth); |
3581 | - if (err) |
3582 | - goto out; |
3583 | - ext4_ext_mark_initialized(ex); |
3584 | - ext4_ext_try_to_merge(handle, inode, path, ex); |
3585 | - err = ext4_ext_dirty(handle, inode, path + path->p_depth); |
3586 | - goto out; |
3587 | - } |
3588 | - |
3589 | /* |
3590 | - * four cases: |
3591 | + * five cases: |
3592 | * 1. split the extent into three extents. |
3593 | - * 2. split the extent into two extents, zeroout the first half. |
3594 | - * 3. split the extent into two extents, zeroout the second half. |
3595 | + * 2. split the extent into two extents, zeroout the head of the first |
3596 | + * extent. |
3597 | + * 3. split the extent into two extents, zeroout the tail of the second |
3598 | + * extent. |
3599 | * 4. split the extent into two extents with out zeroout. |
3600 | + * 5. no splitting needed, just possibly zeroout the head and / or the |
3601 | + * tail of the extent. |
3602 | */ |
3603 | split_map.m_lblk = map->m_lblk; |
3604 | split_map.m_len = map->m_len; |
3605 | |
3606 | - if (max_zeroout && (allocated > map->m_len)) { |
3607 | + if (max_zeroout && (allocated > split_map.m_len)) { |
3608 | if (allocated <= max_zeroout) { |
3609 | - /* case 3 */ |
3610 | - zero_ex.ee_block = |
3611 | - cpu_to_le32(map->m_lblk); |
3612 | - zero_ex.ee_len = cpu_to_le16(allocated); |
3613 | - ext4_ext_store_pblock(&zero_ex, |
3614 | - ext4_ext_pblock(ex) + map->m_lblk - ee_block); |
3615 | - err = ext4_ext_zeroout(inode, &zero_ex); |
3616 | + /* case 3 or 5 */ |
3617 | + zero_ex1.ee_block = |
3618 | + cpu_to_le32(split_map.m_lblk + |
3619 | + split_map.m_len); |
3620 | + zero_ex1.ee_len = |
3621 | + cpu_to_le16(allocated - split_map.m_len); |
3622 | + ext4_ext_store_pblock(&zero_ex1, |
3623 | + ext4_ext_pblock(ex) + split_map.m_lblk + |
3624 | + split_map.m_len - ee_block); |
3625 | + err = ext4_ext_zeroout(inode, &zero_ex1); |
3626 | if (err) |
3627 | goto out; |
3628 | - split_map.m_lblk = map->m_lblk; |
3629 | split_map.m_len = allocated; |
3630 | - } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) { |
3631 | - /* case 2 */ |
3632 | - if (map->m_lblk != ee_block) { |
3633 | - zero_ex.ee_block = ex->ee_block; |
3634 | - zero_ex.ee_len = cpu_to_le16(map->m_lblk - |
3635 | + } |
3636 | + if (split_map.m_lblk - ee_block + split_map.m_len < |
3637 | + max_zeroout) { |
3638 | + /* case 2 or 5 */ |
3639 | + if (split_map.m_lblk != ee_block) { |
3640 | + zero_ex2.ee_block = ex->ee_block; |
3641 | + zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk - |
3642 | ee_block); |
3643 | - ext4_ext_store_pblock(&zero_ex, |
3644 | + ext4_ext_store_pblock(&zero_ex2, |
3645 | ext4_ext_pblock(ex)); |
3646 | - err = ext4_ext_zeroout(inode, &zero_ex); |
3647 | + err = ext4_ext_zeroout(inode, &zero_ex2); |
3648 | if (err) |
3649 | goto out; |
3650 | } |
3651 | |
3652 | + split_map.m_len += split_map.m_lblk - ee_block; |
3653 | split_map.m_lblk = ee_block; |
3654 | - split_map.m_len = map->m_lblk - ee_block + map->m_len; |
3655 | allocated = map->m_len; |
3656 | } |
3657 | } |
3658 | @@ -3642,8 +3633,11 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, |
3659 | err = 0; |
3660 | out: |
3661 | /* If we have gotten a failure, don't zero out status tree */ |
3662 | - if (!err) |
3663 | - err = ext4_zeroout_es(inode, &zero_ex); |
3664 | + if (!err) { |
3665 | + err = ext4_zeroout_es(inode, &zero_ex1); |
3666 | + if (!err) |
3667 | + err = ext4_zeroout_es(inode, &zero_ex2); |
3668 | + } |
3669 | return err ? err : allocated; |
3670 | } |
3671 | |
3672 | @@ -4883,6 +4877,8 @@ static long ext4_zero_range(struct file *file, loff_t offset, |
3673 | |
3674 | /* Zero out partial block at the edges of the range */ |
3675 | ret = ext4_zero_partial_blocks(handle, inode, offset, len); |
3676 | + if (ret >= 0) |
3677 | + ext4_update_inode_fsync_trans(handle, inode, 1); |
3678 | |
3679 | if (file->f_flags & O_SYNC) |
3680 | ext4_handle_sync(handle); |
3681 | @@ -5569,6 +5565,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) |
3682 | ext4_handle_sync(handle); |
3683 | inode->i_mtime = inode->i_ctime = current_time(inode); |
3684 | ext4_mark_inode_dirty(handle, inode); |
3685 | + ext4_update_inode_fsync_trans(handle, inode, 1); |
3686 | |
3687 | out_stop: |
3688 | ext4_journal_stop(handle); |
3689 | @@ -5742,6 +5739,8 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) |
3690 | up_write(&EXT4_I(inode)->i_data_sem); |
3691 | if (IS_SYNC(inode)) |
3692 | ext4_handle_sync(handle); |
3693 | + if (ret >= 0) |
3694 | + ext4_update_inode_fsync_trans(handle, inode, 1); |
3695 | |
3696 | out_stop: |
3697 | ext4_journal_stop(handle); |
3698 | diff --git a/fs/ext4/file.c b/fs/ext4/file.c |
3699 | index 831fd6beebf0..bbea2dccd584 100644 |
3700 | --- a/fs/ext4/file.c |
3701 | +++ b/fs/ext4/file.c |
3702 | @@ -484,47 +484,27 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, |
3703 | num = min_t(pgoff_t, end - index, PAGEVEC_SIZE); |
3704 | nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, |
3705 | (pgoff_t)num); |
3706 | - if (nr_pages == 0) { |
3707 | - if (whence == SEEK_DATA) |
3708 | - break; |
3709 | - |
3710 | - BUG_ON(whence != SEEK_HOLE); |
3711 | - /* |
3712 | - * If this is the first time to go into the loop and |
3713 | - * offset is not beyond the end offset, it will be a |
3714 | - * hole at this offset |
3715 | - */ |
3716 | - if (lastoff == startoff || lastoff < endoff) |
3717 | - found = 1; |
3718 | - break; |
3719 | - } |
3720 | - |
3721 | - /* |
3722 | - * If this is the first time to go into the loop and |
3723 | - * offset is smaller than the first page offset, it will be a |
3724 | - * hole at this offset. |
3725 | - */ |
3726 | - if (lastoff == startoff && whence == SEEK_HOLE && |
3727 | - lastoff < page_offset(pvec.pages[0])) { |
3728 | - found = 1; |
3729 | + if (nr_pages == 0) |
3730 | break; |
3731 | - } |
3732 | |
3733 | for (i = 0; i < nr_pages; i++) { |
3734 | struct page *page = pvec.pages[i]; |
3735 | struct buffer_head *bh, *head; |
3736 | |
3737 | /* |
3738 | - * If the current offset is not beyond the end of given |
3739 | - * range, it will be a hole. |
3740 | + * If current offset is smaller than the page offset, |
3741 | + * there is a hole at this offset. |
3742 | */ |
3743 | - if (lastoff < endoff && whence == SEEK_HOLE && |
3744 | - page->index > end) { |
3745 | + if (whence == SEEK_HOLE && lastoff < endoff && |
3746 | + lastoff < page_offset(pvec.pages[i])) { |
3747 | found = 1; |
3748 | *offset = lastoff; |
3749 | goto out; |
3750 | } |
3751 | |
3752 | + if (page->index > end) |
3753 | + goto out; |
3754 | + |
3755 | lock_page(page); |
3756 | |
3757 | if (unlikely(page->mapping != inode->i_mapping)) { |
3758 | @@ -564,20 +544,18 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, |
3759 | unlock_page(page); |
3760 | } |
3761 | |
3762 | - /* |
3763 | - * The no. of pages is less than our desired, that would be a |
3764 | - * hole in there. |
3765 | - */ |
3766 | - if (nr_pages < num && whence == SEEK_HOLE) { |
3767 | - found = 1; |
3768 | - *offset = lastoff; |
3769 | + /* The no. of pages is less than our desired, we are done. */ |
3770 | + if (nr_pages < num) |
3771 | break; |
3772 | - } |
3773 | |
3774 | index = pvec.pages[i - 1]->index + 1; |
3775 | pagevec_release(&pvec); |
3776 | } while (index <= end); |
3777 | |
3778 | + if (whence == SEEK_HOLE && lastoff < endoff) { |
3779 | + found = 1; |
3780 | + *offset = lastoff; |
3781 | + } |
3782 | out: |
3783 | pagevec_release(&pvec); |
3784 | return found; |
3785 | diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c |
3786 | index 88203ae5b154..7090752ec2cb 100644 |
3787 | --- a/fs/ext4/inode.c |
3788 | +++ b/fs/ext4/inode.c |
3789 | @@ -4165,6 +4165,8 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) |
3790 | |
3791 | inode->i_mtime = inode->i_ctime = current_time(inode); |
3792 | ext4_mark_inode_dirty(handle, inode); |
3793 | + if (ret >= 0) |
3794 | + ext4_update_inode_fsync_trans(handle, inode, 1); |
3795 | out_stop: |
3796 | ext4_journal_stop(handle); |
3797 | out_dio: |
3798 | @@ -5621,8 +5623,9 @@ static int ext4_expand_extra_isize(struct inode *inode, |
3799 | /* No extended attributes present */ |
3800 | if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) || |
3801 | header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { |
3802 | - memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, |
3803 | - new_extra_isize); |
3804 | + memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE + |
3805 | + EXT4_I(inode)->i_extra_isize, 0, |
3806 | + new_extra_isize - EXT4_I(inode)->i_extra_isize); |
3807 | EXT4_I(inode)->i_extra_isize = new_extra_isize; |
3808 | return 0; |
3809 | } |
3810 | diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c |
3811 | index f865b96374df..d2955daf17a4 100644 |
3812 | --- a/fs/gfs2/log.c |
3813 | +++ b/fs/gfs2/log.c |
3814 | @@ -659,7 +659,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags) |
3815 | struct gfs2_log_header *lh; |
3816 | unsigned int tail; |
3817 | u32 hash; |
3818 | - int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META; |
3819 | + int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC; |
3820 | struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO); |
3821 | enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state); |
3822 | lh = page_address(page); |
3823 | diff --git a/fs/iomap.c b/fs/iomap.c |
3824 | index 1c25ae30500e..258fb4100b1d 100644 |
3825 | --- a/fs/iomap.c |
3826 | +++ b/fs/iomap.c |
3827 | @@ -909,6 +909,9 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, |
3828 | break; |
3829 | } |
3830 | pos += ret; |
3831 | + |
3832 | + if (iov_iter_rw(iter) == READ && pos >= dio->i_size) |
3833 | + break; |
3834 | } while ((count = iov_iter_count(iter)) > 0); |
3835 | blk_finish_plug(&plug); |
3836 | |
3837 | diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c |
3838 | index c453a1998e00..dadb3bf305b2 100644 |
3839 | --- a/fs/nfsd/nfs4proc.c |
3840 | +++ b/fs/nfsd/nfs4proc.c |
3841 | @@ -1769,6 +1769,12 @@ nfsd4_proc_compound(struct svc_rqst *rqstp, |
3842 | opdesc->op_get_currentstateid(cstate, &op->u); |
3843 | op->status = opdesc->op_func(rqstp, cstate, &op->u); |
3844 | |
3845 | + /* Only from SEQUENCE */ |
3846 | + if (cstate->status == nfserr_replay_cache) { |
3847 | + dprintk("%s NFS4.1 replay from cache\n", __func__); |
3848 | + status = op->status; |
3849 | + goto out; |
3850 | + } |
3851 | if (!op->status) { |
3852 | if (opdesc->op_set_currentstateid) |
3853 | opdesc->op_set_currentstateid(cstate, &op->u); |
3854 | @@ -1779,14 +1785,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp, |
3855 | if (need_wrongsec_check(rqstp)) |
3856 | op->status = check_nfsd_access(current_fh->fh_export, rqstp); |
3857 | } |
3858 | - |
3859 | encode_op: |
3860 | - /* Only from SEQUENCE */ |
3861 | - if (cstate->status == nfserr_replay_cache) { |
3862 | - dprintk("%s NFS4.1 replay from cache\n", __func__); |
3863 | - status = op->status; |
3864 | - goto out; |
3865 | - } |
3866 | if (op->status == nfserr_replay_me) { |
3867 | op->replay = &cstate->replay_owner->so_replay; |
3868 | nfsd4_encode_replay(&resp->xdr, op); |
3869 | diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c |
3870 | index 906ea6c93260..5b14c16d1b77 100644 |
3871 | --- a/fs/overlayfs/copy_up.c |
3872 | +++ b/fs/overlayfs/copy_up.c |
3873 | @@ -269,12 +269,13 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, |
3874 | temp = ovl_do_tmpfile(upperdir, stat->mode); |
3875 | else |
3876 | temp = ovl_lookup_temp(workdir, dentry); |
3877 | - err = PTR_ERR(temp); |
3878 | - if (IS_ERR(temp)) |
3879 | - goto out1; |
3880 | - |
3881 | err = 0; |
3882 | - if (!tmpfile) |
3883 | + if (IS_ERR(temp)) { |
3884 | + err = PTR_ERR(temp); |
3885 | + temp = NULL; |
3886 | + } |
3887 | + |
3888 | + if (!err && !tmpfile) |
3889 | err = ovl_create_real(wdir, temp, &cattr, NULL, true); |
3890 | |
3891 | if (new_creds) { |
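
[Editor's note] The overlayfs hunk captures PTR_ERR into err and NULLs temp, so the function falls through to shared cleanup without ever dereferencing an error-encoded pointer. A userspace sketch of the errno-in-pointer convention behind IS_ERR/PTR_ERR (a minimal re-implementation for illustration, not the kernel's):

    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long err)      { return (void *)err; }
    static inline long PTR_ERR(const void *p)  { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    static void *lookup_temp(int fail)
    {
        static int obj = 7;

        return fail ? ERR_PTR(-ENOENT) : &obj;
    }

    int main(void)
    {
        void *temp = lookup_temp(1);
        int err = 0;

        if (IS_ERR(temp)) {      /* capture the code, neutralize temp */
            err = PTR_ERR(temp);
            temp = NULL;
        }
        printf("err=%d temp=%p\n", err, temp);
        return 0;
    }
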
3892 | diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c |
3893 | index aa40c242f1db..64a4d3c82125 100644 |
3894 | --- a/fs/reiserfs/journal.c |
3895 | +++ b/fs/reiserfs/journal.c |
3896 | @@ -1112,7 +1112,7 @@ static int flush_commit_list(struct super_block *s, |
3897 | depth = reiserfs_write_unlock_nested(s); |
3898 | if (reiserfs_barrier_flush(s)) |
3899 | __sync_dirty_buffer(jl->j_commit_bh, |
3900 | - REQ_PREFLUSH | REQ_FUA); |
3901 | + REQ_SYNC | REQ_PREFLUSH | REQ_FUA); |
3902 | else |
3903 | sync_dirty_buffer(jl->j_commit_bh); |
3904 | reiserfs_write_lock_nested(s, depth); |
3905 | @@ -1271,7 +1271,7 @@ static int _update_journal_header_block(struct super_block *sb, |
3906 | |
3907 | if (reiserfs_barrier_flush(sb)) |
3908 | __sync_dirty_buffer(journal->j_header_bh, |
3909 | - REQ_PREFLUSH | REQ_FUA); |
3910 | + REQ_SYNC | REQ_PREFLUSH | REQ_FUA); |
3911 | else |
3912 | sync_dirty_buffer(journal->j_header_bh); |
3913 | |
3914 | diff --git a/fs/stat.c b/fs/stat.c |
3915 | index a257b872a53d..ea6235a31ec8 100644 |
3916 | --- a/fs/stat.c |
3917 | +++ b/fs/stat.c |
3918 | @@ -586,6 +586,7 @@ void __inode_add_bytes(struct inode *inode, loff_t bytes) |
3919 | inode->i_bytes -= 512; |
3920 | } |
3921 | } |
3922 | +EXPORT_SYMBOL(__inode_add_bytes); |
3923 | |
3924 | void inode_add_bytes(struct inode *inode, loff_t bytes) |
3925 | { |
3926 | diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c |
3927 | index a0376a2c1c29..d642cc0a8271 100644 |
3928 | --- a/fs/ufs/balloc.c |
3929 | +++ b/fs/ufs/balloc.c |
3930 | @@ -82,7 +82,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count) |
3931 | ufs_error (sb, "ufs_free_fragments", |
3932 | "bit already cleared for fragment %u", i); |
3933 | } |
3934 | - |
3935 | + |
3936 | + inode_sub_bytes(inode, count << uspi->s_fshift); |
3937 | fs32_add(sb, &ucg->cg_cs.cs_nffree, count); |
3938 | uspi->cs_total.cs_nffree += count; |
3939 | fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); |
3940 | @@ -184,6 +185,7 @@ void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count) |
3941 | ufs_error(sb, "ufs_free_blocks", "freeing free fragment"); |
3942 | } |
3943 | ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); |
3944 | + inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift); |
3945 | if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) |
3946 | ufs_clusteracct (sb, ucpi, blkno, 1); |
3947 | |
3948 | @@ -494,6 +496,20 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, |
3949 | return 0; |
3950 | } |
3951 | |
3952 | +static bool try_add_frags(struct inode *inode, unsigned frags) |
3953 | +{ |
3954 | + unsigned size = frags * i_blocksize(inode); |
3955 | + spin_lock(&inode->i_lock); |
3956 | + __inode_add_bytes(inode, size); |
3957 | + if (unlikely((u32)inode->i_blocks != inode->i_blocks)) { |
3958 | + __inode_sub_bytes(inode, size); |
3959 | + spin_unlock(&inode->i_lock); |
3960 | + return false; |
3961 | + } |
3962 | + spin_unlock(&inode->i_lock); |
3963 | + return true; |
3964 | +} |
3965 | + |
3966 | static u64 ufs_add_fragments(struct inode *inode, u64 fragment, |
3967 | unsigned oldcount, unsigned newcount) |
3968 | { |
3969 | @@ -530,6 +546,9 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment, |
3970 | for (i = oldcount; i < newcount; i++) |
3971 | if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i)) |
3972 | return 0; |
3973 | + |
3974 | + if (!try_add_frags(inode, count)) |
3975 | + return 0; |
3976 | /* |
3977 | * Block can be extended |
3978 | */ |
3979 | @@ -647,6 +666,7 @@ static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno, |
3980 | ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i); |
3981 | i = uspi->s_fpb - count; |
3982 | |
3983 | + inode_sub_bytes(inode, i << uspi->s_fshift); |
3984 | fs32_add(sb, &ucg->cg_cs.cs_nffree, i); |
3985 | uspi->cs_total.cs_nffree += i; |
3986 | fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i); |
3987 | @@ -657,6 +677,8 @@ static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno, |
3988 | result = ufs_bitmap_search (sb, ucpi, goal, allocsize); |
3989 | if (result == INVBLOCK) |
3990 | return 0; |
3991 | + if (!try_add_frags(inode, count)) |
3992 | + return 0; |
3993 | for (i = 0; i < count; i++) |
3994 | ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i); |
3995 | |
3996 | @@ -716,6 +738,8 @@ static u64 ufs_alloccg_block(struct inode *inode, |
3997 | return INVBLOCK; |
3998 | ucpi->c_rotor = result; |
3999 | gotit: |
4000 | + if (!try_add_frags(inode, uspi->s_fpb)) |
4001 | + return 0; |
4002 | blkno = ufs_fragstoblks(result); |
4003 | ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); |
4004 | if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) |
4005 | diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c |
4006 | index 7e41aee7b69a..34f11cf0900a 100644 |
4007 | --- a/fs/ufs/inode.c |
4008 | +++ b/fs/ufs/inode.c |
4009 | @@ -235,7 +235,8 @@ ufs_extend_tail(struct inode *inode, u64 writes_to, |
4010 | |
4011 | p = ufs_get_direct_data_ptr(uspi, ufsi, block); |
4012 | tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p), |
4013 | - new_size, err, locked_page); |
4014 | + new_size - (lastfrag & uspi->s_fpbmask), err, |
4015 | + locked_page); |
4016 | return tmp != 0; |
4017 | } |
4018 | |
4019 | @@ -284,7 +285,7 @@ ufs_inode_getfrag(struct inode *inode, unsigned index, |
4020 | goal += uspi->s_fpb; |
4021 | } |
4022 | tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), |
4023 | - goal, uspi->s_fpb, err, locked_page); |
4024 | + goal, nfrags, err, locked_page); |
4025 | |
4026 | if (!tmp) { |
4027 | *err = -ENOSPC; |
4028 | @@ -402,7 +403,9 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff |
4029 | |
4030 | if (!create) { |
4031 | phys64 = ufs_frag_map(inode, offsets, depth); |
4032 | - goto out; |
4033 | + if (phys64) |
4034 | + map_bh(bh_result, sb, phys64 + frag); |
4035 | + return 0; |
4036 | } |
4037 | |
4038 | /* This code entered only while writing ....? */ |
4039 | @@ -841,7 +844,9 @@ void ufs_evict_inode(struct inode * inode) |
4040 | truncate_inode_pages_final(&inode->i_data); |
4041 | if (want_delete) { |
4042 | inode->i_size = 0; |
4043 | - if (inode->i_blocks) |
4044 | + if (inode->i_blocks && |
4045 | + (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || |
4046 | + S_ISLNK(inode->i_mode))) |
4047 | ufs_truncate_blocks(inode); |
4048 | } |
4049 | |
4050 | @@ -1100,7 +1105,7 @@ static int ufs_alloc_lastblock(struct inode *inode, loff_t size) |
4051 | return err; |
4052 | } |
4053 | |
4054 | -static void __ufs_truncate_blocks(struct inode *inode) |
4055 | +static void ufs_truncate_blocks(struct inode *inode) |
4056 | { |
4057 | struct ufs_inode_info *ufsi = UFS_I(inode); |
4058 | struct super_block *sb = inode->i_sb; |
4059 | @@ -1183,7 +1188,7 @@ static int ufs_truncate(struct inode *inode, loff_t size) |
4060 | |
4061 | truncate_setsize(inode, size); |
4062 | |
4063 | - __ufs_truncate_blocks(inode); |
4064 | + ufs_truncate_blocks(inode); |
4065 | inode->i_mtime = inode->i_ctime = current_time(inode); |
4066 | mark_inode_dirty(inode); |
4067 | out: |
4068 | @@ -1191,16 +1196,6 @@ static int ufs_truncate(struct inode *inode, loff_t size) |
4069 | return err; |
4070 | } |
4071 | |
4072 | -static void ufs_truncate_blocks(struct inode *inode) |
4073 | -{ |
4074 | - if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || |
4075 | - S_ISLNK(inode->i_mode))) |
4076 | - return; |
4077 | - if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) |
4078 | - return; |
4079 | - __ufs_truncate_blocks(inode); |
4080 | -} |
4081 | - |
4082 | int ufs_setattr(struct dentry *dentry, struct iattr *attr) |
4083 | { |
4084 | struct inode *inode = d_inode(dentry); |
4085 | diff --git a/fs/ufs/super.c b/fs/ufs/super.c |
4086 | index 29ecaf739449..878cc6264f1a 100644 |
4087 | --- a/fs/ufs/super.c |
4088 | +++ b/fs/ufs/super.c |
4089 | @@ -746,6 +746,23 @@ static void ufs_put_super(struct super_block *sb) |
4090 | return; |
4091 | } |
4092 | |
4093 | +static u64 ufs_max_bytes(struct super_block *sb) |
4094 | +{ |
4095 | + struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; |
4096 | + int bits = uspi->s_apbshift; |
4097 | + u64 res; |
4098 | + |
4099 | + if (bits > 21) |
4100 | + res = ~0ULL; |
4101 | + else |
4102 | + res = UFS_NDADDR + (1LL << bits) + (1LL << (2*bits)) + |
4103 | + (1LL << (3*bits)); |
4104 | + |
4105 | + if (res >= (MAX_LFS_FILESIZE >> uspi->s_bshift)) |
4106 | + return MAX_LFS_FILESIZE; |
4107 | + return res << uspi->s_bshift; |
4108 | +} |
4109 | + |
4110 | static int ufs_fill_super(struct super_block *sb, void *data, int silent) |
4111 | { |
4112 | struct ufs_sb_info * sbi; |
4113 | @@ -1211,6 +1228,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent) |
4114 | "fast symlink size (%u)\n", uspi->s_maxsymlinklen); |
4115 | uspi->s_maxsymlinklen = maxsymlen; |
4116 | } |
4117 | + sb->s_maxbytes = ufs_max_bytes(sb); |
4118 | sb->s_max_links = UFS_LINK_MAX; |
4119 | |
4120 | inode = ufs_iget(sb, UFS_ROOTINO); |
4121 | diff --git a/fs/ufs/util.h b/fs/ufs/util.h |
4122 | index b7fbf53dbc81..398019fb1448 100644 |
4123 | --- a/fs/ufs/util.h |
4124 | +++ b/fs/ufs/util.h |
4125 | @@ -473,15 +473,19 @@ static inline unsigned _ubh_find_last_zero_bit_( |
4126 | static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi, |
4127 | struct ufs_buffer_head * ubh, unsigned begin, unsigned block) |
4128 | { |
4129 | + u8 mask; |
4130 | switch (uspi->s_fpb) { |
4131 | case 8: |
4132 | return (*ubh_get_addr (ubh, begin + block) == 0xff); |
4133 | case 4: |
4134 | - return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2))); |
4135 | + mask = 0x0f << ((block & 0x01) << 2); |
4136 | + return (*ubh_get_addr (ubh, begin + (block >> 1)) & mask) == mask; |
4137 | case 2: |
4138 | - return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1))); |
4139 | + mask = 0x03 << ((block & 0x03) << 1); |
4140 | + return (*ubh_get_addr (ubh, begin + (block >> 2)) & mask) == mask; |
4141 | case 1: |
4142 | - return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07))); |
4143 | + mask = 0x01 << (block & 0x07); |
4144 | + return (*ubh_get_addr (ubh, begin + (block >> 3)) & mask) == mask; |
4145 | } |
4146 | return 0; |
4147 | } |
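
[Editor's note] The util.h fix tests whether all bits of a sub-block mask are set with `(byte & mask) == mask` instead of `byte == mask`; the equality test failed whenever unrelated bits in the same byte were also set. A small demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
        unsigned char byte = 0xff;                       /* neighbors set too */
        unsigned char mask = 0x0f << ((1 & 0x01) << 2);  /* high nibble: 0xf0 */

        int wrong = (byte == mask);                /* 0: equality sees the noise */
        int right = ((byte & mask) == mask);       /* 1: only masked bits count */

        printf("wrong=%d right=%d\n", wrong, right);
        return 0;
    }
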
4148 | diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h |
4149 | index a1dd21d6b723..466c71592a6f 100644 |
4150 | --- a/include/drm/i915_pciids.h |
4151 | +++ b/include/drm/i915_pciids.h |
4152 | @@ -265,7 +265,8 @@ |
4153 | INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \ |
4154 | INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \ |
4155 | INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \ |
4156 | - INTEL_VGA_DEVICE(0x192B, info) /* Halo GT3 */ \ |
4157 | + INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \ |
4158 | + INTEL_VGA_DEVICE(0x192D, info) /* SRV GT3 */ |
4159 | |
4160 | #define INTEL_SKL_GT4_IDS(info) \ |
4161 | INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \ |
4162 | diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h |
4163 | index 6a3f850cabab..14db95e9b529 100644 |
4164 | --- a/include/linux/cgroup-defs.h |
4165 | +++ b/include/linux/cgroup-defs.h |
4166 | @@ -47,6 +47,7 @@ enum { |
4167 | CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ |
4168 | CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ |
4169 | CSS_VISIBLE = (1 << 3), /* css is visible to userland */ |
4170 | + CSS_DYING = (1 << 4), /* css is dying */ |
4171 | }; |
4172 | |
4173 | /* bits in struct cgroup flags field */ |
4174 | diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h |
4175 | index af9c86e958bd..b48579d9b806 100644 |
4176 | --- a/include/linux/cgroup.h |
4177 | +++ b/include/linux/cgroup.h |
4178 | @@ -344,6 +344,26 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css) |
4179 | } |
4180 | |
4181 | /** |
4182 | + * css_is_dying - test whether the specified css is dying |
4183 | + * @css: target css |
4184 | + * |
4185 | + * Test whether @css is in the process of offlining or already offline. In |
4186 | + * most cases, ->css_online() and ->css_offline() callbacks should be |
4187 | + * enough; however, the actual offline operations are RCU delayed and this |
4188 | + * test returns %true also when @css is scheduled to be offlined. |
4189 | + * |
4190 | + * This is useful, for example, when the use case requires synchronous |
4191 | + * behavior with respect to cgroup removal. cgroup removal schedules css |
4192 | + * offlining but the css can seem alive while the operation is being |
4193 | + * delayed. If the delay affects user visible semantics, this test can be |
4194 | + * used to resolve the situation. |
4195 | + */ |
4196 | +static inline bool css_is_dying(struct cgroup_subsys_state *css) |
4197 | +{ |
4198 | + return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt); |
4199 | +} |
4200 | + |
4201 | +/** |
4202 | * css_put - put a css reference |
4203 | * @css: target css |
4204 | * |
4205 | diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h |
4206 | index 422bc2e4cb6a..ef3eb8bbfee4 100644 |
4207 | --- a/include/linux/ptrace.h |
4208 | +++ b/include/linux/ptrace.h |
4209 | @@ -54,7 +54,8 @@ extern int ptrace_request(struct task_struct *child, long request, |
4210 | unsigned long addr, unsigned long data); |
4211 | extern void ptrace_notify(int exit_code); |
4212 | extern void __ptrace_link(struct task_struct *child, |
4213 | - struct task_struct *new_parent); |
4214 | + struct task_struct *new_parent, |
4215 | + const struct cred *ptracer_cred); |
4216 | extern void __ptrace_unlink(struct task_struct *child); |
4217 | extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); |
4218 | #define PTRACE_MODE_READ 0x01 |
4219 | @@ -206,7 +207,7 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) |
4220 | |
4221 | if (unlikely(ptrace) && current->ptrace) { |
4222 | child->ptrace = current->ptrace; |
4223 | - __ptrace_link(child, current->parent); |
4224 | + __ptrace_link(child, current->parent, current->ptracer_cred); |
4225 | |
4226 | if (child->ptrace & PT_SEIZED) |
4227 | task_set_jobctl_pending(child, JOBCTL_TRAP_STOP); |
4228 | @@ -215,6 +216,8 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) |
4229 | |
4230 | set_tsk_thread_flag(child, TIF_SIGPENDING); |
4231 | } |
4232 | + else |
4233 | + child->ptracer_cred = NULL; |
4234 | } |
4235 | |
4236 | /** |
4237 | diff --git a/include/linux/srcu.h b/include/linux/srcu.h |
4238 | index a598cf3ac70c..8a95e5d0fdf9 100644 |
4239 | --- a/include/linux/srcu.h |
4240 | +++ b/include/linux/srcu.h |
4241 | @@ -232,9 +232,7 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) |
4242 | { |
4243 | int retval; |
4244 | |
4245 | - preempt_disable(); |
4246 | retval = __srcu_read_lock(sp); |
4247 | - preempt_enable(); |
4248 | rcu_lock_acquire(&(sp)->dep_map); |
4249 | return retval; |
4250 | } |
4251 | diff --git a/include/net/ipv6.h b/include/net/ipv6.h |
4252 | index dbf0abba33b8..3e505bbff8ca 100644 |
4253 | --- a/include/net/ipv6.h |
4254 | +++ b/include/net/ipv6.h |
4255 | @@ -1007,6 +1007,7 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row, |
4256 | */ |
4257 | extern const struct proto_ops inet6_stream_ops; |
4258 | extern const struct proto_ops inet6_dgram_ops; |
4259 | +extern const struct proto_ops inet6_sockraw_ops; |
4260 | |
4261 | struct group_source_req; |
4262 | struct group_filter; |
4263 | diff --git a/kernel/audit.c b/kernel/audit.c |
4264 | index a871bf80fde1..dd2c339c8eb9 100644 |
4265 | --- a/kernel/audit.c |
4266 | +++ b/kernel/audit.c |
4267 | @@ -110,18 +110,19 @@ struct audit_net { |
4268 | * @pid: auditd PID |
4269 | * @portid: netlink portid |
4270 | * @net: the associated network namespace |
4271 | - * @lock: spinlock to protect write access |
4272 | + * @rcu: RCU head |
4273 | * |
4274 | * Description: |
4275 | * This struct is RCU protected; you must either hold the RCU lock for reading |
4276 | - * or the included spinlock for writing. |
4277 | + * or the associated spinlock for writing. |
4278 | */ |
4279 | static struct auditd_connection { |
4280 | int pid; |
4281 | u32 portid; |
4282 | struct net *net; |
4283 | - spinlock_t lock; |
4284 | -} auditd_conn; |
4285 | + struct rcu_head rcu; |
4286 | +} *auditd_conn = NULL; |
4287 | +static DEFINE_SPINLOCK(auditd_conn_lock); |
4288 | |
4289 | /* If audit_rate_limit is non-zero, limit the rate of sending audit records |
4290 | * to that number per second. This prevents DoS attacks, but results in |
4291 | @@ -223,15 +224,39 @@ struct audit_reply { |
4292 | int auditd_test_task(const struct task_struct *task) |
4293 | { |
4294 | int rc; |
4295 | + struct auditd_connection *ac; |
4296 | |
4297 | rcu_read_lock(); |
4298 | - rc = (auditd_conn.pid && task->tgid == auditd_conn.pid ? 1 : 0); |
4299 | + ac = rcu_dereference(auditd_conn); |
4300 | + rc = (ac && ac->pid == task->tgid ? 1 : 0); |
4301 | rcu_read_unlock(); |
4302 | |
4303 | return rc; |
4304 | } |
4305 | |
4306 | /** |
4307 | + * auditd_pid_vnr - Return the auditd PID relative to the namespace |
4308 | + * |
4309 | + * Description: |
4310 | + * Returns the PID in relation to the namespace, 0 on failure. |
4311 | + */ |
4312 | +static pid_t auditd_pid_vnr(void) |
4313 | +{ |
4314 | + pid_t pid; |
4315 | + const struct auditd_connection *ac; |
4316 | + |
4317 | + rcu_read_lock(); |
4318 | + ac = rcu_dereference(auditd_conn); |
4319 | + if (!ac) |
4320 | + pid = 0; |
4321 | + else |
4322 | + pid = ac->pid; |
4323 | + rcu_read_unlock(); |
4324 | + |
4325 | + return pid; |
4326 | +} |
4327 | + |
4328 | +/** |
4329 | * audit_get_sk - Return the audit socket for the given network namespace |
4330 | * @net: the destination network namespace |
4331 | * |
4332 | @@ -427,6 +452,23 @@ static int audit_set_failure(u32 state) |
4333 | } |
4334 | |
4335 | /** |
4336 | + * auditd_conn_free - RCU helper to release an auditd connection struct |
4337 | + * @rcu: RCU head |
4338 | + * |
4339 | + * Description: |
4340 | + * Drop any references inside the auditd connection tracking struct and free |
4341 | + * the memory. |
4342 | + */ |
4343 | +static void auditd_conn_free(struct rcu_head *rcu) |
4344 | +{ |
4345 | + struct auditd_connection *ac; |
4346 | + |
4347 | + ac = container_of(rcu, struct auditd_connection, rcu); |
4348 | + put_net(ac->net); |
4349 | + kfree(ac); |
4350 | +} |
4351 | + |
4352 | +/** |
4353 | * auditd_set - Set/Reset the auditd connection state |
4354 | * @pid: auditd PID |
4355 | * @portid: auditd netlink portid |
4356 | @@ -434,22 +476,33 @@ static int audit_set_failure(u32 state) |
4357 | * |
4358 | * Description: |
4359 | * This function will obtain and drop network namespace references as |
4360 | - * necessary. |
4361 | + * necessary. Returns zero on success, negative values on failure. |
4362 | */ |
4363 | -static void auditd_set(int pid, u32 portid, struct net *net) |
4364 | +static int auditd_set(int pid, u32 portid, struct net *net) |
4365 | { |
4366 | unsigned long flags; |
4367 | + struct auditd_connection *ac_old, *ac_new; |
4368 | |
4369 | - spin_lock_irqsave(&auditd_conn.lock, flags); |
4370 | - auditd_conn.pid = pid; |
4371 | - auditd_conn.portid = portid; |
4372 | - if (auditd_conn.net) |
4373 | - put_net(auditd_conn.net); |
4374 | - if (net) |
4375 | - auditd_conn.net = get_net(net); |
4376 | - else |
4377 | - auditd_conn.net = NULL; |
4378 | - spin_unlock_irqrestore(&auditd_conn.lock, flags); |
4379 | + if (!pid || !net) |
4380 | + return -EINVAL; |
4381 | + |
4382 | + ac_new = kzalloc(sizeof(*ac_new), GFP_KERNEL); |
4383 | + if (!ac_new) |
4384 | + return -ENOMEM; |
4385 | + ac_new->pid = pid; |
4386 | + ac_new->portid = portid; |
4387 | + ac_new->net = get_net(net); |
4388 | + |
4389 | + spin_lock_irqsave(&auditd_conn_lock, flags); |
4390 | + ac_old = rcu_dereference_protected(auditd_conn, |
4391 | + lockdep_is_held(&auditd_conn_lock)); |
4392 | + rcu_assign_pointer(auditd_conn, ac_new); |
4393 | + spin_unlock_irqrestore(&auditd_conn_lock, flags); |
4394 | + |
4395 | + if (ac_old) |
4396 | + call_rcu(&ac_old->rcu, auditd_conn_free); |
4397 | + |
4398 | + return 0; |
4399 | } |
4400 | |
4401 | /** |
4402 | @@ -544,13 +597,19 @@ static void kauditd_retry_skb(struct sk_buff *skb) |
4403 | */ |
4404 | static void auditd_reset(void) |
4405 | { |
4406 | + unsigned long flags; |
4407 | struct sk_buff *skb; |
4408 | + struct auditd_connection *ac_old; |
4409 | |
4410 | /* if it isn't already broken, break the connection */ |
4411 | - rcu_read_lock(); |
4412 | - if (auditd_conn.pid) |
4413 | - auditd_set(0, 0, NULL); |
4414 | - rcu_read_unlock(); |
4415 | + spin_lock_irqsave(&auditd_conn_lock, flags); |
4416 | + ac_old = rcu_dereference_protected(auditd_conn, |
4417 | + lockdep_is_held(&auditd_conn_lock)); |
4418 | + rcu_assign_pointer(auditd_conn, NULL); |
4419 | + spin_unlock_irqrestore(&auditd_conn_lock, flags); |
4420 | + |
4421 | + if (ac_old) |
4422 | + call_rcu(&ac_old->rcu, auditd_conn_free); |
4423 | |
4424 | /* flush all of the main and retry queues to the hold queue */ |
4425 | while ((skb = skb_dequeue(&audit_retry_queue))) |
4426 | @@ -576,6 +635,7 @@ static int auditd_send_unicast_skb(struct sk_buff *skb) |
4427 | u32 portid; |
4428 | struct net *net; |
4429 | struct sock *sk; |
4430 | + struct auditd_connection *ac; |
4431 | |
4432 | /* NOTE: we can't call netlink_unicast while in the RCU section so |
4433 | * take a reference to the network namespace and grab local |
4434 | @@ -585,15 +645,15 @@ static int auditd_send_unicast_skb(struct sk_buff *skb) |
4435 | * section netlink_unicast() should safely return an error */ |
4436 | |
4437 | rcu_read_lock(); |
4438 | - if (!auditd_conn.pid) { |
4439 | + ac = rcu_dereference(auditd_conn); |
4440 | + if (!ac) { |
4441 | rcu_read_unlock(); |
4442 | rc = -ECONNREFUSED; |
4443 | goto err; |
4444 | } |
4445 | - net = auditd_conn.net; |
4446 | - get_net(net); |
4447 | + net = get_net(ac->net); |
4448 | sk = audit_get_sk(net); |
4449 | - portid = auditd_conn.portid; |
4450 | + portid = ac->portid; |
4451 | rcu_read_unlock(); |
4452 | |
4453 | rc = netlink_unicast(sk, skb, portid, 0); |
4454 | @@ -728,6 +788,7 @@ static int kauditd_thread(void *dummy) |
4455 | u32 portid = 0; |
4456 | struct net *net = NULL; |
4457 | struct sock *sk = NULL; |
4458 | + struct auditd_connection *ac; |
4459 | |
4460 | #define UNICAST_RETRIES 5 |
4461 | |
4462 | @@ -735,14 +796,14 @@ static int kauditd_thread(void *dummy) |
4463 | while (!kthread_should_stop()) { |
4464 | /* NOTE: see the lock comments in auditd_send_unicast_skb() */ |
4465 | rcu_read_lock(); |
4466 | - if (!auditd_conn.pid) { |
4467 | + ac = rcu_dereference(auditd_conn); |
4468 | + if (!ac) { |
4469 | rcu_read_unlock(); |
4470 | goto main_queue; |
4471 | } |
4472 | - net = auditd_conn.net; |
4473 | - get_net(net); |
4474 | + net = get_net(ac->net); |
4475 | sk = audit_get_sk(net); |
4476 | - portid = auditd_conn.portid; |
4477 | + portid = ac->portid; |
4478 | rcu_read_unlock(); |
4479 | |
4480 | /* attempt to flush the hold queue */ |
4481 | @@ -1102,9 +1163,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) |
4482 | memset(&s, 0, sizeof(s)); |
4483 | s.enabled = audit_enabled; |
4484 | s.failure = audit_failure; |
4485 | - rcu_read_lock(); |
4486 | - s.pid = auditd_conn.pid; |
4487 | - rcu_read_unlock(); |
4488 | + s.pid = auditd_pid_vnr(); |
4489 | s.rate_limit = audit_rate_limit; |
4490 | s.backlog_limit = audit_backlog_limit; |
4491 | s.lost = atomic_read(&audit_lost); |
4492 | @@ -1143,38 +1202,44 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) |
4493 | /* test the auditd connection */ |
4494 | audit_replace(requesting_pid); |
4495 | |
4496 | - rcu_read_lock(); |
4497 | - auditd_pid = auditd_conn.pid; |
4498 | + auditd_pid = auditd_pid_vnr(); |
4499 | /* only the current auditd can unregister itself */ |
4500 | if ((!new_pid) && (requesting_pid != auditd_pid)) { |
4501 | - rcu_read_unlock(); |
4502 | audit_log_config_change("audit_pid", new_pid, |
4503 | auditd_pid, 0); |
4504 | return -EACCES; |
4505 | } |
4506 | /* replacing a healthy auditd is not allowed */ |
4507 | if (auditd_pid && new_pid) { |
4508 | - rcu_read_unlock(); |
4509 | audit_log_config_change("audit_pid", new_pid, |
4510 | auditd_pid, 0); |
4511 | return -EEXIST; |
4512 | } |
4513 | - rcu_read_unlock(); |
4514 | - |
4515 | - if (audit_enabled != AUDIT_OFF) |
4516 | - audit_log_config_change("audit_pid", new_pid, |
4517 | - auditd_pid, 1); |
4518 | |
4519 | if (new_pid) { |
4520 | /* register a new auditd connection */ |
4521 | - auditd_set(new_pid, |
4522 | - NETLINK_CB(skb).portid, |
4523 | - sock_net(NETLINK_CB(skb).sk)); |
4524 | + err = auditd_set(new_pid, |
4525 | + NETLINK_CB(skb).portid, |
4526 | + sock_net(NETLINK_CB(skb).sk)); |
4527 | + if (audit_enabled != AUDIT_OFF) |
4528 | + audit_log_config_change("audit_pid", |
4529 | + new_pid, |
4530 | + auditd_pid, |
4531 | + err ? 0 : 1); |
4532 | + if (err) |
4533 | + return err; |
4534 | + |
4535 | /* try to process any backlog */ |
4536 | wake_up_interruptible(&kauditd_wait); |
4537 | - } else |
4538 | + } else { |
4539 | + if (audit_enabled != AUDIT_OFF) |
4540 | + audit_log_config_change("audit_pid", |
4541 | + new_pid, |
4542 | + auditd_pid, 1); |
4543 | + |
4544 | /* unregister the auditd connection */ |
4545 | auditd_reset(); |
4546 | + } |
4547 | } |
4548 | if (s.mask & AUDIT_STATUS_RATE_LIMIT) { |
4549 | err = audit_set_rate_limit(s.rate_limit); |
4550 | @@ -1447,10 +1512,11 @@ static void __net_exit audit_net_exit(struct net *net) |
4551 | { |
4552 | struct audit_net *aunet = net_generic(net, audit_net_id); |
4553 | |
4554 | - rcu_read_lock(); |
4555 | - if (net == auditd_conn.net) |
4556 | - auditd_reset(); |
4557 | - rcu_read_unlock(); |
4558 | + /* NOTE: you would think that we would want to check the auditd |
4559 | + * connection and potentially reset it here if it lives in this |
4560 | + * namespace, but since the auditd connection tracking struct holds a |
4561 | + * reference to this namespace (see auditd_set()) we are only ever |
4562 | + * going to get here after that connection has been released */ |
4563 | |
4564 | netlink_kernel_release(aunet->sk); |
4565 | } |
4566 | @@ -1470,9 +1536,6 @@ static int __init audit_init(void) |
4567 | if (audit_initialized == AUDIT_DISABLED) |
4568 | return 0; |
4569 | |
4570 | - memset(&auditd_conn, 0, sizeof(auditd_conn)); |
4571 | - spin_lock_init(&auditd_conn.lock); |
4572 | - |
4573 | skb_queue_head_init(&audit_queue); |
4574 | skb_queue_head_init(&audit_retry_queue); |
4575 | skb_queue_head_init(&audit_hold_queue); |
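
The audit rework above converts in-place mutation of a shared struct into the usual RCU publish/retire sequence: build the replacement object completely, publish it with a single pointer assignment under the update-side spinlock, and free the old object only after a grace period via call_rcu(). A loose, single-threaded userspace sketch of that update-side shape (assumption: no concurrent readers, so a plain free() stands in for the grace period):

#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

struct conn {
	int pid;
	unsigned int portid;
};

static _Atomic(struct conn *) cur;

static int conn_set(int pid, unsigned int portid)
{
	struct conn *old, *new = calloc(1, sizeof(*new));

	if (!new)
		return -1;
	new->pid = pid;		/* fully initialise before publishing */
	new->portid = portid;
	old = atomic_exchange(&cur, new);	/* publish */
	free(old);		/* kernel: call_rcu() after readers drain */
	return 0;
}

int main(void)
{
	conn_set(1234, 99);
	struct conn *c = atomic_load(&cur);
	printf("pid=%d portid=%u\n", c->pid, c->portid);
	free(atomic_exchange(&cur, NULL));	/* the "reset" path */
	return 0;
}
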
4576 | diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c |
4577 | index b507f1889a72..e21c9321101f 100644 |
4578 | --- a/kernel/cgroup/cgroup.c |
4579 | +++ b/kernel/cgroup/cgroup.c |
4580 | @@ -436,7 +436,7 @@ struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp, |
4581 | return css; |
4582 | } |
4583 | |
4584 | -static void cgroup_get(struct cgroup *cgrp) |
4585 | +static void __maybe_unused cgroup_get(struct cgroup *cgrp) |
4586 | { |
4587 | css_get(&cgrp->self); |
4588 | } |
4589 | @@ -4265,6 +4265,11 @@ static void kill_css(struct cgroup_subsys_state *css) |
4590 | { |
4591 | lockdep_assert_held(&cgroup_mutex); |
4592 | |
4593 | + if (css->flags & CSS_DYING) |
4594 | + return; |
4595 | + |
4596 | + css->flags |= CSS_DYING; |
4597 | + |
4598 | /* |
4599 | * This must happen before css is disassociated with its cgroup. |
4600 | * See seq_css() for details. |
4601 | diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c |
4602 | index 0f41292be0fb..943481230cf8 100644 |
4603 | --- a/kernel/cgroup/cpuset.c |
4604 | +++ b/kernel/cgroup/cpuset.c |
4605 | @@ -176,9 +176,9 @@ typedef enum { |
4606 | } cpuset_flagbits_t; |
4607 | |
4608 | /* convenient tests for these bits */ |
4609 | -static inline bool is_cpuset_online(const struct cpuset *cs) |
4610 | +static inline bool is_cpuset_online(struct cpuset *cs) |
4611 | { |
4612 | - return test_bit(CS_ONLINE, &cs->flags); |
4613 | + return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); |
4614 | } |
4615 | |
4616 | static inline int is_cpu_exclusive(const struct cpuset *cs) |
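
The CSS_DYING flag and css_is_dying() introduced above exist because cgroup teardown is RCU-deferred: a css can still look "online" after its removal has been committed, which is exactly the window the cpuset check closes. A hypothetical userspace analogue of gating use on both conditions:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Names are illustrative; the point is the two-flag lifetime test. */
struct obj {
	atomic_bool online;
	atomic_bool dying;
};

static bool obj_is_usable(struct obj *o)
{
	return atomic_load(&o->online) && !atomic_load(&o->dying);
}

int main(void)
{
	struct obj o = { true, false };

	printf("usable: %d\n", obj_is_usable(&o));
	atomic_store(&o.dying, true);	/* teardown scheduled, not finished */
	printf("usable: %d\n", obj_is_usable(&o));	/* now refused */
	return 0;
}
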
4617 | diff --git a/kernel/cpu.c b/kernel/cpu.c |
4618 | index 37b223e4fc05..e27838ab275d 100644 |
4619 | --- a/kernel/cpu.c |
4620 | +++ b/kernel/cpu.c |
4621 | @@ -1656,13 +1656,13 @@ static ssize_t write_cpuhp_target(struct device *dev, |
4622 | ret = !sp->name || sp->cant_stop ? -EINVAL : 0; |
4623 | mutex_unlock(&cpuhp_state_mutex); |
4624 | if (ret) |
4625 | - return ret; |
4626 | + goto out; |
4627 | |
4628 | if (st->state < target) |
4629 | ret = do_cpu_up(dev->id, target); |
4630 | else |
4631 | ret = do_cpu_down(dev->id, target); |
4632 | - |
4633 | +out: |
4634 | unlock_device_hotplug(); |
4635 | return ret ? ret : count; |
4636 | } |
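
The write_cpuhp_target() hunk above is a plain lock-leak fix: an early "return ret" skipped unlock_device_hotplug(). The conventional shape, shown here with a pthread mutex standing in for the hotplug lock, is a single exit label so every path releases what it took:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_work(int fail)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (fail) {
		ret = -1;
		goto out;	/* not "return ret" -- the lock is held */
	}
	/* ... work under the lock ... */
out:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("%d %d\n", do_work(0), do_work(1));
	return 0;
}
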
4637 | diff --git a/kernel/events/core.c b/kernel/events/core.c |
4638 | index ff01cba86f43..95c7fa675009 100644 |
4639 | --- a/kernel/events/core.c |
4640 | +++ b/kernel/events/core.c |
4641 | @@ -7184,6 +7184,21 @@ int perf_event_account_interrupt(struct perf_event *event) |
4642 | return __perf_event_account_interrupt(event, 1); |
4643 | } |
4644 | |
4645 | +static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs) |
4646 | +{ |
4647 | + /* |
4648 | + * Due to interrupt latency (AKA "skid"), we may enter the |
4649 | + * kernel before taking an overflow, even if the PMU is only |
4650 | + * counting user events. |
4651 | + * To avoid leaking information to userspace, we must always |
4652 | + * reject kernel samples when exclude_kernel is set. |
4653 | + */ |
4654 | + if (event->attr.exclude_kernel && !user_mode(regs)) |
4655 | + return false; |
4656 | + |
4657 | + return true; |
4658 | +} |
4659 | + |
4660 | /* |
4661 | * Generic event overflow handling, sampling. |
4662 | */ |
4663 | @@ -7205,6 +7220,12 @@ static int __perf_event_overflow(struct perf_event *event, |
4664 | ret = __perf_event_account_interrupt(event, throttle); |
4665 | |
4666 | /* |
4667 | + * For security, drop the skid kernel samples if necessary. |
4668 | + */ |
4669 | + if (!sample_is_allowed(event, regs)) |
4670 | + return ret; |
4671 | + |
4672 | + /* |
4673 | * XXX event_limit might not quite work as expected on inherited |
4674 | * events |
4675 | */ |
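
The sample_is_allowed() guard above closes an information leak: interrupt skid can deliver an overflow with kernel-mode registers even when the counter was configured to exclude the kernel. A trivial userspace stand-in for the predicate, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct event_attr {
	bool exclude_kernel;
};

static bool sample_is_allowed(const struct event_attr *attr, bool user_mode)
{
	/* drop kernel-mode samples for user-only counters */
	return !(attr->exclude_kernel && !user_mode);
}

int main(void)
{
	struct event_attr a = { .exclude_kernel = true };

	printf("user sample: %d, kernel sample: %d\n",
	       sample_is_allowed(&a, true), sample_is_allowed(&a, false));
	return 0;
}
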
4676 | diff --git a/kernel/fork.c b/kernel/fork.c |
4677 | index 4cc564ece2cf..4f7151d1716b 100644 |
4678 | --- a/kernel/fork.c |
4679 | +++ b/kernel/fork.c |
4680 | @@ -1552,6 +1552,18 @@ static __latent_entropy struct task_struct *copy_process( |
4681 | if (!p) |
4682 | goto fork_out; |
4683 | |
4684 | + /* |
4685 | + * This _must_ happen before we call free_task(), i.e. before we jump |
4686 | + * to any of the bad_fork_* labels. This is to avoid freeing |
4687 | + * p->set_child_tid which is (ab)used as a kthread's data pointer for |
4688 | + * kernel threads (PF_KTHREAD). |
4689 | + */ |
4690 | + p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; |
4691 | + /* |
4692 | + * Clear TID on mm_release()? |
4693 | + */ |
4694 | + p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; |
4695 | + |
4696 | ftrace_graph_init_task(p); |
4697 | |
4698 | rt_mutex_init_task(p); |
4699 | @@ -1715,11 +1727,6 @@ static __latent_entropy struct task_struct *copy_process( |
4700 | } |
4701 | } |
4702 | |
4703 | - p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; |
4704 | - /* |
4705 | - * Clear TID on mm_release()? |
4706 | - */ |
4707 | - p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; |
4708 | #ifdef CONFIG_BLOCK |
4709 | p->plug = NULL; |
4710 | #endif |
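
The copy_process() change above moves the tid-pointer setup ahead of the first possible jump to a bad_fork_* label, because the error path ends in free_task(), which consumes set_child_tid for kernel threads. The general rule it illustrates: anything the cleanup path reads must be valid before the first failure exit. A hypothetical miniature of the idiom:

#include <stdio.h>
#include <stdlib.h>

struct task {
	void *data;
};

static void task_free(struct task *t)
{
	free(t->data);	/* reads t->data, so it must never be garbage */
	free(t);
}

static struct task *task_create(int fail)
{
	struct task *t = malloc(sizeof(*t));

	if (!t)
		return NULL;
	t->data = NULL;	/* initialise before any failure exit */
	if (fail)
		goto bad;
	t->data = malloc(16);
	return t;
bad:
	task_free(t);	/* safe: t->data is NULL, not uninitialised */
	return NULL;
}

int main(void)
{
	struct task *t = task_create(0);

	if (t)
		task_free(t);
	task_create(1);
	return 0;
}
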
4711 | diff --git a/kernel/ptrace.c b/kernel/ptrace.c |
4712 | index 266ddcc1d8bb..60f356d91060 100644 |
4713 | --- a/kernel/ptrace.c |
4714 | +++ b/kernel/ptrace.c |
4715 | @@ -60,19 +60,25 @@ int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, |
4716 | } |
4717 | |
4718 | |
4719 | +void __ptrace_link(struct task_struct *child, struct task_struct *new_parent, |
4720 | + const struct cred *ptracer_cred) |
4721 | +{ |
4722 | + BUG_ON(!list_empty(&child->ptrace_entry)); |
4723 | + list_add(&child->ptrace_entry, &new_parent->ptraced); |
4724 | + child->parent = new_parent; |
4725 | + child->ptracer_cred = get_cred(ptracer_cred); |
4726 | +} |
4727 | + |
4728 | /* |
4729 | * ptrace a task: make the debugger its new parent and |
4730 | * move it to the ptrace list. |
4731 | * |
4732 | * Must be called with the tasklist lock write-held. |
4733 | */ |
4734 | -void __ptrace_link(struct task_struct *child, struct task_struct *new_parent) |
4735 | +static void ptrace_link(struct task_struct *child, struct task_struct *new_parent) |
4736 | { |
4737 | - BUG_ON(!list_empty(&child->ptrace_entry)); |
4738 | - list_add(&child->ptrace_entry, &new_parent->ptraced); |
4739 | - child->parent = new_parent; |
4740 | rcu_read_lock(); |
4741 | - child->ptracer_cred = get_cred(__task_cred(new_parent)); |
4742 | + __ptrace_link(child, new_parent, __task_cred(new_parent)); |
4743 | rcu_read_unlock(); |
4744 | } |
4745 | |
4746 | @@ -386,7 +392,7 @@ static int ptrace_attach(struct task_struct *task, long request, |
4747 | flags |= PT_SEIZED; |
4748 | task->ptrace = flags; |
4749 | |
4750 | - __ptrace_link(task, current); |
4751 | + ptrace_link(task, current); |
4752 | |
4753 | /* SEIZE doesn't trap tracee on attach */ |
4754 | if (!seize) |
4755 | @@ -459,7 +465,7 @@ static int ptrace_traceme(void) |
4756 | */ |
4757 | if (!ret && !(current->real_parent->flags & PF_EXITING)) { |
4758 | current->ptrace = PT_PTRACED; |
4759 | - __ptrace_link(current, current->real_parent); |
4760 | + ptrace_link(current, current->real_parent); |
4761 | } |
4762 | } |
4763 | write_unlock_irq(&tasklist_lock); |
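
The ptrace refactor above splits linking from credential sampling: ptrace_attach() and ptrace_traceme() still record the tracer's current credential via the new ptrace_link() wrapper, while fork of an already-traced task reuses the credential captured at attach time instead of re-sampling it. A rough stand-in for that "pass the recorded credential explicitly" shape (all names hypothetical):

#include <stdio.h>

struct cred {
	const char *label;
};

struct task {
	struct task *parent;
	const struct cred *tracer_cred;
};

static void link_tracee(struct task *child, struct task *tracer,
			const struct cred *tracer_cred)
{
	child->parent = tracer;
	child->tracer_cred = tracer_cred;	/* caller-chosen credential */
}

int main(void)
{
	struct cred at_attach = { "cred-at-attach" };
	struct task tracer = { 0 }, tracee = { 0 }, child = { 0 };

	link_tracee(&tracee, &tracer, &at_attach);
	/* a fork of the tracee inherits the recorded cred, not a fresh one */
	link_tracee(&child, &tracer, tracee.tracer_cred);
	printf("%s\n", child.tracer_cred->label);
	return 0;
}
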
4764 | diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c |
4765 | index ef3bcfb15b39..6e48a6b6a564 100644 |
4766 | --- a/kernel/rcu/srcu.c |
4767 | +++ b/kernel/rcu/srcu.c |
4768 | @@ -257,7 +257,7 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct); |
4769 | |
4770 | /* |
4771 | * Counts the new reader in the appropriate per-CPU element of the |
4772 | - * srcu_struct. Must be called from process context. |
4773 | + * srcu_struct. |
4774 | * Returns an index that must be passed to the matching srcu_read_unlock(). |
4775 | */ |
4776 | int __srcu_read_lock(struct srcu_struct *sp) |
4777 | @@ -265,7 +265,7 @@ int __srcu_read_lock(struct srcu_struct *sp) |
4778 | int idx; |
4779 | |
4780 | idx = READ_ONCE(sp->completed) & 0x1; |
4781 | - __this_cpu_inc(sp->per_cpu_ref->lock_count[idx]); |
4782 | + this_cpu_inc(sp->per_cpu_ref->lock_count[idx]); |
4783 | smp_mb(); /* B */ /* Avoid leaking the critical section. */ |
4784 | return idx; |
4785 | } |
4786 | @@ -275,7 +275,6 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock); |
4787 | * Removes the count for the old reader from the appropriate per-CPU |
4788 | * element of the srcu_struct. Note that this may well be a different |
4789 | * CPU than that which was incremented by the corresponding srcu_read_lock(). |
4790 | - * Must be called from process context. |
4791 | */ |
4792 | void __srcu_read_unlock(struct srcu_struct *sp, int idx) |
4793 | { |
4794 | diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c |
4795 | index dd3e91d68dc7..8827ee31cbf5 100644 |
4796 | --- a/kernel/trace/ftrace.c |
4797 | +++ b/kernel/trace/ftrace.c |
4798 | @@ -4859,7 +4859,7 @@ ftrace_graph_release(struct inode *inode, struct file *file) |
4799 | } |
4800 | |
4801 | out: |
4802 | - kfree(fgd->new_hash); |
4803 | + free_ftrace_hash(fgd->new_hash); |
4804 | kfree(fgd); |
4805 | |
4806 | return ret; |
4807 | diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c |
4808 | index 0488c6735c46..8a05a98a8666 100644 |
4809 | --- a/net/bridge/br_netlink.c |
4810 | +++ b/net/bridge/br_netlink.c |
4811 | @@ -591,7 +591,7 @@ static int br_afspec(struct net_bridge *br, |
4812 | err = 0; |
4813 | switch (nla_type(attr)) { |
4814 | case IFLA_BRIDGE_VLAN_TUNNEL_INFO: |
4815 | - if (!(p->flags & BR_VLAN_TUNNEL)) |
4816 | + if (!p || !(p->flags & BR_VLAN_TUNNEL)) |
4817 | return -EINVAL; |
4818 | err = br_parse_vlan_tunnel_info(attr, &tinfo_curr); |
4819 | if (err) |
4820 | diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c |
4821 | index 0db8102995a5..6f12a5271219 100644 |
4822 | --- a/net/bridge/br_stp_if.c |
4823 | +++ b/net/bridge/br_stp_if.c |
4824 | @@ -179,7 +179,8 @@ static void br_stp_start(struct net_bridge *br) |
4825 | br_debug(br, "using kernel STP\n"); |
4826 | |
4827 | /* To start timers on any ports left in blocking */ |
4828 | - mod_timer(&br->hello_timer, jiffies + br->hello_time); |
4829 | + if (br->dev->flags & IFF_UP) |
4830 | + mod_timer(&br->hello_timer, jiffies + br->hello_time); |
4831 | br_port_state_selection(br); |
4832 | } |
4833 | |
4834 | diff --git a/net/core/skbuff.c b/net/core/skbuff.c |
4835 | index f1d04592ace0..ac5059aad313 100644 |
4836 | --- a/net/core/skbuff.c |
4837 | +++ b/net/core/skbuff.c |
4838 | @@ -3755,8 +3755,11 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk) |
4839 | |
4840 | spin_lock_irqsave(&q->lock, flags); |
4841 | skb = __skb_dequeue(q); |
4842 | - if (skb && (skb_next = skb_peek(q))) |
4843 | + if (skb && (skb_next = skb_peek(q))) { |
4844 | icmp_next = is_icmp_err_skb(skb_next); |
4845 | + if (icmp_next) |
4846 | + sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin; |
4847 | + } |
4848 | spin_unlock_irqrestore(&q->lock, flags); |
4849 | |
4850 | if (is_icmp_err_skb(skb) && !icmp_next) |
4851 | diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c |
4852 | index 737be6470c7f..ffaa4fb33d0a 100644 |
4853 | --- a/net/dsa/dsa2.c |
4854 | +++ b/net/dsa/dsa2.c |
4855 | @@ -440,8 +440,10 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst) |
4856 | dsa_ds_unapply(dst, ds); |
4857 | } |
4858 | |
4859 | - if (dst->cpu_switch) |
4860 | + if (dst->cpu_switch) { |
4861 | dsa_cpu_port_ethtool_restore(dst->cpu_switch); |
4862 | + dst->cpu_switch = NULL; |
4863 | + } |
4864 | |
4865 | pr_info("DSA: tree %d unapplied\n", dst->tree); |
4866 | dst->applied = false; |
4867 | diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c |
4868 | index 13a9a3297eae..b9c1bc5e54db 100644 |
4869 | --- a/net/ipv4/af_inet.c |
4870 | +++ b/net/ipv4/af_inet.c |
4871 | @@ -1043,7 +1043,7 @@ static struct inet_protosw inetsw_array[] = |
4872 | .type = SOCK_DGRAM, |
4873 | .protocol = IPPROTO_ICMP, |
4874 | .prot = &ping_prot, |
4875 | - .ops = &inet_dgram_ops, |
4876 | + .ops = &inet_sockraw_ops, |
4877 | .flags = INET_PROTOSW_REUSE, |
4878 | }, |
4879 | |
4880 | diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c |
4881 | index 6e3c512054a6..324c9bcc5456 100644 |
4882 | --- a/net/ipv4/tcp_cong.c |
4883 | +++ b/net/ipv4/tcp_cong.c |
4884 | @@ -180,6 +180,7 @@ void tcp_init_congestion_control(struct sock *sk) |
4885 | { |
4886 | const struct inet_connection_sock *icsk = inet_csk(sk); |
4887 | |
4888 | + tcp_sk(sk)->prior_ssthresh = 0; |
4889 | if (icsk->icsk_ca_ops->init) |
4890 | icsk->icsk_ca_ops->init(sk); |
4891 | if (tcp_ca_needs_ecn(sk)) |
4892 | diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c |
4893 | index 37ac9de713c6..8d772fea1dde 100644 |
4894 | --- a/net/ipv6/calipso.c |
4895 | +++ b/net/ipv6/calipso.c |
4896 | @@ -1319,7 +1319,7 @@ static int calipso_skbuff_setattr(struct sk_buff *skb, |
4897 | struct ipv6hdr *ip6_hdr; |
4898 | struct ipv6_opt_hdr *hop; |
4899 | unsigned char buf[CALIPSO_MAX_BUFFER]; |
4900 | - int len_delta, new_end, pad; |
4901 | + int len_delta, new_end, pad, payload; |
4902 | unsigned int start, end; |
4903 | |
4904 | ip6_hdr = ipv6_hdr(skb); |
4905 | @@ -1346,6 +1346,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb, |
4906 | if (ret_val < 0) |
4907 | return ret_val; |
4908 | |
4909 | + ip6_hdr = ipv6_hdr(skb); /* Reset as skb_cow() may have moved it */ |
4910 | + |
4911 | if (len_delta) { |
4912 | if (len_delta > 0) |
4913 | skb_push(skb, len_delta); |
4914 | @@ -1355,6 +1357,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb, |
4915 | sizeof(*ip6_hdr) + start); |
4916 | skb_reset_network_header(skb); |
4917 | ip6_hdr = ipv6_hdr(skb); |
4918 | + payload = ntohs(ip6_hdr->payload_len); |
4919 | + ip6_hdr->payload_len = htons(payload + len_delta); |
4920 | } |
4921 | |
4922 | hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1); |
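
Two distinct bugs are fixed in calipso_skbuff_setattr() above: ip6_hdr is re-read because skb_cow() may relocate the packet data, and payload_len is adjusted to follow the bytes pushed or pulled. The first pitfall has a direct userspace analogue with realloc(), sketched here:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = malloc(8);
	char *hdr, *tmp;

	if (!buf)
		return 1;
	strcpy(buf, "hdr");
	hdr = buf;	/* like taking ip6_hdr before skb_cow() */

	tmp = realloc(buf, 1 << 20);	/* may move the data */
	if (!tmp) {
		free(buf);
		return 1;
	}
	buf = tmp;
	hdr = buf;	/* re-derive the pointer, as the patch does */
	printf("%s\n", hdr);
	free(buf);
	return 0;
}
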
4923 | diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c |
4924 | index 280268f1dd7b..cdb3728faca7 100644 |
4925 | --- a/net/ipv6/ip6_offload.c |
4926 | +++ b/net/ipv6/ip6_offload.c |
4927 | @@ -116,8 +116,10 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, |
4928 | |
4929 | if (udpfrag) { |
4930 | int err = ip6_find_1stfragopt(skb, &prevhdr); |
4931 | - if (err < 0) |
4932 | + if (err < 0) { |
4933 | + kfree_skb_list(segs); |
4934 | return ERR_PTR(err); |
4935 | + } |
4936 | fptr = (struct frag_hdr *)((u8 *)ipv6h + err); |
4937 | fptr->frag_off = htons(offset); |
4938 | if (skb->next) |
4939 | diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c |
4940 | index 15ff33934f79..e2d7867f3112 100644 |
4941 | --- a/net/ipv6/ip6_tunnel.c |
4942 | +++ b/net/ipv6/ip6_tunnel.c |
4943 | @@ -1095,6 +1095,9 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, |
4944 | |
4945 | if (!dst) { |
4946 | route_lookup: |
4947 | + /* add dsfield to flowlabel for route lookup */ |
4948 | + fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel); |
4949 | + |
4950 | dst = ip6_route_output(net, NULL, fl6); |
4951 | |
4952 | if (dst->error) |
4953 | diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c |
4954 | index 9b522fa90e6d..ac826dd338ff 100644 |
4955 | --- a/net/ipv6/ping.c |
4956 | +++ b/net/ipv6/ping.c |
4957 | @@ -192,7 +192,7 @@ static struct inet_protosw pingv6_protosw = { |
4958 | .type = SOCK_DGRAM, |
4959 | .protocol = IPPROTO_ICMPV6, |
4960 | .prot = &pingv6_prot, |
4961 | - .ops = &inet6_dgram_ops, |
4962 | + .ops = &inet6_sockraw_ops, |
4963 | .flags = INET_PROTOSW_REUSE, |
4964 | }; |
4965 | |
4966 | diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c |
4967 | index 1f992d9e261d..60be012fe708 100644 |
4968 | --- a/net/ipv6/raw.c |
4969 | +++ b/net/ipv6/raw.c |
4970 | @@ -1338,7 +1338,7 @@ void raw6_proc_exit(void) |
4971 | #endif /* CONFIG_PROC_FS */ |
4972 | |
4973 | /* Same as inet6_dgram_ops, sans udp_poll. */ |
4974 | -static const struct proto_ops inet6_sockraw_ops = { |
4975 | +const struct proto_ops inet6_sockraw_ops = { |
4976 | .family = PF_INET6, |
4977 | .owner = THIS_MODULE, |
4978 | .release = inet6_release, |
4979 | diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c |
4980 | index 0e015906f9ca..07d36573f50b 100644 |
4981 | --- a/net/ipv6/xfrm6_mode_ro.c |
4982 | +++ b/net/ipv6/xfrm6_mode_ro.c |
4983 | @@ -47,6 +47,8 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb) |
4984 | iph = ipv6_hdr(skb); |
4985 | |
4986 | hdr_len = x->type->hdr_offset(x, skb, &prevhdr); |
4987 | + if (hdr_len < 0) |
4988 | + return hdr_len; |
4989 | skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); |
4990 | skb_set_network_header(skb, -x->props.header_len); |
4991 | skb->transport_header = skb->network_header + hdr_len; |
4992 | diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c |
4993 | index 4439ee44c8b0..5e304102287c 100644 |
4994 | --- a/net/ipv6/xfrm6_mode_transport.c |
4995 | +++ b/net/ipv6/xfrm6_mode_transport.c |
4996 | @@ -28,6 +28,8 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb) |
4997 | iph = ipv6_hdr(skb); |
4998 | |
4999 | hdr_len = x->type->hdr_offset(x, skb, &prevhdr); |
5000 | + if (hdr_len < 0) |
5001 | + return hdr_len; |
5002 | skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); |
5003 | skb_set_network_header(skb, -x->props.header_len); |
5004 | skb->transport_header = skb->network_header + hdr_len; |
5005 | diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c |
5006 | index 78dfbf9588b3..99aff9618fb8 100644 |
5007 | --- a/net/netfilter/nft_set_rbtree.c |
5008 | +++ b/net/netfilter/nft_set_rbtree.c |
5009 | @@ -117,17 +117,17 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, |
5010 | else if (d > 0) |
5011 | p = &parent->rb_right; |
5012 | else { |
5013 | - if (nft_set_elem_active(&rbe->ext, genmask)) { |
5014 | - if (nft_rbtree_interval_end(rbe) && |
5015 | - !nft_rbtree_interval_end(new)) |
5016 | - p = &parent->rb_left; |
5017 | - else if (!nft_rbtree_interval_end(rbe) && |
5018 | - nft_rbtree_interval_end(new)) |
5019 | - p = &parent->rb_right; |
5020 | - else { |
5021 | - *ext = &rbe->ext; |
5022 | - return -EEXIST; |
5023 | - } |
5024 | + if (nft_rbtree_interval_end(rbe) && |
5025 | + !nft_rbtree_interval_end(new)) { |
5026 | + p = &parent->rb_left; |
5027 | + } else if (!nft_rbtree_interval_end(rbe) && |
5028 | + nft_rbtree_interval_end(new)) { |
5029 | + p = &parent->rb_right; |
5030 | + } else if (nft_set_elem_active(&rbe->ext, genmask)) { |
5031 | + *ext = &rbe->ext; |
5032 | + return -EEXIST; |
5033 | + } else { |
5034 | + p = &parent->rb_left; |
5035 | } |
5036 | } |
5037 | } |
5038 | diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c |
5039 | index 0010955d7876..1845d47474a0 100644 |
5040 | --- a/security/keys/encrypted-keys/encrypted.c |
5041 | +++ b/security/keys/encrypted-keys/encrypted.c |
5042 | @@ -480,12 +480,9 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload, |
5043 | struct skcipher_request *req; |
5044 | unsigned int encrypted_datalen; |
5045 | u8 iv[AES_BLOCK_SIZE]; |
5046 | - unsigned int padlen; |
5047 | - char pad[16]; |
5048 | int ret; |
5049 | |
5050 | encrypted_datalen = roundup(epayload->decrypted_datalen, blksize); |
5051 | - padlen = encrypted_datalen - epayload->decrypted_datalen; |
5052 | |
5053 | req = init_skcipher_req(derived_key, derived_keylen); |
5054 | ret = PTR_ERR(req); |
5055 | @@ -493,11 +490,10 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload, |
5056 | goto out; |
5057 | dump_decrypted_data(epayload); |
5058 | |
5059 | - memset(pad, 0, sizeof pad); |
5060 | sg_init_table(sg_in, 2); |
5061 | sg_set_buf(&sg_in[0], epayload->decrypted_data, |
5062 | epayload->decrypted_datalen); |
5063 | - sg_set_buf(&sg_in[1], pad, padlen); |
5064 | + sg_set_page(&sg_in[1], ZERO_PAGE(0), AES_BLOCK_SIZE, 0); |
5065 | |
5066 | sg_init_table(sg_out, 1); |
5067 | sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen); |
5068 | @@ -584,9 +580,14 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload, |
5069 | struct skcipher_request *req; |
5070 | unsigned int encrypted_datalen; |
5071 | u8 iv[AES_BLOCK_SIZE]; |
5072 | - char pad[16]; |
5073 | + u8 *pad; |
5074 | int ret; |
5075 | |
5076 | + /* Throwaway buffer to hold the unused zero padding at the end */ |
5077 | + pad = kmalloc(AES_BLOCK_SIZE, GFP_KERNEL); |
5078 | + if (!pad) |
5079 | + return -ENOMEM; |
5080 | + |
5081 | encrypted_datalen = roundup(epayload->decrypted_datalen, blksize); |
5082 | req = init_skcipher_req(derived_key, derived_keylen); |
5083 | ret = PTR_ERR(req); |
5084 | @@ -594,13 +595,12 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload, |
5085 | goto out; |
5086 | dump_encrypted_data(epayload, encrypted_datalen); |
5087 | |
5088 | - memset(pad, 0, sizeof pad); |
5089 | sg_init_table(sg_in, 1); |
5090 | sg_init_table(sg_out, 2); |
5091 | sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen); |
5092 | sg_set_buf(&sg_out[0], epayload->decrypted_data, |
5093 | epayload->decrypted_datalen); |
5094 | - sg_set_buf(&sg_out[1], pad, sizeof pad); |
5095 | + sg_set_buf(&sg_out[1], pad, AES_BLOCK_SIZE); |
5096 | |
5097 | memcpy(iv, epayload->iv, sizeof(iv)); |
5098 | skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv); |
5099 | @@ -612,6 +612,7 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload, |
5100 | goto out; |
5101 | dump_decrypted_data(epayload); |
5102 | out: |
5103 | + kfree(pad); |
5104 | return ret; |
5105 | } |
5106 | |
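
The encrypted-keys hunks above pad the payload out to the AES block size with zeros, sourcing the pad from ZERO_PAGE(0) on encrypt and a heap buffer on decrypt; a small on-stack array is no longer acceptable in a scatterlist once kernel stacks may be virtually mapped. The length arithmetic involved reduces to a round-up:

#include <stdio.h>

/* round x up to a multiple of y (y > 0), as roundup() does in the kernel */
#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	const unsigned int blksize = 16;	/* AES block size */
	unsigned int len;

	for (len = 15; len <= 17; len++)
		printf("plain %u -> encrypted %u (pad %u)\n",
		       len, ROUNDUP(len, blksize),
		       ROUNDUP(len, blksize) - len);
	return 0;
}
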
5107 | diff --git a/security/keys/key.c b/security/keys/key.c |
5108 | index 346fbf201c22..2f4ce35ae2aa 100644 |
5109 | --- a/security/keys/key.c |
5110 | +++ b/security/keys/key.c |
5111 | @@ -962,12 +962,11 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen) |
5112 | /* the key must be writable */ |
5113 | ret = key_permission(key_ref, KEY_NEED_WRITE); |
5114 | if (ret < 0) |
5115 | - goto error; |
5116 | + return ret; |
5117 | |
5118 | /* attempt to update it if supported */ |
5119 | - ret = -EOPNOTSUPP; |
5120 | if (!key->type->update) |
5121 | - goto error; |
5122 | + return -EOPNOTSUPP; |
5123 | |
5124 | memset(&prep, 0, sizeof(prep)); |
5125 | prep.data = payload; |
5126 | diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c |
5127 | index 4ad3212adebe..3663a98b473d 100644 |
5128 | --- a/security/keys/keyctl.c |
5129 | +++ b/security/keys/keyctl.c |
5130 | @@ -99,7 +99,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type, |
5131 | /* pull the payload in if one was supplied */ |
5132 | payload = NULL; |
5133 | |
5134 | - if (_payload) { |
5135 | + if (plen) { |
5136 | ret = -ENOMEM; |
5137 | payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN); |
5138 | if (!payload) { |
5139 | @@ -329,7 +329,7 @@ long keyctl_update_key(key_serial_t id, |
5140 | |
5141 | /* pull the payload in if one was supplied */ |
5142 | payload = NULL; |
5143 | - if (_payload) { |
5144 | + if (plen) { |
5145 | ret = -ENOMEM; |
5146 | payload = kmalloc(plen, GFP_KERNEL); |
5147 | if (!payload) |
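
Both keyctl hunks above key the payload allocation off the requested length rather than the pointer: a zero-length payload needs no buffer at all, and a NULL pointer with a non-zero length now reaches the copy step, where it fails cleanly instead of being passed down as NULL with plen > 0. A simplified stand-in for that pull-the-payload path (memcpy plays the role of copy_from_user):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int pull_payload(const void *user, size_t plen, void **out)
{
	void *buf;

	*out = NULL;
	if (!plen)
		return 0;	/* nothing to pull, no allocation */
	buf = malloc(plen);
	if (!buf)
		return -1;
	if (!user) {		/* the kernel would return -EFAULT here */
		free(buf);
		return -1;
	}
	memcpy(buf, user, plen);
	*out = buf;
	return 0;
}

int main(void)
{
	void *p;

	printf("%d ", pull_payload("abc", 3, &p));
	free(p);
	printf("%d ", pull_payload(NULL, 0, &p));
	printf("%d\n", pull_payload(NULL, 3, &p));
	return 0;
}
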
5148 | diff --git a/sound/core/timer.c b/sound/core/timer.c |
5149 | index 6d4fbc439246..171c01ad9375 100644 |
5150 | --- a/sound/core/timer.c |
5151 | +++ b/sound/core/timer.c |
5152 | @@ -1623,6 +1623,7 @@ static int snd_timer_user_tselect(struct file *file, |
5153 | if (err < 0) |
5154 | goto __err; |
5155 | |
5156 | + tu->qhead = tu->qtail = tu->qused = 0; |
5157 | kfree(tu->queue); |
5158 | tu->queue = NULL; |
5159 | kfree(tu->tqueue); |
5160 | @@ -1964,6 +1965,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, |
5161 | |
5162 | tu = file->private_data; |
5163 | unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read); |
5164 | + mutex_lock(&tu->ioctl_lock); |
5165 | spin_lock_irq(&tu->qlock); |
5166 | while ((long)count - result >= unit) { |
5167 | while (!tu->qused) { |
5168 | @@ -1979,7 +1981,9 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, |
5169 | add_wait_queue(&tu->qchange_sleep, &wait); |
5170 | |
5171 | spin_unlock_irq(&tu->qlock); |
5172 | + mutex_unlock(&tu->ioctl_lock); |
5173 | schedule(); |
5174 | + mutex_lock(&tu->ioctl_lock); |
5175 | spin_lock_irq(&tu->qlock); |
5176 | |
5177 | remove_wait_queue(&tu->qchange_sleep, &wait); |
5178 | @@ -1999,7 +2003,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, |
5179 | tu->qused--; |
5180 | spin_unlock_irq(&tu->qlock); |
5181 | |
5182 | - mutex_lock(&tu->ioctl_lock); |
5183 | if (tu->tread) { |
5184 | if (copy_to_user(buffer, &tu->tqueue[qhead], |
5185 | sizeof(struct snd_timer_tread))) |
5186 | @@ -2009,7 +2012,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, |
5187 | sizeof(struct snd_timer_read))) |
5188 | err = -EFAULT; |
5189 | } |
5190 | - mutex_unlock(&tu->ioctl_lock); |
5191 | |
5192 | spin_lock_irq(&tu->qlock); |
5193 | if (err < 0) |
5194 | @@ -2019,6 +2021,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, |
5195 | } |
5196 | _error: |
5197 | spin_unlock_irq(&tu->qlock); |
5198 | + mutex_unlock(&tu->ioctl_lock); |
5199 | return result > 0 ? result : err; |
5200 | } |
5201 | |
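
The snd_timer_user_read() change above holds ioctl_lock for the whole read loop and releases it only around schedule(), so a concurrent ioctl (such as the tselect path, which now also resets the queue indices) cannot free or swap the queue mid-copy. Dropping a lock only while sleeping is the same shape pthread_cond_wait() provides in userspace:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t c = PTHREAD_COND_INITIALIZER;
static int avail;

static void *producer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&m);
	avail = 1;
	pthread_cond_signal(&c);
	pthread_mutex_unlock(&m);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	pthread_mutex_lock(&m);
	while (!avail)
		pthread_cond_wait(&c, &m);  /* lock dropped only while asleep */
	printf("got event\n");		    /* consume under the lock */
	pthread_mutex_unlock(&m);
	pthread_join(t, NULL);
	return 0;
}
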
5202 | diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c |
5203 | index 2722bb0c5573..98d60f471c5d 100644 |
5204 | --- a/sound/soc/soc-core.c |
5205 | +++ b/sound/soc/soc-core.c |
5206 | @@ -2286,6 +2286,9 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card) |
5207 | list_for_each_entry(rtd, &card->rtd_list, list) |
5208 | flush_delayed_work(&rtd->delayed_work); |
5209 | |
5210 | + /* free the ALSA card at first; this syncs with pending operations */ |
5211 | + snd_card_free(card->snd_card); |
5212 | + |
5213 | /* remove and free each DAI */ |
5214 | soc_remove_dai_links(card); |
5215 | soc_remove_pcm_runtimes(card); |
5216 | @@ -2300,9 +2303,7 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card) |
5217 | if (card->remove) |
5218 | card->remove(card); |
5219 | |
5220 | - snd_card_free(card->snd_card); |
5221 | return 0; |
5222 | - |
5223 | } |
5224 | |
5225 | /* removes a socdev */ |
5226 | diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c |
5227 | index c505b019e09c..bfac6f21ae5e 100644 |
5228 | --- a/sound/x86/intel_hdmi_audio.c |
5229 | +++ b/sound/x86/intel_hdmi_audio.c |
5230 | @@ -1809,10 +1809,6 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev) |
5231 | pdata->notify_pending = false; |
5232 | spin_unlock_irq(&pdata->lpe_audio_slock); |
5233 | |
5234 | - /* runtime PM isn't enabled as default, since it won't save much on |
5235 | - * BYT/CHT devices; user who want the runtime PM should adjust the |
5236 | - * power/ontrol and power/autosuspend_delay_ms sysfs entries instead |
5237 | - */ |
5238 | pm_runtime_use_autosuspend(&pdev->dev); |
5239 | pm_runtime_mark_last_busy(&pdev->dev); |
5240 | pm_runtime_set_active(&pdev->dev); |