Contents of /trunk/kernel-alx-legacy/patches-4.9/0333-4.9.234-all-fixes.patch
Parent Directory | Revision Log
Revision 3617 -
(show annotations)
(download)
Wed Aug 26 13:06:32 2020 UTC (4 years, 1 month ago) by niro
File size: 40908 byte(s)
-linux-4.9.234
1 | diff --git a/Makefile b/Makefile |
2 | index af68e8c3fb962..e5a6f33e95de6 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 4 |
7 | PATCHLEVEL = 9 |
8 | -SUBLEVEL = 233 |
9 | +SUBLEVEL = 234 |
10 | EXTRAVERSION = |
11 | NAME = Roaring Lionus |
12 | |
13 | diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h |
14 | index ff4049155c840..355aec0867f4d 100644 |
15 | --- a/arch/alpha/include/asm/io.h |
16 | +++ b/arch/alpha/include/asm/io.h |
17 | @@ -491,10 +491,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr) |
18 | } |
19 | #endif |
20 | |
21 | -#define ioread16be(p) be16_to_cpu(ioread16(p)) |
22 | -#define ioread32be(p) be32_to_cpu(ioread32(p)) |
23 | -#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p)) |
24 | -#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p)) |
25 | +#define ioread16be(p) swab16(ioread16(p)) |
26 | +#define ioread32be(p) swab32(ioread32(p)) |
27 | +#define iowrite16be(v,p) iowrite16(swab16(v), (p)) |
28 | +#define iowrite32be(v,p) iowrite32(swab32(v), (p)) |
29 | |
30 | #define inb_p inb |
31 | #define inw_p inw |
32 | diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c |
33 | index bb0d5e21d60bd..b5ce1e81f945a 100644 |
34 | --- a/arch/arm/kvm/mmu.c |
35 | +++ b/arch/arm/kvm/mmu.c |
36 | @@ -298,12 +298,6 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) |
37 | next = stage2_pgd_addr_end(addr, end); |
38 | if (!stage2_pgd_none(*pgd)) |
39 | unmap_stage2_puds(kvm, pgd, addr, next); |
40 | - /* |
41 | - * If the range is too large, release the kvm->mmu_lock |
42 | - * to prevent starvation and lockup detector warnings. |
43 | - */ |
44 | - if (next != end) |
45 | - cond_resched_lock(&kvm->mmu_lock); |
46 | } while (pgd++, addr = next, addr != end); |
47 | } |
48 | |
49 | diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h |
50 | index 3177ce8331d69..baee0c77b9818 100644 |
51 | --- a/arch/m68k/include/asm/m53xxacr.h |
52 | +++ b/arch/m68k/include/asm/m53xxacr.h |
53 | @@ -88,9 +88,9 @@ |
54 | * coherency though in all cases. And for copyback caches we will need |
55 | * to push cached data as well. |
56 | */ |
57 | -#define CACHE_INIT CACR_CINVA |
58 | -#define CACHE_INVALIDATE CACR_CINVA |
59 | -#define CACHE_INVALIDATED CACR_CINVA |
60 | +#define CACHE_INIT (CACHE_MODE + CACR_CINVA - CACR_EC) |
61 | +#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINVA) |
62 | +#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINVA) |
63 | |
64 | #define ACR0_MODE ((CONFIG_RAMBASE & 0xff000000) + \ |
65 | (0x000f0000) + \ |
66 | diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c |
67 | index 2791f568bdb25..3e4fb430ae457 100644 |
68 | --- a/arch/powerpc/mm/fault.c |
69 | +++ b/arch/powerpc/mm/fault.c |
70 | @@ -192,6 +192,9 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) |
71 | return MM_FAULT_CONTINUE; |
72 | } |
73 | |
74 | +// This comes from 64-bit struct rt_sigframe + __SIGNAL_FRAMESIZE |
75 | +#define SIGFRAME_MAX_SIZE (4096 + 128) |
76 | + |
77 | /* |
78 | * For 600- and 800-family processors, the error_code parameter is DSISR |
79 | * for a data fault, SRR1 for an instruction fault. For 400-family processors |
80 | @@ -341,7 +344,7 @@ retry: |
81 | /* |
82 | * N.B. The POWER/Open ABI allows programs to access up to |
83 | * 288 bytes below the stack pointer. |
84 | - * The kernel signal delivery code writes up to about 1.5kB |
85 | + * The kernel signal delivery code writes up to about 4kB |
86 | * below the stack pointer (r1) before decrementing it. |
87 | * The exec code can write slightly over 640kB to the stack |
88 | * before setting the user r1. Thus we allow the stack to |
89 | @@ -365,7 +368,7 @@ retry: |
90 | * between the last mapped region and the stack will |
91 | * expand the stack rather than segfaulting. |
92 | */ |
93 | - if (address + 2048 < uregs->gpr[1] && !store_update_sp) |
94 | + if (address + SIGFRAME_MAX_SIZE < uregs->gpr[1] && !store_update_sp) |
95 | goto bad_area; |
96 | } |
97 | if (expand_stack(vma, address)) |
98 | diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c |
99 | index 0af19aa1df57d..3d6b372fab3f6 100644 |
100 | --- a/arch/powerpc/platforms/pseries/ras.c |
101 | +++ b/arch/powerpc/platforms/pseries/ras.c |
102 | @@ -101,7 +101,6 @@ static void handle_system_shutdown(char event_modifier) |
103 | case EPOW_SHUTDOWN_ON_UPS: |
104 | pr_emerg("Loss of system power detected. System is running on" |
105 | " UPS/battery. Check RTAS error log for details\n"); |
106 | - orderly_poweroff(true); |
107 | break; |
108 | |
109 | case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS: |
110 | diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h |
111 | index 5b0579abb3982..3ac991d81e74d 100644 |
112 | --- a/arch/x86/include/asm/archrandom.h |
113 | +++ b/arch/x86/include/asm/archrandom.h |
114 | @@ -45,7 +45,7 @@ static inline bool rdrand_long(unsigned long *v) |
115 | bool ok; |
116 | unsigned int retry = RDRAND_RETRY_LOOPS; |
117 | do { |
118 | - asm volatile(RDRAND_LONG "\n\t" |
119 | + asm volatile(RDRAND_LONG |
120 | CC_SET(c) |
121 | : CC_OUT(c) (ok), "=a" (*v)); |
122 | if (ok) |
123 | @@ -59,7 +59,7 @@ static inline bool rdrand_int(unsigned int *v) |
124 | bool ok; |
125 | unsigned int retry = RDRAND_RETRY_LOOPS; |
126 | do { |
127 | - asm volatile(RDRAND_INT "\n\t" |
128 | + asm volatile(RDRAND_INT |
129 | CC_SET(c) |
130 | : CC_OUT(c) (ok), "=a" (*v)); |
131 | if (ok) |
132 | @@ -71,7 +71,7 @@ static inline bool rdrand_int(unsigned int *v) |
133 | static inline bool rdseed_long(unsigned long *v) |
134 | { |
135 | bool ok; |
136 | - asm volatile(RDSEED_LONG "\n\t" |
137 | + asm volatile(RDSEED_LONG |
138 | CC_SET(c) |
139 | : CC_OUT(c) (ok), "=a" (*v)); |
140 | return ok; |
141 | @@ -80,7 +80,7 @@ static inline bool rdseed_long(unsigned long *v) |
142 | static inline bool rdseed_int(unsigned int *v) |
143 | { |
144 | bool ok; |
145 | - asm volatile(RDSEED_INT "\n\t" |
146 | + asm volatile(RDSEED_INT |
147 | CC_SET(c) |
148 | : CC_OUT(c) (ok), "=a" (*v)); |
149 | return ok; |
150 | diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h |
151 | index 68557f52b9619..fb402d4c45082 100644 |
152 | --- a/arch/x86/include/asm/bitops.h |
153 | +++ b/arch/x86/include/asm/bitops.h |
154 | @@ -77,7 +77,7 @@ set_bit(long nr, volatile unsigned long *addr) |
155 | : "iq" ((u8)CONST_MASK(nr)) |
156 | : "memory"); |
157 | } else { |
158 | - asm volatile(LOCK_PREFIX "bts %1,%0" |
159 | + asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0" |
160 | : BITOP_ADDR(addr) : "Ir" (nr) : "memory"); |
161 | } |
162 | } |
163 | @@ -93,7 +93,7 @@ set_bit(long nr, volatile unsigned long *addr) |
164 | */ |
165 | static __always_inline void __set_bit(long nr, volatile unsigned long *addr) |
166 | { |
167 | - asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory"); |
168 | + asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory"); |
169 | } |
170 | |
171 | /** |
172 | @@ -114,7 +114,7 @@ clear_bit(long nr, volatile unsigned long *addr) |
173 | : CONST_MASK_ADDR(nr, addr) |
174 | : "iq" ((u8)~CONST_MASK(nr))); |
175 | } else { |
176 | - asm volatile(LOCK_PREFIX "btr %1,%0" |
177 | + asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0" |
178 | : BITOP_ADDR(addr) |
179 | : "Ir" (nr)); |
180 | } |
181 | @@ -136,7 +136,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad |
182 | |
183 | static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) |
184 | { |
185 | - asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); |
186 | + asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr)); |
187 | } |
188 | |
189 | /* |
190 | @@ -168,7 +168,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long * |
191 | */ |
192 | static __always_inline void __change_bit(long nr, volatile unsigned long *addr) |
193 | { |
194 | - asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); |
195 | + asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr)); |
196 | } |
197 | |
198 | /** |
199 | @@ -187,7 +187,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr) |
200 | : CONST_MASK_ADDR(nr, addr) |
201 | : "iq" ((u8)CONST_MASK(nr))); |
202 | } else { |
203 | - asm volatile(LOCK_PREFIX "btc %1,%0" |
204 | + asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0" |
205 | : BITOP_ADDR(addr) |
206 | : "Ir" (nr)); |
207 | } |
208 | @@ -203,7 +203,8 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr) |
209 | */ |
210 | static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr) |
211 | { |
212 | - GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c); |
213 | + GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), |
214 | + *addr, "Ir", nr, "%0", c); |
215 | } |
216 | |
217 | /** |
218 | @@ -232,7 +233,7 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * |
219 | { |
220 | bool oldbit; |
221 | |
222 | - asm("bts %2,%1\n\t" |
223 | + asm(__ASM_SIZE(bts) " %2,%1" |
224 | CC_SET(c) |
225 | : CC_OUT(c) (oldbit), ADDR |
226 | : "Ir" (nr)); |
227 | @@ -249,7 +250,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * |
228 | */ |
229 | static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) |
230 | { |
231 | - GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c); |
232 | + GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), |
233 | + *addr, "Ir", nr, "%0", c); |
234 | } |
235 | |
236 | /** |
237 | @@ -272,7 +274,7 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long |
238 | { |
239 | bool oldbit; |
240 | |
241 | - asm volatile("btr %2,%1\n\t" |
242 | + asm volatile(__ASM_SIZE(btr) " %2,%1" |
243 | CC_SET(c) |
244 | : CC_OUT(c) (oldbit), ADDR |
245 | : "Ir" (nr)); |
246 | @@ -284,7 +286,7 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon |
247 | { |
248 | bool oldbit; |
249 | |
250 | - asm volatile("btc %2,%1\n\t" |
251 | + asm volatile(__ASM_SIZE(btc) " %2,%1" |
252 | CC_SET(c) |
253 | : CC_OUT(c) (oldbit), ADDR |
254 | : "Ir" (nr) : "memory"); |
255 | @@ -302,7 +304,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon |
256 | */ |
257 | static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr) |
258 | { |
259 | - GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c); |
260 | + GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), |
261 | + *addr, "Ir", nr, "%0", c); |
262 | } |
263 | |
264 | static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr) |
265 | @@ -315,7 +318,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l |
266 | { |
267 | bool oldbit; |
268 | |
269 | - asm volatile("bt %2,%1\n\t" |
270 | + asm volatile(__ASM_SIZE(bt) " %2,%1" |
271 | CC_SET(c) |
272 | : CC_OUT(c) (oldbit) |
273 | : "m" (*(unsigned long *)addr), "Ir" (nr)); |
274 | diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h |
275 | index 2cb5d0f13641a..f7745ef149c08 100644 |
276 | --- a/arch/x86/include/asm/percpu.h |
277 | +++ b/arch/x86/include/asm/percpu.h |
278 | @@ -536,7 +536,7 @@ static inline bool x86_this_cpu_variable_test_bit(int nr, |
279 | { |
280 | bool oldbit; |
281 | |
282 | - asm volatile("bt "__percpu_arg(2)",%1\n\t" |
283 | + asm volatile("btl "__percpu_arg(2)",%1" |
284 | CC_SET(c) |
285 | : CC_OUT(c) (oldbit) |
286 | : "m" (*(unsigned long __percpu *)addr), "Ir" (nr)); |
287 | diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c |
288 | index 67881e5517fbf..2df407b2b0da7 100644 |
289 | --- a/drivers/gpu/drm/imx/imx-ldb.c |
290 | +++ b/drivers/gpu/drm/imx/imx-ldb.c |
291 | @@ -317,6 +317,7 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder) |
292 | { |
293 | struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); |
294 | struct imx_ldb *ldb = imx_ldb_ch->ldb; |
295 | + int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; |
296 | int mux, ret; |
297 | |
298 | /* |
299 | @@ -333,14 +334,14 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder) |
300 | |
301 | drm_panel_disable(imx_ldb_ch->panel); |
302 | |
303 | - if (imx_ldb_ch == &ldb->channel[0]) |
304 | + if (imx_ldb_ch == &ldb->channel[0] || dual) |
305 | ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK; |
306 | - else if (imx_ldb_ch == &ldb->channel[1]) |
307 | + if (imx_ldb_ch == &ldb->channel[1] || dual) |
308 | ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK; |
309 | |
310 | regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl); |
311 | |
312 | - if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) { |
313 | + if (dual) { |
314 | clk_disable_unprepare(ldb->clk[0]); |
315 | clk_disable_unprepare(ldb->clk[1]); |
316 | } |
317 | diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c |
318 | index 5cbf17aa84439..597ecae02c405 100644 |
319 | --- a/drivers/input/mouse/psmouse-base.c |
320 | +++ b/drivers/input/mouse/psmouse-base.c |
321 | @@ -1909,7 +1909,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp) |
322 | { |
323 | int type = *((unsigned int *)kp->arg); |
324 | |
325 | - return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name); |
326 | + return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name); |
327 | } |
328 | |
329 | static int __init psmouse_init(void) |
330 | diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c |
331 | index 6d42dcfd4825b..e7bdfc4e4aa83 100644 |
332 | --- a/drivers/media/pci/ttpci/budget-core.c |
333 | +++ b/drivers/media/pci/ttpci/budget-core.c |
334 | @@ -386,20 +386,25 @@ static int budget_register(struct budget *budget) |
335 | ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend); |
336 | |
337 | if (ret < 0) |
338 | - return ret; |
339 | + goto err_release_dmx; |
340 | |
341 | budget->mem_frontend.source = DMX_MEMORY_FE; |
342 | ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend); |
343 | if (ret < 0) |
344 | - return ret; |
345 | + goto err_release_dmx; |
346 | |
347 | ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend); |
348 | if (ret < 0) |
349 | - return ret; |
350 | + goto err_release_dmx; |
351 | |
352 | dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx); |
353 | |
354 | return 0; |
355 | + |
356 | +err_release_dmx: |
357 | + dvb_dmxdev_release(&budget->dmxdev); |
358 | + dvb_dmx_release(&budget->demux); |
359 | + return ret; |
360 | } |
361 | |
362 | static void budget_unregister(struct budget *budget) |
363 | diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c |
364 | index c2c68988e38ac..9884b34d6f406 100644 |
365 | --- a/drivers/media/platform/davinci/vpss.c |
366 | +++ b/drivers/media/platform/davinci/vpss.c |
367 | @@ -519,19 +519,31 @@ static void vpss_exit(void) |
368 | |
369 | static int __init vpss_init(void) |
370 | { |
371 | + int ret; |
372 | + |
373 | if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control")) |
374 | return -EBUSY; |
375 | |
376 | oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4); |
377 | if (unlikely(!oper_cfg.vpss_regs_base2)) { |
378 | - release_mem_region(VPSS_CLK_CTRL, 4); |
379 | - return -ENOMEM; |
380 | + ret = -ENOMEM; |
381 | + goto err_ioremap; |
382 | } |
383 | |
384 | writel(VPSS_CLK_CTRL_VENCCLKEN | |
385 | - VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2); |
386 | + VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2); |
387 | + |
388 | + ret = platform_driver_register(&vpss_driver); |
389 | + if (ret) |
390 | + goto err_pd_register; |
391 | + |
392 | + return 0; |
393 | |
394 | - return platform_driver_register(&vpss_driver); |
395 | +err_pd_register: |
396 | + iounmap(oper_cfg.vpss_regs_base2); |
397 | +err_ioremap: |
398 | + release_mem_region(VPSS_CLK_CTRL, 4); |
399 | + return ret; |
400 | } |
401 | subsys_initcall(vpss_init); |
402 | module_exit(vpss_exit); |
403 | diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c |
404 | index 060f9b1769298..c387be5c926b7 100644 |
405 | --- a/drivers/net/dsa/b53/b53_common.c |
406 | +++ b/drivers/net/dsa/b53/b53_common.c |
407 | @@ -1175,6 +1175,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, |
408 | return ret; |
409 | |
410 | switch (ret) { |
411 | + case -ETIMEDOUT: |
412 | + return ret; |
413 | case -ENOSPC: |
414 | dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n", |
415 | addr, vid); |
416 | diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c |
417 | index 8df32398d3435..9b3ea0406e0d5 100644 |
418 | --- a/drivers/net/ethernet/freescale/fec_main.c |
419 | +++ b/drivers/net/ethernet/freescale/fec_main.c |
420 | @@ -3505,11 +3505,11 @@ failed_mii_init: |
421 | failed_irq: |
422 | failed_init: |
423 | fec_ptp_stop(pdev); |
424 | - if (fep->reg_phy) |
425 | - regulator_disable(fep->reg_phy); |
426 | failed_reset: |
427 | pm_runtime_put_noidle(&pdev->dev); |
428 | pm_runtime_disable(&pdev->dev); |
429 | + if (fep->reg_phy) |
430 | + regulator_disable(fep->reg_phy); |
431 | failed_regulator: |
432 | failed_clk_ipg: |
433 | fec_enet_clk_enable(ndev, false); |
434 | diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h |
435 | index 67e396b2b3472..0e75c3a34fe7c 100644 |
436 | --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h |
437 | +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h |
438 | @@ -1107,7 +1107,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { |
439 | #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 |
440 | #define I40E_AQC_SET_VSI_DEFAULT 0x08 |
441 | #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 |
442 | -#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 |
443 | +#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000 |
444 | __le16 seid; |
445 | #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF |
446 | __le16 vlan_tag; |
447 | diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c |
448 | index 2154a34c1dd80..09b47088dcc2b 100644 |
449 | --- a/drivers/net/ethernet/intel/i40e/i40e_common.c |
450 | +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c |
451 | @@ -1922,6 +1922,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, |
452 | return status; |
453 | } |
454 | |
455 | +/** |
456 | + * i40e_is_aq_api_ver_ge |
457 | + * @aq: pointer to AdminQ info containing HW API version to compare |
458 | + * @maj: API major value |
459 | + * @min: API minor value |
460 | + * |
461 | + * Assert whether current HW API version is greater/equal than provided. |
462 | + **/ |
463 | +static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj, |
464 | + u16 min) |
465 | +{ |
466 | + return (aq->api_maj_ver > maj || |
467 | + (aq->api_maj_ver == maj && aq->api_min_ver >= min)); |
468 | +} |
469 | + |
470 | /** |
471 | * i40e_aq_add_vsi |
472 | * @hw: pointer to the hw struct |
473 | @@ -2047,18 +2062,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, |
474 | |
475 | if (set) { |
476 | flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; |
477 | - if (rx_only_promisc && |
478 | - (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || |
479 | - (hw->aq.api_maj_ver > 1))) |
480 | - flags |= I40E_AQC_SET_VSI_PROMISC_TX; |
481 | + if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) |
482 | + flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; |
483 | } |
484 | |
485 | cmd->promiscuous_flags = cpu_to_le16(flags); |
486 | |
487 | cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); |
488 | - if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || |
489 | - (hw->aq.api_maj_ver > 1)) |
490 | - cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX); |
491 | + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) |
492 | + cmd->valid_flags |= |
493 | + cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); |
494 | |
495 | cmd->seid = cpu_to_le16(seid); |
496 | status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); |
497 | @@ -2155,11 +2168,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, |
498 | i40e_fill_default_direct_cmd_desc(&desc, |
499 | i40e_aqc_opc_set_vsi_promiscuous_modes); |
500 | |
501 | - if (enable) |
502 | + if (enable) { |
503 | flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; |
504 | + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) |
505 | + flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; |
506 | + } |
507 | |
508 | cmd->promiscuous_flags = cpu_to_le16(flags); |
509 | cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); |
510 | + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) |
511 | + cmd->valid_flags |= |
512 | + cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); |
513 | cmd->seid = cpu_to_le16(seid); |
514 | cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); |
515 | |
516 | diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c |
517 | index 880a9068ca126..ef06af4e3611d 100644 |
518 | --- a/drivers/scsi/libfc/fc_disc.c |
519 | +++ b/drivers/scsi/libfc/fc_disc.c |
520 | @@ -595,8 +595,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, |
521 | mutex_lock(&disc->disc_mutex); |
522 | if (PTR_ERR(fp) == -FC_EX_CLOSED) |
523 | goto out; |
524 | - if (IS_ERR(fp)) |
525 | - goto redisc; |
526 | + if (IS_ERR(fp)) { |
527 | + mutex_lock(&disc->disc_mutex); |
528 | + fc_disc_restart(disc); |
529 | + mutex_unlock(&disc->disc_mutex); |
530 | + goto out; |
531 | + } |
532 | |
533 | cp = fc_frame_payload_get(fp, sizeof(*cp)); |
534 | if (!cp) |
535 | @@ -621,7 +625,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, |
536 | new_rdata->disc_id = disc->disc_id; |
537 | lport->tt.rport_login(new_rdata); |
538 | } |
539 | - goto out; |
540 | + goto free_fp; |
541 | } |
542 | rdata->disc_id = disc->disc_id; |
543 | lport->tt.rport_login(rdata); |
544 | @@ -635,6 +639,8 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, |
545 | redisc: |
546 | fc_disc_restart(disc); |
547 | } |
548 | +free_fp: |
549 | + fc_frame_free(fp); |
550 | out: |
551 | mutex_unlock(&disc->disc_mutex); |
552 | kref_put(&rdata->kref, lport->tt.rport_destroy); |
553 | diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h |
554 | index 71f73d1d1ad1f..6c944fbefd40a 100644 |
555 | --- a/drivers/scsi/ufs/ufs_quirks.h |
556 | +++ b/drivers/scsi/ufs/ufs_quirks.h |
557 | @@ -21,6 +21,7 @@ |
558 | #define UFS_ANY_VENDOR 0xFFFF |
559 | #define UFS_ANY_MODEL "ANY_MODEL" |
560 | |
561 | +#define UFS_VENDOR_MICRON 0x12C |
562 | #define UFS_VENDOR_TOSHIBA 0x198 |
563 | #define UFS_VENDOR_SAMSUNG 0x1CE |
564 | #define UFS_VENDOR_SKHYNIX 0x1AD |
565 | diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c |
566 | index af4b0a2021d6c..a7f520581cb0f 100644 |
567 | --- a/drivers/scsi/ufs/ufshcd.c |
568 | +++ b/drivers/scsi/ufs/ufshcd.c |
569 | @@ -178,6 +178,8 @@ ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl) |
570 | |
571 | static struct ufs_dev_fix ufs_fixups[] = { |
572 | /* UFS cards deviations table */ |
573 | + UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL, |
574 | + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), |
575 | UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, |
576 | UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), |
577 | UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ), |
578 | diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c |
579 | index e459cd7302e27..5cad9f41c238b 100644 |
580 | --- a/drivers/virtio/virtio_ring.c |
581 | +++ b/drivers/virtio/virtio_ring.c |
582 | @@ -785,6 +785,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) |
583 | { |
584 | struct vring_virtqueue *vq = to_vvq(_vq); |
585 | |
586 | + if (unlikely(vq->broken)) |
587 | + return false; |
588 | + |
589 | virtio_mb(vq->weak_barriers); |
590 | return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx); |
591 | } |
592 | diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c |
593 | index 5f6b77ea34fb5..128375ff80b8c 100644 |
594 | --- a/drivers/xen/preempt.c |
595 | +++ b/drivers/xen/preempt.c |
596 | @@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall); |
597 | asmlinkage __visible void xen_maybe_preempt_hcall(void) |
598 | { |
599 | if (unlikely(__this_cpu_read(xen_in_preemptible_hcall) |
600 | - && need_resched())) { |
601 | + && need_resched() && !preempt_count())) { |
602 | /* |
603 | * Clear flag as we may be rescheduled on a different |
604 | * cpu. |
605 | diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h |
606 | index 2bc37d03d4075..abfc090510480 100644 |
607 | --- a/fs/btrfs/ctree.h |
608 | +++ b/fs/btrfs/ctree.h |
609 | @@ -3261,6 +3261,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size); |
610 | int btrfs_parse_options(struct btrfs_root *root, char *options, |
611 | unsigned long new_flags); |
612 | int btrfs_sync_fs(struct super_block *sb, int wait); |
613 | +char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, |
614 | + u64 subvol_objectid); |
615 | |
616 | static inline __printf(2, 3) |
617 | void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) |
618 | diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c |
619 | index 2513a7f533342..92f80ed642194 100644 |
620 | --- a/fs/btrfs/export.c |
621 | +++ b/fs/btrfs/export.c |
622 | @@ -55,9 +55,9 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len, |
623 | return type; |
624 | } |
625 | |
626 | -static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, |
627 | - u64 root_objectid, u32 generation, |
628 | - int check_generation) |
629 | +struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, |
630 | + u64 root_objectid, u32 generation, |
631 | + int check_generation) |
632 | { |
633 | struct btrfs_fs_info *fs_info = btrfs_sb(sb); |
634 | struct btrfs_root *root; |
635 | @@ -150,7 +150,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh, |
636 | return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1); |
637 | } |
638 | |
639 | -static struct dentry *btrfs_get_parent(struct dentry *child) |
640 | +struct dentry *btrfs_get_parent(struct dentry *child) |
641 | { |
642 | struct inode *dir = d_inode(child); |
643 | struct btrfs_root *root = BTRFS_I(dir)->root; |
644 | diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h |
645 | index 074348a95841f..7a305e5549991 100644 |
646 | --- a/fs/btrfs/export.h |
647 | +++ b/fs/btrfs/export.h |
648 | @@ -16,4 +16,9 @@ struct btrfs_fid { |
649 | u64 parent_root_objectid; |
650 | } __attribute__ ((packed)); |
651 | |
652 | +struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, |
653 | + u64 root_objectid, u32 generation, |
654 | + int check_generation); |
655 | +struct dentry *btrfs_get_parent(struct dentry *child); |
656 | + |
657 | #endif |
658 | diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c |
659 | index 9286603a6a98b..3a0cb745164f8 100644 |
660 | --- a/fs/btrfs/super.c |
661 | +++ b/fs/btrfs/super.c |
662 | @@ -948,8 +948,8 @@ out: |
663 | return error; |
664 | } |
665 | |
666 | -static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, |
667 | - u64 subvol_objectid) |
668 | +char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, |
669 | + u64 subvol_objectid) |
670 | { |
671 | struct btrfs_root *root = fs_info->tree_root; |
672 | struct btrfs_root *fs_root; |
673 | @@ -1225,6 +1225,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) |
674 | struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb); |
675 | struct btrfs_root *root = info->tree_root; |
676 | char *compress_type; |
677 | + const char *subvol_name; |
678 | |
679 | if (btrfs_test_opt(info, DEGRADED)) |
680 | seq_puts(seq, ",degraded"); |
681 | @@ -1311,8 +1312,13 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) |
682 | #endif |
683 | seq_printf(seq, ",subvolid=%llu", |
684 | BTRFS_I(d_inode(dentry))->root->root_key.objectid); |
685 | - seq_puts(seq, ",subvol="); |
686 | - seq_dentry(seq, dentry, " \t\n\\"); |
687 | + subvol_name = btrfs_get_subvol_name_from_objectid(info, |
688 | + BTRFS_I(d_inode(dentry))->root->root_key.objectid); |
689 | + if (!IS_ERR(subvol_name)) { |
690 | + seq_puts(seq, ",subvol="); |
691 | + seq_escape(seq, subvol_name, " \t\n\\"); |
692 | + kfree(subvol_name); |
693 | + } |
694 | return 0; |
695 | } |
696 | |
697 | @@ -1430,8 +1436,8 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid, |
698 | goto out; |
699 | } |
700 | } |
701 | - subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb), |
702 | - subvol_objectid); |
703 | + subvol_name = btrfs_get_subvol_name_from_objectid( |
704 | + btrfs_sb(mnt->mnt_sb), subvol_objectid); |
705 | if (IS_ERR(subvol_name)) { |
706 | root = ERR_CAST(subvol_name); |
707 | subvol_name = NULL; |
708 | diff --git a/fs/eventpoll.c b/fs/eventpoll.c |
709 | index a9c0bf8782f53..aad52e1858363 100644 |
710 | --- a/fs/eventpoll.c |
711 | +++ b/fs/eventpoll.c |
712 | @@ -1747,9 +1747,11 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) |
713 | * not already there, and calling reverse_path_check() |
714 | * during ep_insert(). |
715 | */ |
716 | - if (list_empty(&epi->ffd.file->f_tfile_llink)) |
717 | + if (list_empty(&epi->ffd.file->f_tfile_llink)) { |
718 | + get_file(epi->ffd.file); |
719 | list_add(&epi->ffd.file->f_tfile_llink, |
720 | &tfile_check_list); |
721 | + } |
722 | } |
723 | } |
724 | mutex_unlock(&ep->mtx); |
725 | @@ -1793,6 +1795,7 @@ static void clear_tfile_check_list(void) |
726 | file = list_first_entry(&tfile_check_list, struct file, |
727 | f_tfile_llink); |
728 | list_del_init(&file->f_tfile_llink); |
729 | + fput(file); |
730 | } |
731 | INIT_LIST_HEAD(&tfile_check_list); |
732 | } |
733 | @@ -1943,13 +1946,13 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, |
734 | mutex_lock(&epmutex); |
735 | if (is_file_epoll(tf.file)) { |
736 | error = -ELOOP; |
737 | - if (ep_loop_check(ep, tf.file) != 0) { |
738 | - clear_tfile_check_list(); |
739 | + if (ep_loop_check(ep, tf.file) != 0) |
740 | goto error_tgt_fput; |
741 | - } |
742 | - } else |
743 | + } else { |
744 | + get_file(tf.file); |
745 | list_add(&tf.file->f_tfile_llink, |
746 | &tfile_check_list); |
747 | + } |
748 | mutex_lock_nested(&ep->mtx, 0); |
749 | if (is_file_epoll(tf.file)) { |
750 | tep = tf.file->private_data; |
751 | @@ -1973,8 +1976,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, |
752 | error = ep_insert(ep, &epds, tf.file, fd, full_check); |
753 | } else |
754 | error = -EEXIST; |
755 | - if (full_check) |
756 | - clear_tfile_check_list(); |
757 | break; |
758 | case EPOLL_CTL_DEL: |
759 | if (epi) |
760 | @@ -1997,8 +1998,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, |
761 | mutex_unlock(&ep->mtx); |
762 | |
763 | error_tgt_fput: |
764 | - if (full_check) |
765 | + if (full_check) { |
766 | + clear_tfile_check_list(); |
767 | mutex_unlock(&epmutex); |
768 | + } |
769 | |
770 | fdput(tf); |
771 | error_fput: |
772 | diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c |
773 | index 6225ce9f1884c..157dbbe235f90 100644 |
774 | --- a/fs/ext4/namei.c |
775 | +++ b/fs/ext4/namei.c |
776 | @@ -1251,19 +1251,18 @@ static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block) |
777 | } |
778 | |
779 | /* |
780 | - * NOTE! unlike strncmp, ext4_match returns 1 for success, 0 for failure. |
781 | + * Test whether a directory entry matches the filename being searched for. |
782 | * |
783 | - * `len <= EXT4_NAME_LEN' is guaranteed by caller. |
784 | - * `de != NULL' is guaranteed by caller. |
785 | + * Return: %true if the directory entry matches, otherwise %false. |
786 | */ |
787 | -static inline int ext4_match(struct ext4_filename *fname, |
788 | - struct ext4_dir_entry_2 *de) |
789 | +static inline bool ext4_match(const struct ext4_filename *fname, |
790 | + const struct ext4_dir_entry_2 *de) |
791 | { |
792 | const void *name = fname_name(fname); |
793 | u32 len = fname_len(fname); |
794 | |
795 | if (!de->inode) |
796 | - return 0; |
797 | + return false; |
798 | |
799 | #ifdef CONFIG_EXT4_FS_ENCRYPTION |
800 | if (unlikely(!name)) { |
801 | @@ -1295,48 +1294,31 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size, |
802 | struct ext4_dir_entry_2 * de; |
803 | char * dlimit; |
804 | int de_len; |
805 | - int res; |
806 | |
807 | de = (struct ext4_dir_entry_2 *)search_buf; |
808 | dlimit = search_buf + buf_size; |
809 | while ((char *) de < dlimit) { |
810 | /* this code is executed quadratically often */ |
811 | /* do minimal checking `by hand' */ |
812 | - if ((char *) de + de->name_len <= dlimit) { |
813 | - res = ext4_match(fname, de); |
814 | - if (res < 0) { |
815 | - res = -1; |
816 | - goto return_result; |
817 | - } |
818 | - if (res > 0) { |
819 | - /* found a match - just to be sure, do |
820 | - * a full check */ |
821 | - if (ext4_check_dir_entry(dir, NULL, de, bh, |
822 | - bh->b_data, |
823 | - bh->b_size, offset)) { |
824 | - res = -1; |
825 | - goto return_result; |
826 | - } |
827 | - *res_dir = de; |
828 | - res = 1; |
829 | - goto return_result; |
830 | - } |
831 | - |
832 | + if ((char *) de + de->name_len <= dlimit && |
833 | + ext4_match(fname, de)) { |
834 | + /* found a match - just to be sure, do |
835 | + * a full check */ |
836 | + if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf, |
837 | + buf_size, offset)) |
838 | + return -1; |
839 | + *res_dir = de; |
840 | + return 1; |
841 | } |
842 | /* prevent looping on a bad block */ |
843 | de_len = ext4_rec_len_from_disk(de->rec_len, |
844 | dir->i_sb->s_blocksize); |
845 | - if (de_len <= 0) { |
846 | - res = -1; |
847 | - goto return_result; |
848 | - } |
849 | + if (de_len <= 0) |
850 | + return -1; |
851 | offset += de_len; |
852 | de = (struct ext4_dir_entry_2 *) ((char *) de + de_len); |
853 | } |
854 | - |
855 | - res = 0; |
856 | -return_result: |
857 | - return res; |
858 | + return 0; |
859 | } |
860 | |
861 | static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block, |
862 | @@ -1777,7 +1759,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, |
863 | blocksize, hinfo, map); |
864 | map -= count; |
865 | dx_sort_map(map, count); |
866 | - /* Split the existing block in the middle, size-wise */ |
867 | + /* Ensure that neither split block is over half full */ |
868 | size = 0; |
869 | move = 0; |
870 | for (i = count-1; i >= 0; i--) { |
871 | @@ -1787,8 +1769,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, |
872 | size += map[i].size; |
873 | move++; |
874 | } |
875 | - /* map index at which we will split */ |
876 | - split = count - move; |
877 | + /* |
878 | + * map index at which we will split |
879 | + * |
880 | + * If the sum of active entries didn't exceed half the block size, just |
881 | + * split it in half by count; each resulting block will have at least |
882 | + * half the space free. |
883 | + */ |
884 | + if (i > 0) |
885 | + split = count - move; |
886 | + else |
887 | + split = count/2; |
888 | + |
889 | hash2 = map[split].hash; |
890 | continued = hash2 == map[split - 1].hash; |
891 | dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n", |
892 | @@ -1853,24 +1845,15 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode, |
893 | int nlen, rlen; |
894 | unsigned int offset = 0; |
895 | char *top; |
896 | - int res; |
897 | |
898 | de = (struct ext4_dir_entry_2 *)buf; |
899 | top = buf + buf_size - reclen; |
900 | while ((char *) de <= top) { |
901 | if (ext4_check_dir_entry(dir, NULL, de, bh, |
902 | - buf, buf_size, offset)) { |
903 | - res = -EFSCORRUPTED; |
904 | - goto return_result; |
905 | - } |
906 | - /* Provide crypto context and crypto buffer to ext4 match */ |
907 | - res = ext4_match(fname, de); |
908 | - if (res < 0) |
909 | - goto return_result; |
910 | - if (res > 0) { |
911 | - res = -EEXIST; |
912 | - goto return_result; |
913 | - } |
914 | + buf, buf_size, offset)) |
915 | + return -EFSCORRUPTED; |
916 | + if (ext4_match(fname, de)) |
917 | + return -EEXIST; |
918 | nlen = EXT4_DIR_REC_LEN(de->name_len); |
919 | rlen = ext4_rec_len_from_disk(de->rec_len, buf_size); |
920 | if ((de->inode ? rlen - nlen : rlen) >= reclen) |
921 | @@ -1878,15 +1861,11 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode, |
922 | de = (struct ext4_dir_entry_2 *)((char *)de + rlen); |
923 | offset += rlen; |
924 | } |
925 | - |
926 | if ((char *) de > top) |
927 | - res = -ENOSPC; |
928 | - else { |
929 | - *dest_de = de; |
930 | - res = 0; |
931 | - } |
932 | -return_result: |
933 | - return res; |
934 | + return -ENOSPC; |
935 | + |
936 | + *dest_de = de; |
937 | + return 0; |
938 | } |
939 | |
940 | int ext4_insert_dentry(struct inode *dir, |
941 | @@ -2375,7 +2354,7 @@ int ext4_generic_delete_entry(handle_t *handle, |
942 | de = (struct ext4_dir_entry_2 *)entry_buf; |
943 | while (i < buf_size - csum_size) { |
944 | if (ext4_check_dir_entry(dir, NULL, de, bh, |
945 | - bh->b_data, bh->b_size, i)) |
946 | + entry_buf, buf_size, i)) |
947 | return -EFSCORRUPTED; |
948 | if (de == de_del) { |
949 | if (pde) |
950 | diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c |
951 | index e5a6deb38e1e1..f4a5ec92f5dc7 100644 |
952 | --- a/fs/jffs2/dir.c |
953 | +++ b/fs/jffs2/dir.c |
954 | @@ -590,10 +590,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry) |
955 | int ret; |
956 | uint32_t now = get_seconds(); |
957 | |
958 | + mutex_lock(&f->sem); |
959 | for (fd = f->dents ; fd; fd = fd->next) { |
960 | - if (fd->ino) |
961 | + if (fd->ino) { |
962 | + mutex_unlock(&f->sem); |
963 | return -ENOTEMPTY; |
964 | + } |
965 | } |
966 | + mutex_unlock(&f->sem); |
967 | |
968 | ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, |
969 | dentry->d_name.len, f, now); |
970 | diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c |
971 | index f86f51f99aceb..1dcadd22b440d 100644 |
972 | --- a/fs/romfs/storage.c |
973 | +++ b/fs/romfs/storage.c |
974 | @@ -221,10 +221,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos, |
975 | size_t limit; |
976 | |
977 | limit = romfs_maxsize(sb); |
978 | - if (pos >= limit) |
979 | + if (pos >= limit || buflen > limit - pos) |
980 | return -EIO; |
981 | - if (buflen > limit - pos) |
982 | - buflen = limit - pos; |
983 | |
984 | #ifdef CONFIG_ROMFS_ON_MTD |
985 | if (sb->s_mtd) |
986 | diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h |
987 | index d04637181ef21..980c9429abec5 100644 |
988 | --- a/fs/xfs/xfs_sysfs.h |
989 | +++ b/fs/xfs/xfs_sysfs.h |
990 | @@ -44,9 +44,11 @@ xfs_sysfs_init( |
991 | struct xfs_kobj *parent_kobj, |
992 | const char *name) |
993 | { |
994 | + struct kobject *parent; |
995 | + |
996 | + parent = parent_kobj ? &parent_kobj->kobject : NULL; |
997 | init_completion(&kobj->complete); |
998 | - return kobject_init_and_add(&kobj->kobject, ktype, |
999 | - &parent_kobj->kobject, "%s", name); |
1000 | + return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name); |
1001 | } |
1002 | |
1003 | static inline void |
1004 | diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c |
1005 | index c3d547211d160..9c42e50a5cb7e 100644 |
1006 | --- a/fs/xfs/xfs_trans_dquot.c |
1007 | +++ b/fs/xfs/xfs_trans_dquot.c |
1008 | @@ -669,7 +669,7 @@ xfs_trans_dqresv( |
1009 | } |
1010 | } |
1011 | if (ninos > 0) { |
1012 | - total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos; |
1013 | + total_count = dqp->q_res_icount + ninos; |
1014 | timer = be32_to_cpu(dqp->q_core.d_itimer); |
1015 | warns = be16_to_cpu(dqp->q_core.d_iwarns); |
1016 | warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit; |
1017 | diff --git a/kernel/relay.c b/kernel/relay.c |
1018 | index 5034cb3a339fb..3623ad9b529c2 100644 |
1019 | --- a/kernel/relay.c |
1020 | +++ b/kernel/relay.c |
1021 | @@ -196,6 +196,7 @@ free_buf: |
1022 | static void relay_destroy_channel(struct kref *kref) |
1023 | { |
1024 | struct rchan *chan = container_of(kref, struct rchan, kref); |
1025 | + free_percpu(chan->buf); |
1026 | kfree(chan); |
1027 | } |
1028 | |
1029 | diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c |
1030 | index 5fe23f0ee7db6..d1e007c729235 100644 |
1031 | --- a/kernel/trace/trace_hwlat.c |
1032 | +++ b/kernel/trace/trace_hwlat.c |
1033 | @@ -268,24 +268,14 @@ out: |
1034 | static struct cpumask save_cpumask; |
1035 | static bool disable_migrate; |
1036 | |
1037 | -static void move_to_next_cpu(bool initmask) |
1038 | +static void move_to_next_cpu(void) |
1039 | { |
1040 | - static struct cpumask *current_mask; |
1041 | + struct cpumask *current_mask = &save_cpumask; |
1042 | + struct trace_array *tr = hwlat_trace; |
1043 | int next_cpu; |
1044 | |
1045 | if (disable_migrate) |
1046 | return; |
1047 | - |
1048 | - /* Just pick the first CPU on first iteration */ |
1049 | - if (initmask) { |
1050 | - current_mask = &save_cpumask; |
1051 | - get_online_cpus(); |
1052 | - cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask); |
1053 | - put_online_cpus(); |
1054 | - next_cpu = cpumask_first(current_mask); |
1055 | - goto set_affinity; |
1056 | - } |
1057 | - |
1058 | /* |
1059 | * If for some reason the user modifies the CPU affinity |
1060 | * of this thread, than stop migrating for the duration |
1061 | @@ -295,14 +285,13 @@ static void move_to_next_cpu(bool initmask) |
1062 | goto disable; |
1063 | |
1064 | get_online_cpus(); |
1065 | - cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask); |
1066 | + cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); |
1067 | next_cpu = cpumask_next(smp_processor_id(), current_mask); |
1068 | put_online_cpus(); |
1069 | |
1070 | if (next_cpu >= nr_cpu_ids) |
1071 | next_cpu = cpumask_first(current_mask); |
1072 | |
1073 | - set_affinity: |
1074 | if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */ |
1075 | goto disable; |
1076 | |
1077 | @@ -332,12 +321,10 @@ static void move_to_next_cpu(bool initmask) |
1078 | static int kthread_fn(void *data) |
1079 | { |
1080 | u64 interval; |
1081 | - bool initmask = true; |
1082 | |
1083 | while (!kthread_should_stop()) { |
1084 | |
1085 | - move_to_next_cpu(initmask); |
1086 | - initmask = false; |
1087 | + move_to_next_cpu(); |
1088 | |
1089 | local_irq_disable(); |
1090 | get_sample(); |
1091 | @@ -368,13 +355,27 @@ static int kthread_fn(void *data) |
1092 | */ |
1093 | static int start_kthread(struct trace_array *tr) |
1094 | { |
1095 | + struct cpumask *current_mask = &save_cpumask; |
1096 | struct task_struct *kthread; |
1097 | + int next_cpu; |
1098 | + |
1099 | + /* Just pick the first CPU on first iteration */ |
1100 | + current_mask = &save_cpumask; |
1101 | + get_online_cpus(); |
1102 | + cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); |
1103 | + put_online_cpus(); |
1104 | + next_cpu = cpumask_first(current_mask); |
1105 | |
1106 | kthread = kthread_create(kthread_fn, NULL, "hwlatd"); |
1107 | if (IS_ERR(kthread)) { |
1108 | pr_err(BANNER "could not start sampling thread\n"); |
1109 | return -ENOMEM; |
1110 | } |
1111 | + |
1112 | + cpumask_clear(current_mask); |
1113 | + cpumask_set_cpu(next_cpu, current_mask); |
1114 | + sched_setaffinity(kthread->pid, current_mask); |
1115 | + |
1116 | hwlat_kthread = kthread; |
1117 | wake_up_process(kthread); |
1118 | |
1119 | diff --git a/mm/hugetlb.c b/mm/hugetlb.c |
1120 | index 9914da93069e8..2c22ea7a20131 100644 |
1121 | --- a/mm/hugetlb.c |
1122 | +++ b/mm/hugetlb.c |
1123 | @@ -4380,25 +4380,21 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) |
1124 | void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, |
1125 | unsigned long *start, unsigned long *end) |
1126 | { |
1127 | - unsigned long check_addr = *start; |
1128 | + unsigned long a_start, a_end; |
1129 | |
1130 | if (!(vma->vm_flags & VM_MAYSHARE)) |
1131 | return; |
1132 | |
1133 | - for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) { |
1134 | - unsigned long a_start = check_addr & PUD_MASK; |
1135 | - unsigned long a_end = a_start + PUD_SIZE; |
1136 | + /* Extend the range to be PUD aligned for a worst case scenario */ |
1137 | + a_start = ALIGN_DOWN(*start, PUD_SIZE); |
1138 | + a_end = ALIGN(*end, PUD_SIZE); |
1139 | |
1140 | - /* |
1141 | - * If sharing is possible, adjust start/end if necessary. |
1142 | - */ |
1143 | - if (range_in_vma(vma, a_start, a_end)) { |
1144 | - if (a_start < *start) |
1145 | - *start = a_start; |
1146 | - if (a_end > *end) |
1147 | - *end = a_end; |
1148 | - } |
1149 | - } |
1150 | + /* |
1151 | + * Intersect the range with the vma range, since pmd sharing won't be |
1152 | + * across vma after all |
1153 | + */ |
1154 | + *start = max(vma->vm_start, a_start); |
1155 | + *end = min(vma->vm_end, a_end); |
1156 | } |
1157 | |
1158 | /* |
1159 | diff --git a/mm/khugepaged.c b/mm/khugepaged.c |
1160 | index 3080c6415493c..1538e5e5c628a 100644 |
1161 | --- a/mm/khugepaged.c |
1162 | +++ b/mm/khugepaged.c |
1163 | @@ -391,7 +391,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm, |
1164 | |
1165 | static inline int khugepaged_test_exit(struct mm_struct *mm) |
1166 | { |
1167 | - return atomic_read(&mm->mm_users) == 0; |
1168 | + return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm); |
1169 | } |
1170 | |
1171 | int __khugepaged_enter(struct mm_struct *mm) |
1172 | @@ -404,7 +404,7 @@ int __khugepaged_enter(struct mm_struct *mm) |
1173 | return -ENOMEM; |
1174 | |
1175 | /* __khugepaged_exit() must not run from under us */ |
1176 | - VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); |
1177 | + VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm); |
1178 | if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { |
1179 | free_mm_slot(mm_slot); |
1180 | return 0; |
1181 | @@ -1004,9 +1004,6 @@ static void collapse_huge_page(struct mm_struct *mm, |
1182 | * handled by the anon_vma lock + PG_lock. |
1183 | */ |
1184 | down_write(&mm->mmap_sem); |
1185 | - result = SCAN_ANY_PROCESS; |
1186 | - if (!mmget_still_valid(mm)) |
1187 | - goto out; |
1188 | result = hugepage_vma_revalidate(mm, address, &vma); |
1189 | if (result) |
1190 | goto out; |
1191 | diff --git a/mm/page_alloc.c b/mm/page_alloc.c |
1192 | index f394dd87fa033..458523bc73916 100644 |
1193 | --- a/mm/page_alloc.c |
1194 | +++ b/mm/page_alloc.c |
1195 | @@ -1116,6 +1116,11 @@ static void free_pcppages_bulk(struct zone *zone, int count, |
1196 | if (nr_scanned) |
1197 | __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned); |
1198 | |
1199 | + /* |
1200 | + * Ensure proper count is passed which otherwise would stuck in the |
1201 | + * below while (list_empty(list)) loop. |
1202 | + */ |
1203 | + count = min(pcp->count, count); |
1204 | while (count) { |
1205 | struct page *page; |
1206 | struct list_head *list; |
1207 | @@ -6782,7 +6787,7 @@ int __meminit init_per_zone_wmark_min(void) |
1208 | |
1209 | return 0; |
1210 | } |
1211 | -core_initcall(init_per_zone_wmark_min) |
1212 | +postcore_initcall(init_per_zone_wmark_min) |
1213 | |
1214 | /* |
1215 | * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so |
1216 | diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c |
1217 | index e83e314a76a53..dc1b9a32c0575 100644 |
1218 | --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c |
1219 | +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c |
1220 | @@ -339,7 +339,7 @@ static int sst_media_open(struct snd_pcm_substream *substream, |
1221 | |
1222 | ret_val = power_up_sst(stream); |
1223 | if (ret_val < 0) |
1224 | - return ret_val; |
1225 | + goto out_power_up; |
1226 | |
1227 | /* Make sure, that the period size is always even */ |
1228 | snd_pcm_hw_constraint_step(substream->runtime, 0, |
1229 | @@ -348,8 +348,9 @@ static int sst_media_open(struct snd_pcm_substream *substream, |
1230 | return snd_pcm_hw_constraint_integer(runtime, |
1231 | SNDRV_PCM_HW_PARAM_PERIODS); |
1232 | out_ops: |
1233 | - kfree(stream); |
1234 | mutex_unlock(&sst_lock); |
1235 | +out_power_up: |
1236 | + kfree(stream); |
1237 | return ret_val; |
1238 | } |
1239 | |
1240 | diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c |
1241 | index 7d0d44b4f3d5c..863f668a07355 100644 |
1242 | --- a/tools/perf/util/probe-finder.c |
1243 | +++ b/tools/perf/util/probe-finder.c |
1244 | @@ -1351,7 +1351,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg, |
1245 | tf.ntevs = 0; |
1246 | |
1247 | ret = debuginfo__find_probes(dbg, &tf.pf); |
1248 | - if (ret < 0) { |
1249 | + if (ret < 0 || tf.ntevs == 0) { |
1250 | for (i = 0; i < tf.ntevs; i++) |
1251 | clear_probe_trace_event(&tf.tevs[i]); |
1252 | zfree(tevs); |