Annotation of /trunk/kernel-alx/patches-4.9/0132-4.9.33-all-fixes.patch
Revision 2956 - Mon Jul 24 12:03:46 2017 UTC (7 years, 2 months ago) by niro
File size: 179095 byte(s)
-added patches-4.9
1 | niro | 2956 | diff --git a/Makefile b/Makefile |
2 | index 3d8781997968..8470d81d5cc2 100644 | ||
3 | --- a/Makefile | ||
4 | +++ b/Makefile | ||
5 | @@ -1,6 +1,6 @@ | ||
6 | VERSION = 4 | ||
7 | PATCHLEVEL = 9 | ||
8 | -SUBLEVEL = 32 | ||
9 | +SUBLEVEL = 33 | ||
10 | EXTRAVERSION = | ||
11 | NAME = Roaring Lionus | ||
12 | |||
13 | @@ -797,7 +797,7 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types) | ||
14 | KBUILD_ARFLAGS := $(call ar-option,D) | ||
15 | |||
16 | # check for 'asm goto' | ||
17 | -ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y) | ||
18 | +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) | ||
19 | KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO | ||
20 | KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO | ||
21 | endif | ||
22 | diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S | ||
23 | index 689dd867fdff..8b90d25a15cc 100644 | ||
24 | --- a/arch/arc/kernel/head.S | ||
25 | +++ b/arch/arc/kernel/head.S | ||
26 | @@ -71,14 +71,14 @@ ENTRY(stext) | ||
27 | GET_CPU_ID r5 | ||
28 | cmp r5, 0 | ||
29 | mov.nz r0, r5 | ||
30 | -#ifdef CONFIG_ARC_SMP_HALT_ON_RESET | ||
31 | - ; Non-Master can proceed as system would be booted sufficiently | ||
32 | - jnz first_lines_of_secondary | ||
33 | -#else | ||
34 | + bz .Lmaster_proceed | ||
35 | + | ||
36 | ; Non-Masters wait for Master to boot enough and bring them up | ||
37 | - jnz arc_platform_smp_wait_to_boot | ||
38 | -#endif | ||
39 | - ; Master falls thru | ||
40 | + ; when they resume, tail-call to entry point | ||
41 | + mov blink, @first_lines_of_secondary | ||
42 | + j arc_platform_smp_wait_to_boot | ||
43 | + | ||
44 | +.Lmaster_proceed: | ||
45 | #endif | ||
46 | |||
47 | ; Clear BSS before updating any globals | ||
48 | diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c | ||
49 | index 88674d972c9d..2afbafadb6ab 100644 | ||
50 | --- a/arch/arc/kernel/smp.c | ||
51 | +++ b/arch/arc/kernel/smp.c | ||
52 | @@ -90,22 +90,37 @@ void __init smp_cpus_done(unsigned int max_cpus) | ||
53 | */ | ||
54 | static volatile int wake_flag; | ||
55 | |||
56 | +#ifdef CONFIG_ISA_ARCOMPACT | ||
57 | + | ||
58 | +#define __boot_read(f) f | ||
59 | +#define __boot_write(f, v) f = v | ||
60 | + | ||
61 | +#else | ||
62 | + | ||
63 | +#define __boot_read(f) arc_read_uncached_32(&f) | ||
64 | +#define __boot_write(f, v) arc_write_uncached_32(&f, v) | ||
65 | + | ||
66 | +#endif | ||
67 | + | ||
68 | static void arc_default_smp_cpu_kick(int cpu, unsigned long pc) | ||
69 | { | ||
70 | BUG_ON(cpu == 0); | ||
71 | - wake_flag = cpu; | ||
72 | + | ||
73 | + __boot_write(wake_flag, cpu); | ||
74 | } | ||
75 | |||
76 | void arc_platform_smp_wait_to_boot(int cpu) | ||
77 | { | ||
78 | - while (wake_flag != cpu) | ||
79 | + /* for halt-on-reset, we've waited already */ | ||
80 | + if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET)) | ||
81 | + return; | ||
82 | + | ||
83 | + while (__boot_read(wake_flag) != cpu) | ||
84 | ; | ||
85 | |||
86 | - wake_flag = 0; | ||
87 | - __asm__ __volatile__("j @first_lines_of_secondary \n"); | ||
88 | + __boot_write(wake_flag, 0); | ||
89 | } | ||
90 | |||
91 | - | ||
92 | const char *arc_platform_smp_cpuinfo(void) | ||
93 | { | ||
94 | return plat_smp_ops.info ? : ""; | ||
95 | diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig | ||
96 | index ea316c4b890e..d3f1768840e2 100644 | ||
97 | --- a/arch/arm/configs/ezx_defconfig | ||
98 | +++ b/arch/arm/configs/ezx_defconfig | ||
99 | @@ -64,8 +64,8 @@ CONFIG_NETFILTER=y | ||
100 | CONFIG_NETFILTER_NETLINK_QUEUE=m | ||
101 | CONFIG_NF_CONNTRACK=m | ||
102 | CONFIG_NF_CONNTRACK_EVENTS=y | ||
103 | -CONFIG_NF_CT_PROTO_SCTP=m | ||
104 | -CONFIG_NF_CT_PROTO_UDPLITE=m | ||
105 | +CONFIG_NF_CT_PROTO_SCTP=y | ||
106 | +CONFIG_NF_CT_PROTO_UDPLITE=y | ||
107 | CONFIG_NF_CONNTRACK_AMANDA=m | ||
108 | CONFIG_NF_CONNTRACK_FTP=m | ||
109 | CONFIG_NF_CONNTRACK_H323=m | ||
110 | diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig | ||
111 | index 18e59feaa307..7f479cdb3479 100644 | ||
112 | --- a/arch/arm/configs/imote2_defconfig | ||
113 | +++ b/arch/arm/configs/imote2_defconfig | ||
114 | @@ -56,8 +56,8 @@ CONFIG_NETFILTER=y | ||
115 | CONFIG_NETFILTER_NETLINK_QUEUE=m | ||
116 | CONFIG_NF_CONNTRACK=m | ||
117 | CONFIG_NF_CONNTRACK_EVENTS=y | ||
118 | -CONFIG_NF_CT_PROTO_SCTP=m | ||
119 | -CONFIG_NF_CT_PROTO_UDPLITE=m | ||
120 | +CONFIG_NF_CT_PROTO_SCTP=y | ||
121 | +CONFIG_NF_CT_PROTO_UDPLITE=y | ||
122 | CONFIG_NF_CONNTRACK_AMANDA=m | ||
123 | CONFIG_NF_CONNTRACK_FTP=m | ||
124 | CONFIG_NF_CONNTRACK_H323=m | ||
125 | diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h | ||
126 | index 1c2a5e264fc7..e93c9494503a 100644 | ||
127 | --- a/arch/frv/include/asm/atomic.h | ||
128 | +++ b/arch/frv/include/asm/atomic.h | ||
129 | @@ -139,7 +139,7 @@ static inline void atomic64_dec(atomic64_t *v) | ||
130 | #define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0) | ||
131 | #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) | ||
132 | #define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0) | ||
133 | - | ||
134 | +#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | ||
135 | |||
136 | #define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new)) | ||
137 | #define atomic_xchg(v, new) (xchg(&(v)->counter, new)) | ||
138 | @@ -161,6 +161,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) | ||
139 | return c; | ||
140 | } | ||
141 | |||
142 | +static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u) | ||
143 | +{ | ||
144 | + long long c, old; | ||
145 | + | ||
146 | + c = atomic64_read(v); | ||
147 | + for (;;) { | ||
148 | + if (unlikely(c == u)) | ||
149 | + break; | ||
150 | + old = atomic64_cmpxchg(v, c, c + i); | ||
151 | + if (likely(old == c)) | ||
152 | + break; | ||
153 | + c = old; | ||
154 | + } | ||
155 | + return c != u; | ||
156 | +} | ||
157 | + | ||
158 | +static inline long long atomic64_dec_if_positive(atomic64_t *v) | ||
159 | +{ | ||
160 | + long long c, old, dec; | ||
161 | + | ||
162 | + c = atomic64_read(v); | ||
163 | + for (;;) { | ||
164 | + dec = c - 1; | ||
165 | + if (unlikely(dec < 0)) | ||
166 | + break; | ||
167 | + old = atomic64_cmpxchg((v), c, dec); | ||
168 | + if (likely(old == c)) | ||
169 | + break; | ||
170 | + c = old; | ||
171 | + } | ||
172 | + return dec; | ||
173 | +} | ||
174 | + | ||
175 | #define ATOMIC_OP(op) \ | ||
176 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
177 | { \ | ||
178 | diff --git a/arch/mn10300/include/asm/switch_to.h b/arch/mn10300/include/asm/switch_to.h | ||
179 | index 393d311735c8..67e333aa7629 100644 | ||
180 | --- a/arch/mn10300/include/asm/switch_to.h | ||
181 | +++ b/arch/mn10300/include/asm/switch_to.h | ||
182 | @@ -16,7 +16,7 @@ | ||
183 | struct task_struct; | ||
184 | struct thread_struct; | ||
185 | |||
186 | -#if !defined(CONFIG_LAZY_SAVE_FPU) | ||
187 | +#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU) | ||
188 | struct fpu_state_struct; | ||
189 | extern asmlinkage void fpu_save(struct fpu_state_struct *); | ||
190 | #define switch_fpu(prev, next) \ | ||
191 | diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c | ||
192 | index 32c46b424dd0..b53f80f0b4d8 100644 | ||
193 | --- a/arch/powerpc/sysdev/xics/icp-opal.c | ||
194 | +++ b/arch/powerpc/sysdev/xics/icp-opal.c | ||
195 | @@ -130,14 +130,16 @@ static void icp_opal_cause_ipi(int cpu, unsigned long data) | ||
196 | { | ||
197 | int hw_cpu = get_hard_smp_processor_id(cpu); | ||
198 | |||
199 | + kvmppc_set_host_ipi(cpu, 1); | ||
200 | opal_int_set_mfrr(hw_cpu, IPI_PRIORITY); | ||
201 | } | ||
202 | |||
203 | static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id) | ||
204 | { | ||
205 | - int hw_cpu = hard_smp_processor_id(); | ||
206 | + int cpu = smp_processor_id(); | ||
207 | |||
208 | - opal_int_set_mfrr(hw_cpu, 0xff); | ||
209 | + kvmppc_set_host_ipi(cpu, 0); | ||
210 | + opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff); | ||
211 | |||
212 | return smp_ipi_demux(); | ||
213 | } | ||
214 | diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S | ||
215 | index c43816886839..3bc2825173ef 100644 | ||
216 | --- a/arch/s390/kernel/entry.S | ||
217 | +++ b/arch/s390/kernel/entry.S | ||
218 | @@ -240,12 +240,17 @@ ENTRY(sie64a) | ||
219 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | ||
220 | .Lsie_done: | ||
221 | # some program checks are suppressing. C code (e.g. do_protection_exception) | ||
222 | -# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other | ||
223 | -# instructions between sie64a and .Lsie_done should not cause program | ||
224 | -# interrupts. So lets use a nop (47 00 00 00) as a landing pad. | ||
225 | +# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There | ||
226 | +# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable. | ||
227 | +# Other instructions between sie64a and .Lsie_done should not cause program | ||
228 | +# interrupts. So lets use 3 nops as a landing pad for all possible rewinds. | ||
229 | # See also .Lcleanup_sie | ||
230 | -.Lrewind_pad: | ||
231 | - nop 0 | ||
232 | +.Lrewind_pad6: | ||
233 | + nopr 7 | ||
234 | +.Lrewind_pad4: | ||
235 | + nopr 7 | ||
236 | +.Lrewind_pad2: | ||
237 | + nopr 7 | ||
238 | .globl sie_exit | ||
239 | sie_exit: | ||
240 | lg %r14,__SF_EMPTY+8(%r15) # load guest register save area | ||
241 | @@ -258,7 +263,9 @@ sie_exit: | ||
242 | stg %r14,__SF_EMPTY+16(%r15) # set exit reason code | ||
243 | j sie_exit | ||
244 | |||
245 | - EX_TABLE(.Lrewind_pad,.Lsie_fault) | ||
246 | + EX_TABLE(.Lrewind_pad6,.Lsie_fault) | ||
247 | + EX_TABLE(.Lrewind_pad4,.Lsie_fault) | ||
248 | + EX_TABLE(.Lrewind_pad2,.Lsie_fault) | ||
249 | EX_TABLE(sie_exit,.Lsie_fault) | ||
250 | EXPORT_SYMBOL(sie64a) | ||
251 | EXPORT_SYMBOL(sie_exit) | ||
252 | diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c | ||
253 | index 4094a51b1970..496fa926e1e0 100644 | ||
254 | --- a/arch/sparc/kernel/traps_64.c | ||
255 | +++ b/arch/sparc/kernel/traps_64.c | ||
256 | @@ -85,7 +85,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p) | ||
257 | |||
258 | void bad_trap(struct pt_regs *regs, long lvl) | ||
259 | { | ||
260 | - char buffer[32]; | ||
261 | + char buffer[36]; | ||
262 | siginfo_t info; | ||
263 | |||
264 | if (notify_die(DIE_TRAP, "bad trap", regs, | ||
265 | @@ -116,7 +116,7 @@ void bad_trap(struct pt_regs *regs, long lvl) | ||
266 | |||
267 | void bad_trap_tl1(struct pt_regs *regs, long lvl) | ||
268 | { | ||
269 | - char buffer[32]; | ||
270 | + char buffer[36]; | ||
271 | |||
272 | if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs, | ||
273 | 0, lvl, SIGTRAP) == NOTIFY_STOP) | ||
274 | diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h | ||
275 | index f71f88ea7646..19707db966f1 100644 | ||
276 | --- a/arch/xtensa/include/asm/irq.h | ||
277 | +++ b/arch/xtensa/include/asm/irq.h | ||
278 | @@ -29,7 +29,8 @@ static inline void variant_irq_disable(unsigned int irq) { } | ||
279 | # define PLATFORM_NR_IRQS 0 | ||
280 | #endif | ||
281 | #define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS | ||
282 | -#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS) | ||
283 | +#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1) | ||
284 | +#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1) | ||
285 | |||
286 | #if VARIANT_NR_IRQS == 0 | ||
287 | static inline void variant_init_irq(void) { } | ||
288 | diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c | ||
289 | index 4ac3d23161cf..441694464b1e 100644 | ||
290 | --- a/arch/xtensa/kernel/irq.c | ||
291 | +++ b/arch/xtensa/kernel/irq.c | ||
292 | @@ -34,11 +34,6 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) | ||
293 | { | ||
294 | int irq = irq_find_mapping(NULL, hwirq); | ||
295 | |||
296 | - if (hwirq >= NR_IRQS) { | ||
297 | - printk(KERN_EMERG "%s: cannot handle IRQ %d\n", | ||
298 | - __func__, hwirq); | ||
299 | - } | ||
300 | - | ||
301 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | ||
302 | /* Debugging check for stack overflow: is there less than 1KB free? */ | ||
303 | { | ||
304 | diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h | ||
305 | index dbeea2b440a1..1fda7e20dfcb 100644 | ||
306 | --- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h | ||
307 | +++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h | ||
308 | @@ -24,16 +24,18 @@ | ||
309 | |||
310 | /* Interrupt configuration. */ | ||
311 | |||
312 | -#define PLATFORM_NR_IRQS 10 | ||
313 | +#define PLATFORM_NR_IRQS 0 | ||
314 | |||
315 | /* Default assignment of LX60 devices to external interrupts. */ | ||
316 | |||
317 | #ifdef CONFIG_XTENSA_MX | ||
318 | #define DUART16552_INTNUM XCHAL_EXTINT3_NUM | ||
319 | #define OETH_IRQ XCHAL_EXTINT4_NUM | ||
320 | +#define C67X00_IRQ XCHAL_EXTINT8_NUM | ||
321 | #else | ||
322 | #define DUART16552_INTNUM XCHAL_EXTINT0_NUM | ||
323 | #define OETH_IRQ XCHAL_EXTINT1_NUM | ||
324 | +#define C67X00_IRQ XCHAL_EXTINT5_NUM | ||
325 | #endif | ||
326 | |||
327 | /* | ||
328 | @@ -63,5 +65,5 @@ | ||
329 | |||
330 | #define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000) | ||
331 | #define C67X00_SIZE 0x10 | ||
332 | -#define C67X00_IRQ 5 | ||
333 | + | ||
334 | #endif /* __XTENSA_XTAVNET_HARDWARE_H */ | ||
335 | diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c | ||
336 | index 779be723eb2b..42285f35d313 100644 | ||
337 | --- a/arch/xtensa/platforms/xtfpga/setup.c | ||
338 | +++ b/arch/xtensa/platforms/xtfpga/setup.c | ||
339 | @@ -175,8 +175,8 @@ static struct resource ethoc_res[] = { | ||
340 | .flags = IORESOURCE_MEM, | ||
341 | }, | ||
342 | [2] = { /* IRQ number */ | ||
343 | - .start = OETH_IRQ, | ||
344 | - .end = OETH_IRQ, | ||
345 | + .start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ), | ||
346 | + .end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ), | ||
347 | .flags = IORESOURCE_IRQ, | ||
348 | }, | ||
349 | }; | ||
350 | @@ -213,8 +213,8 @@ static struct resource c67x00_res[] = { | ||
351 | .flags = IORESOURCE_MEM, | ||
352 | }, | ||
353 | [1] = { /* IRQ number */ | ||
354 | - .start = C67X00_IRQ, | ||
355 | - .end = C67X00_IRQ, | ||
356 | + .start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ), | ||
357 | + .end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ), | ||
358 | .flags = IORESOURCE_IRQ, | ||
359 | }, | ||
360 | }; | ||
361 | @@ -247,7 +247,7 @@ static struct resource serial_resource = { | ||
362 | static struct plat_serial8250_port serial_platform_data[] = { | ||
363 | [0] = { | ||
364 | .mapbase = DUART16552_PADDR, | ||
365 | - .irq = DUART16552_INTNUM, | ||
366 | + .irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM), | ||
367 | .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | | ||
368 | UPF_IOREMAP, | ||
369 | .iotype = XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32, | ||
370 | diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c | ||
371 | index 93e7c1b32edd..5610cd537da7 100644 | ||
372 | --- a/block/partitions/msdos.c | ||
373 | +++ b/block/partitions/msdos.c | ||
374 | @@ -300,6 +300,8 @@ static void parse_bsd(struct parsed_partitions *state, | ||
375 | continue; | ||
376 | bsd_start = le32_to_cpu(p->p_offset); | ||
377 | bsd_size = le32_to_cpu(p->p_size); | ||
378 | + if (memcmp(flavour, "bsd\0", 4) == 0) | ||
379 | + bsd_start += offset; | ||
380 | if (offset == bsd_start && size == bsd_size) | ||
381 | /* full parent partition, we have it already */ | ||
382 | continue; | ||
383 | diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c | ||
384 | index 23f3b95a1158..147d2e3678aa 100644 | ||
385 | --- a/drivers/base/power/runtime.c | ||
386 | +++ b/drivers/base/power/runtime.c | ||
387 | @@ -889,13 +889,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags) | ||
388 | unsigned long flags; | ||
389 | int retval; | ||
390 | |||
391 | - might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); | ||
392 | - | ||
393 | if (rpmflags & RPM_GET_PUT) { | ||
394 | if (!atomic_dec_and_test(&dev->power.usage_count)) | ||
395 | return 0; | ||
396 | } | ||
397 | |||
398 | + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); | ||
399 | + | ||
400 | spin_lock_irqsave(&dev->power.lock, flags); | ||
401 | retval = rpm_idle(dev, rpmflags); | ||
402 | spin_unlock_irqrestore(&dev->power.lock, flags); | ||
403 | @@ -921,13 +921,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags) | ||
404 | unsigned long flags; | ||
405 | int retval; | ||
406 | |||
407 | - might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); | ||
408 | - | ||
409 | if (rpmflags & RPM_GET_PUT) { | ||
410 | if (!atomic_dec_and_test(&dev->power.usage_count)) | ||
411 | return 0; | ||
412 | } | ||
413 | |||
414 | + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); | ||
415 | + | ||
416 | spin_lock_irqsave(&dev->power.lock, flags); | ||
417 | retval = rpm_suspend(dev, rpmflags); | ||
418 | spin_unlock_irqrestore(&dev->power.lock, flags); | ||
419 | @@ -952,7 +952,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags) | ||
420 | unsigned long flags; | ||
421 | int retval; | ||
422 | |||
423 | - might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); | ||
424 | + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe && | ||
425 | + dev->power.runtime_status != RPM_ACTIVE); | ||
426 | |||
427 | if (rpmflags & RPM_GET_PUT) | ||
428 | atomic_inc(&dev->power.usage_count); | ||
429 | diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h | ||
430 | index 908011d2c8f5..7abda94fc2cf 100644 | ||
431 | --- a/drivers/gpu/drm/ast/ast_drv.h | ||
432 | +++ b/drivers/gpu/drm/ast/ast_drv.h | ||
433 | @@ -113,6 +113,7 @@ struct ast_private { | ||
434 | struct ttm_bo_kmap_obj cache_kmap; | ||
435 | int next_cursor; | ||
436 | bool support_wide_screen; | ||
437 | + bool DisableP2A; | ||
438 | |||
439 | enum ast_tx_chip tx_chip_type; | ||
440 | u8 dp501_maxclk; | ||
441 | diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c | ||
442 | index f75c6421db62..533e762d036d 100644 | ||
443 | --- a/drivers/gpu/drm/ast/ast_main.c | ||
444 | +++ b/drivers/gpu/drm/ast/ast_main.c | ||
445 | @@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) | ||
446 | } else | ||
447 | *need_post = false; | ||
448 | |||
449 | + /* Check P2A Access */ | ||
450 | + ast->DisableP2A = true; | ||
451 | + data = ast_read32(ast, 0xf004); | ||
452 | + if (data != 0xFFFFFFFF) | ||
453 | + ast->DisableP2A = false; | ||
454 | + | ||
455 | /* Check if we support wide screen */ | ||
456 | switch (ast->chip) { | ||
457 | case AST1180: | ||
458 | @@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) | ||
459 | ast->support_wide_screen = true; | ||
460 | else { | ||
461 | ast->support_wide_screen = false; | ||
462 | - /* Read SCU7c (silicon revision register) */ | ||
463 | - ast_write32(ast, 0xf004, 0x1e6e0000); | ||
464 | - ast_write32(ast, 0xf000, 0x1); | ||
465 | - data = ast_read32(ast, 0x1207c); | ||
466 | - data &= 0x300; | ||
467 | - if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ | ||
468 | - ast->support_wide_screen = true; | ||
469 | - if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ | ||
470 | - ast->support_wide_screen = true; | ||
471 | + if (ast->DisableP2A == false) { | ||
472 | + /* Read SCU7c (silicon revision register) */ | ||
473 | + ast_write32(ast, 0xf004, 0x1e6e0000); | ||
474 | + ast_write32(ast, 0xf000, 0x1); | ||
475 | + data = ast_read32(ast, 0x1207c); | ||
476 | + data &= 0x300; | ||
477 | + if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ | ||
478 | + ast->support_wide_screen = true; | ||
479 | + if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ | ||
480 | + ast->support_wide_screen = true; | ||
481 | + } | ||
482 | } | ||
483 | break; | ||
484 | } | ||
485 | @@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev) | ||
486 | uint32_t data, data2; | ||
487 | uint32_t denum, num, div, ref_pll; | ||
488 | |||
489 | - ast_write32(ast, 0xf004, 0x1e6e0000); | ||
490 | - ast_write32(ast, 0xf000, 0x1); | ||
491 | - | ||
492 | - | ||
493 | - ast_write32(ast, 0x10000, 0xfc600309); | ||
494 | - | ||
495 | - do { | ||
496 | - if (pci_channel_offline(dev->pdev)) | ||
497 | - return -EIO; | ||
498 | - } while (ast_read32(ast, 0x10000) != 0x01); | ||
499 | - data = ast_read32(ast, 0x10004); | ||
500 | - | ||
501 | - if (data & 0x40) | ||
502 | + if (ast->DisableP2A) | ||
503 | + { | ||
504 | ast->dram_bus_width = 16; | ||
505 | + ast->dram_type = AST_DRAM_1Gx16; | ||
506 | + ast->mclk = 396; | ||
507 | + } | ||
508 | else | ||
509 | - ast->dram_bus_width = 32; | ||
510 | + { | ||
511 | + ast_write32(ast, 0xf004, 0x1e6e0000); | ||
512 | + ast_write32(ast, 0xf000, 0x1); | ||
513 | + data = ast_read32(ast, 0x10004); | ||
514 | + | ||
515 | + if (data & 0x40) | ||
516 | + ast->dram_bus_width = 16; | ||
517 | + else | ||
518 | + ast->dram_bus_width = 32; | ||
519 | + | ||
520 | + if (ast->chip == AST2300 || ast->chip == AST2400) { | ||
521 | + switch (data & 0x03) { | ||
522 | + case 0: | ||
523 | + ast->dram_type = AST_DRAM_512Mx16; | ||
524 | + break; | ||
525 | + default: | ||
526 | + case 1: | ||
527 | + ast->dram_type = AST_DRAM_1Gx16; | ||
528 | + break; | ||
529 | + case 2: | ||
530 | + ast->dram_type = AST_DRAM_2Gx16; | ||
531 | + break; | ||
532 | + case 3: | ||
533 | + ast->dram_type = AST_DRAM_4Gx16; | ||
534 | + break; | ||
535 | + } | ||
536 | + } else { | ||
537 | + switch (data & 0x0c) { | ||
538 | + case 0: | ||
539 | + case 4: | ||
540 | + ast->dram_type = AST_DRAM_512Mx16; | ||
541 | + break; | ||
542 | + case 8: | ||
543 | + if (data & 0x40) | ||
544 | + ast->dram_type = AST_DRAM_1Gx16; | ||
545 | + else | ||
546 | + ast->dram_type = AST_DRAM_512Mx32; | ||
547 | + break; | ||
548 | + case 0xc: | ||
549 | + ast->dram_type = AST_DRAM_1Gx32; | ||
550 | + break; | ||
551 | + } | ||
552 | + } | ||
553 | |||
554 | - if (ast->chip == AST2300 || ast->chip == AST2400) { | ||
555 | - switch (data & 0x03) { | ||
556 | - case 0: | ||
557 | - ast->dram_type = AST_DRAM_512Mx16; | ||
558 | - break; | ||
559 | - default: | ||
560 | - case 1: | ||
561 | - ast->dram_type = AST_DRAM_1Gx16; | ||
562 | - break; | ||
563 | - case 2: | ||
564 | - ast->dram_type = AST_DRAM_2Gx16; | ||
565 | - break; | ||
566 | + data = ast_read32(ast, 0x10120); | ||
567 | + data2 = ast_read32(ast, 0x10170); | ||
568 | + if (data2 & 0x2000) | ||
569 | + ref_pll = 14318; | ||
570 | + else | ||
571 | + ref_pll = 12000; | ||
572 | + | ||
573 | + denum = data & 0x1f; | ||
574 | + num = (data & 0x3fe0) >> 5; | ||
575 | + data = (data & 0xc000) >> 14; | ||
576 | + switch (data) { | ||
577 | case 3: | ||
578 | - ast->dram_type = AST_DRAM_4Gx16; | ||
579 | - break; | ||
580 | - } | ||
581 | - } else { | ||
582 | - switch (data & 0x0c) { | ||
583 | - case 0: | ||
584 | - case 4: | ||
585 | - ast->dram_type = AST_DRAM_512Mx16; | ||
586 | + div = 0x4; | ||
587 | break; | ||
588 | - case 8: | ||
589 | - if (data & 0x40) | ||
590 | - ast->dram_type = AST_DRAM_1Gx16; | ||
591 | - else | ||
592 | - ast->dram_type = AST_DRAM_512Mx32; | ||
593 | + case 2: | ||
594 | + case 1: | ||
595 | + div = 0x2; | ||
596 | break; | ||
597 | - case 0xc: | ||
598 | - ast->dram_type = AST_DRAM_1Gx32; | ||
599 | + default: | ||
600 | + div = 0x1; | ||
601 | break; | ||
602 | } | ||
603 | + ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); | ||
604 | } | ||
605 | - | ||
606 | - data = ast_read32(ast, 0x10120); | ||
607 | - data2 = ast_read32(ast, 0x10170); | ||
608 | - if (data2 & 0x2000) | ||
609 | - ref_pll = 14318; | ||
610 | - else | ||
611 | - ref_pll = 12000; | ||
612 | - | ||
613 | - denum = data & 0x1f; | ||
614 | - num = (data & 0x3fe0) >> 5; | ||
615 | - data = (data & 0xc000) >> 14; | ||
616 | - switch (data) { | ||
617 | - case 3: | ||
618 | - div = 0x4; | ||
619 | - break; | ||
620 | - case 2: | ||
621 | - case 1: | ||
622 | - div = 0x2; | ||
623 | - break; | ||
624 | - default: | ||
625 | - div = 0x1; | ||
626 | - break; | ||
627 | - } | ||
628 | - ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); | ||
629 | return 0; | ||
630 | } | ||
631 | |||
632 | diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c | ||
633 | index 30672a3df8a9..270e8fb2803f 100644 | ||
634 | --- a/drivers/gpu/drm/ast/ast_post.c | ||
635 | +++ b/drivers/gpu/drm/ast/ast_post.c | ||
636 | @@ -375,12 +375,20 @@ void ast_post_gpu(struct drm_device *dev) | ||
637 | ast_enable_mmio(dev); | ||
638 | ast_set_def_ext_reg(dev); | ||
639 | |||
640 | - if (ast->chip == AST2300 || ast->chip == AST2400) | ||
641 | - ast_init_dram_2300(dev); | ||
642 | - else | ||
643 | - ast_init_dram_reg(dev); | ||
644 | + if (ast->DisableP2A == false) | ||
645 | + { | ||
646 | + if (ast->chip == AST2300 || ast->chip == AST2400) | ||
647 | + ast_init_dram_2300(dev); | ||
648 | + else | ||
649 | + ast_init_dram_reg(dev); | ||
650 | |||
651 | - ast_init_3rdtx(dev); | ||
652 | + ast_init_3rdtx(dev); | ||
653 | + } | ||
654 | + else | ||
655 | + { | ||
656 | + if (ast->tx_chip_type != AST_TX_NONE) | ||
657 | + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ | ||
658 | + } | ||
659 | } | ||
660 | |||
661 | /* AST 2300 DRAM settings */ | ||
662 | diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c | ||
663 | index 2db7fb510b6c..0e934a9ac63c 100644 | ||
664 | --- a/drivers/gpu/drm/drm_connector.c | ||
665 | +++ b/drivers/gpu/drm/drm_connector.c | ||
666 | @@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev, | ||
667 | |||
668 | INIT_LIST_HEAD(&connector->probed_modes); | ||
669 | INIT_LIST_HEAD(&connector->modes); | ||
670 | + mutex_init(&connector->mutex); | ||
671 | connector->edid_blob_ptr = NULL; | ||
672 | connector->status = connector_status_unknown; | ||
673 | |||
674 | @@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector) | ||
675 | connector->funcs->atomic_destroy_state(connector, | ||
676 | connector->state); | ||
677 | |||
678 | + mutex_destroy(&connector->mutex); | ||
679 | + | ||
680 | memset(connector, 0, sizeof(*connector)); | ||
681 | } | ||
682 | EXPORT_SYMBOL(drm_connector_cleanup); | ||
683 | @@ -374,14 +377,18 @@ EXPORT_SYMBOL(drm_connector_cleanup); | ||
684 | */ | ||
685 | int drm_connector_register(struct drm_connector *connector) | ||
686 | { | ||
687 | - int ret; | ||
688 | + int ret = 0; | ||
689 | |||
690 | - if (connector->registered) | ||
691 | + if (!connector->dev->registered) | ||
692 | return 0; | ||
693 | |||
694 | + mutex_lock(&connector->mutex); | ||
695 | + if (connector->registered) | ||
696 | + goto unlock; | ||
697 | + | ||
698 | ret = drm_sysfs_connector_add(connector); | ||
699 | if (ret) | ||
700 | - return ret; | ||
701 | + goto unlock; | ||
702 | |||
703 | ret = drm_debugfs_connector_add(connector); | ||
704 | if (ret) { | ||
705 | @@ -397,12 +404,14 @@ int drm_connector_register(struct drm_connector *connector) | ||
706 | drm_mode_object_register(connector->dev, &connector->base); | ||
707 | |||
708 | connector->registered = true; | ||
709 | - return 0; | ||
710 | + goto unlock; | ||
711 | |||
712 | err_debugfs: | ||
713 | drm_debugfs_connector_remove(connector); | ||
714 | err_sysfs: | ||
715 | drm_sysfs_connector_remove(connector); | ||
716 | +unlock: | ||
717 | + mutex_unlock(&connector->mutex); | ||
718 | return ret; | ||
719 | } | ||
720 | EXPORT_SYMBOL(drm_connector_register); | ||
721 | @@ -415,8 +424,11 @@ EXPORT_SYMBOL(drm_connector_register); | ||
722 | */ | ||
723 | void drm_connector_unregister(struct drm_connector *connector) | ||
724 | { | ||
725 | - if (!connector->registered) | ||
726 | + mutex_lock(&connector->mutex); | ||
727 | + if (!connector->registered) { | ||
728 | + mutex_unlock(&connector->mutex); | ||
729 | return; | ||
730 | + } | ||
731 | |||
732 | if (connector->funcs->early_unregister) | ||
733 | connector->funcs->early_unregister(connector); | ||
734 | @@ -425,6 +437,7 @@ void drm_connector_unregister(struct drm_connector *connector) | ||
735 | drm_debugfs_connector_remove(connector); | ||
736 | |||
737 | connector->registered = false; | ||
738 | + mutex_unlock(&connector->mutex); | ||
739 | } | ||
740 | EXPORT_SYMBOL(drm_connector_unregister); | ||
741 | |||
742 | diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c | ||
743 | index 0f2fa9044668..362b8cd68a24 100644 | ||
744 | --- a/drivers/gpu/drm/drm_drv.c | ||
745 | +++ b/drivers/gpu/drm/drm_drv.c | ||
746 | @@ -710,6 +710,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) | ||
747 | if (ret) | ||
748 | goto err_minors; | ||
749 | |||
750 | + dev->registered = true; | ||
751 | + | ||
752 | if (dev->driver->load) { | ||
753 | ret = dev->driver->load(dev, flags); | ||
754 | if (ret) | ||
755 | @@ -749,6 +751,8 @@ void drm_dev_unregister(struct drm_device *dev) | ||
756 | |||
757 | drm_lastclose(dev); | ||
758 | |||
759 | + dev->registered = false; | ||
760 | + | ||
761 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
762 | drm_modeset_unregister_all(dev); | ||
763 | |||
764 | diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c | ||
765 | index ca6efb69ef66..7513e7678263 100644 | ||
766 | --- a/drivers/gpu/drm/i915/i915_drv.c | ||
767 | +++ b/drivers/gpu/drm/i915/i915_drv.c | ||
768 | @@ -1199,6 +1199,15 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
769 | goto out_free_priv; | ||
770 | |||
771 | pci_set_drvdata(pdev, &dev_priv->drm); | ||
772 | + /* | ||
773 | + * Disable the system suspend direct complete optimization, which can | ||
774 | + * leave the device suspended skipping the driver's suspend handlers | ||
775 | + * if the device was already runtime suspended. This is needed due to | ||
776 | + * the difference in our runtime and system suspend sequence and | ||
777 | + * becaue the HDA driver may require us to enable the audio power | ||
778 | + * domain during system suspend. | ||
779 | + */ | ||
780 | + pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; | ||
781 | |||
782 | ret = i915_driver_init_early(dev_priv, ent); | ||
783 | if (ret < 0) | ||
784 | diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c | ||
785 | index 5dc6082639db..f8efd20e4a90 100644 | ||
786 | --- a/drivers/gpu/drm/i915/intel_display.c | ||
787 | +++ b/drivers/gpu/drm/i915/intel_display.c | ||
788 | @@ -2253,6 +2253,9 @@ void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) | ||
789 | intel_fill_fb_ggtt_view(&view, fb, rotation); | ||
790 | vma = i915_gem_object_to_ggtt(obj, &view); | ||
791 | |||
792 | + if (WARN_ON_ONCE(!vma)) | ||
793 | + return; | ||
794 | + | ||
795 | i915_vma_unpin_fence(vma); | ||
796 | i915_gem_object_unpin_from_display_plane(vma); | ||
797 | } | ||
798 | @@ -13764,6 +13767,15 @@ static void update_scanline_offset(struct intel_crtc *crtc) | ||
799 | * type. For DP ports it behaves like most other platforms, but on HDMI | ||
800 | * there's an extra 1 line difference. So we need to add two instead of | ||
801 | * one to the value. | ||
802 | + * | ||
803 | + * On VLV/CHV DSI the scanline counter would appear to increment | ||
804 | + * approx. 1/3 of a scanline before start of vblank. Unfortunately | ||
805 | + * that means we can't tell whether we're in vblank or not while | ||
806 | + * we're on that particular line. We must still set scanline_offset | ||
807 | + * to 1 so that the vblank timestamps come out correct when we query | ||
808 | + * the scanline counter from within the vblank interrupt handler. | ||
809 | + * However if queried just before the start of vblank we'll get an | ||
810 | + * answer that's slightly in the future. | ||
811 | */ | ||
812 | if (IS_GEN2(dev)) { | ||
813 | const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; | ||
814 | diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c | ||
815 | index 2c6d59d4b6d3..49de4760cc16 100644 | ||
816 | --- a/drivers/gpu/drm/i915/intel_pm.c | ||
817 | +++ b/drivers/gpu/drm/i915/intel_pm.c | ||
818 | @@ -4114,11 +4114,19 @@ skl_compute_wm(struct drm_atomic_state *state) | ||
819 | struct drm_crtc_state *cstate; | ||
820 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); | ||
821 | struct skl_wm_values *results = &intel_state->wm_results; | ||
822 | + struct drm_device *dev = state->dev; | ||
823 | struct skl_pipe_wm *pipe_wm; | ||
824 | bool changed = false; | ||
825 | int ret, i; | ||
826 | |||
827 | /* | ||
828 | + * When we distrust bios wm we always need to recompute to set the | ||
829 | + * expected DDB allocations for each CRTC. | ||
830 | + */ | ||
831 | + if (to_i915(dev)->wm.distrust_bios_wm) | ||
832 | + changed = true; | ||
833 | + | ||
834 | + /* | ||
835 | * If this transaction isn't actually touching any CRTC's, don't | ||
836 | * bother with watermark calculation. Note that if we pass this | ||
837 | * test, we're guaranteed to hold at least one CRTC state mutex, | ||
838 | @@ -4128,6 +4136,7 @@ skl_compute_wm(struct drm_atomic_state *state) | ||
839 | */ | ||
840 | for_each_crtc_in_state(state, crtc, cstate, i) | ||
841 | changed = true; | ||
842 | + | ||
843 | if (!changed) | ||
844 | return 0; | ||
845 | |||
846 | diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c | ||
847 | index dbed12c484c9..64f4e2e18594 100644 | ||
848 | --- a/drivers/gpu/drm/i915/intel_sprite.c | ||
849 | +++ b/drivers/gpu/drm/i915/intel_sprite.c | ||
850 | @@ -81,10 +81,13 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, | ||
851 | */ | ||
852 | void intel_pipe_update_start(struct intel_crtc *crtc) | ||
853 | { | ||
854 | + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | ||
855 | const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; | ||
856 | long timeout = msecs_to_jiffies_timeout(1); | ||
857 | int scanline, min, max, vblank_start; | ||
858 | wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); | ||
859 | + bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && | ||
860 | + intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI); | ||
861 | DEFINE_WAIT(wait); | ||
862 | |||
863 | vblank_start = adjusted_mode->crtc_vblank_start; | ||
864 | @@ -136,6 +139,24 @@ void intel_pipe_update_start(struct intel_crtc *crtc) | ||
865 | |||
866 | drm_crtc_vblank_put(&crtc->base); | ||
867 | |||
868 | + /* | ||
869 | + * On VLV/CHV DSI the scanline counter would appear to | ||
870 | + * increment approx. 1/3 of a scanline before start of vblank. | ||
871 | + * The registers still get latched at start of vblank however. | ||
872 | + * This means we must not write any registers on the first | ||
873 | + * line of vblank (since not the whole line is actually in | ||
874 | + * vblank). And unfortunately we can't use the interrupt to | ||
875 | + * wait here since it will fire too soon. We could use the | ||
876 | + * frame start interrupt instead since it will fire after the | ||
877 | + * critical scanline, but that would require more changes | ||
878 | + * in the interrupt code. So for now we'll just do the nasty | ||
879 | + * thing and poll for the bad scanline to pass us by. | ||
880 | + * | ||
881 | + * FIXME figure out if BXT+ DSI suffers from this as well | ||
882 | + */ | ||
883 | + while (need_vlv_dsi_wa && scanline == vblank_start) | ||
884 | + scanline = intel_get_crtc_scanline(crtc); | ||
885 | + | ||
886 | crtc->debug.scanline_start = scanline; | ||
887 | crtc->debug.start_vbl_time = ktime_get(); | ||
888 | crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc); | ||
889 | diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c | ||
890 | index afbf557b23d4..2c2b86d68129 100644 | ||
891 | --- a/drivers/gpu/drm/nouveau/nouveau_display.c | ||
892 | +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | ||
893 | @@ -24,6 +24,7 @@ | ||
894 | * | ||
895 | */ | ||
896 | |||
897 | +#include <acpi/video.h> | ||
898 | #include <drm/drmP.h> | ||
899 | #include <drm/drm_crtc_helper.h> | ||
900 | |||
901 | @@ -358,6 +359,57 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = { | ||
902 | } \ | ||
903 | } while(0) | ||
904 | |||
905 | +static void | ||
906 | +nouveau_display_hpd_work(struct work_struct *work) | ||
907 | +{ | ||
908 | + struct nouveau_drm *drm = container_of(work, typeof(*drm), hpd_work); | ||
909 | + | ||
910 | + pm_runtime_get_sync(drm->dev->dev); | ||
911 | + | ||
912 | + drm_helper_hpd_irq_event(drm->dev); | ||
913 | + /* enable polling for external displays */ | ||
914 | + drm_kms_helper_poll_enable(drm->dev); | ||
915 | + | ||
916 | + pm_runtime_mark_last_busy(drm->dev->dev); | ||
917 | + pm_runtime_put_sync(drm->dev->dev); | ||
918 | +} | ||
919 | + | ||
920 | +#ifdef CONFIG_ACPI | ||
921 | + | ||
922 | +/* | ||
923 | + * Hans de Goede: This define belongs in acpi/video.h, I've submitted a patch | ||
924 | + * to the acpi subsys to move it there from drivers/acpi/acpi_video.c . | ||
925 | + * This should be dropped once that is merged. | ||
926 | + */ | ||
927 | +#ifndef ACPI_VIDEO_NOTIFY_PROBE | ||
928 | +#define ACPI_VIDEO_NOTIFY_PROBE 0x81 | ||
929 | +#endif | ||
930 | + | ||
931 | +static int | ||
932 | +nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val, | ||
933 | + void *data) | ||
934 | +{ | ||
935 | + struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb); | ||
936 | + struct acpi_bus_event *info = data; | ||
937 | + | ||
938 | + if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) { | ||
939 | + if (info->type == ACPI_VIDEO_NOTIFY_PROBE) { | ||
940 | + /* | ||
941 | + * This may be the only indication we receive of a | ||
942 | + * connector hotplug on a runtime suspended GPU, | ||
943 | + * schedule hpd_work to check. | ||
944 | + */ | ||
945 | + schedule_work(&drm->hpd_work); | ||
946 | + | ||
947 | + /* acpi-video should not generate keypresses for this */ | ||
948 | + return NOTIFY_BAD; | ||
949 | + } | ||
950 | + } | ||
951 | + | ||
952 | + return NOTIFY_DONE; | ||
953 | +} | ||
954 | +#endif | ||
955 | + | ||
956 | int | ||
957 | nouveau_display_init(struct drm_device *dev) | ||
958 | { | ||
959 | @@ -370,9 +422,6 @@ nouveau_display_init(struct drm_device *dev) | ||
960 | if (ret) | ||
961 | return ret; | ||
962 | |||
963 | - /* enable polling for external displays */ | ||
964 | - drm_kms_helper_poll_enable(dev); | ||
965 | - | ||
966 | /* enable hotplug interrupts */ | ||
967 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
968 | struct nouveau_connector *conn = nouveau_connector(connector); | ||
969 | @@ -537,6 +586,12 @@ nouveau_display_create(struct drm_device *dev) | ||
970 | } | ||
971 | |||
972 | nouveau_backlight_init(dev); | ||
973 | + INIT_WORK(&drm->hpd_work, nouveau_display_hpd_work); | ||
974 | +#ifdef CONFIG_ACPI | ||
975 | + drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy; | ||
976 | + register_acpi_notifier(&drm->acpi_nb); | ||
977 | +#endif | ||
978 | + | ||
979 | return 0; | ||
980 | |||
981 | vblank_err: | ||
982 | @@ -552,6 +607,9 @@ nouveau_display_destroy(struct drm_device *dev) | ||
983 | { | ||
984 | struct nouveau_display *disp = nouveau_display(dev); | ||
985 | |||
986 | +#ifdef CONFIG_ACPI | ||
987 | + unregister_acpi_notifier(&nouveau_drm(dev)->acpi_nb); | ||
988 | +#endif | ||
989 | nouveau_backlight_exit(dev); | ||
990 | nouveau_display_vblank_fini(dev); | ||
991 | |||
992 | diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c | ||
993 | index 3100fd88a015..42829a942e33 100644 | ||
994 | --- a/drivers/gpu/drm/nouveau/nouveau_drm.c | ||
995 | +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | ||
996 | @@ -483,6 +483,9 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | ||
997 | pm_runtime_allow(dev->dev); | ||
998 | pm_runtime_mark_last_busy(dev->dev); | ||
999 | pm_runtime_put(dev->dev); | ||
1000 | + } else { | ||
1001 | + /* enable polling for external displays */ | ||
1002 | + drm_kms_helper_poll_enable(dev); | ||
1003 | } | ||
1004 | return 0; | ||
1005 | |||
1006 | @@ -761,7 +764,7 @@ nouveau_pmops_runtime_resume(struct device *dev) | ||
1007 | pci_set_master(pdev); | ||
1008 | |||
1009 | ret = nouveau_do_resume(drm_dev, true); | ||
1010 | - drm_kms_helper_poll_enable(drm_dev); | ||
1011 | + | ||
1012 | /* do magic */ | ||
1013 | nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); | ||
1014 | vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); | ||
1015 | diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h | ||
1016 | index 822a0212cd48..1e7f1e326b3c 100644 | ||
1017 | --- a/drivers/gpu/drm/nouveau/nouveau_drv.h | ||
1018 | +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | ||
1019 | @@ -37,6 +37,8 @@ | ||
1020 | * - implemented limited ABI16/NVIF interop | ||
1021 | */ | ||
1022 | |||
1023 | +#include <linux/notifier.h> | ||
1024 | + | ||
1025 | #include <nvif/client.h> | ||
1026 | #include <nvif/device.h> | ||
1027 | #include <nvif/ioctl.h> | ||
1028 | @@ -161,6 +163,12 @@ struct nouveau_drm { | ||
1029 | struct nvbios vbios; | ||
1030 | struct nouveau_display *display; | ||
1031 | struct backlight_device *backlight; | ||
1032 | + struct work_struct hpd_work; | ||
1033 | + struct work_struct fbcon_work; | ||
1034 | + int fbcon_new_state; | ||
1035 | +#ifdef CONFIG_ACPI | ||
1036 | + struct notifier_block acpi_nb; | ||
1037 | +#endif | ||
1038 | |||
1039 | /* power management */ | ||
1040 | struct nouveau_hwmon *hwmon; | ||
1041 | diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | ||
1042 | index 9f5692726c16..2b79e27dd89c 100644 | ||
1043 | --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c | ||
1044 | +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | ||
1045 | @@ -491,19 +491,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { | ||
1046 | .fb_probe = nouveau_fbcon_create, | ||
1047 | }; | ||
1048 | |||
1049 | +static void | ||
1050 | +nouveau_fbcon_set_suspend_work(struct work_struct *work) | ||
1051 | +{ | ||
1052 | + struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work); | ||
1053 | + int state = READ_ONCE(drm->fbcon_new_state); | ||
1054 | + | ||
1055 | + if (state == FBINFO_STATE_RUNNING) | ||
1056 | + pm_runtime_get_sync(drm->dev->dev); | ||
1057 | + | ||
1058 | + console_lock(); | ||
1059 | + if (state == FBINFO_STATE_RUNNING) | ||
1060 | + nouveau_fbcon_accel_restore(drm->dev); | ||
1061 | + drm_fb_helper_set_suspend(&drm->fbcon->helper, state); | ||
1062 | + if (state != FBINFO_STATE_RUNNING) | ||
1063 | + nouveau_fbcon_accel_save_disable(drm->dev); | ||
1064 | + console_unlock(); | ||
1065 | + | ||
1066 | + if (state == FBINFO_STATE_RUNNING) { | ||
1067 | + pm_runtime_mark_last_busy(drm->dev->dev); | ||
1068 | + pm_runtime_put_sync(drm->dev->dev); | ||
1069 | + } | ||
1070 | +} | ||
1071 | + | ||
1072 | void | ||
1073 | nouveau_fbcon_set_suspend(struct drm_device *dev, int state) | ||
1074 | { | ||
1075 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
1076 | - if (drm->fbcon) { | ||
1077 | - console_lock(); | ||
1078 | - if (state == FBINFO_STATE_RUNNING) | ||
1079 | - nouveau_fbcon_accel_restore(dev); | ||
1080 | - drm_fb_helper_set_suspend(&drm->fbcon->helper, state); | ||
1081 | - if (state != FBINFO_STATE_RUNNING) | ||
1082 | - nouveau_fbcon_accel_save_disable(dev); | ||
1083 | - console_unlock(); | ||
1084 | - } | ||
1085 | + | ||
1086 | + if (!drm->fbcon) | ||
1087 | + return; | ||
1088 | + | ||
1089 | + drm->fbcon_new_state = state; | ||
1090 | + /* Since runtime resume can happen as a result of a sysfs operation, | ||
1091 | + * it's possible we already have the console locked. So handle fbcon | ||
1092 | + * init/deinit from a seperate work thread | ||
1093 | + */ | ||
1094 | + schedule_work(&drm->fbcon_work); | ||
1095 | } | ||
1096 | |||
1097 | int | ||
1098 | @@ -524,6 +548,7 @@ nouveau_fbcon_init(struct drm_device *dev) | ||
1099 | |||
1100 | fbcon->dev = dev; | ||
1101 | drm->fbcon = fbcon; | ||
1102 | + INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work); | ||
1103 | |||
1104 | drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); | ||
1105 | |||
1106 | diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h | ||
1107 | index 64c4ce7115ad..75e1f09484ff 100644 | ||
1108 | --- a/drivers/gpu/drm/nouveau/nouveau_fence.h | ||
1109 | +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h | ||
1110 | @@ -100,6 +100,7 @@ struct nv84_fence_priv { | ||
1111 | struct nouveau_bo *bo; | ||
1112 | struct nouveau_bo *bo_gart; | ||
1113 | u32 *suspend; | ||
1114 | + struct mutex mutex; | ||
1115 | }; | ||
1116 | |||
1117 | u64 nv84_fence_crtc(struct nouveau_channel *, int); | ||
1118 | diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c | ||
1119 | index 08f9c6fa0f7f..1fba38622744 100644 | ||
1120 | --- a/drivers/gpu/drm/nouveau/nouveau_usif.c | ||
1121 | +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c | ||
1122 | @@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) | ||
1123 | if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) { | ||
1124 | /* block access to objects not created via this interface */ | ||
1125 | owner = argv->v0.owner; | ||
1126 | - if (argv->v0.object == 0ULL) | ||
1127 | + if (argv->v0.object == 0ULL && | ||
1128 | + argv->v0.type != NVIF_IOCTL_V0_DEL) | ||
1129 | argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */ | ||
1130 | else | ||
1131 | argv->v0.owner = NVDRM_OBJECT_USIF; | ||
1132 | diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c | ||
1133 | index 18bde9d8e6d6..90a5dd6311c6 100644 | ||
1134 | --- a/drivers/gpu/drm/nouveau/nv84_fence.c | ||
1135 | +++ b/drivers/gpu/drm/nouveau/nv84_fence.c | ||
1136 | @@ -121,8 +121,10 @@ nv84_fence_context_del(struct nouveau_channel *chan) | ||
1137 | } | ||
1138 | |||
1139 | nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence); | ||
1140 | + mutex_lock(&priv->mutex); | ||
1141 | nouveau_bo_vma_del(priv->bo, &fctx->vma_gart); | ||
1142 | nouveau_bo_vma_del(priv->bo, &fctx->vma); | ||
1143 | + mutex_unlock(&priv->mutex); | ||
1144 | nouveau_fence_context_del(&fctx->base); | ||
1145 | chan->fence = NULL; | ||
1146 | nouveau_fence_context_free(&fctx->base); | ||
1147 | @@ -148,11 +150,13 @@ nv84_fence_context_new(struct nouveau_channel *chan) | ||
1148 | fctx->base.sync32 = nv84_fence_sync32; | ||
1149 | fctx->base.sequence = nv84_fence_read(chan); | ||
1150 | |||
1151 | + mutex_lock(&priv->mutex); | ||
1152 | ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma); | ||
1153 | if (ret == 0) { | ||
1154 | ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm, | ||
1155 | &fctx->vma_gart); | ||
1156 | } | ||
1157 | + mutex_unlock(&priv->mutex); | ||
1158 | |||
1159 | /* map display semaphore buffers into channel's vm */ | ||
1160 | for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) { | ||
1161 | @@ -232,6 +236,8 @@ nv84_fence_create(struct nouveau_drm *drm) | ||
1162 | priv->base.context_base = fence_context_alloc(priv->base.contexts); | ||
1163 | priv->base.uevent = true; | ||
1164 | |||
1165 | + mutex_init(&priv->mutex); | ||
1166 | + | ||
1167 | /* Use VRAM if there is any ; otherwise fallback to system memory */ | ||
1168 | domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM : | ||
1169 | /* | ||
1170 | diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c | ||
1171 | index e34d82e79b98..c21ca7bf2efe 100644 | ||
1172 | --- a/drivers/i2c/busses/i2c-piix4.c | ||
1173 | +++ b/drivers/i2c/busses/i2c-piix4.c | ||
1174 | @@ -58,7 +58,7 @@ | ||
1175 | #define SMBSLVDAT (0xC + piix4_smba) | ||
1176 | |||
1177 | /* count for request_region */ | ||
1178 | -#define SMBIOSIZE 8 | ||
1179 | +#define SMBIOSIZE 9 | ||
1180 | |||
1181 | /* PCI Address Constants */ | ||
1182 | #define SMBBA 0x090 | ||
1183 | @@ -592,6 +592,8 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, | ||
1184 | u8 port; | ||
1185 | int retval; | ||
1186 | |||
1187 | + mutex_lock(&piix4_mutex_sb800); | ||
1188 | + | ||
1189 | /* Request the SMBUS semaphore, avoid conflicts with the IMC */ | ||
1190 | smbslvcnt = inb_p(SMBSLVCNT); | ||
1191 | do { | ||
1192 | @@ -605,10 +607,10 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, | ||
1193 | usleep_range(1000, 2000); | ||
1194 | } while (--retries); | ||
1195 | /* SMBus is still owned by the IMC, we give up */ | ||
1196 | - if (!retries) | ||
1197 | + if (!retries) { | ||
1198 | + mutex_unlock(&piix4_mutex_sb800); | ||
1199 | return -EBUSY; | ||
1200 | - | ||
1201 | - mutex_lock(&piix4_mutex_sb800); | ||
1202 | + } | ||
1203 | |||
1204 | outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX); | ||
1205 | smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1); | ||
1206 | @@ -623,11 +625,11 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, | ||
1207 | |||
1208 | outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1); | ||
1209 | |||
1210 | - mutex_unlock(&piix4_mutex_sb800); | ||
1211 | - | ||
1212 | /* Release the semaphore */ | ||
1213 | outb_p(smbslvcnt | 0x20, SMBSLVCNT); | ||
1214 | |||
1215 | + mutex_unlock(&piix4_mutex_sb800); | ||
1216 | + | ||
1217 | return retval; | ||
1218 | } | ||
1219 | |||
1220 | diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c | ||
1221 | index 7b74d09a8217..58e92bce6825 100644 | ||
1222 | --- a/drivers/infiniband/hw/qedr/main.c | ||
1223 | +++ b/drivers/infiniband/hw/qedr/main.c | ||
1224 | @@ -792,6 +792,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev, | ||
1225 | if (device_create_file(&dev->ibdev.dev, qedr_attributes[i])) | ||
1226 | goto sysfs_err; | ||
1227 | |||
1228 | + if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) | ||
1229 | + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE); | ||
1230 | + | ||
1231 | DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n"); | ||
1232 | return dev; | ||
1233 | |||
1234 | @@ -824,11 +827,10 @@ static void qedr_remove(struct qedr_dev *dev) | ||
1235 | ib_dealloc_device(&dev->ibdev); | ||
1236 | } | ||
1237 | |||
1238 | -static int qedr_close(struct qedr_dev *dev) | ||
1239 | +static void qedr_close(struct qedr_dev *dev) | ||
1240 | { | ||
1241 | - qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR); | ||
1242 | - | ||
1243 | - return 0; | ||
1244 | + if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) | ||
1245 | + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR); | ||
1246 | } | ||
1247 | |||
1248 | static void qedr_shutdown(struct qedr_dev *dev) | ||
1249 | @@ -837,6 +839,12 @@ static void qedr_shutdown(struct qedr_dev *dev) | ||
1250 | qedr_remove(dev); | ||
1251 | } | ||
1252 | |||
1253 | +static void qedr_open(struct qedr_dev *dev) | ||
1254 | +{ | ||
1255 | + if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) | ||
1256 | + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE); | ||
1257 | +} | ||
1258 | + | ||
1259 | static void qedr_mac_address_change(struct qedr_dev *dev) | ||
1260 | { | ||
1261 | union ib_gid *sgid = &dev->sgid_tbl[0]; | ||
1262 | @@ -863,7 +871,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev) | ||
1263 | |||
1264 | ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); | ||
1265 | |||
1266 | - qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE); | ||
1267 | + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE); | ||
1268 | |||
1269 | if (rc) | ||
1270 | DP_ERR(dev, "Error updating mac filter\n"); | ||
1271 | @@ -877,7 +885,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event) | ||
1272 | { | ||
1273 | switch (event) { | ||
1274 | case QEDE_UP: | ||
1275 | - qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); | ||
1276 | + qedr_open(dev); | ||
1277 | break; | ||
1278 | case QEDE_DOWN: | ||
1279 | qedr_close(dev); | ||
1280 | diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h | ||
1281 | index 620badd7d4fb..f669d0bb697e 100644 | ||
1282 | --- a/drivers/infiniband/hw/qedr/qedr.h | ||
1283 | +++ b/drivers/infiniband/hw/qedr/qedr.h | ||
1284 | @@ -113,6 +113,8 @@ struct qedr_device_attr { | ||
1285 | struct qed_rdma_events events; | ||
1286 | }; | ||
1287 | |||
1288 | +#define QEDR_ENET_STATE_BIT (0) | ||
1289 | + | ||
1290 | struct qedr_dev { | ||
1291 | struct ib_device ibdev; | ||
1292 | struct qed_dev *cdev; | ||
1293 | @@ -153,6 +155,8 @@ struct qedr_dev { | ||
1294 | struct qedr_cq *gsi_sqcq; | ||
1295 | struct qedr_cq *gsi_rqcq; | ||
1296 | struct qedr_qp *gsi_qp; | ||
1297 | + | ||
1298 | + unsigned long enet_state; | ||
1299 | }; | ||
1300 | |||
1301 | #define QEDR_MAX_SQ_PBL (0x8000) | ||
1302 | @@ -188,6 +192,7 @@ struct qedr_dev { | ||
1303 | #define QEDR_ROCE_MAX_CNQ_SIZE (0x4000) | ||
1304 | |||
1305 | #define QEDR_MAX_PORT (1) | ||
1306 | +#define QEDR_PORT (1) | ||
1307 | |||
1308 | #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) | ||
1309 | |||
1310 | diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c | ||
1311 | index a61514296767..4ba019e3dc56 100644 | ||
1312 | --- a/drivers/infiniband/hw/qedr/verbs.c | ||
1313 | +++ b/drivers/infiniband/hw/qedr/verbs.c | ||
1314 | @@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, | ||
1315 | struct ib_ucontext *context, struct ib_udata *udata) | ||
1316 | { | ||
1317 | struct qedr_dev *dev = get_qedr_dev(ibdev); | ||
1318 | - struct qedr_ucontext *uctx = NULL; | ||
1319 | - struct qedr_alloc_pd_uresp uresp; | ||
1320 | struct qedr_pd *pd; | ||
1321 | u16 pd_id; | ||
1322 | int rc; | ||
1323 | @@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, | ||
1324 | if (!pd) | ||
1325 | return ERR_PTR(-ENOMEM); | ||
1326 | |||
1327 | - dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); | ||
1328 | + rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); | ||
1329 | + if (rc) | ||
1330 | + goto err; | ||
1331 | |||
1332 | - uresp.pd_id = pd_id; | ||
1333 | pd->pd_id = pd_id; | ||
1334 | |||
1335 | if (udata && context) { | ||
1336 | + struct qedr_alloc_pd_uresp uresp; | ||
1337 | + | ||
1338 | + uresp.pd_id = pd_id; | ||
1339 | + | ||
1340 | rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); | ||
1341 | - if (rc) | ||
1342 | + if (rc) { | ||
1343 | DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id); | ||
1344 | - uctx = get_qedr_ucontext(context); | ||
1345 | - uctx->pd = pd; | ||
1346 | - pd->uctx = uctx; | ||
1347 | + dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id); | ||
1348 | + goto err; | ||
1349 | + } | ||
1350 | + | ||
1351 | + pd->uctx = get_qedr_ucontext(context); | ||
1352 | + pd->uctx->pd = pd; | ||
1353 | } | ||
1354 | |||
1355 | return &pd->ibpd; | ||
1356 | + | ||
1357 | +err: | ||
1358 | + kfree(pd); | ||
1359 | + return ERR_PTR(rc); | ||
1360 | } | ||
1361 | |||
1362 | int qedr_dealloc_pd(struct ib_pd *ibpd) | ||
1363 | @@ -1719,6 +1729,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev, | ||
1364 | /* ERR->XXX */ | ||
1365 | switch (new_state) { | ||
1366 | case QED_ROCE_QP_STATE_RESET: | ||
1367 | + if ((qp->rq.prod != qp->rq.cons) || | ||
1368 | + (qp->sq.prod != qp->sq.cons)) { | ||
1369 | + DP_NOTICE(dev, | ||
1370 | + "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n", | ||
1371 | + qp->rq.prod, qp->rq.cons, qp->sq.prod, | ||
1372 | + qp->sq.cons); | ||
1373 | + status = -EINVAL; | ||
1374 | + } | ||
1375 | break; | ||
1376 | default: | ||
1377 | status = -EINVAL; | ||
1378 | @@ -2014,7 +2032,7 @@ int qedr_query_qp(struct ib_qp *ibqp, | ||
1379 | qp_attr->cap.max_recv_wr = qp->rq.max_wr; | ||
1380 | qp_attr->cap.max_send_sge = qp->sq.max_sges; | ||
1381 | qp_attr->cap.max_recv_sge = qp->rq.max_sges; | ||
1382 | - qp_attr->cap.max_inline_data = qp->max_inline_data; | ||
1383 | + qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE; | ||
1384 | qp_init_attr->cap = qp_attr->cap; | ||
1385 | |||
1386 | memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], ¶ms.dgid.bytes[0], | ||
1387 | @@ -3220,9 +3238,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev, | ||
1388 | IB_WC_SUCCESS, 0); | ||
1389 | break; | ||
1390 | case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR: | ||
1391 | - DP_ERR(dev, | ||
1392 | - "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n", | ||
1393 | - cq->icid, qp->icid); | ||
1394 | + if (qp->state != QED_ROCE_QP_STATE_ERR) | ||
1395 | + DP_ERR(dev, | ||
1396 | + "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n", | ||
1397 | + cq->icid, qp->icid); | ||
1398 | cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons, | ||
1399 | IB_WC_WR_FLUSH_ERR, 0); | ||
1400 | break; | ||
1401 | diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c | ||
1402 | index bb3ac5fe5846..72a391e01011 100644 | ||
1403 | --- a/drivers/irqchip/irq-xtensa-mx.c | ||
1404 | +++ b/drivers/irqchip/irq-xtensa-mx.c | ||
1405 | @@ -142,7 +142,7 @@ static struct irq_chip xtensa_mx_irq_chip = { | ||
1406 | int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent) | ||
1407 | { | ||
1408 | struct irq_domain *root_domain = | ||
1409 | - irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, | ||
1410 | + irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, | ||
1411 | &xtensa_mx_irq_domain_ops, | ||
1412 | &xtensa_mx_irq_chip); | ||
1413 | irq_set_default_host(root_domain); | ||
1414 | diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c | ||
1415 | index 472ae1770964..f728755fa292 100644 | ||
1416 | --- a/drivers/irqchip/irq-xtensa-pic.c | ||
1417 | +++ b/drivers/irqchip/irq-xtensa-pic.c | ||
1418 | @@ -89,7 +89,7 @@ static struct irq_chip xtensa_irq_chip = { | ||
1419 | int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent) | ||
1420 | { | ||
1421 | struct irq_domain *root_domain = | ||
1422 | - irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, | ||
1423 | + irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, | ||
1424 | &xtensa_irq_domain_ops, &xtensa_irq_chip); | ||
1425 | irq_set_default_host(root_domain); | ||
1426 | return 0; | ||
1427 | diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c | ||
1428 | index 8af2c88d5b33..45bb0fe50917 100644 | ||
1429 | --- a/drivers/net/ethernet/adaptec/starfire.c | ||
1430 | +++ b/drivers/net/ethernet/adaptec/starfire.c | ||
1431 | @@ -1153,6 +1153,12 @@ static void init_ring(struct net_device *dev) | ||
1432 | if (skb == NULL) | ||
1433 | break; | ||
1434 | np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
1435 | + if (pci_dma_mapping_error(np->pci_dev, | ||
1436 | + np->rx_info[i].mapping)) { | ||
1437 | + dev_kfree_skb(skb); | ||
1438 | + np->rx_info[i].skb = NULL; | ||
1439 | + break; | ||
1440 | + } | ||
1441 | /* Grrr, we cannot offset to correctly align the IP header. */ | ||
1442 | np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid); | ||
1443 | } | ||
1444 | @@ -1183,8 +1189,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) | ||
1445 | { | ||
1446 | struct netdev_private *np = netdev_priv(dev); | ||
1447 | unsigned int entry; | ||
1448 | + unsigned int prev_tx; | ||
1449 | u32 status; | ||
1450 | - int i; | ||
1451 | + int i, j; | ||
1452 | |||
1453 | /* | ||
1454 | * be cautious here, wrapping the queue has weird semantics | ||
1455 | @@ -1202,6 +1209,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) | ||
1456 | } | ||
1457 | #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */ | ||
1458 | |||
1459 | + prev_tx = np->cur_tx; | ||
1460 | entry = np->cur_tx % TX_RING_SIZE; | ||
1461 | for (i = 0; i < skb_num_frags(skb); i++) { | ||
1462 | int wrap_ring = 0; | ||
1463 | @@ -1235,6 +1243,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) | ||
1464 | skb_frag_size(this_frag), | ||
1465 | PCI_DMA_TODEVICE); | ||
1466 | } | ||
1467 | + if (pci_dma_mapping_error(np->pci_dev, | ||
1468 | + np->tx_info[entry].mapping)) { | ||
1469 | + dev->stats.tx_dropped++; | ||
1470 | + goto err_out; | ||
1471 | + } | ||
1472 | |||
1473 | np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping); | ||
1474 | np->tx_ring[entry].status = cpu_to_le32(status); | ||
1475 | @@ -1269,8 +1282,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) | ||
1476 | netif_stop_queue(dev); | ||
1477 | |||
1478 | return NETDEV_TX_OK; | ||
1479 | -} | ||
1480 | |||
1481 | +err_out: | ||
1482 | + entry = prev_tx % TX_RING_SIZE; | ||
1483 | + np->tx_info[entry].skb = NULL; | ||
1484 | + if (i > 0) { | ||
1485 | + pci_unmap_single(np->pci_dev, | ||
1486 | + np->tx_info[entry].mapping, | ||
1487 | + skb_first_frag_len(skb), | ||
1488 | + PCI_DMA_TODEVICE); | ||
1489 | + np->tx_info[entry].mapping = 0; | ||
1490 | + entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE; | ||
1491 | + for (j = 1; j < i; j++) { | ||
1492 | + pci_unmap_single(np->pci_dev, | ||
1493 | + np->tx_info[entry].mapping, | ||
1494 | + skb_frag_size( | ||
1495 | + &skb_shinfo(skb)->frags[j-1]), | ||
1496 | + PCI_DMA_TODEVICE); | ||
1497 | + entry++; | ||
1498 | + } | ||
1499 | + } | ||
1500 | + dev_kfree_skb_any(skb); | ||
1501 | + np->cur_tx = prev_tx; | ||
1502 | + return NETDEV_TX_OK; | ||
1503 | +} | ||
1504 | |||
1505 | /* The interrupt handler does all of the Rx thread work and cleans up | ||
1506 | after the Tx thread. */ | ||
1507 | @@ -1570,6 +1605,12 @@ static void refill_rx_ring(struct net_device *dev) | ||
1508 | break; /* Better luck next round. */ | ||
1509 | np->rx_info[entry].mapping = | ||
1510 | pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
1511 | + if (pci_dma_mapping_error(np->pci_dev, | ||
1512 | + np->rx_info[entry].mapping)) { | ||
1513 | + dev_kfree_skb(skb); | ||
1514 | + np->rx_info[entry].skb = NULL; | ||
1515 | + break; | ||
1516 | + } | ||
1517 | np->rx_ring[entry].rxaddr = | ||
1518 | cpu_to_dma(np->rx_info[entry].mapping | RxDescValid); | ||
1519 | } | ||
1520 | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | ||
1521 | index 48ee4110ef6e..5cc0f8cfec87 100644 | ||
1522 | --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c | ||
1523 | +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | ||
1524 | @@ -1499,6 +1499,7 @@ static int bnxt_async_event_process(struct bnxt *bp, | ||
1525 | netdev_warn(bp->dev, "Link speed %d no longer supported\n", | ||
1526 | speed); | ||
1527 | } | ||
1528 | + set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); | ||
1529 | /* fall thru */ | ||
1530 | } | ||
1531 | case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: | ||
1532 | @@ -5110,6 +5111,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) | ||
1533 | struct hwrm_port_phy_qcfg_input req = {0}; | ||
1534 | struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; | ||
1535 | u8 link_up = link_info->link_up; | ||
1536 | + u16 diff; | ||
1537 | |||
1538 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); | ||
1539 | |||
1540 | @@ -5197,6 +5199,18 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) | ||
1541 | link_info->link_up = 0; | ||
1542 | } | ||
1543 | mutex_unlock(&bp->hwrm_cmd_lock); | ||
1544 | + | ||
1545 | + diff = link_info->support_auto_speeds ^ link_info->advertising; | ||
1546 | + if ((link_info->support_auto_speeds | diff) != | ||
1547 | + link_info->support_auto_speeds) { | ||
1548 | + /* An advertised speed is no longer supported, so we need to | ||
1549 | + * update the advertisement settings. Caller holds RTNL | ||
1550 | + * so we can modify link settings. | ||
1551 | + */ | ||
1552 | + link_info->advertising = link_info->support_auto_speeds; | ||
1553 | + if (link_info->autoneg & BNXT_AUTONEG_SPEED) | ||
1554 | + bnxt_hwrm_set_link_setting(bp, true, false); | ||
1555 | + } | ||
1556 | return 0; | ||
1557 | } | ||
1558 | |||
1559 | @@ -6080,29 +6094,37 @@ static void bnxt_timer(unsigned long data) | ||
1560 | mod_timer(&bp->timer, jiffies + bp->current_interval); | ||
1561 | } | ||
1562 | |||
1563 | -/* Only called from bnxt_sp_task() */ | ||
1564 | -static void bnxt_reset(struct bnxt *bp, bool silent) | ||
1565 | +static void bnxt_rtnl_lock_sp(struct bnxt *bp) | ||
1566 | { | ||
1567 | - /* bnxt_reset_task() calls bnxt_close_nic() which waits | ||
1568 | - * for BNXT_STATE_IN_SP_TASK to clear. | ||
1569 | - * If there is a parallel dev_close(), bnxt_close() may be holding | ||
1570 | + /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK | ||
1571 | + * set. If the device is being closed, bnxt_close() may be holding | ||
1572 | * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we | ||
1573 | * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). | ||
1574 | */ | ||
1575 | clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); | ||
1576 | rtnl_lock(); | ||
1577 | - if (test_bit(BNXT_STATE_OPEN, &bp->state)) | ||
1578 | - bnxt_reset_task(bp, silent); | ||
1579 | +} | ||
1580 | + | ||
1581 | +static void bnxt_rtnl_unlock_sp(struct bnxt *bp) | ||
1582 | +{ | ||
1583 | set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); | ||
1584 | rtnl_unlock(); | ||
1585 | } | ||
1586 | |||
1587 | +/* Only called from bnxt_sp_task() */ | ||
1588 | +static void bnxt_reset(struct bnxt *bp, bool silent) | ||
1589 | +{ | ||
1590 | + bnxt_rtnl_lock_sp(bp); | ||
1591 | + if (test_bit(BNXT_STATE_OPEN, &bp->state)) | ||
1592 | + bnxt_reset_task(bp, silent); | ||
1593 | + bnxt_rtnl_unlock_sp(bp); | ||
1594 | +} | ||
1595 | + | ||
1596 | static void bnxt_cfg_ntp_filters(struct bnxt *); | ||
1597 | |||
1598 | static void bnxt_sp_task(struct work_struct *work) | ||
1599 | { | ||
1600 | struct bnxt *bp = container_of(work, struct bnxt, sp_task); | ||
1601 | - int rc; | ||
1602 | |||
1603 | set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); | ||
1604 | smp_mb__after_atomic(); | ||
1605 | @@ -6116,12 +6138,6 @@ static void bnxt_sp_task(struct work_struct *work) | ||
1606 | |||
1607 | if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) | ||
1608 | bnxt_cfg_ntp_filters(bp); | ||
1609 | - if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { | ||
1610 | - rc = bnxt_update_link(bp, true); | ||
1611 | - if (rc) | ||
1612 | - netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", | ||
1613 | - rc); | ||
1614 | - } | ||
1615 | if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) | ||
1616 | bnxt_hwrm_exec_fwd_req(bp); | ||
1617 | if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { | ||
1618 | @@ -6142,18 +6158,39 @@ static void bnxt_sp_task(struct work_struct *work) | ||
1619 | bnxt_hwrm_tunnel_dst_port_free( | ||
1620 | bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); | ||
1621 | } | ||
1622 | + if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) | ||
1623 | + bnxt_hwrm_port_qstats(bp); | ||
1624 | + | ||
1625 | + /* These functions below will clear BNXT_STATE_IN_SP_TASK. They | ||
1626 | + * must be the last functions to be called before exiting. | ||
1627 | + */ | ||
1628 | + if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { | ||
1629 | + int rc = 0; | ||
1630 | + | ||
1631 | + if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, | ||
1632 | + &bp->sp_event)) | ||
1633 | + bnxt_hwrm_phy_qcaps(bp); | ||
1634 | + | ||
1635 | + bnxt_rtnl_lock_sp(bp); | ||
1636 | + if (test_bit(BNXT_STATE_OPEN, &bp->state)) | ||
1637 | + rc = bnxt_update_link(bp, true); | ||
1638 | + bnxt_rtnl_unlock_sp(bp); | ||
1639 | + if (rc) | ||
1640 | + netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", | ||
1641 | + rc); | ||
1642 | + } | ||
1643 | + if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { | ||
1644 | + bnxt_rtnl_lock_sp(bp); | ||
1645 | + if (test_bit(BNXT_STATE_OPEN, &bp->state)) | ||
1646 | + bnxt_get_port_module_status(bp); | ||
1647 | + bnxt_rtnl_unlock_sp(bp); | ||
1648 | + } | ||
1649 | if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) | ||
1650 | bnxt_reset(bp, false); | ||
1651 | |||
1652 | if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) | ||
1653 | bnxt_reset(bp, true); | ||
1654 | |||
1655 | - if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) | ||
1656 | - bnxt_get_port_module_status(bp); | ||
1657 | - | ||
1658 | - if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) | ||
1659 | - bnxt_hwrm_port_qstats(bp); | ||
1660 | - | ||
1661 | smp_mb__before_atomic(); | ||
1662 | clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); | ||
1663 | } | ||
1664 | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h | ||
1665 | index 51b164a0e844..666bc0608ed7 100644 | ||
1666 | --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h | ||
1667 | +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h | ||
1668 | @@ -1089,6 +1089,7 @@ struct bnxt { | ||
1669 | #define BNXT_RESET_TASK_SILENT_SP_EVENT 11 | ||
1670 | #define BNXT_GENEVE_ADD_PORT_SP_EVENT 12 | ||
1671 | #define BNXT_GENEVE_DEL_PORT_SP_EVENT 13 | ||
1672 | +#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14 | ||
1673 | |||
1674 | struct bnxt_pf_info pf; | ||
1675 | #ifdef CONFIG_BNXT_SRIOV | ||
1676 | diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c | ||
1677 | index 050e21fbb147..679679a4ccb2 100644 | ||
1678 | --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c | ||
1679 | +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c | ||
1680 | @@ -31,6 +31,7 @@ struct lmac { | ||
1681 | u8 lmac_type; | ||
1682 | u8 lane_to_sds; | ||
1683 | bool use_training; | ||
1684 | + bool autoneg; | ||
1685 | bool link_up; | ||
1686 | int lmacid; /* ID within BGX */ | ||
1687 | int lmacid_bd; /* ID on board */ | ||
1688 | @@ -418,7 +419,17 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac) | ||
1689 | /* power down, reset autoneg, autoneg enable */ | ||
1690 | cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL); | ||
1691 | cfg &= ~PCS_MRX_CTL_PWR_DN; | ||
1692 | - cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN); | ||
1693 | + cfg |= PCS_MRX_CTL_RST_AN; | ||
1694 | + if (lmac->phydev) { | ||
1695 | + cfg |= PCS_MRX_CTL_AN_EN; | ||
1696 | + } else { | ||
1697 | + /* In scenarios where PHY driver is not present or it's a | ||
1698 | + * non-standard PHY, FW sets AN_EN to inform Linux driver | ||
1699 | + * to do auto-neg and link polling or not. | ||
1700 | + */ | ||
1701 | + if (cfg & PCS_MRX_CTL_AN_EN) | ||
1702 | + lmac->autoneg = true; | ||
1703 | + } | ||
1704 | bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg); | ||
1705 | |||
1706 | if (lmac->lmac_type == BGX_MODE_QSGMII) { | ||
1707 | @@ -429,7 +440,7 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac) | ||
1708 | return 0; | ||
1709 | } | ||
1710 | |||
1711 | - if (lmac->lmac_type == BGX_MODE_SGMII) { | ||
1712 | + if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) { | ||
1713 | if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS, | ||
1714 | PCS_MRX_STATUS_AN_CPT, false)) { | ||
1715 | dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n"); | ||
1716 | @@ -623,12 +634,71 @@ static int bgx_xaui_check_link(struct lmac *lmac) | ||
1717 | return -1; | ||
1718 | } | ||
1719 | |||
1720 | +static void bgx_poll_for_sgmii_link(struct lmac *lmac) | ||
1721 | +{ | ||
1722 | + u64 pcs_link, an_result; | ||
1723 | + u8 speed; | ||
1724 | + | ||
1725 | + pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid, | ||
1726 | + BGX_GMP_PCS_MRX_STATUS); | ||
1727 | + | ||
1728 | + /*Link state bit is sticky, read it again*/ | ||
1729 | + if (!(pcs_link & PCS_MRX_STATUS_LINK)) | ||
1730 | + pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid, | ||
1731 | + BGX_GMP_PCS_MRX_STATUS); | ||
1732 | + | ||
1733 | + if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS, | ||
1734 | + PCS_MRX_STATUS_AN_CPT, false)) { | ||
1735 | + lmac->link_up = false; | ||
1736 | + lmac->last_speed = SPEED_UNKNOWN; | ||
1737 | + lmac->last_duplex = DUPLEX_UNKNOWN; | ||
1738 | + goto next_poll; | ||
1739 | + } | ||
1740 | + | ||
1741 | + lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? true : false; | ||
1742 | + an_result = bgx_reg_read(lmac->bgx, lmac->lmacid, | ||
1743 | + BGX_GMP_PCS_ANX_AN_RESULTS); | ||
1744 | + | ||
1745 | + speed = (an_result >> 3) & 0x3; | ||
1746 | + lmac->last_duplex = (an_result >> 1) & 0x1; | ||
1747 | + switch (speed) { | ||
1748 | + case 0: | ||
1749 | + lmac->last_speed = 10; | ||
1750 | + break; | ||
1751 | + case 1: | ||
1752 | + lmac->last_speed = 100; | ||
1753 | + break; | ||
1754 | + case 2: | ||
1755 | + lmac->last_speed = 1000; | ||
1756 | + break; | ||
1757 | + default: | ||
1758 | + lmac->link_up = false; | ||
1759 | + lmac->last_speed = SPEED_UNKNOWN; | ||
1760 | + lmac->last_duplex = DUPLEX_UNKNOWN; | ||
1761 | + break; | ||
1762 | + } | ||
1763 | + | ||
1764 | +next_poll: | ||
1765 | + | ||
1766 | + if (lmac->last_link != lmac->link_up) { | ||
1767 | + if (lmac->link_up) | ||
1768 | + bgx_sgmii_change_link_state(lmac); | ||
1769 | + lmac->last_link = lmac->link_up; | ||
1770 | + } | ||
1771 | + | ||
1772 | + queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3); | ||
1773 | +} | ||
1774 | + | ||
1775 | static void bgx_poll_for_link(struct work_struct *work) | ||
1776 | { | ||
1777 | struct lmac *lmac; | ||
1778 | u64 spu_link, smu_link; | ||
1779 | |||
1780 | lmac = container_of(work, struct lmac, dwork.work); | ||
1781 | + if (lmac->is_sgmii) { | ||
1782 | + bgx_poll_for_sgmii_link(lmac); | ||
1783 | + return; | ||
1784 | + } | ||
1785 | |||
1786 | /* Receive link is latching low. Force it high and verify it */ | ||
1787 | bgx_reg_modify(lmac->bgx, lmac->lmacid, | ||
1788 | @@ -720,9 +790,21 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) | ||
1789 | (lmac->lmac_type != BGX_MODE_XLAUI) && | ||
1790 | (lmac->lmac_type != BGX_MODE_40G_KR) && | ||
1791 | (lmac->lmac_type != BGX_MODE_10G_KR)) { | ||
1792 | - if (!lmac->phydev) | ||
1793 | - return -ENODEV; | ||
1794 | - | ||
1795 | + if (!lmac->phydev) { | ||
1796 | + if (lmac->autoneg) { | ||
1797 | + bgx_reg_write(bgx, lmacid, | ||
1798 | + BGX_GMP_PCS_LINKX_TIMER, | ||
1799 | + PCS_LINKX_TIMER_COUNT); | ||
1800 | + goto poll; | ||
1801 | + } else { | ||
1802 | + /* Default to below link speed and duplex */ | ||
1803 | + lmac->link_up = true; | ||
1804 | + lmac->last_speed = 1000; | ||
1805 | + lmac->last_duplex = 1; | ||
1806 | + bgx_sgmii_change_link_state(lmac); | ||
1807 | + return 0; | ||
1808 | + } | ||
1809 | + } | ||
1810 | lmac->phydev->dev_flags = 0; | ||
1811 | |||
1812 | if (phy_connect_direct(&lmac->netdev, lmac->phydev, | ||
1813 | @@ -731,15 +813,17 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) | ||
1814 | return -ENODEV; | ||
1815 | |||
1816 | phy_start_aneg(lmac->phydev); | ||
1817 | - } else { | ||
1818 | - lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND | | ||
1819 | - WQ_MEM_RECLAIM, 1); | ||
1820 | - if (!lmac->check_link) | ||
1821 | - return -ENOMEM; | ||
1822 | - INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link); | ||
1823 | - queue_delayed_work(lmac->check_link, &lmac->dwork, 0); | ||
1824 | + return 0; | ||
1825 | } | ||
1826 | |||
1827 | +poll: | ||
1828 | + lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND | | ||
1829 | + WQ_MEM_RECLAIM, 1); | ||
1830 | + if (!lmac->check_link) | ||
1831 | + return -ENOMEM; | ||
1832 | + INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link); | ||
1833 | + queue_delayed_work(lmac->check_link, &lmac->dwork, 0); | ||
1834 | + | ||
1835 | return 0; | ||
1836 | } | ||
1837 | |||
1838 | diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h | ||
1839 | index 01cc7c859131..1143e9575e53 100644 | ||
1840 | --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h | ||
1841 | +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h | ||
1842 | @@ -144,10 +144,15 @@ | ||
1843 | #define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14) | ||
1844 | #define PCS_MRX_CTL_RESET BIT_ULL(15) | ||
1845 | #define BGX_GMP_PCS_MRX_STATUS 0x30008 | ||
1846 | +#define PCS_MRX_STATUS_LINK BIT_ULL(2) | ||
1847 | #define PCS_MRX_STATUS_AN_CPT BIT_ULL(5) | ||
1848 | +#define BGX_GMP_PCS_ANX_ADV 0x30010 | ||
1849 | #define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020 | ||
1850 | +#define BGX_GMP_PCS_LINKX_TIMER 0x30040 | ||
1851 | +#define PCS_LINKX_TIMER_COUNT 0x1E84 | ||
1852 | #define BGX_GMP_PCS_SGM_AN_ADV 0x30068 | ||
1853 | #define BGX_GMP_PCS_MISCX_CTL 0x30078 | ||
1854 | +#define PCS_MISC_CTL_MODE BIT_ULL(8) | ||
1855 | #define PCS_MISC_CTL_DISP_EN BIT_ULL(13) | ||
1856 | #define PCS_MISC_CTL_GMX_ENO BIT_ULL(11) | ||
1857 | #define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full | ||
1858 | diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c | ||
1859 | index 9061c2f82b9c..d391beebe591 100644 | ||
1860 | --- a/drivers/net/ethernet/freescale/gianfar.c | ||
1861 | +++ b/drivers/net/ethernet/freescale/gianfar.c | ||
1862 | @@ -2007,8 +2007,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) | ||
1863 | if (!rxb->page) | ||
1864 | continue; | ||
1865 | |||
1866 | - dma_unmap_single(rx_queue->dev, rxb->dma, | ||
1867 | - PAGE_SIZE, DMA_FROM_DEVICE); | ||
1868 | + dma_unmap_page(rx_queue->dev, rxb->dma, | ||
1869 | + PAGE_SIZE, DMA_FROM_DEVICE); | ||
1870 | __free_page(rxb->page); | ||
1871 | |||
1872 | rxb->page = NULL; | ||
1873 | diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c | ||
1874 | index dff7b60345d8..c06845b7b666 100644 | ||
1875 | --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c | ||
1876 | +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c | ||
1877 | @@ -304,8 +304,8 @@ int hns_nic_net_xmit_hw(struct net_device *ndev, | ||
1878 | struct hns_nic_ring_data *ring_data) | ||
1879 | { | ||
1880 | struct hns_nic_priv *priv = netdev_priv(ndev); | ||
1881 | - struct device *dev = priv->dev; | ||
1882 | struct hnae_ring *ring = ring_data->ring; | ||
1883 | + struct device *dev = ring_to_dev(ring); | ||
1884 | struct netdev_queue *dev_queue; | ||
1885 | struct skb_frag_struct *frag; | ||
1886 | int buf_num; | ||
1887 | diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c | ||
1888 | index 0fbf686f5e7c..9f2184be55dc 100644 | ||
1889 | --- a/drivers/net/ethernet/ibm/ibmvnic.c | ||
1890 | +++ b/drivers/net/ethernet/ibm/ibmvnic.c | ||
1891 | @@ -189,9 +189,10 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, | ||
1892 | } | ||
1893 | ltb->map_id = adapter->map_id; | ||
1894 | adapter->map_id++; | ||
1895 | + | ||
1896 | + init_completion(&adapter->fw_done); | ||
1897 | send_request_map(adapter, ltb->addr, | ||
1898 | ltb->size, ltb->map_id); | ||
1899 | - init_completion(&adapter->fw_done); | ||
1900 | wait_for_completion(&adapter->fw_done); | ||
1901 | return 0; | ||
1902 | } | ||
1903 | @@ -505,7 +506,7 @@ static int ibmvnic_open(struct net_device *netdev) | ||
1904 | adapter->rx_pool = NULL; | ||
1905 | rx_pool_arr_alloc_failed: | ||
1906 | for (i = 0; i < adapter->req_rx_queues; i++) | ||
1907 | - napi_enable(&adapter->napi[i]); | ||
1908 | + napi_disable(&adapter->napi[i]); | ||
1909 | alloc_napi_failed: | ||
1910 | return -ENOMEM; | ||
1911 | } | ||
1912 | @@ -1133,10 +1134,10 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, | ||
1913 | crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); | ||
1914 | crq.request_statistics.len = | ||
1915 | cpu_to_be32(sizeof(struct ibmvnic_statistics)); | ||
1916 | - ibmvnic_send_crq(adapter, &crq); | ||
1917 | |||
1918 | /* Wait for data to be written */ | ||
1919 | init_completion(&adapter->stats_done); | ||
1920 | + ibmvnic_send_crq(adapter, &crq); | ||
1921 | wait_for_completion(&adapter->stats_done); | ||
1922 | |||
1923 | for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) | ||
1924 | @@ -2197,12 +2198,12 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq, | ||
1925 | |||
1926 | if (!found) { | ||
1927 | dev_err(dev, "Couldn't find error id %x\n", | ||
1928 | - crq->request_error_rsp.error_id); | ||
1929 | + be32_to_cpu(crq->request_error_rsp.error_id)); | ||
1930 | return; | ||
1931 | } | ||
1932 | |||
1933 | dev_err(dev, "Detailed info for error id %x:", | ||
1934 | - crq->request_error_rsp.error_id); | ||
1935 | + be32_to_cpu(crq->request_error_rsp.error_id)); | ||
1936 | |||
1937 | for (i = 0; i < error_buff->len; i++) { | ||
1938 | pr_cont("%02x", (int)error_buff->buff[i]); | ||
1939 | @@ -2281,8 +2282,8 @@ static void handle_error_indication(union ibmvnic_crq *crq, | ||
1940 | dev_err(dev, "Firmware reports %serror id %x, cause %d\n", | ||
1941 | crq->error_indication. | ||
1942 | flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "", | ||
1943 | - crq->error_indication.error_id, | ||
1944 | - crq->error_indication.error_cause); | ||
1945 | + be32_to_cpu(crq->error_indication.error_id), | ||
1946 | + be16_to_cpu(crq->error_indication.error_cause)); | ||
1947 | |||
1948 | error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC); | ||
1949 | if (!error_buff) | ||
1950 | @@ -2400,10 +2401,10 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq, | ||
1951 | case PARTIALSUCCESS: | ||
1952 | dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n", | ||
1953 | *req_value, | ||
1954 | - (long int)be32_to_cpu(crq->request_capability_rsp. | ||
1955 | + (long int)be64_to_cpu(crq->request_capability_rsp. | ||
1956 | number), name); | ||
1957 | release_sub_crqs_no_irqs(adapter); | ||
1958 | - *req_value = be32_to_cpu(crq->request_capability_rsp.number); | ||
1959 | + *req_value = be64_to_cpu(crq->request_capability_rsp.number); | ||
1960 | init_sub_crqs(adapter, 1); | ||
1961 | return; | ||
1962 | default: | ||
1963 | @@ -2809,9 +2810,9 @@ static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len, | ||
1964 | crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator; | ||
1965 | crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok); | ||
1966 | crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size; | ||
1967 | - ibmvnic_send_crq(adapter, &crq); | ||
1968 | |||
1969 | init_completion(&adapter->fw_done); | ||
1970 | + ibmvnic_send_crq(adapter, &crq); | ||
1971 | wait_for_completion(&adapter->fw_done); | ||
1972 | |||
1973 | if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size)) | ||
1974 | @@ -3591,9 +3592,9 @@ static int ibmvnic_dump_show(struct seq_file *seq, void *v) | ||
1975 | memset(&crq, 0, sizeof(crq)); | ||
1976 | crq.request_dump_size.first = IBMVNIC_CRQ_CMD; | ||
1977 | crq.request_dump_size.cmd = REQUEST_DUMP_SIZE; | ||
1978 | - ibmvnic_send_crq(adapter, &crq); | ||
1979 | |||
1980 | init_completion(&adapter->fw_done); | ||
1981 | + ibmvnic_send_crq(adapter, &crq); | ||
1982 | wait_for_completion(&adapter->fw_done); | ||
1983 | |||
1984 | seq_write(seq, adapter->dump_data, adapter->dump_data_size); | ||
1985 | @@ -3639,8 +3640,8 @@ static void handle_crq_init_rsp(struct work_struct *work) | ||
1986 | } | ||
1987 | } | ||
1988 | |||
1989 | - send_version_xchg(adapter); | ||
1990 | reinit_completion(&adapter->init_done); | ||
1991 | + send_version_xchg(adapter); | ||
1992 | if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { | ||
1993 | dev_err(dev, "Passive init timeout\n"); | ||
1994 | goto task_failed; | ||
1995 | @@ -3650,9 +3651,9 @@ static void handle_crq_init_rsp(struct work_struct *work) | ||
1996 | if (adapter->renegotiate) { | ||
1997 | adapter->renegotiate = false; | ||
1998 | release_sub_crqs_no_irqs(adapter); | ||
1999 | - send_cap_queries(adapter); | ||
2000 | |||
2001 | reinit_completion(&adapter->init_done); | ||
2002 | + send_cap_queries(adapter); | ||
2003 | if (!wait_for_completion_timeout(&adapter->init_done, | ||
2004 | timeout)) { | ||
2005 | dev_err(dev, "Passive init timeout\n"); | ||
2006 | @@ -3780,9 +3781,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) | ||
2007 | adapter->debugfs_dump = ent; | ||
2008 | } | ||
2009 | } | ||
2010 | - ibmvnic_send_crq_init(adapter); | ||
2011 | |||
2012 | init_completion(&adapter->init_done); | ||
2013 | + ibmvnic_send_crq_init(adapter); | ||
2014 | if (!wait_for_completion_timeout(&adapter->init_done, timeout)) | ||
2015 | return 0; | ||
2016 | |||
2017 | @@ -3790,9 +3791,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) | ||
2018 | if (adapter->renegotiate) { | ||
2019 | adapter->renegotiate = false; | ||
2020 | release_sub_crqs_no_irqs(adapter); | ||
2021 | - send_cap_queries(adapter); | ||
2022 | |||
2023 | reinit_completion(&adapter->init_done); | ||
2024 | + send_cap_queries(adapter); | ||
2025 | if (!wait_for_completion_timeout(&adapter->init_done, | ||
2026 | timeout)) | ||
2027 | return 0; | ||
2028 | diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c | ||
2029 | index 86a89cbd3ec9..4832223f1500 100644 | ||
2030 | --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c | ||
2031 | +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c | ||
2032 | @@ -2518,7 +2518,7 @@ static int mtk_remove(struct platform_device *pdev) | ||
2033 | } | ||
2034 | |||
2035 | const struct of_device_id of_mtk_match[] = { | ||
2036 | - { .compatible = "mediatek,mt7623-eth" }, | ||
2037 | + { .compatible = "mediatek,mt2701-eth" }, | ||
2038 | {}, | ||
2039 | }; | ||
2040 | MODULE_DEVICE_TABLE(of, of_mtk_match); | ||
2041 | diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c | ||
2042 | index c7e939945259..53daa6ca5d83 100644 | ||
2043 | --- a/drivers/net/ethernet/mellanox/mlx4/catas.c | ||
2044 | +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c | ||
2045 | @@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev) | ||
2046 | return -ETIMEDOUT; | ||
2047 | } | ||
2048 | |||
2049 | -static int mlx4_comm_internal_err(u32 slave_read) | ||
2050 | +int mlx4_comm_internal_err(u32 slave_read) | ||
2051 | { | ||
2052 | return (u32)COMM_CHAN_EVENT_INTERNAL_ERR == | ||
2053 | (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0; | ||
2054 | diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c | ||
2055 | index 0e8b7c44931f..8258d08acd8c 100644 | ||
2056 | --- a/drivers/net/ethernet/mellanox/mlx4/intf.c | ||
2057 | +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c | ||
2058 | @@ -222,6 +222,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev) | ||
2059 | return; | ||
2060 | |||
2061 | mlx4_stop_catas_poll(dev); | ||
2062 | + if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION && | ||
2063 | + mlx4_is_slave(dev)) { | ||
2064 | + /* In mlx4_remove_one on a VF */ | ||
2065 | + u32 slave_read = | ||
2066 | + swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read)); | ||
2067 | + | ||
2068 | + if (mlx4_comm_internal_err(slave_read)) { | ||
2069 | + mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n", | ||
2070 | + __func__); | ||
2071 | + mlx4_enter_error_state(dev->persist); | ||
2072 | + } | ||
2073 | + } | ||
2074 | mutex_lock(&intf_mutex); | ||
2075 | |||
2076 | list_for_each_entry(intf, &intf_list, list) | ||
2077 | diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h | ||
2078 | index 88ee7d8a5923..086920b615af 100644 | ||
2079 | --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h | ||
2080 | +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h | ||
2081 | @@ -1220,6 +1220,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type); | ||
2082 | void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); | ||
2083 | |||
2084 | void mlx4_enter_error_state(struct mlx4_dev_persistent *persist); | ||
2085 | +int mlx4_comm_internal_err(u32 slave_read); | ||
2086 | |||
2087 | int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, | ||
2088 | enum mlx4_port_type *type); | ||
2089 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | ||
2090 | index 36fbc6b21a33..8cd7227fbdfc 100644 | ||
2091 | --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | ||
2092 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | ||
2093 | @@ -1081,7 +1081,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv) | ||
2094 | MLX5_FLOW_NAMESPACE_KERNEL); | ||
2095 | |||
2096 | if (!priv->fs.ns) | ||
2097 | - return -EINVAL; | ||
2098 | + return -EOPNOTSUPP; | ||
2099 | |||
2100 | err = mlx5e_arfs_create_tables(priv); | ||
2101 | if (err) { | ||
2102 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | ||
2103 | index c7011ef4e351..a8966e6dbe1b 100644 | ||
2104 | --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | ||
2105 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | ||
2106 | @@ -352,7 +352,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports) | ||
2107 | root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); | ||
2108 | if (!root_ns) { | ||
2109 | esw_warn(dev, "Failed to get FDB flow namespace\n"); | ||
2110 | - return -ENOMEM; | ||
2111 | + return -EOPNOTSUPP; | ||
2112 | } | ||
2113 | |||
2114 | flow_group_in = mlx5_vzalloc(inlen); | ||
2115 | @@ -961,7 +961,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | ||
2116 | root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS); | ||
2117 | if (!root_ns) { | ||
2118 | esw_warn(dev, "Failed to get E-Switch egress flow namespace\n"); | ||
2119 | - return -EIO; | ||
2120 | + return -EOPNOTSUPP; | ||
2121 | } | ||
2122 | |||
2123 | flow_group_in = mlx5_vzalloc(inlen); | ||
2124 | @@ -1078,7 +1078,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | ||
2125 | root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS); | ||
2126 | if (!root_ns) { | ||
2127 | esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n"); | ||
2128 | - return -EIO; | ||
2129 | + return -EOPNOTSUPP; | ||
2130 | } | ||
2131 | |||
2132 | flow_group_in = mlx5_vzalloc(inlen); | ||
2133 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | ||
2134 | index d239f5d0ea36..b08b9e2c6a76 100644 | ||
2135 | --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | ||
2136 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | ||
2137 | @@ -414,6 +414,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) | ||
2138 | root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); | ||
2139 | if (!root_ns) { | ||
2140 | esw_warn(dev, "Failed to get FDB flow namespace\n"); | ||
2141 | + err = -EOPNOTSUPP; | ||
2142 | goto ns_err; | ||
2143 | } | ||
2144 | |||
2145 | @@ -520,7 +521,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw) | ||
2146 | ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); | ||
2147 | if (!ns) { | ||
2148 | esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); | ||
2149 | - return -ENOMEM; | ||
2150 | + return -EOPNOTSUPP; | ||
2151 | } | ||
2152 | |||
2153 | ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0); | ||
2154 | @@ -639,7 +640,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw) | ||
2155 | esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err); | ||
2156 | err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); | ||
2157 | if (err1) | ||
2158 | - esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err); | ||
2159 | + esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1); | ||
2160 | } | ||
2161 | return err; | ||
2162 | } | ||
2163 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | ||
2164 | index 7e20e4bc4cc7..4de3c28b0547 100644 | ||
2165 | --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | ||
2166 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | ||
2167 | @@ -1678,7 +1678,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering) | ||
2168 | struct mlx5_flow_table *ft; | ||
2169 | |||
2170 | ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR); | ||
2171 | - if (!ns) | ||
2172 | + if (WARN_ON(!ns)) | ||
2173 | return -EINVAL; | ||
2174 | ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL); | ||
2175 | if (IS_ERR(ft)) { | ||
2176 | diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c | ||
2177 | index 862f18ed6022..510ff62584d6 100644 | ||
2178 | --- a/drivers/net/ethernet/renesas/ravb_main.c | ||
2179 | +++ b/drivers/net/ethernet/renesas/ravb_main.c | ||
2180 | @@ -179,6 +179,49 @@ static struct mdiobb_ops bb_ops = { | ||
2181 | .get_mdio_data = ravb_get_mdio_data, | ||
2182 | }; | ||
2183 | |||
2184 | +/* Free TX skb function for AVB-IP */ | ||
2185 | +static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) | ||
2186 | +{ | ||
2187 | + struct ravb_private *priv = netdev_priv(ndev); | ||
2188 | + struct net_device_stats *stats = &priv->stats[q]; | ||
2189 | + struct ravb_tx_desc *desc; | ||
2190 | + int free_num = 0; | ||
2191 | + int entry; | ||
2192 | + u32 size; | ||
2193 | + | ||
2194 | + for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { | ||
2195 | + bool txed; | ||
2196 | + | ||
2197 | + entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * | ||
2198 | + NUM_TX_DESC); | ||
2199 | + desc = &priv->tx_ring[q][entry]; | ||
2200 | + txed = desc->die_dt == DT_FEMPTY; | ||
2201 | + if (free_txed_only && !txed) | ||
2202 | + break; | ||
2203 | + /* Descriptor type must be checked before all other reads */ | ||
2204 | + dma_rmb(); | ||
2205 | + size = le16_to_cpu(desc->ds_tagl) & TX_DS; | ||
2206 | + /* Free the original skb. */ | ||
2207 | + if (priv->tx_skb[q][entry / NUM_TX_DESC]) { | ||
2208 | + dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), | ||
2209 | + size, DMA_TO_DEVICE); | ||
2210 | + /* Last packet descriptor? */ | ||
2211 | + if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { | ||
2212 | + entry /= NUM_TX_DESC; | ||
2213 | + dev_kfree_skb_any(priv->tx_skb[q][entry]); | ||
2214 | + priv->tx_skb[q][entry] = NULL; | ||
2215 | + if (txed) | ||
2216 | + stats->tx_packets++; | ||
2217 | + } | ||
2218 | + free_num++; | ||
2219 | + } | ||
2220 | + if (txed) | ||
2221 | + stats->tx_bytes += size; | ||
2222 | + desc->die_dt = DT_EEMPTY; | ||
2223 | + } | ||
2224 | + return free_num; | ||
2225 | +} | ||
2226 | + | ||
2227 | /* Free skb's and DMA buffers for Ethernet AVB */ | ||
2228 | static void ravb_ring_free(struct net_device *ndev, int q) | ||
2229 | { | ||
2230 | @@ -194,19 +237,21 @@ static void ravb_ring_free(struct net_device *ndev, int q) | ||
2231 | kfree(priv->rx_skb[q]); | ||
2232 | priv->rx_skb[q] = NULL; | ||
2233 | |||
2234 | - /* Free TX skb ringbuffer */ | ||
2235 | - if (priv->tx_skb[q]) { | ||
2236 | - for (i = 0; i < priv->num_tx_ring[q]; i++) | ||
2237 | - dev_kfree_skb(priv->tx_skb[q][i]); | ||
2238 | - } | ||
2239 | - kfree(priv->tx_skb[q]); | ||
2240 | - priv->tx_skb[q] = NULL; | ||
2241 | - | ||
2242 | /* Free aligned TX buffers */ | ||
2243 | kfree(priv->tx_align[q]); | ||
2244 | priv->tx_align[q] = NULL; | ||
2245 | |||
2246 | if (priv->rx_ring[q]) { | ||
2247 | + for (i = 0; i < priv->num_rx_ring[q]; i++) { | ||
2248 | + struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; | ||
2249 | + | ||
2250 | + if (!dma_mapping_error(ndev->dev.parent, | ||
2251 | + le32_to_cpu(desc->dptr))) | ||
2252 | + dma_unmap_single(ndev->dev.parent, | ||
2253 | + le32_to_cpu(desc->dptr), | ||
2254 | + PKT_BUF_SZ, | ||
2255 | + DMA_FROM_DEVICE); | ||
2256 | + } | ||
2257 | ring_size = sizeof(struct ravb_ex_rx_desc) * | ||
2258 | (priv->num_rx_ring[q] + 1); | ||
2259 | dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q], | ||
2260 | @@ -215,12 +260,20 @@ static void ravb_ring_free(struct net_device *ndev, int q) | ||
2261 | } | ||
2262 | |||
2263 | if (priv->tx_ring[q]) { | ||
2264 | + ravb_tx_free(ndev, q, false); | ||
2265 | + | ||
2266 | ring_size = sizeof(struct ravb_tx_desc) * | ||
2267 | (priv->num_tx_ring[q] * NUM_TX_DESC + 1); | ||
2268 | dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], | ||
2269 | priv->tx_desc_dma[q]); | ||
2270 | priv->tx_ring[q] = NULL; | ||
2271 | } | ||
2272 | + | ||
2273 | + /* Free TX skb ringbuffer. | ||
2274 | + * SKBs are freed by ravb_tx_free() call above. | ||
2275 | + */ | ||
2276 | + kfree(priv->tx_skb[q]); | ||
2277 | + priv->tx_skb[q] = NULL; | ||
2278 | } | ||
2279 | |||
2280 | /* Format skb and descriptor buffer for Ethernet AVB */ | ||
2281 | @@ -431,44 +484,6 @@ static int ravb_dmac_init(struct net_device *ndev) | ||
2282 | return 0; | ||
2283 | } | ||
2284 | |||
2285 | -/* Free TX skb function for AVB-IP */ | ||
2286 | -static int ravb_tx_free(struct net_device *ndev, int q) | ||
2287 | -{ | ||
2288 | - struct ravb_private *priv = netdev_priv(ndev); | ||
2289 | - struct net_device_stats *stats = &priv->stats[q]; | ||
2290 | - struct ravb_tx_desc *desc; | ||
2291 | - int free_num = 0; | ||
2292 | - int entry; | ||
2293 | - u32 size; | ||
2294 | - | ||
2295 | - for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { | ||
2296 | - entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * | ||
2297 | - NUM_TX_DESC); | ||
2298 | - desc = &priv->tx_ring[q][entry]; | ||
2299 | - if (desc->die_dt != DT_FEMPTY) | ||
2300 | - break; | ||
2301 | - /* Descriptor type must be checked before all other reads */ | ||
2302 | - dma_rmb(); | ||
2303 | - size = le16_to_cpu(desc->ds_tagl) & TX_DS; | ||
2304 | - /* Free the original skb. */ | ||
2305 | - if (priv->tx_skb[q][entry / NUM_TX_DESC]) { | ||
2306 | - dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), | ||
2307 | - size, DMA_TO_DEVICE); | ||
2308 | - /* Last packet descriptor? */ | ||
2309 | - if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { | ||
2310 | - entry /= NUM_TX_DESC; | ||
2311 | - dev_kfree_skb_any(priv->tx_skb[q][entry]); | ||
2312 | - priv->tx_skb[q][entry] = NULL; | ||
2313 | - stats->tx_packets++; | ||
2314 | - } | ||
2315 | - free_num++; | ||
2316 | - } | ||
2317 | - stats->tx_bytes += size; | ||
2318 | - desc->die_dt = DT_EEMPTY; | ||
2319 | - } | ||
2320 | - return free_num; | ||
2321 | -} | ||
2322 | - | ||
2323 | static void ravb_get_tx_tstamp(struct net_device *ndev) | ||
2324 | { | ||
2325 | struct ravb_private *priv = netdev_priv(ndev); | ||
2326 | @@ -902,7 +917,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) | ||
2327 | spin_lock_irqsave(&priv->lock, flags); | ||
2328 | /* Clear TX interrupt */ | ||
2329 | ravb_write(ndev, ~mask, TIS); | ||
2330 | - ravb_tx_free(ndev, q); | ||
2331 | + ravb_tx_free(ndev, q, true); | ||
2332 | netif_wake_subqueue(ndev, q); | ||
2333 | mmiowb(); | ||
2334 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2335 | @@ -1571,7 +1586,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) | ||
2336 | |||
2337 | priv->cur_tx[q] += NUM_TX_DESC; | ||
2338 | if (priv->cur_tx[q] - priv->dirty_tx[q] > | ||
2339 | - (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q)) | ||
2340 | + (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && | ||
2341 | + !ravb_tx_free(ndev, q, true)) | ||
2342 | netif_stop_subqueue(ndev, q); | ||
2343 | |||
2344 | exit: | ||
2345 | diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c | ||
2346 | index 93dc10b10c09..aa02a03a6d8d 100644 | ||
2347 | --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c | ||
2348 | +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c | ||
2349 | @@ -100,6 +100,14 @@ | ||
2350 | /* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */ | ||
2351 | #define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT) | ||
2352 | |||
2353 | +#ifdef __BIG_ENDIAN | ||
2354 | +#define xemaclite_readl ioread32be | ||
2355 | +#define xemaclite_writel iowrite32be | ||
2356 | +#else | ||
2357 | +#define xemaclite_readl ioread32 | ||
2358 | +#define xemaclite_writel iowrite32 | ||
2359 | +#endif | ||
2360 | + | ||
2361 | /** | ||
2362 | * struct net_local - Our private per device data | ||
2363 | * @ndev: instance of the network device | ||
2364 | @@ -156,15 +164,15 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata) | ||
2365 | u32 reg_data; | ||
2366 | |||
2367 | /* Enable the Tx interrupts for the first Buffer */ | ||
2368 | - reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); | ||
2369 | - __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, | ||
2370 | - drvdata->base_addr + XEL_TSR_OFFSET); | ||
2371 | + reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET); | ||
2372 | + xemaclite_writel(reg_data | XEL_TSR_XMIT_IE_MASK, | ||
2373 | + drvdata->base_addr + XEL_TSR_OFFSET); | ||
2374 | |||
2375 | /* Enable the Rx interrupts for the first buffer */ | ||
2376 | - __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); | ||
2377 | + xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); | ||
2378 | |||
2379 | /* Enable the Global Interrupt Enable */ | ||
2380 | - __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); | ||
2381 | + xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); | ||
2382 | } | ||
2383 | |||
2384 | /** | ||
2385 | @@ -179,17 +187,17 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata) | ||
2386 | u32 reg_data; | ||
2387 | |||
2388 | /* Disable the Global Interrupt Enable */ | ||
2389 | - __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); | ||
2390 | + xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); | ||
2391 | |||
2392 | /* Disable the Tx interrupts for the first buffer */ | ||
2393 | - reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); | ||
2394 | - __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), | ||
2395 | - drvdata->base_addr + XEL_TSR_OFFSET); | ||
2396 | + reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET); | ||
2397 | + xemaclite_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), | ||
2398 | + drvdata->base_addr + XEL_TSR_OFFSET); | ||
2399 | |||
2400 | /* Disable the Rx interrupts for the first buffer */ | ||
2401 | - reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET); | ||
2402 | - __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), | ||
2403 | - drvdata->base_addr + XEL_RSR_OFFSET); | ||
2404 | + reg_data = xemaclite_readl(drvdata->base_addr + XEL_RSR_OFFSET); | ||
2405 | + xemaclite_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), | ||
2406 | + drvdata->base_addr + XEL_RSR_OFFSET); | ||
2407 | } | ||
2408 | |||
2409 | /** | ||
2410 | @@ -321,7 +329,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, | ||
2411 | byte_count = ETH_FRAME_LEN; | ||
2412 | |||
2413 | /* Check if the expected buffer is available */ | ||
2414 | - reg_data = __raw_readl(addr + XEL_TSR_OFFSET); | ||
2415 | + reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); | ||
2416 | if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | | ||
2417 | XEL_TSR_XMIT_ACTIVE_MASK)) == 0) { | ||
2418 | |||
2419 | @@ -334,7 +342,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, | ||
2420 | |||
2421 | addr = (void __iomem __force *)((u32 __force)addr ^ | ||
2422 | XEL_BUFFER_OFFSET); | ||
2423 | - reg_data = __raw_readl(addr + XEL_TSR_OFFSET); | ||
2424 | + reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); | ||
2425 | |||
2426 | if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | | ||
2427 | XEL_TSR_XMIT_ACTIVE_MASK)) != 0) | ||
2428 | @@ -345,16 +353,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, | ||
2429 | /* Write the frame to the buffer */ | ||
2430 | xemaclite_aligned_write(data, (u32 __force *) addr, byte_count); | ||
2431 | |||
2432 | - __raw_writel((byte_count & XEL_TPLR_LENGTH_MASK), | ||
2433 | - addr + XEL_TPLR_OFFSET); | ||
2434 | + xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK), | ||
2435 | + addr + XEL_TPLR_OFFSET); | ||
2436 | |||
2437 | /* Update the Tx Status Register to indicate that there is a | ||
2438 | * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which | ||
2439 | * is used by the interrupt handler to check whether a frame | ||
2440 | * has been transmitted */ | ||
2441 | - reg_data = __raw_readl(addr + XEL_TSR_OFFSET); | ||
2442 | + reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); | ||
2443 | reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK); | ||
2444 | - __raw_writel(reg_data, addr + XEL_TSR_OFFSET); | ||
2445 | + xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET); | ||
2446 | |||
2447 | return 0; | ||
2448 | } | ||
2449 | @@ -369,7 +377,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, | ||
2450 | * | ||
2451 | * Return: Total number of bytes received | ||
2452 | */ | ||
2453 | -static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) | ||
2454 | +static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) | ||
2455 | { | ||
2456 | void __iomem *addr; | ||
2457 | u16 length, proto_type; | ||
2458 | @@ -379,7 +387,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) | ||
2459 | addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use); | ||
2460 | |||
2461 | /* Verify which buffer has valid data */ | ||
2462 | - reg_data = __raw_readl(addr + XEL_RSR_OFFSET); | ||
2463 | + reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET); | ||
2464 | |||
2465 | if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) { | ||
2466 | if (drvdata->rx_ping_pong != 0) | ||
2467 | @@ -396,27 +404,28 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) | ||
2468 | return 0; /* No data was available */ | ||
2469 | |||
2470 | /* Verify that buffer has valid data */ | ||
2471 | - reg_data = __raw_readl(addr + XEL_RSR_OFFSET); | ||
2472 | + reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET); | ||
2473 | if ((reg_data & XEL_RSR_RECV_DONE_MASK) != | ||
2474 | XEL_RSR_RECV_DONE_MASK) | ||
2475 | return 0; /* No data was available */ | ||
2476 | } | ||
2477 | |||
2478 | /* Get the protocol type of the ethernet frame that arrived */ | ||
2479 | - proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET + | ||
2480 | + proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET + | ||
2481 | XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & | ||
2482 | XEL_RPLR_LENGTH_MASK); | ||
2483 | |||
2484 | /* Check if received ethernet frame is a raw ethernet frame | ||
2485 | * or an IP packet or an ARP packet */ | ||
2486 | - if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) { | ||
2487 | + if (proto_type > ETH_DATA_LEN) { | ||
2488 | |||
2489 | if (proto_type == ETH_P_IP) { | ||
2490 | - length = ((ntohl(__raw_readl(addr + | ||
2491 | + length = ((ntohl(xemaclite_readl(addr + | ||
2492 | XEL_HEADER_IP_LENGTH_OFFSET + | ||
2493 | XEL_RXBUFF_OFFSET)) >> | ||
2494 | XEL_HEADER_SHIFT) & | ||
2495 | XEL_RPLR_LENGTH_MASK); | ||
2496 | + length = min_t(u16, length, ETH_DATA_LEN); | ||
2497 | length += ETH_HLEN + ETH_FCS_LEN; | ||
2498 | |||
2499 | } else if (proto_type == ETH_P_ARP) | ||
2500 | @@ -429,14 +438,17 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) | ||
2501 | /* Use the length in the frame, plus the header and trailer */ | ||
2502 | length = proto_type + ETH_HLEN + ETH_FCS_LEN; | ||
2503 | |||
2504 | + if (WARN_ON(length > maxlen)) | ||
2505 | + length = maxlen; | ||
2506 | + | ||
2507 | /* Read from the EmacLite device */ | ||
2508 | xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET), | ||
2509 | data, length); | ||
2510 | |||
2511 | /* Acknowledge the frame */ | ||
2512 | - reg_data = __raw_readl(addr + XEL_RSR_OFFSET); | ||
2513 | + reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET); | ||
2514 | reg_data &= ~XEL_RSR_RECV_DONE_MASK; | ||
2515 | - __raw_writel(reg_data, addr + XEL_RSR_OFFSET); | ||
2516 | + xemaclite_writel(reg_data, addr + XEL_RSR_OFFSET); | ||
2517 | |||
2518 | return length; | ||
2519 | } | ||
2520 | @@ -463,14 +475,14 @@ static void xemaclite_update_address(struct net_local *drvdata, | ||
2521 | |||
2522 | xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN); | ||
2523 | |||
2524 | - __raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET); | ||
2525 | + xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET); | ||
2526 | |||
2527 | /* Update the MAC address in the EmacLite */ | ||
2528 | - reg_data = __raw_readl(addr + XEL_TSR_OFFSET); | ||
2529 | - __raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET); | ||
2530 | + reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); | ||
2531 | + xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET); | ||
2532 | |||
2533 | /* Wait for EmacLite to finish with the MAC address update */ | ||
2534 | - while ((__raw_readl(addr + XEL_TSR_OFFSET) & | ||
2535 | + while ((xemaclite_readl(addr + XEL_TSR_OFFSET) & | ||
2536 | XEL_TSR_PROG_MAC_ADDR) != 0) | ||
2537 | ; | ||
2538 | } | ||
2539 | @@ -603,7 +615,7 @@ static void xemaclite_rx_handler(struct net_device *dev) | ||
2540 | |||
2541 | skb_reserve(skb, 2); | ||
2542 | |||
2543 | - len = xemaclite_recv_data(lp, (u8 *) skb->data); | ||
2544 | + len = xemaclite_recv_data(lp, (u8 *) skb->data, len); | ||
2545 | |||
2546 | if (!len) { | ||
2547 | dev->stats.rx_errors++; | ||
2548 | @@ -640,32 +652,32 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) | ||
2549 | u32 tx_status; | ||
2550 | |||
2551 | /* Check if there is Rx Data available */ | ||
2552 | - if ((__raw_readl(base_addr + XEL_RSR_OFFSET) & | ||
2553 | + if ((xemaclite_readl(base_addr + XEL_RSR_OFFSET) & | ||
2554 | XEL_RSR_RECV_DONE_MASK) || | ||
2555 | - (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET) | ||
2556 | + (xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET) | ||
2557 | & XEL_RSR_RECV_DONE_MASK)) | ||
2558 | |||
2559 | xemaclite_rx_handler(dev); | ||
2560 | |||
2561 | /* Check if the Transmission for the first buffer is completed */ | ||
2562 | - tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET); | ||
2563 | + tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET); | ||
2564 | if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && | ||
2565 | (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { | ||
2566 | |||
2567 | tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; | ||
2568 | - __raw_writel(tx_status, base_addr + XEL_TSR_OFFSET); | ||
2569 | + xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET); | ||
2570 | |||
2571 | tx_complete = true; | ||
2572 | } | ||
2573 | |||
2574 | /* Check if the Transmission for the second buffer is completed */ | ||
2575 | - tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); | ||
2576 | + tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); | ||
2577 | if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && | ||
2578 | (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { | ||
2579 | |||
2580 | tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; | ||
2581 | - __raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET + | ||
2582 | - XEL_TSR_OFFSET); | ||
2583 | + xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET + | ||
2584 | + XEL_TSR_OFFSET); | ||
2585 | |||
2586 | tx_complete = true; | ||
2587 | } | ||
2588 | @@ -698,7 +710,7 @@ static int xemaclite_mdio_wait(struct net_local *lp) | ||
2589 | /* wait for the MDIO interface to not be busy or timeout | ||
2590 | after some time. | ||
2591 | */ | ||
2592 | - while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & | ||
2593 | + while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & | ||
2594 | XEL_MDIOCTRL_MDIOSTS_MASK) { | ||
2595 | if (time_before_eq(end, jiffies)) { | ||
2596 | WARN_ON(1); | ||
2597 | @@ -734,17 +746,17 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg) | ||
2598 | * MDIO Address register. Set the Status bit in the MDIO Control | ||
2599 | * register to start a MDIO read transaction. | ||
2600 | */ | ||
2601 | - ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); | ||
2602 | - __raw_writel(XEL_MDIOADDR_OP_MASK | | ||
2603 | - ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), | ||
2604 | - lp->base_addr + XEL_MDIOADDR_OFFSET); | ||
2605 | - __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, | ||
2606 | - lp->base_addr + XEL_MDIOCTRL_OFFSET); | ||
2607 | + ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); | ||
2608 | + xemaclite_writel(XEL_MDIOADDR_OP_MASK | | ||
2609 | + ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), | ||
2610 | + lp->base_addr + XEL_MDIOADDR_OFFSET); | ||
2611 | + xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, | ||
2612 | + lp->base_addr + XEL_MDIOCTRL_OFFSET); | ||
2613 | |||
2614 | if (xemaclite_mdio_wait(lp)) | ||
2615 | return -ETIMEDOUT; | ||
2616 | |||
2617 | - rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET); | ||
2618 | + rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET); | ||
2619 | |||
2620 | dev_dbg(&lp->ndev->dev, | ||
2621 | "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n", | ||
2622 | @@ -781,13 +793,13 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, | ||
2623 | * Data register. Finally, set the Status bit in the MDIO Control | ||
2624 | * register to start a MDIO write transaction. | ||
2625 | */ | ||
2626 | - ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); | ||
2627 | - __raw_writel(~XEL_MDIOADDR_OP_MASK & | ||
2628 | - ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), | ||
2629 | - lp->base_addr + XEL_MDIOADDR_OFFSET); | ||
2630 | - __raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET); | ||
2631 | - __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, | ||
2632 | - lp->base_addr + XEL_MDIOCTRL_OFFSET); | ||
2633 | + ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); | ||
2634 | + xemaclite_writel(~XEL_MDIOADDR_OP_MASK & | ||
2635 | + ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), | ||
2636 | + lp->base_addr + XEL_MDIOADDR_OFFSET); | ||
2637 | + xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET); | ||
2638 | + xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, | ||
2639 | + lp->base_addr + XEL_MDIOCTRL_OFFSET); | ||
2640 | |||
2641 | return 0; | ||
2642 | } | ||
2643 | @@ -834,8 +846,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) | ||
2644 | /* Enable the MDIO bus by asserting the enable bit in MDIO Control | ||
2645 | * register. | ||
2646 | */ | ||
2647 | - __raw_writel(XEL_MDIOCTRL_MDIOEN_MASK, | ||
2648 | - lp->base_addr + XEL_MDIOCTRL_OFFSET); | ||
2649 | + xemaclite_writel(XEL_MDIOCTRL_MDIOEN_MASK, | ||
2650 | + lp->base_addr + XEL_MDIOCTRL_OFFSET); | ||
2651 | |||
2652 | bus = mdiobus_alloc(); | ||
2653 | if (!bus) { | ||
2654 | @@ -1140,8 +1152,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev) | ||
2655 | } | ||
2656 | |||
2657 | /* Clear the Tx CSR's in case this is a restart */ | ||
2658 | - __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET); | ||
2659 | - __raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); | ||
2660 | + xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET); | ||
2661 | + xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); | ||
2662 | |||
2663 | /* Set the MAC address in the EmacLite device */ | ||
2664 | xemaclite_update_address(lp, ndev->dev_addr); | ||
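The hunks above switch the EmacLite driver from bare __raw_readl()/__raw_writel() register accesses to xemaclite_readl()/xemaclite_writel() wrappers introduced earlier in the patch. As a minimal sketch (not part of the patch, assuming the wrappers are plain aliases as in the upstream commit), the substitution amounts to using the ordered, fixed-endian MMIO accessors:

	/* sketch: what the wrappers used above are assumed to expand to */
	#define xemaclite_readl(addr)		ioread32(addr)
	#define xemaclite_writel(val, addr)	iowrite32(val, addr)

Unlike the __raw_*() variants, ioread32()/iowrite32() give little-endian accesses with the ordering guarantees expected for device registers.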
2665 | diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c | ||
2666 | index 97e0cbca0a08..cebde074d196 100644 | ||
2667 | --- a/drivers/net/gtp.c | ||
2668 | +++ b/drivers/net/gtp.c | ||
2669 | @@ -1372,3 +1372,4 @@ MODULE_LICENSE("GPL"); | ||
2670 | MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>"); | ||
2671 | MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic"); | ||
2672 | MODULE_ALIAS_RTNL_LINK("gtp"); | ||
2673 | +MODULE_ALIAS_GENL_FAMILY("gtp"); | ||
2674 | diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c | ||
2675 | index 1dfe2304daa7..e0a6b1a0ca88 100644 | ||
2676 | --- a/drivers/net/hamradio/mkiss.c | ||
2677 | +++ b/drivers/net/hamradio/mkiss.c | ||
2678 | @@ -648,8 +648,8 @@ static void ax_setup(struct net_device *dev) | ||
2679 | { | ||
2680 | /* Finish setting up the DEVICE info. */ | ||
2681 | dev->mtu = AX_MTU; | ||
2682 | - dev->hard_header_len = 0; | ||
2683 | - dev->addr_len = 0; | ||
2684 | + dev->hard_header_len = AX25_MAX_HEADER_LEN; | ||
2685 | + dev->addr_len = AX25_ADDR_LEN; | ||
2686 | dev->type = ARPHRD_AX25; | ||
2687 | dev->tx_queue_len = 10; | ||
2688 | dev->header_ops = &ax25_header_ops; | ||
2689 | diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c | ||
2690 | index ea92d524d5a8..fab56c9350cf 100644 | ||
2691 | --- a/drivers/net/phy/micrel.c | ||
2692 | +++ b/drivers/net/phy/micrel.c | ||
2693 | @@ -1014,6 +1014,20 @@ static struct phy_driver ksphy_driver[] = { | ||
2694 | .get_stats = kszphy_get_stats, | ||
2695 | .suspend = genphy_suspend, | ||
2696 | .resume = genphy_resume, | ||
2697 | +}, { | ||
2698 | + .phy_id = PHY_ID_KSZ8795, | ||
2699 | + .phy_id_mask = MICREL_PHY_ID_MASK, | ||
2700 | + .name = "Micrel KSZ8795", | ||
2701 | + .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause), | ||
2702 | + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, | ||
2703 | + .config_init = kszphy_config_init, | ||
2704 | + .config_aneg = ksz8873mll_config_aneg, | ||
2705 | + .read_status = ksz8873mll_read_status, | ||
2706 | + .get_sset_count = kszphy_get_sset_count, | ||
2707 | + .get_strings = kszphy_get_strings, | ||
2708 | + .get_stats = kszphy_get_stats, | ||
2709 | + .suspend = genphy_suspend, | ||
2710 | + .resume = genphy_resume, | ||
2711 | } }; | ||
2712 | |||
2713 | module_phy_driver(ksphy_driver); | ||
2714 | diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c | ||
2715 | index c4ceb082e970..14d57d0d1c04 100644 | ||
2716 | --- a/drivers/net/phy/phy_device.c | ||
2717 | +++ b/drivers/net/phy/phy_device.c | ||
2718 | @@ -860,6 +860,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, | ||
2719 | struct module *ndev_owner = dev->dev.parent->driver->owner; | ||
2720 | struct mii_bus *bus = phydev->mdio.bus; | ||
2721 | struct device *d = &phydev->mdio.dev; | ||
2722 | + bool using_genphy = false; | ||
2723 | int err; | ||
2724 | |||
2725 | /* For Ethernet device drivers that register their own MDIO bus, we | ||
2726 | @@ -885,12 +886,22 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, | ||
2727 | d->driver = | ||
2728 | &genphy_driver[GENPHY_DRV_1G].mdiodrv.driver; | ||
2729 | |||
2730 | + using_genphy = true; | ||
2731 | + } | ||
2732 | + | ||
2733 | + if (!try_module_get(d->driver->owner)) { | ||
2734 | + dev_err(&dev->dev, "failed to get the device driver module\n"); | ||
2735 | + err = -EIO; | ||
2736 | + goto error_put_device; | ||
2737 | + } | ||
2738 | + | ||
2739 | + if (using_genphy) { | ||
2740 | err = d->driver->probe(d); | ||
2741 | if (err >= 0) | ||
2742 | err = device_bind_driver(d); | ||
2743 | |||
2744 | if (err) | ||
2745 | - goto error; | ||
2746 | + goto error_module_put; | ||
2747 | } | ||
2748 | |||
2749 | if (phydev->attached_dev) { | ||
2750 | @@ -926,6 +937,13 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, | ||
2751 | return err; | ||
2752 | |||
2753 | error: | ||
2754 | + /* phy_detach() does all of the cleanup below */ | ||
2755 | + phy_detach(phydev); | ||
2756 | + return err; | ||
2757 | + | ||
2758 | +error_module_put: | ||
2759 | + module_put(d->driver->owner); | ||
2760 | +error_put_device: | ||
2761 | put_device(d); | ||
2762 | if (ndev_owner != bus->owner) | ||
2763 | module_put(bus->owner); | ||
2764 | @@ -987,6 +1005,8 @@ void phy_detach(struct phy_device *phydev) | ||
2765 | phydev->attached_dev = NULL; | ||
2766 | phy_suspend(phydev); | ||
2767 | |||
2768 | + module_put(phydev->mdio.dev.driver->owner); | ||
2769 | + | ||
2770 | /* If the device had no specific driver before (i.e. - it | ||
2771 | * was using the generic driver), we unbind the device | ||
2772 | * from the generic driver so that there's a chance a | ||
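The phy_device.c hunks pin the PHY driver's module for as long as a network device is attached: phy_attach_direct() takes a reference with try_module_get() (with new unwind labels for its failure paths) and phy_detach() drops it with module_put(). A minimal sketch of the pattern, with drv standing in for the bound struct device_driver:

	/* sketch: keep a driver module loaded while a device uses it */
	if (!try_module_get(drv->owner))	/* fails if the module is unloading */
		return -EIO;
	/* ... device attached; calls into the driver are now safe ... */
	module_put(drv->owner);			/* drop the reference on detach */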
2773 | diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c | ||
2774 | index 90b426c5ffce..afb953a258cd 100644 | ||
2775 | --- a/drivers/net/usb/r8152.c | ||
2776 | +++ b/drivers/net/usb/r8152.c | ||
2777 | @@ -32,7 +32,7 @@ | ||
2778 | #define NETNEXT_VERSION "08" | ||
2779 | |||
2780 | /* Information for net */ | ||
2781 | -#define NET_VERSION "7" | ||
2782 | +#define NET_VERSION "8" | ||
2783 | |||
2784 | #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION | ||
2785 | #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" | ||
2786 | @@ -1936,6 +1936,9 @@ static int r8152_poll(struct napi_struct *napi, int budget) | ||
2787 | napi_complete(napi); | ||
2788 | if (!list_empty(&tp->rx_done)) | ||
2789 | napi_schedule(napi); | ||
2790 | + else if (!skb_queue_empty(&tp->tx_queue) && | ||
2791 | + !list_empty(&tp->tx_free)) | ||
2792 | + napi_schedule(napi); | ||
2793 | } | ||
2794 | |||
2795 | return work_done; | ||
2796 | @@ -3155,10 +3158,13 @@ static void set_carrier(struct r8152 *tp) | ||
2797 | if (!netif_carrier_ok(netdev)) { | ||
2798 | tp->rtl_ops.enable(tp); | ||
2799 | set_bit(RTL8152_SET_RX_MODE, &tp->flags); | ||
2800 | + netif_stop_queue(netdev); | ||
2801 | napi_disable(&tp->napi); | ||
2802 | netif_carrier_on(netdev); | ||
2803 | rtl_start_rx(tp); | ||
2804 | napi_enable(&tp->napi); | ||
2805 | + netif_wake_queue(netdev); | ||
2806 | + netif_info(tp, link, netdev, "carrier on\n"); | ||
2807 | } | ||
2808 | } else { | ||
2809 | if (netif_carrier_ok(netdev)) { | ||
2810 | @@ -3166,6 +3172,7 @@ static void set_carrier(struct r8152 *tp) | ||
2811 | napi_disable(&tp->napi); | ||
2812 | tp->rtl_ops.disable(tp); | ||
2813 | napi_enable(&tp->napi); | ||
2814 | + netif_info(tp, link, netdev, "carrier off\n"); | ||
2815 | } | ||
2816 | } | ||
2817 | } | ||
2818 | @@ -3515,12 +3522,12 @@ static int rtl8152_pre_reset(struct usb_interface *intf) | ||
2819 | if (!netif_running(netdev)) | ||
2820 | return 0; | ||
2821 | |||
2822 | + netif_stop_queue(netdev); | ||
2823 | napi_disable(&tp->napi); | ||
2824 | clear_bit(WORK_ENABLE, &tp->flags); | ||
2825 | usb_kill_urb(tp->intr_urb); | ||
2826 | cancel_delayed_work_sync(&tp->schedule); | ||
2827 | if (netif_carrier_ok(netdev)) { | ||
2828 | - netif_stop_queue(netdev); | ||
2829 | mutex_lock(&tp->control); | ||
2830 | tp->rtl_ops.disable(tp); | ||
2831 | mutex_unlock(&tp->control); | ||
2832 | @@ -3545,12 +3552,17 @@ static int rtl8152_post_reset(struct usb_interface *intf) | ||
2833 | if (netif_carrier_ok(netdev)) { | ||
2834 | mutex_lock(&tp->control); | ||
2835 | tp->rtl_ops.enable(tp); | ||
2836 | + rtl_start_rx(tp); | ||
2837 | rtl8152_set_rx_mode(netdev); | ||
2838 | mutex_unlock(&tp->control); | ||
2839 | - netif_wake_queue(netdev); | ||
2840 | } | ||
2841 | |||
2842 | napi_enable(&tp->napi); | ||
2843 | + netif_wake_queue(netdev); | ||
2844 | + usb_submit_urb(tp->intr_urb, GFP_KERNEL); | ||
2845 | + | ||
2846 | + if (!list_empty(&tp->rx_done)) | ||
2847 | + napi_schedule(&tp->napi); | ||
2848 | |||
2849 | return 0; | ||
2850 | } | ||
2851 | @@ -3583,10 +3595,15 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp) | ||
2852 | struct net_device *netdev = tp->netdev; | ||
2853 | int ret = 0; | ||
2854 | |||
2855 | + set_bit(SELECTIVE_SUSPEND, &tp->flags); | ||
2856 | + smp_mb__after_atomic(); | ||
2857 | + | ||
2858 | if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) { | ||
2859 | u32 rcr = 0; | ||
2860 | |||
2861 | if (delay_autosuspend(tp)) { | ||
2862 | + clear_bit(SELECTIVE_SUSPEND, &tp->flags); | ||
2863 | + smp_mb__after_atomic(); | ||
2864 | ret = -EBUSY; | ||
2865 | goto out1; | ||
2866 | } | ||
2867 | @@ -3603,6 +3620,8 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp) | ||
2868 | if (!(ocp_data & RXFIFO_EMPTY)) { | ||
2869 | rxdy_gated_en(tp, false); | ||
2870 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr); | ||
2871 | + clear_bit(SELECTIVE_SUSPEND, &tp->flags); | ||
2872 | + smp_mb__after_atomic(); | ||
2873 | ret = -EBUSY; | ||
2874 | goto out1; | ||
2875 | } | ||
2876 | @@ -3622,8 +3641,6 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp) | ||
2877 | } | ||
2878 | } | ||
2879 | |||
2880 | - set_bit(SELECTIVE_SUSPEND, &tp->flags); | ||
2881 | - | ||
2882 | out1: | ||
2883 | return ret; | ||
2884 | } | ||
2885 | @@ -3679,12 +3696,15 @@ static int rtl8152_resume(struct usb_interface *intf) | ||
2886 | if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) { | ||
2887 | if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { | ||
2888 | tp->rtl_ops.autosuspend_en(tp, false); | ||
2889 | - clear_bit(SELECTIVE_SUSPEND, &tp->flags); | ||
2890 | napi_disable(&tp->napi); | ||
2891 | set_bit(WORK_ENABLE, &tp->flags); | ||
2892 | if (netif_carrier_ok(tp->netdev)) | ||
2893 | rtl_start_rx(tp); | ||
2894 | napi_enable(&tp->napi); | ||
2895 | + clear_bit(SELECTIVE_SUSPEND, &tp->flags); | ||
2896 | + smp_mb__after_atomic(); | ||
2897 | + if (!list_empty(&tp->rx_done)) | ||
2898 | + napi_schedule(&tp->napi); | ||
2899 | } else { | ||
2900 | tp->rtl_ops.up(tp); | ||
2901 | netif_carrier_off(tp->netdev); | ||
2902 | diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c | ||
2903 | index a251588762ec..0b5a84c9022c 100644 | ||
2904 | --- a/drivers/net/usb/sierra_net.c | ||
2905 | +++ b/drivers/net/usb/sierra_net.c | ||
2906 | @@ -73,8 +73,6 @@ static atomic_t iface_counter = ATOMIC_INIT(0); | ||
2907 | /* Private data structure */ | ||
2908 | struct sierra_net_data { | ||
2909 | |||
2910 | - u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */ | ||
2911 | - | ||
2912 | u16 link_up; /* air link up or down */ | ||
2913 | u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */ | ||
2914 | |||
2915 | @@ -122,6 +120,7 @@ struct param { | ||
2916 | |||
2917 | /* LSI Protocol types */ | ||
2918 | #define SIERRA_NET_PROTOCOL_UMTS 0x01 | ||
2919 | +#define SIERRA_NET_PROTOCOL_UMTS_DS 0x04 | ||
2920 | /* LSI Coverage */ | ||
2921 | #define SIERRA_NET_COVERAGE_NONE 0x00 | ||
2922 | #define SIERRA_NET_COVERAGE_NOPACKET 0x01 | ||
2923 | @@ -129,7 +128,8 @@ struct param { | ||
2924 | /* LSI Session */ | ||
2925 | #define SIERRA_NET_SESSION_IDLE 0x00 | ||
2926 | /* LSI Link types */ | ||
2927 | -#define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00 | ||
2928 | +#define SIERRA_NET_AS_LINK_TYPE_IPV4 0x00 | ||
2929 | +#define SIERRA_NET_AS_LINK_TYPE_IPV6 0x02 | ||
2930 | |||
2931 | struct lsi_umts { | ||
2932 | u8 protocol; | ||
2933 | @@ -137,9 +137,14 @@ struct lsi_umts { | ||
2934 | __be16 length; | ||
2935 | /* eventually use a union for the rest - assume umts for now */ | ||
2936 | u8 coverage; | ||
2937 | - u8 unused2[41]; | ||
2938 | + u8 network_len; /* network name len */ | ||
2939 | + u8 network[40]; /* network name (UCS2, bigendian) */ | ||
2940 | u8 session_state; | ||
2941 | u8 unused3[33]; | ||
2942 | +} __packed; | ||
2943 | + | ||
2944 | +struct lsi_umts_single { | ||
2945 | + struct lsi_umts lsi; | ||
2946 | u8 link_type; | ||
2947 | u8 pdp_addr_len; /* NW-supplied PDP address len */ | ||
2948 | u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */ | ||
2949 | @@ -158,10 +163,31 @@ struct lsi_umts { | ||
2950 | u8 reserved[8]; | ||
2951 | } __packed; | ||
2952 | |||
2953 | +struct lsi_umts_dual { | ||
2954 | + struct lsi_umts lsi; | ||
2955 | + u8 pdp_addr4_len; /* NW-supplied PDP IPv4 address len */ | ||
2956 | + u8 pdp_addr4[4]; /* NW-supplied PDP IPv4 address (bigendian)) */ | ||
2957 | + u8 pdp_addr6_len; /* NW-supplied PDP IPv6 address len */ | ||
2958 | + u8 pdp_addr6[16]; /* NW-supplied PDP IPv6 address (bigendian)) */ | ||
2959 | + u8 unused4[23]; | ||
2960 | + u8 dns1_addr4_len; /* NW-supplied 1st DNS v4 address len (bigendian) */ | ||
2961 | + u8 dns1_addr4[4]; /* NW-supplied 1st DNS v4 address */ | ||
2962 | + u8 dns1_addr6_len; /* NW-supplied 1st DNS v6 address len */ | ||
2963 | + u8 dns1_addr6[16]; /* NW-supplied 1st DNS v6 address (bigendian)*/ | ||
2964 | + u8 dns2_addr4_len; /* NW-supplied 2nd DNS v4 address len (bigendian) */ | ||
2965 | + u8 dns2_addr4[4]; /* NW-supplied 2nd DNS v4 address */ | ||
2966 | + u8 dns2_addr6_len; /* NW-supplied 2nd DNS v6 address len */ | ||
2967 | + u8 dns2_addr6[16]; /* NW-supplied 2nd DNS v6 address (bigendian)*/ | ||
2968 | + u8 unused5[68]; | ||
2969 | +} __packed; | ||
2970 | + | ||
2971 | #define SIERRA_NET_LSI_COMMON_LEN 4 | ||
2972 | -#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts)) | ||
2973 | +#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts_single)) | ||
2974 | #define SIERRA_NET_LSI_UMTS_STATUS_LEN \ | ||
2975 | (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN) | ||
2976 | +#define SIERRA_NET_LSI_UMTS_DS_LEN (sizeof(struct lsi_umts_dual)) | ||
2977 | +#define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \ | ||
2978 | + (SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN) | ||
2979 | |||
2980 | /* Forward definitions */ | ||
2981 | static void sierra_sync_timer(unsigned long syncdata); | ||
2982 | @@ -191,10 +217,11 @@ static inline void sierra_net_set_private(struct usbnet *dev, | ||
2983 | dev->data[0] = (unsigned long)priv; | ||
2984 | } | ||
2985 | |||
2986 | -/* is packet IPv4 */ | ||
2987 | +/* is packet IPv4/IPv6 */ | ||
2988 | static inline int is_ip(struct sk_buff *skb) | ||
2989 | { | ||
2990 | - return skb->protocol == cpu_to_be16(ETH_P_IP); | ||
2991 | + return skb->protocol == cpu_to_be16(ETH_P_IP) || | ||
2992 | + skb->protocol == cpu_to_be16(ETH_P_IPV6); | ||
2993 | } | ||
2994 | |||
2995 | /* | ||
2996 | @@ -350,49 +377,54 @@ static inline int sierra_net_is_valid_addrlen(u8 len) | ||
2997 | static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen) | ||
2998 | { | ||
2999 | struct lsi_umts *lsi = (struct lsi_umts *)data; | ||
3000 | + u32 expected_length; | ||
3001 | |||
3002 | - if (datalen < sizeof(struct lsi_umts)) { | ||
3003 | - netdev_err(dev->net, "%s: Data length %d, exp %Zu\n", | ||
3004 | - __func__, datalen, | ||
3005 | - sizeof(struct lsi_umts)); | ||
3006 | + if (datalen < sizeof(struct lsi_umts_single)) { | ||
3007 | + netdev_err(dev->net, "%s: Data length %d, exp >= %Zu\n", | ||
3008 | + __func__, datalen, sizeof(struct lsi_umts_single)); | ||
3009 | return -1; | ||
3010 | } | ||
3011 | |||
3012 | - if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) { | ||
3013 | - netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n", | ||
3014 | - __func__, be16_to_cpu(lsi->length), | ||
3015 | - (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN); | ||
3016 | - return -1; | ||
3017 | + /* Validate the session state */ | ||
3018 | + if (lsi->session_state == SIERRA_NET_SESSION_IDLE) { | ||
3019 | + netdev_err(dev->net, "Session idle, 0x%02x\n", | ||
3020 | + lsi->session_state); | ||
3021 | + return 0; | ||
3022 | } | ||
3023 | |||
3024 | /* Validate the protocol - only support UMTS for now */ | ||
3025 | - if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) { | ||
3026 | + if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS) { | ||
3027 | + struct lsi_umts_single *single = (struct lsi_umts_single *)lsi; | ||
3028 | + | ||
3029 | + /* Validate the link type */ | ||
3030 | + if (single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV4 && | ||
3031 | + single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV6) { | ||
3032 | + netdev_err(dev->net, "Link type unsupported: 0x%02x\n", | ||
3033 | + single->link_type); | ||
3034 | + return -1; | ||
3035 | + } | ||
3036 | + expected_length = SIERRA_NET_LSI_UMTS_STATUS_LEN; | ||
3037 | + } else if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS) { | ||
3038 | + expected_length = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN; | ||
3039 | + } else { | ||
3040 | netdev_err(dev->net, "Protocol unsupported, 0x%02x\n", | ||
3041 | - lsi->protocol); | ||
3042 | + lsi->protocol); | ||
3043 | return -1; | ||
3044 | } | ||
3045 | |||
3046 | - /* Validate the link type */ | ||
3047 | - if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) { | ||
3048 | - netdev_err(dev->net, "Link type unsupported: 0x%02x\n", | ||
3049 | - lsi->link_type); | ||
3050 | + if (be16_to_cpu(lsi->length) != expected_length) { | ||
3051 | + netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n", | ||
3052 | + __func__, be16_to_cpu(lsi->length), expected_length); | ||
3053 | return -1; | ||
3054 | } | ||
3055 | |||
3056 | /* Validate the coverage */ | ||
3057 | - if (lsi->coverage == SIERRA_NET_COVERAGE_NONE | ||
3058 | - || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) { | ||
3059 | + if (lsi->coverage == SIERRA_NET_COVERAGE_NONE || | ||
3060 | + lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) { | ||
3061 | netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage); | ||
3062 | return 0; | ||
3063 | } | ||
3064 | |||
3065 | - /* Validate the session state */ | ||
3066 | - if (lsi->session_state == SIERRA_NET_SESSION_IDLE) { | ||
3067 | - netdev_err(dev->net, "Session idle, 0x%02x\n", | ||
3068 | - lsi->session_state); | ||
3069 | - return 0; | ||
3070 | - } | ||
3071 | - | ||
3072 | /* Set link_sense true */ | ||
3073 | return 1; | ||
3074 | } | ||
3075 | @@ -662,7 +694,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) | ||
3076 | u8 numendpoints; | ||
3077 | u16 fwattr = 0; | ||
3078 | int status; | ||
3079 | - struct ethhdr *eth; | ||
3080 | struct sierra_net_data *priv; | ||
3081 | static const u8 sync_tmplate[sizeof(priv->sync_msg)] = { | ||
3082 | 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00}; | ||
3083 | @@ -700,11 +731,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) | ||
3084 | dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter); | ||
3085 | dev->net->dev_addr[ETH_ALEN-1] = ifacenum; | ||
3086 | |||
3087 | - /* we will have to manufacture ethernet headers, prepare template */ | ||
3088 | - eth = (struct ethhdr *)priv->ethr_hdr_tmpl; | ||
3089 | - memcpy(ð->h_dest, dev->net->dev_addr, ETH_ALEN); | ||
3090 | - eth->h_proto = cpu_to_be16(ETH_P_IP); | ||
3091 | - | ||
3092 | /* prepare shutdown message template */ | ||
3093 | memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg)); | ||
3094 | /* set context index initially to 0 - prepares tx hdr template */ | ||
3095 | @@ -833,9 +859,14 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | ||
3096 | |||
3097 | skb_pull(skb, hh.hdrlen); | ||
3098 | |||
3099 | - /* We are going to accept this packet, prepare it */ | ||
3100 | - memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl, | ||
3101 | - ETH_HLEN); | ||
3102 | + /* We are going to accept this packet, prepare it. | ||
3103 | + * In case protocol is IPv6, keep it, otherwise force IPv4. | ||
3104 | + */ | ||
3105 | + skb_reset_mac_header(skb); | ||
3106 | + if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6)) | ||
3107 | + eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP); | ||
3108 | + eth_zero_addr(eth_hdr(skb)->h_source); | ||
3109 | + memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); | ||
3110 | |||
3111 | /* Last packet in batch handled by usbnet */ | ||
3112 | if (hh.payload_len.word == skb->len) | ||
3113 | diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c | ||
3114 | index 005ef5d17a19..ca8ddc3fb19e 100644 | ||
3115 | --- a/drivers/nvme/target/rdma.c | ||
3116 | +++ b/drivers/nvme/target/rdma.c | ||
3117 | @@ -438,6 +438,10 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev, | ||
3118 | { | ||
3119 | struct ib_recv_wr *bad_wr; | ||
3120 | |||
3121 | + ib_dma_sync_single_for_device(ndev->device, | ||
3122 | + cmd->sge[0].addr, cmd->sge[0].length, | ||
3123 | + DMA_FROM_DEVICE); | ||
3124 | + | ||
3125 | if (ndev->srq) | ||
3126 | return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr); | ||
3127 | return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr); | ||
3128 | @@ -538,6 +542,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req) | ||
3129 | first_wr = &rsp->send_wr; | ||
3130 | |||
3131 | nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); | ||
3132 | + | ||
3133 | + ib_dma_sync_single_for_device(rsp->queue->dev->device, | ||
3134 | + rsp->send_sge.addr, rsp->send_sge.length, | ||
3135 | + DMA_TO_DEVICE); | ||
3136 | + | ||
3137 | if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) { | ||
3138 | pr_err("sending cmd response failed\n"); | ||
3139 | nvmet_rdma_release_rsp(rsp); | ||
3140 | @@ -698,6 +707,14 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, | ||
3141 | cmd->n_rdma = 0; | ||
3142 | cmd->req.port = queue->port; | ||
3143 | |||
3144 | + | ||
3145 | + ib_dma_sync_single_for_cpu(queue->dev->device, | ||
3146 | + cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length, | ||
3147 | + DMA_FROM_DEVICE); | ||
3148 | + ib_dma_sync_single_for_cpu(queue->dev->device, | ||
3149 | + cmd->send_sge.addr, cmd->send_sge.length, | ||
3150 | + DMA_TO_DEVICE); | ||
3151 | + | ||
3152 | if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, | ||
3153 | &queue->nvme_sq, &nvmet_rdma_ops)) | ||
3154 | return; | ||
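The nvmet-rdma hunks add the streaming-DMA ownership handshake around the command and response buffers: ib_dma_sync_single_for_cpu() before the CPU reads data the HCA wrote, and ib_dma_sync_single_for_device() before a buffer is handed back for DMA. Sketched generically (dev, addr and len are placeholders for the IB device and the mapped region):

	/* sketch: pass a streaming mapping back and forth between CPU and device */
	ib_dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
	/* ... CPU may now parse the received command safely ... */
	ib_dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
	/* ... ownership is back with the device; repost the receive buffer ... */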
3155 | diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c | ||
3156 | index 6e3a60c78873..50f3bb0dd1f1 100644 | ||
3157 | --- a/drivers/parport/parport_gsc.c | ||
3158 | +++ b/drivers/parport/parport_gsc.c | ||
3159 | @@ -293,7 +293,7 @@ struct parport *parport_gsc_probe_port(unsigned long base, | ||
3160 | p->irq = PARPORT_IRQ_NONE; | ||
3161 | } | ||
3162 | if (p->irq != PARPORT_IRQ_NONE) { | ||
3163 | - printk(", irq %d", p->irq); | ||
3164 | + pr_cont(", irq %d", p->irq); | ||
3165 | |||
3166 | if (p->dma == PARPORT_DMA_AUTO) { | ||
3167 | p->dma = PARPORT_DMA_NONE; | ||
3168 | @@ -303,8 +303,8 @@ struct parport *parport_gsc_probe_port(unsigned long base, | ||
3169 | is mandatory (see above) */ | ||
3170 | p->dma = PARPORT_DMA_NONE; | ||
3171 | |||
3172 | - printk(" ["); | ||
3173 | -#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}} | ||
3174 | + pr_cont(" ["); | ||
3175 | +#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}} | ||
3176 | { | ||
3177 | int f = 0; | ||
3178 | printmode(PCSPP); | ||
3179 | @@ -315,7 +315,7 @@ struct parport *parport_gsc_probe_port(unsigned long base, | ||
3180 | // printmode(DMA); | ||
3181 | } | ||
3182 | #undef printmode | ||
3183 | - printk("]\n"); | ||
3184 | + pr_cont("]\n"); | ||
3185 | |||
3186 | if (p->irq != PARPORT_IRQ_NONE) { | ||
3187 | if (request_irq (p->irq, parport_irq_handler, | ||
3188 | diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c | ||
3189 | index 579c4946dc6e..e7d4048e81f2 100644 | ||
3190 | --- a/drivers/pci/pci.c | ||
3191 | +++ b/drivers/pci/pci.c | ||
3192 | @@ -2142,7 +2142,8 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev) | ||
3193 | |||
3194 | if (!pm_runtime_suspended(dev) | ||
3195 | || pci_target_state(pci_dev) != pci_dev->current_state | ||
3196 | - || platform_pci_need_resume(pci_dev)) | ||
3197 | + || platform_pci_need_resume(pci_dev) | ||
3198 | + || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME)) | ||
3199 | return false; | ||
3200 | |||
3201 | /* | ||
3202 | diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c | ||
3203 | index 09172043d589..c617ec49e9ed 100644 | ||
3204 | --- a/drivers/pinctrl/berlin/berlin-bg4ct.c | ||
3205 | +++ b/drivers/pinctrl/berlin/berlin-bg4ct.c | ||
3206 | @@ -217,7 +217,7 @@ static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = { | ||
3207 | BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15, | ||
3208 | BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */ | ||
3209 | BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */ | ||
3210 | - BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */ | ||
3211 | + BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */ | ||
3212 | BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18, | ||
3213 | BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */ | ||
3214 | BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */ | ||
3215 | diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c | ||
3216 | index 583ae3f38fc0..5419de8e20b1 100644 | ||
3217 | --- a/drivers/pinctrl/intel/pinctrl-baytrail.c | ||
3218 | +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c | ||
3219 | @@ -1250,10 +1250,12 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, | ||
3220 | debounce = readl(db_reg); | ||
3221 | debounce &= ~BYT_DEBOUNCE_PULSE_MASK; | ||
3222 | |||
3223 | + if (arg) | ||
3224 | + conf |= BYT_DEBOUNCE_EN; | ||
3225 | + else | ||
3226 | + conf &= ~BYT_DEBOUNCE_EN; | ||
3227 | + | ||
3228 | switch (arg) { | ||
3229 | - case 0: | ||
3230 | - conf &= BYT_DEBOUNCE_EN; | ||
3231 | - break; | ||
3232 | case 375: | ||
3233 | debounce |= BYT_DEBOUNCE_PULSE_375US; | ||
3234 | break; | ||
3235 | @@ -1276,7 +1278,9 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, | ||
3236 | debounce |= BYT_DEBOUNCE_PULSE_24MS; | ||
3237 | break; | ||
3238 | default: | ||
3239 | - ret = -EINVAL; | ||
3240 | + if (arg) | ||
3241 | + ret = -EINVAL; | ||
3242 | + break; | ||
3243 | } | ||
3244 | |||
3245 | if (!ret) | ||
3246 | diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c | ||
3247 | index 611e07b78bfe..057c9b5ab1e5 100644 | ||
3248 | --- a/drivers/staging/media/cec/cec-adap.c | ||
3249 | +++ b/drivers/staging/media/cec/cec-adap.c | ||
3250 | @@ -1017,7 +1017,7 @@ static int cec_config_thread_func(void *arg) | ||
3251 | las->log_addr[i] = CEC_LOG_ADDR_INVALID; | ||
3252 | if (last_la == CEC_LOG_ADDR_INVALID || | ||
3253 | last_la == CEC_LOG_ADDR_UNREGISTERED || | ||
3254 | - !(last_la & type2mask[type])) | ||
3255 | + !((1 << last_la) & type2mask[type])) | ||
3256 | last_la = la_list[0]; | ||
3257 | |||
3258 | err = cec_config_log_addr(adap, i, last_la); | ||
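The cec-adap one-liner fixes a bitmask test: last_la is a logical address (a small integer), while type2mask[type] is a bitmask indexed by address, so the old code ANDed an index with a mask instead of testing the address's bit. A sketch with illustrative values:

	u8  last_la = 4;			/* illustrative logical address */
	u16 mask    = 1 << 4;			/* type2mask entry with bit 4 set */

	bool old_ok = last_la & mask;		/* 4 & 0x10 == 0: wrongly "not allowed" */
	bool new_ok = (1 << last_la) & mask;	/* 0x10 & 0x10 != 0: correct */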
3259 | diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c | ||
3260 | index 358feca54945..261ed2ca28f9 100644 | ||
3261 | --- a/drivers/usb/musb/musb_core.c | ||
3262 | +++ b/drivers/usb/musb/musb_core.c | ||
3263 | @@ -1909,6 +1909,14 @@ static void musb_pm_runtime_check_session(struct musb *musb) | ||
3264 | static void musb_irq_work(struct work_struct *data) | ||
3265 | { | ||
3266 | struct musb *musb = container_of(data, struct musb, irq_work.work); | ||
3267 | + int error; | ||
3268 | + | ||
3269 | + error = pm_runtime_get_sync(musb->controller); | ||
3270 | + if (error < 0) { | ||
3271 | + dev_err(musb->controller, "Could not enable: %i\n", error); | ||
3272 | + | ||
3273 | + return; | ||
3274 | + } | ||
3275 | |||
3276 | musb_pm_runtime_check_session(musb); | ||
3277 | |||
3278 | @@ -1916,6 +1924,9 @@ static void musb_irq_work(struct work_struct *data) | ||
3279 | musb->xceiv_old_state = musb->xceiv->otg->state; | ||
3280 | sysfs_notify(&musb->controller->kobj, NULL, "mode"); | ||
3281 | } | ||
3282 | + | ||
3283 | + pm_runtime_mark_last_busy(musb->controller); | ||
3284 | + pm_runtime_put_autosuspend(musb->controller); | ||
3285 | } | ||
3286 | |||
3287 | static void musb_recover_from_babble(struct musb *musb) | ||
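The musb_irq_work() hunks wrap the deferred work in the usual runtime-PM bracket so the controller is guaranteed to be powered while musb_pm_runtime_check_session() touches registers. The canonical shape of that bracket, sketched for a generic struct device *dev:

	int error = pm_runtime_get_sync(dev);	/* resume the device, usage-counted */
	if (error < 0)
		return;				/* the hunk logs the error and bails out */

	/* ... safe to access the hardware here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* drop the count; autosuspend after the delay */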
3288 | diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c | ||
3289 | index c8823578a1b2..79ddcb05d126 100644 | ||
3290 | --- a/drivers/vfio/vfio_iommu_spapr_tce.c | ||
3291 | +++ b/drivers/vfio/vfio_iommu_spapr_tce.c | ||
3292 | @@ -1246,6 +1246,8 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container, | ||
3293 | static long tce_iommu_take_ownership_ddw(struct tce_container *container, | ||
3294 | struct iommu_table_group *table_group) | ||
3295 | { | ||
3296 | + long i, ret = 0; | ||
3297 | + | ||
3298 | if (!table_group->ops->create_table || !table_group->ops->set_window || | ||
3299 | !table_group->ops->release_ownership) { | ||
3300 | WARN_ON_ONCE(1); | ||
3301 | @@ -1254,7 +1256,27 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container, | ||
3302 | |||
3303 | table_group->ops->take_ownership(table_group); | ||
3304 | |||
3305 | + /* Set all windows to the new group */ | ||
3306 | + for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { | ||
3307 | + struct iommu_table *tbl = container->tables[i]; | ||
3308 | + | ||
3309 | + if (!tbl) | ||
3310 | + continue; | ||
3311 | + | ||
3312 | + ret = table_group->ops->set_window(table_group, i, tbl); | ||
3313 | + if (ret) | ||
3314 | + goto release_exit; | ||
3315 | + } | ||
3316 | + | ||
3317 | return 0; | ||
3318 | + | ||
3319 | +release_exit: | ||
3320 | + for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) | ||
3321 | + table_group->ops->unset_window(table_group, i); | ||
3322 | + | ||
3323 | + table_group->ops->release_ownership(table_group); | ||
3324 | + | ||
3325 | + return ret; | ||
3326 | } | ||
3327 | |||
3328 | static int tce_iommu_attach_group(void *iommu_data, | ||
3329 | diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c | ||
3330 | index a504e2e003da..e3fad302b4fb 100644 | ||
3331 | --- a/drivers/vhost/vsock.c | ||
3332 | +++ b/drivers/vhost/vsock.c | ||
3333 | @@ -368,6 +368,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work) | ||
3334 | |||
3335 | static int vhost_vsock_start(struct vhost_vsock *vsock) | ||
3336 | { | ||
3337 | + struct vhost_virtqueue *vq; | ||
3338 | size_t i; | ||
3339 | int ret; | ||
3340 | |||
3341 | @@ -378,19 +379,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock) | ||
3342 | goto err; | ||
3343 | |||
3344 | for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { | ||
3345 | - struct vhost_virtqueue *vq = &vsock->vqs[i]; | ||
3346 | + vq = &vsock->vqs[i]; | ||
3347 | |||
3348 | mutex_lock(&vq->mutex); | ||
3349 | |||
3350 | if (!vhost_vq_access_ok(vq)) { | ||
3351 | ret = -EFAULT; | ||
3352 | - mutex_unlock(&vq->mutex); | ||
3353 | goto err_vq; | ||
3354 | } | ||
3355 | |||
3356 | if (!vq->private_data) { | ||
3357 | vq->private_data = vsock; | ||
3358 | - vhost_vq_init_access(vq); | ||
3359 | + ret = vhost_vq_init_access(vq); | ||
3360 | + if (ret) | ||
3361 | + goto err_vq; | ||
3362 | } | ||
3363 | |||
3364 | mutex_unlock(&vq->mutex); | ||
3365 | @@ -400,8 +402,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock) | ||
3366 | return 0; | ||
3367 | |||
3368 | err_vq: | ||
3369 | + vq->private_data = NULL; | ||
3370 | + mutex_unlock(&vq->mutex); | ||
3371 | + | ||
3372 | for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { | ||
3373 | - struct vhost_virtqueue *vq = &vsock->vqs[i]; | ||
3374 | + vq = &vsock->vqs[i]; | ||
3375 | |||
3376 | mutex_lock(&vq->mutex); | ||
3377 | vq->private_data = NULL; | ||
3378 | diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c | ||
3379 | index 4304072161aa..40d61077bead 100644 | ||
3380 | --- a/fs/fscache/cookie.c | ||
3381 | +++ b/fs/fscache/cookie.c | ||
3382 | @@ -542,6 +542,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate) | ||
3383 | hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) { | ||
3384 | if (invalidate) | ||
3385 | set_bit(FSCACHE_OBJECT_RETIRED, &object->flags); | ||
3386 | + clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); | ||
3387 | fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL); | ||
3388 | } | ||
3389 | } else { | ||
3390 | @@ -560,6 +561,10 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate) | ||
3391 | wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t, | ||
3392 | TASK_UNINTERRUPTIBLE); | ||
3393 | |||
3394 | + /* Make sure any pending writes are cancelled. */ | ||
3395 | + if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) | ||
3396 | + fscache_invalidate_writes(cookie); | ||
3397 | + | ||
3398 | /* Reset the cookie state if it wasn't relinquished */ | ||
3399 | if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) { | ||
3400 | atomic_inc(&cookie->n_active); | ||
3401 | diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c | ||
3402 | index 9b28649df3a1..a8aa00be4444 100644 | ||
3403 | --- a/fs/fscache/netfs.c | ||
3404 | +++ b/fs/fscache/netfs.c | ||
3405 | @@ -48,6 +48,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs) | ||
3406 | cookie->flags = 1 << FSCACHE_COOKIE_ENABLED; | ||
3407 | |||
3408 | spin_lock_init(&cookie->lock); | ||
3409 | + spin_lock_init(&cookie->stores_lock); | ||
3410 | INIT_HLIST_HEAD(&cookie->backing_objects); | ||
3411 | |||
3412 | /* check the netfs type is not already present */ | ||
3413 | diff --git a/fs/fscache/object.c b/fs/fscache/object.c | ||
3414 | index 9e792e30f4db..7a182c87f378 100644 | ||
3415 | --- a/fs/fscache/object.c | ||
3416 | +++ b/fs/fscache/object.c | ||
3417 | @@ -30,6 +30,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object | ||
3418 | static const struct fscache_state *fscache_object_available(struct fscache_object *, int); | ||
3419 | static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int); | ||
3420 | static const struct fscache_state *fscache_update_object(struct fscache_object *, int); | ||
3421 | +static const struct fscache_state *fscache_object_dead(struct fscache_object *, int); | ||
3422 | |||
3423 | #define __STATE_NAME(n) fscache_osm_##n | ||
3424 | #define STATE(n) (&__STATE_NAME(n)) | ||
3425 | @@ -91,7 +92,7 @@ static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure); | ||
3426 | static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object); | ||
3427 | static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents); | ||
3428 | static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object); | ||
3429 | -static WORK_STATE(OBJECT_DEAD, "DEAD", (void*)2UL); | ||
3430 | +static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead); | ||
3431 | |||
3432 | static WAIT_STATE(WAIT_FOR_INIT, "?INI", | ||
3433 | TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD)); | ||
3434 | @@ -229,6 +230,10 @@ static void fscache_object_sm_dispatcher(struct fscache_object *object) | ||
3435 | event = -1; | ||
3436 | if (new_state == NO_TRANSIT) { | ||
3437 | _debug("{OBJ%x} %s notrans", object->debug_id, state->name); | ||
3438 | + if (unlikely(state == STATE(OBJECT_DEAD))) { | ||
3439 | + _leave(" [dead]"); | ||
3440 | + return; | ||
3441 | + } | ||
3442 | fscache_enqueue_object(object); | ||
3443 | event_mask = object->oob_event_mask; | ||
3444 | goto unmask_events; | ||
3445 | @@ -239,7 +244,7 @@ static void fscache_object_sm_dispatcher(struct fscache_object *object) | ||
3446 | object->state = state = new_state; | ||
3447 | |||
3448 | if (state->work) { | ||
3449 | - if (unlikely(state->work == ((void *)2UL))) { | ||
3450 | + if (unlikely(state == STATE(OBJECT_DEAD))) { | ||
3451 | _leave(" [dead]"); | ||
3452 | return; | ||
3453 | } | ||
3454 | @@ -645,6 +650,12 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob | ||
3455 | fscache_mark_object_dead(object); | ||
3456 | object->oob_event_mask = 0; | ||
3457 | |||
3458 | + if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) { | ||
3459 | + /* Reject any new read/write ops and abort any that are pending. */ | ||
3460 | + clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); | ||
3461 | + fscache_cancel_all_ops(object); | ||
3462 | + } | ||
3463 | + | ||
3464 | if (list_empty(&object->dependents) && | ||
3465 | object->n_ops == 0 && | ||
3466 | object->n_children == 0) | ||
3467 | @@ -1077,3 +1088,20 @@ void fscache_object_mark_killed(struct fscache_object *object, | ||
3468 | } | ||
3469 | } | ||
3470 | EXPORT_SYMBOL(fscache_object_mark_killed); | ||
3471 | + | ||
3472 | +/* | ||
3473 | + * The object is dead. We can get here if an object gets queued by an event | ||
3474 | + * that would lead to its death (such as EV_KILL) when the dispatcher is | ||
3475 | + * already running (and so can be requeued) but hasn't yet cleared the event | ||
3476 | + * mask. | ||
3477 | + */ | ||
3478 | +static const struct fscache_state *fscache_object_dead(struct fscache_object *object, | ||
3479 | + int event) | ||
3480 | +{ | ||
3481 | + if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD, | ||
3482 | + &object->flags)) | ||
3483 | + return NO_TRANSIT; | ||
3484 | + | ||
3485 | + WARN(true, "FS-Cache object redispatched after death"); | ||
3486 | + return NO_TRANSIT; | ||
3487 | +} | ||
3488 | diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c | ||
3489 | index 133f322573b5..6528724ad6e5 100644 | ||
3490 | --- a/fs/gfs2/glock.c | ||
3491 | +++ b/fs/gfs2/glock.c | ||
3492 | @@ -1425,26 +1425,32 @@ static struct shrinker glock_shrinker = { | ||
3493 | * @sdp: the filesystem | ||
3494 | * @bucket: the bucket | ||
3495 | * | ||
3496 | + * Note that the function can be called multiple times on the same | ||
3497 | + * object. So the user must ensure that the function can cope with | ||
3498 | + * that. | ||
3499 | */ | ||
3500 | |||
3501 | static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp) | ||
3502 | { | ||
3503 | struct gfs2_glock *gl; | ||
3504 | - struct rhash_head *pos; | ||
3505 | - const struct bucket_table *tbl; | ||
3506 | - int i; | ||
3507 | + struct rhashtable_iter iter; | ||
3508 | |||
3509 | - rcu_read_lock(); | ||
3510 | - tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table); | ||
3511 | - for (i = 0; i < tbl->size; i++) { | ||
3512 | - rht_for_each_entry_rcu(gl, pos, tbl, i, gl_node) { | ||
3513 | + rhashtable_walk_enter(&gl_hash_table, &iter); | ||
3514 | + | ||
3515 | + do { | ||
3516 | + gl = ERR_PTR(rhashtable_walk_start(&iter)); | ||
3517 | + if (gl) | ||
3518 | + continue; | ||
3519 | + | ||
3520 | + while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) | ||
3521 | if ((gl->gl_name.ln_sbd == sdp) && | ||
3522 | lockref_get_not_dead(&gl->gl_lockref)) | ||
3523 | examiner(gl); | ||
3524 | - } | ||
3525 | - } | ||
3526 | - rcu_read_unlock(); | ||
3527 | - cond_resched(); | ||
3528 | + | ||
3529 | + rhashtable_walk_stop(&iter); | ||
3530 | + } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); | ||
3531 | + | ||
3532 | + rhashtable_walk_exit(&iter); | ||
3533 | } | ||
3534 | |||
3535 | /** | ||
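glock_hash_walk() moves from walking the raw bucket table under rcu_read_lock() to the rhashtable iterator API, which copes with the table being resized mid-walk: rhashtable_walk_start()/rhashtable_walk_next() report -EAGAIN and the loop restarts. The same iteration, sketched for a hypothetical table and visit() callback:

	struct rhashtable_iter iter;
	struct my_obj *obj;				/* hypothetical entry type */

	rhashtable_walk_enter(&my_table, &iter);	/* my_table: hypothetical rhashtable */
	do {
		obj = ERR_PTR(rhashtable_walk_start(&iter));
		if (obj)
			continue;			/* -EAGAIN: resize in progress, restart */
		while ((obj = rhashtable_walk_next(&iter)) && !IS_ERR(obj))
			visit(obj);			/* hypothetical per-entry work */
		rhashtable_walk_stop(&iter);
	} while (cond_resched(), obj == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);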
3536 | diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c | ||
3537 | index 0959c9661662..92671914067f 100644 | ||
3538 | --- a/fs/nfs/nfs4state.c | ||
3539 | +++ b/fs/nfs/nfs4state.c | ||
3540 | @@ -1079,6 +1079,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) | ||
3541 | case -NFS4ERR_BADXDR: | ||
3542 | case -NFS4ERR_RESOURCE: | ||
3543 | case -NFS4ERR_NOFILEHANDLE: | ||
3544 | + case -NFS4ERR_MOVED: | ||
3545 | /* Non-seqid mutating errors */ | ||
3546 | return; | ||
3547 | }; | ||
3548 | diff --git a/fs/proc/base.c b/fs/proc/base.c | ||
3549 | index ca651ac00660..e67fec3c9856 100644 | ||
3550 | --- a/fs/proc/base.c | ||
3551 | +++ b/fs/proc/base.c | ||
3552 | @@ -3181,6 +3181,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx) | ||
3553 | iter.tgid += 1, iter = next_tgid(ns, iter)) { | ||
3554 | char name[PROC_NUMBUF]; | ||
3555 | int len; | ||
3556 | + | ||
3557 | + cond_resched(); | ||
3558 | if (!has_pid_permissions(ns, iter.task, 2)) | ||
3559 | continue; | ||
3560 | |||
3561 | diff --git a/fs/romfs/super.c b/fs/romfs/super.c | ||
3562 | index d0f8a38dfafa..0186fe6d39f3 100644 | ||
3563 | --- a/fs/romfs/super.c | ||
3564 | +++ b/fs/romfs/super.c | ||
3565 | @@ -74,6 +74,7 @@ | ||
3566 | #include <linux/highmem.h> | ||
3567 | #include <linux/pagemap.h> | ||
3568 | #include <linux/uaccess.h> | ||
3569 | +#include <linux/major.h> | ||
3570 | #include "internal.h" | ||
3571 | |||
3572 | static struct kmem_cache *romfs_inode_cachep; | ||
3573 | @@ -416,7 +417,22 @@ static void romfs_destroy_inode(struct inode *inode) | ||
3574 | static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf) | ||
3575 | { | ||
3576 | struct super_block *sb = dentry->d_sb; | ||
3577 | - u64 id = huge_encode_dev(sb->s_bdev->bd_dev); | ||
3578 | + u64 id = 0; | ||
3579 | + | ||
3580 | + /* When calling huge_encode_dev(), | ||
3581 | + * use sb->s_bdev->bd_dev when, | ||
3582 | + * - CONFIG_ROMFS_ON_BLOCK defined | ||
3583 | + * use sb->s_dev when, | ||
3584 | + * - CONFIG_ROMFS_ON_BLOCK undefined and | ||
3585 | + * - CONFIG_ROMFS_ON_MTD defined | ||
3586 | + * leave id as 0 when, | ||
3587 | + * - CONFIG_ROMFS_ON_BLOCK undefined and | ||
3588 | + * - CONFIG_ROMFS_ON_MTD undefined | ||
3589 | + */ | ||
3590 | + if (sb->s_bdev) | ||
3591 | + id = huge_encode_dev(sb->s_bdev->bd_dev); | ||
3592 | + else if (sb->s_dev) | ||
3593 | + id = huge_encode_dev(sb->s_dev); | ||
3594 | |||
3595 | buf->f_type = ROMFS_MAGIC; | ||
3596 | buf->f_namelen = ROMFS_MAXFN; | ||
3597 | @@ -489,6 +505,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent) | ||
3598 | sb->s_flags |= MS_RDONLY | MS_NOATIME; | ||
3599 | sb->s_op = &romfs_super_ops; | ||
3600 | |||
3601 | +#ifdef CONFIG_ROMFS_ON_MTD | ||
3602 | + /* Use same dev ID from the underlying mtdblock device */ | ||
3603 | + if (sb->s_mtd) | ||
3604 | + sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index); | ||
3605 | +#endif | ||
3606 | /* read the image superblock and check it */ | ||
3607 | rsb = kmalloc(512, GFP_KERNEL); | ||
3608 | if (!rsb) | ||
3609 | diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c | ||
3610 | index 85959d8324df..b86054cc41db 100644 | ||
3611 | --- a/fs/userfaultfd.c | ||
3612 | +++ b/fs/userfaultfd.c | ||
3613 | @@ -63,6 +63,7 @@ struct userfaultfd_wait_queue { | ||
3614 | struct uffd_msg msg; | ||
3615 | wait_queue_t wq; | ||
3616 | struct userfaultfd_ctx *ctx; | ||
3617 | + bool waken; | ||
3618 | }; | ||
3619 | |||
3620 | struct userfaultfd_wake_range { | ||
3621 | @@ -86,6 +87,12 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode, | ||
3622 | if (len && (start > uwq->msg.arg.pagefault.address || | ||
3623 | start + len <= uwq->msg.arg.pagefault.address)) | ||
3624 | goto out; | ||
3625 | + WRITE_ONCE(uwq->waken, true); | ||
3626 | + /* | ||
3627 | + * The implicit smp_mb__before_spinlock in try_to_wake_up() | ||
3628 | + * renders uwq->waken visible to other CPUs before the task is | ||
3629 | + * woken. | ||
3630 | + */ | ||
3631 | ret = wake_up_state(wq->private, mode); | ||
3632 | if (ret) | ||
3633 | /* | ||
3634 | @@ -264,6 +271,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason) | ||
3635 | struct userfaultfd_wait_queue uwq; | ||
3636 | int ret; | ||
3637 | bool must_wait, return_to_userland; | ||
3638 | + long blocking_state; | ||
3639 | |||
3640 | BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); | ||
3641 | |||
3642 | @@ -333,10 +341,13 @@ int handle_userfault(struct fault_env *fe, unsigned long reason) | ||
3643 | uwq.wq.private = current; | ||
3644 | uwq.msg = userfault_msg(fe->address, fe->flags, reason); | ||
3645 | uwq.ctx = ctx; | ||
3646 | + uwq.waken = false; | ||
3647 | |||
3648 | return_to_userland = | ||
3649 | (fe->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) == | ||
3650 | (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE); | ||
3651 | + blocking_state = return_to_userland ? TASK_INTERRUPTIBLE : | ||
3652 | + TASK_KILLABLE; | ||
3653 | |||
3654 | spin_lock(&ctx->fault_pending_wqh.lock); | ||
3655 | /* | ||
3656 | @@ -349,8 +360,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason) | ||
3657 | * following the spin_unlock to happen before the list_add in | ||
3658 | * __add_wait_queue. | ||
3659 | */ | ||
3660 | - set_current_state(return_to_userland ? TASK_INTERRUPTIBLE : | ||
3661 | - TASK_KILLABLE); | ||
3662 | + set_current_state(blocking_state); | ||
3663 | spin_unlock(&ctx->fault_pending_wqh.lock); | ||
3664 | |||
3665 | must_wait = userfaultfd_must_wait(ctx, fe->address, fe->flags, reason); | ||
3666 | @@ -362,6 +372,29 @@ int handle_userfault(struct fault_env *fe, unsigned long reason) | ||
3667 | wake_up_poll(&ctx->fd_wqh, POLLIN); | ||
3668 | schedule(); | ||
3669 | ret |= VM_FAULT_MAJOR; | ||
3670 | + | ||
3671 | + /* | ||
3672 | + * False wakeups can originate even from rwsem before | ||
3673 | + * up_read() however userfaults will wait either for a | ||
3674 | + * targeted wakeup on the specific uwq waitqueue from | ||
3675 | + * wake_userfault() or for signals or for uffd | ||
3676 | + * release. | ||
3677 | + */ | ||
3678 | + while (!READ_ONCE(uwq.waken)) { | ||
3679 | + /* | ||
3680 | + * This needs the full smp_store_mb() | ||
3681 | + * guarantee as the state write must be | ||
3682 | + * visible to other CPUs before reading | ||
3683 | + * uwq.waken from other CPUs. | ||
3684 | + */ | ||
3685 | + set_current_state(blocking_state); | ||
3686 | + if (READ_ONCE(uwq.waken) || | ||
3687 | + READ_ONCE(ctx->released) || | ||
3688 | + (return_to_userland ? signal_pending(current) : | ||
3689 | + fatal_signal_pending(current))) | ||
3690 | + break; | ||
3691 | + schedule(); | ||
3692 | + } | ||
3693 | } | ||
3694 | |||
3695 | __set_current_state(TASK_RUNNING); | ||
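The userfaultfd hunks replace the single schedule() with the canonical wait loop keyed on uwq.waken, so a spurious wakeup (rwsem activity, for instance) no longer returns to the fault path before a real userfault wakeup, a signal, or release of the context. The generic shape of such a loop, with condition standing for a flag the waker sets before calling wake_up:

	while (!READ_ONCE(condition)) {
		set_current_state(TASK_KILLABLE);	/* publish the sleep state first */
		if (READ_ONCE(condition) || fatal_signal_pending(current))
			break;				/* re-check after publishing state */
		schedule();
	}
	__set_current_state(TASK_RUNNING);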
3696 | diff --git a/include/drm/drmP.h b/include/drm/drmP.h | ||
3697 | index e9fb2e802feb..0c4f9c67c221 100644 | ||
3698 | --- a/include/drm/drmP.h | ||
3699 | +++ b/include/drm/drmP.h | ||
3700 | @@ -776,6 +776,7 @@ struct drm_device { | ||
3701 | struct drm_minor *control; /**< Control node */ | ||
3702 | struct drm_minor *primary; /**< Primary node */ | ||
3703 | struct drm_minor *render; /**< Render node */ | ||
3704 | + bool registered; | ||
3705 | |||
3706 | /* currently active master for this device. Protected by master_mutex */ | ||
3707 | struct drm_master *master; | ||
3708 | diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h | ||
3709 | index ac9d7d8e0e43..d8bb8d151825 100644 | ||
3710 | --- a/include/drm/drm_connector.h | ||
3711 | +++ b/include/drm/drm_connector.h | ||
3712 | @@ -345,6 +345,8 @@ struct drm_connector_funcs { | ||
3713 | * core drm connector interfaces. Everything added from this callback | ||
3714 | * should be unregistered in the early_unregister callback. | ||
3715 | * | ||
3716 | + * This is called while holding drm_connector->mutex. | ||
3717 | + * | ||
3718 | * Returns: | ||
3719 | * | ||
3720 | * 0 on success, or a negative error code on failure. | ||
3721 | @@ -359,6 +361,8 @@ struct drm_connector_funcs { | ||
3722 | * late_register(). It is called from drm_connector_unregister(), | ||
3723 | * early in the driver unload sequence to disable userspace access | ||
3724 | * before data structures are torndown. | ||
3725 | + * | ||
3726 | + * This is called while holding drm_connector->mutex. | ||
3727 | */ | ||
3728 | void (*early_unregister)(struct drm_connector *connector); | ||
3729 | |||
3730 | @@ -511,7 +515,6 @@ struct drm_cmdline_mode { | ||
3731 | * @interlace_allowed: can this connector handle interlaced modes? | ||
3732 | * @doublescan_allowed: can this connector handle doublescan? | ||
3733 | * @stereo_allowed: can this connector handle stereo modes? | ||
3734 | - * @registered: is this connector exposed (registered) with userspace? | ||
3735 | * @modes: modes available on this connector (from fill_modes() + user) | ||
3736 | * @status: one of the drm_connector_status enums (connected, not, or unknown) | ||
3737 | * @probed_modes: list of modes derived directly from the display | ||
3738 | @@ -560,6 +563,13 @@ struct drm_connector { | ||
3739 | char *name; | ||
3740 | |||
3741 | /** | ||
3742 | + * @mutex: Lock for general connector state, but currently only protects | ||
3743 | + * @registered. Most of the connector state is still protected by the | ||
3744 | + * mutex in &drm_mode_config. | ||
3745 | + */ | ||
3746 | + struct mutex mutex; | ||
3747 | + | ||
3748 | + /** | ||
3749 | * @index: Compacted connector index, which matches the position inside | ||
3750 | * the mode_config.list for drivers not supporting hot-add/removing. Can | ||
3751 | * be used as an array index. It is invariant over the lifetime of the | ||
3752 | @@ -572,6 +582,10 @@ struct drm_connector { | ||
3753 | bool interlace_allowed; | ||
3754 | bool doublescan_allowed; | ||
3755 | bool stereo_allowed; | ||
3756 | + /** | ||
3757 | + * @registered: Is this connector exposed (registered) with userspace? | ||
3758 | + * Protected by @mutex. | ||
3759 | + */ | ||
3760 | bool registered; | ||
3761 | struct list_head modes; /* list of modes on this connector */ | ||
3762 | |||
3763 | diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h | ||
3764 | index 13ba552e6c09..4c467ef50159 100644 | ||
3765 | --- a/include/linux/fscache-cache.h | ||
3766 | +++ b/include/linux/fscache-cache.h | ||
3767 | @@ -360,6 +360,7 @@ struct fscache_object { | ||
3768 | #define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ | ||
3769 | #define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */ | ||
3770 | #define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */ | ||
3771 | +#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */ | ||
3772 | |||
3773 | struct list_head cache_link; /* link in cache->object_list */ | ||
3774 | struct hlist_node cookie_link; /* link in cookie->backing_objects */ | ||
3775 | diff --git a/include/linux/log2.h b/include/linux/log2.h | ||
3776 | index f38fae23bdac..c373295f359f 100644 | ||
3777 | --- a/include/linux/log2.h | ||
3778 | +++ b/include/linux/log2.h | ||
3779 | @@ -194,6 +194,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n) | ||
3780 | * ... and so on. | ||
3781 | */ | ||
3782 | |||
3783 | -#define order_base_2(n) ilog2(roundup_pow_of_two(n)) | ||
3784 | +static inline __attribute_const__ | ||
3785 | +int __order_base_2(unsigned long n) | ||
3786 | +{ | ||
3787 | + return n > 1 ? ilog2(n - 1) + 1 : 0; | ||
3788 | +} | ||
3789 | |||
3790 | +#define order_base_2(n) \ | ||
3791 | +( \ | ||
3792 | + __builtin_constant_p(n) ? ( \ | ||
3793 | + ((n) == 0 || (n) == 1) ? 0 : \ | ||
3794 | + ilog2((n) - 1) + 1) : \ | ||
3795 | + __order_base_2(n) \ | ||
3796 | +) | ||
3797 | #endif /* _LINUX_LOG2_H */ | ||
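The log2.h hunk splits order_base_2() into a branch the compiler can constant-fold and a runtime helper, and gives the n == 0 and n == 1 cases a defined result of 0 instead of going through roundup_pow_of_two(), whose result for 0 is not well defined. The macro still returns the smallest order such that (1UL << order) >= n; a few worked values:

	order_base_2(0);	/* -> 0                            */
	order_base_2(1);	/* -> 0                            */
	order_base_2(16);	/* -> 4  (already a power of two)  */
	order_base_2(17);	/* -> 5  (rounds up)               */
	order_base_2(4096);	/* -> 12                           */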
3798 | diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h | ||
3799 | index 257173e0095e..f541da68d1e7 100644 | ||
3800 | --- a/include/linux/micrel_phy.h | ||
3801 | +++ b/include/linux/micrel_phy.h | ||
3802 | @@ -35,6 +35,8 @@ | ||
3803 | #define PHY_ID_KSZ886X 0x00221430 | ||
3804 | #define PHY_ID_KSZ8863 0x00221435 | ||
3805 | |||
3806 | +#define PHY_ID_KSZ8795 0x00221550 | ||
3807 | + | ||
3808 | /* struct phy_device dev_flags definitions */ | ||
3809 | #define MICREL_PHY_50MHZ_CLK 0x00000001 | ||
3810 | #define MICREL_PHY_FXEN 0x00000002 | ||
3811 | diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h | ||
3812 | index bb9b102c15cd..780e7171f548 100644 | ||
3813 | --- a/include/linux/netdevice.h | ||
3814 | +++ b/include/linux/netdevice.h | ||
3815 | @@ -865,11 +865,15 @@ struct netdev_xdp { | ||
3816 | * of useless work if you return NETDEV_TX_BUSY. | ||
3817 | * Required; cannot be NULL. | ||
3818 | * | ||
3819 | - * netdev_features_t (*ndo_fix_features)(struct net_device *dev, | ||
3820 | - * netdev_features_t features); | ||
3821 | - * Adjusts the requested feature flags according to device-specific | ||
3822 | - * constraints, and returns the resulting flags. Must not modify | ||
3823 | - * the device state. | ||
3824 | + * netdev_features_t (*ndo_features_check)(struct sk_buff *skb, | ||
3825 | + * struct net_device *dev | ||
3826 | + * netdev_features_t features); | ||
3827 | + * Called by core transmit path to determine if device is capable of | ||
3828 | + * performing offload operations on a given packet. This is to give | ||
3829 | + * the device an opportunity to implement any restrictions that cannot | ||
3830 | + * be otherwise expressed by feature flags. The check is called with | ||
3831 | + * the set of features that the stack has calculated and it returns | ||
3832 | + * those the driver believes to be appropriate. | ||
3833 | * | ||
3834 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, | ||
3835 | * void *accel_priv, select_queue_fallback_t fallback); | ||
3836 | @@ -1027,6 +1031,12 @@ struct netdev_xdp { | ||
3837 | * Called to release previously enslaved netdev. | ||
3838 | * | ||
3839 | * Feature/offload setting functions. | ||
3840 | + * netdev_features_t (*ndo_fix_features)(struct net_device *dev, | ||
3841 | + * netdev_features_t features); | ||
3842 | + * Adjusts the requested feature flags according to device-specific | ||
3843 | + * constraints, and returns the resulting flags. Must not modify | ||
3844 | + * the device state. | ||
3845 | + * | ||
3846 | * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); | ||
3847 | * Called to update device configuration to new features. Passed | ||
3848 | * feature set might be less than what was returned by ndo_fix_features()). | ||
3849 | @@ -1099,15 +1109,6 @@ struct netdev_xdp { | ||
3850 | * Callback to use for xmit over the accelerated station. This | ||
3851 | * is used in place of ndo_start_xmit on accelerated net | ||
3852 | * devices. | ||
3853 | - * netdev_features_t (*ndo_features_check)(struct sk_buff *skb, | ||
3854 | - * struct net_device *dev | ||
3855 | - * netdev_features_t features); | ||
3856 | - * Called by core transmit path to determine if device is capable of | ||
3857 | - * performing offload operations on a given packet. This is to give | ||
3858 | - * the device an opportunity to implement any restrictions that cannot | ||
3859 | - * be otherwise expressed by feature flags. The check is called with | ||
3860 | - * the set of features that the stack has calculated and it returns | ||
3861 | - * those the driver believes to be appropriate. | ||
3862 | * int (*ndo_set_tx_maxrate)(struct net_device *dev, | ||
3863 | * int queue_index, u32 maxrate); | ||
3864 | * Called when a user wants to set a max-rate limitation of specific | ||
3865 | diff --git a/include/linux/nmi.h b/include/linux/nmi.h | ||
3866 | index a78c35cff1ae..0a3fadc32693 100644 | ||
3867 | --- a/include/linux/nmi.h | ||
3868 | +++ b/include/linux/nmi.h | ||
3869 | @@ -7,6 +7,23 @@ | ||
3870 | #include <linux/sched.h> | ||
3871 | #include <asm/irq.h> | ||
3872 | |||
3873 | +/* | ||
3874 | + * The run state of the lockup detectors is controlled by the content of the | ||
3875 | + * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - | ||
3876 | + * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. | ||
3877 | + * | ||
3878 | + * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled' | ||
3879 | + * are variables that are only used as an 'interface' between the parameters | ||
3880 | + * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The | ||
3881 | + * 'watchdog_thresh' variable is handled differently because its value is not | ||
3882 | + * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh' | ||
3883 | + * is equal to zero. | ||
3884 | + */ | ||
3885 | +#define NMI_WATCHDOG_ENABLED_BIT 0 | ||
3886 | +#define SOFT_WATCHDOG_ENABLED_BIT 1 | ||
3887 | +#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT) | ||
3888 | +#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT) | ||
3889 | + | ||
3890 | /** | ||
3891 | * touch_nmi_watchdog - restart NMI watchdog timeout. | ||
3892 | * | ||
3893 | @@ -91,9 +108,17 @@ extern int nmi_watchdog_enabled; | ||
3894 | extern int soft_watchdog_enabled; | ||
3895 | extern int watchdog_user_enabled; | ||
3896 | extern int watchdog_thresh; | ||
3897 | +extern unsigned long watchdog_enabled; | ||
3898 | extern unsigned long *watchdog_cpumask_bits; | ||
3899 | +extern atomic_t watchdog_park_in_progress; | ||
3900 | +#ifdef CONFIG_SMP | ||
3901 | extern int sysctl_softlockup_all_cpu_backtrace; | ||
3902 | extern int sysctl_hardlockup_all_cpu_backtrace; | ||
3903 | +#else | ||
3904 | +#define sysctl_softlockup_all_cpu_backtrace 0 | ||
3905 | +#define sysctl_hardlockup_all_cpu_backtrace 0 | ||
3906 | +#endif | ||
3907 | +extern bool is_hardlockup(void); | ||
3908 | struct ctl_table; | ||
3909 | extern int proc_watchdog(struct ctl_table *, int , | ||
3910 | void __user *, size_t *, loff_t *); | ||
3911 | diff --git a/include/linux/pci.h b/include/linux/pci.h | ||
3912 | index a38772a85588..1b711796d989 100644 | ||
3913 | --- a/include/linux/pci.h | ||
3914 | +++ b/include/linux/pci.h | ||
3915 | @@ -178,6 +178,11 @@ enum pci_dev_flags { | ||
3916 | PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7), | ||
3917 | /* Get VPD from function 0 VPD */ | ||
3918 | PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8), | ||
3919 | + /* | ||
3920 | + * Resume before calling the driver's system suspend hooks, disabling | ||
3921 | + * the direct_complete optimization. | ||
3922 | + */ | ||
3923 | + PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11), | ||
3924 | }; | ||
3925 | |||
3926 | enum pci_irq_reroute_variant { | ||
3927 | diff --git a/include/net/ipv6.h b/include/net/ipv6.h | ||
3928 | index 91afb4aadaa6..615ce0abba9c 100644 | ||
3929 | --- a/include/net/ipv6.h | ||
3930 | +++ b/include/net/ipv6.h | ||
3931 | @@ -776,6 +776,11 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, | ||
3932 | { | ||
3933 | u32 hash; | ||
3934 | |||
3935 | + /* @flowlabel may include more than a flow label, e.g., the traffic class. | ||
3936 | + * Here we want only the flow label value. | ||
3937 | + */ | ||
3938 | + flowlabel &= IPV6_FLOWLABEL_MASK; | ||
3939 | + | ||
3940 | if (flowlabel || | ||
3941 | net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF || | ||
3942 | (!autolabel && | ||
3943 | diff --git a/include/uapi/linux/netfilter/nf_log.h b/include/uapi/linux/netfilter/nf_log.h | ||
3944 | index 8be21e02387d..d0b5fa91ff54 100644 | ||
3945 | --- a/include/uapi/linux/netfilter/nf_log.h | ||
3946 | +++ b/include/uapi/linux/netfilter/nf_log.h | ||
3947 | @@ -9,4 +9,6 @@ | ||
3948 | #define NF_LOG_MACDECODE 0x20 /* Decode MAC header */ | ||
3949 | #define NF_LOG_MASK 0x2f | ||
3950 | |||
3951 | +#define NF_LOG_PREFIXLEN 128 | ||
3952 | + | ||
3953 | #endif /* _NETFILTER_NF_LOG_H */ | ||
3954 | diff --git a/kernel/Makefile b/kernel/Makefile | ||
3955 | index eb26e12c6c2a..314e7d62f5f0 100644 | ||
3956 | --- a/kernel/Makefile | ||
3957 | +++ b/kernel/Makefile | ||
3958 | @@ -84,6 +84,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o | ||
3959 | obj-$(CONFIG_KGDB) += debug/ | ||
3960 | obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o | ||
3961 | obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o | ||
3962 | +obj-$(CONFIG_HARDLOCKUP_DETECTOR) += watchdog_hld.o | ||
3963 | obj-$(CONFIG_SECCOMP) += seccomp.o | ||
3964 | obj-$(CONFIG_RELAY) += relay.o | ||
3965 | obj-$(CONFIG_SYSCTL) += utsname_sysctl.o | ||
3966 | diff --git a/kernel/ucount.c b/kernel/ucount.c | ||
3967 | index f4ac18509ecf..c761cdba2a2d 100644 | ||
3968 | --- a/kernel/ucount.c | ||
3969 | +++ b/kernel/ucount.c | ||
3970 | @@ -231,11 +231,10 @@ static __init int user_namespace_sysctl_init(void) | ||
3971 | * properly. | ||
3972 | */ | ||
3973 | user_header = register_sysctl("user", empty); | ||
3974 | + kmemleak_ignore(user_header); | ||
3975 | BUG_ON(!user_header); | ||
3976 | BUG_ON(!setup_userns_sysctls(&init_user_ns)); | ||
3977 | #endif | ||
3978 | return 0; | ||
3979 | } | ||
3980 | subsys_initcall(user_namespace_sysctl_init); | ||
3981 | - | ||
3982 | - | ||
3983 | diff --git a/kernel/watchdog.c b/kernel/watchdog.c | ||
3984 | index 6d1020c03d41..63177be0159e 100644 | ||
3985 | --- a/kernel/watchdog.c | ||
3986 | +++ b/kernel/watchdog.c | ||
3987 | @@ -24,32 +24,14 @@ | ||
3988 | |||
3989 | #include <asm/irq_regs.h> | ||
3990 | #include <linux/kvm_para.h> | ||
3991 | -#include <linux/perf_event.h> | ||
3992 | #include <linux/kthread.h> | ||
3993 | |||
3994 | -/* | ||
3995 | - * The run state of the lockup detectors is controlled by the content of the | ||
3996 | - * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - | ||
3997 | - * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. | ||
3998 | - * | ||
3999 | - * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled' | ||
4000 | - * are variables that are only used as an 'interface' between the parameters | ||
4001 | - * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The | ||
4002 | - * 'watchdog_thresh' variable is handled differently because its value is not | ||
4003 | - * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh' | ||
4004 | - * is equal zero. | ||
4005 | - */ | ||
4006 | -#define NMI_WATCHDOG_ENABLED_BIT 0 | ||
4007 | -#define SOFT_WATCHDOG_ENABLED_BIT 1 | ||
4008 | -#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT) | ||
4009 | -#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT) | ||
4010 | - | ||
4011 | static DEFINE_MUTEX(watchdog_proc_mutex); | ||
4012 | |||
4013 | -#ifdef CONFIG_HARDLOCKUP_DETECTOR | ||
4014 | -static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED; | ||
4015 | +#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR) | ||
4016 | +unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED; | ||
4017 | #else | ||
4018 | -static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED; | ||
4019 | +unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED; | ||
4020 | #endif | ||
4021 | int __read_mostly nmi_watchdog_enabled; | ||
4022 | int __read_mostly soft_watchdog_enabled; | ||
4023 | @@ -59,9 +41,6 @@ int __read_mostly watchdog_thresh = 10; | ||
4024 | #ifdef CONFIG_SMP | ||
4025 | int __read_mostly sysctl_softlockup_all_cpu_backtrace; | ||
4026 | int __read_mostly sysctl_hardlockup_all_cpu_backtrace; | ||
4027 | -#else | ||
4028 | -#define sysctl_softlockup_all_cpu_backtrace 0 | ||
4029 | -#define sysctl_hardlockup_all_cpu_backtrace 0 | ||
4030 | #endif | ||
4031 | static struct cpumask watchdog_cpumask __read_mostly; | ||
4032 | unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask); | ||
4033 | @@ -70,6 +49,8 @@ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask); | ||
4034 | #define for_each_watchdog_cpu(cpu) \ | ||
4035 | for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask) | ||
4036 | |||
4037 | +atomic_t watchdog_park_in_progress = ATOMIC_INIT(0); | ||
4038 | + | ||
4039 | /* | ||
4040 | * The 'watchdog_running' variable is set to 1 when the watchdog threads | ||
4041 | * are registered/started and is set to 0 when the watchdog threads are | ||
4042 | @@ -100,50 +81,9 @@ static DEFINE_PER_CPU(bool, soft_watchdog_warn); | ||
4043 | static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts); | ||
4044 | static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt); | ||
4045 | static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved); | ||
4046 | -#ifdef CONFIG_HARDLOCKUP_DETECTOR | ||
4047 | -static DEFINE_PER_CPU(bool, hard_watchdog_warn); | ||
4048 | -static DEFINE_PER_CPU(bool, watchdog_nmi_touch); | ||
4049 | static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); | ||
4050 | -static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); | ||
4051 | -#endif | ||
4052 | static unsigned long soft_lockup_nmi_warn; | ||
4053 | |||
4054 | -/* boot commands */ | ||
4055 | -/* | ||
4056 | - * Should we panic when a soft-lockup or hard-lockup occurs: | ||
4057 | - */ | ||
4058 | -#ifdef CONFIG_HARDLOCKUP_DETECTOR | ||
4059 | -unsigned int __read_mostly hardlockup_panic = | ||
4060 | - CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE; | ||
4061 | -static unsigned long hardlockup_allcpu_dumped; | ||
4062 | -/* | ||
4063 | - * We may not want to enable hard lockup detection by default in all cases, | ||
4064 | - * for example when running the kernel as a guest on a hypervisor. In these | ||
4065 | - * cases this function can be called to disable hard lockup detection. This | ||
4066 | - * function should only be executed once by the boot processor before the | ||
4067 | - * kernel command line parameters are parsed, because otherwise it is not | ||
4068 | - * possible to override this in hardlockup_panic_setup(). | ||
4069 | - */ | ||
4070 | -void hardlockup_detector_disable(void) | ||
4071 | -{ | ||
4072 | - watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; | ||
4073 | -} | ||
4074 | - | ||
4075 | -static int __init hardlockup_panic_setup(char *str) | ||
4076 | -{ | ||
4077 | - if (!strncmp(str, "panic", 5)) | ||
4078 | - hardlockup_panic = 1; | ||
4079 | - else if (!strncmp(str, "nopanic", 7)) | ||
4080 | - hardlockup_panic = 0; | ||
4081 | - else if (!strncmp(str, "0", 1)) | ||
4082 | - watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; | ||
4083 | - else if (!strncmp(str, "1", 1)) | ||
4084 | - watchdog_enabled |= NMI_WATCHDOG_ENABLED; | ||
4085 | - return 1; | ||
4086 | -} | ||
4087 | -__setup("nmi_watchdog=", hardlockup_panic_setup); | ||
4088 | -#endif | ||
4089 | - | ||
4090 | unsigned int __read_mostly softlockup_panic = | ||
4091 | CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; | ||
4092 | |||
4093 | @@ -264,32 +204,14 @@ void touch_all_softlockup_watchdogs(void) | ||
4094 | wq_watchdog_touch(-1); | ||
4095 | } | ||
4096 | |||
4097 | -#ifdef CONFIG_HARDLOCKUP_DETECTOR | ||
4098 | -void touch_nmi_watchdog(void) | ||
4099 | -{ | ||
4100 | - /* | ||
4101 | - * Using __raw here because some code paths have | ||
4102 | - * preemption enabled. If preemption is enabled | ||
4103 | - * then interrupts should be enabled too, in which | ||
4104 | - * case we shouldn't have to worry about the watchdog | ||
4105 | - * going off. | ||
4106 | - */ | ||
4107 | - raw_cpu_write(watchdog_nmi_touch, true); | ||
4108 | - touch_softlockup_watchdog(); | ||
4109 | -} | ||
4110 | -EXPORT_SYMBOL(touch_nmi_watchdog); | ||
4111 | - | ||
4112 | -#endif | ||
4113 | - | ||
4114 | void touch_softlockup_watchdog_sync(void) | ||
4115 | { | ||
4116 | __this_cpu_write(softlockup_touch_sync, true); | ||
4117 | __this_cpu_write(watchdog_touch_ts, 0); | ||
4118 | } | ||
4119 | |||
4120 | -#ifdef CONFIG_HARDLOCKUP_DETECTOR | ||
4121 | /* watchdog detector functions */ | ||
4122 | -static bool is_hardlockup(void) | ||
4123 | +bool is_hardlockup(void) | ||
4124 | { | ||
4125 | unsigned long hrint = __this_cpu_read(hrtimer_interrupts); | ||
4126 | |||
4127 | @@ -299,7 +221,6 @@ static bool is_hardlockup(void) | ||
4128 | __this_cpu_write(hrtimer_interrupts_saved, hrint); | ||
4129 | return false; | ||
4130 | } | ||
4131 | -#endif | ||
4132 | |||
4133 | static int is_softlockup(unsigned long touch_ts) | ||
4134 | { | ||
4135 | @@ -313,77 +234,22 @@ static int is_softlockup(unsigned long touch_ts) | ||
4136 | return 0; | ||
4137 | } | ||
4138 | |||
4139 | -#ifdef CONFIG_HARDLOCKUP_DETECTOR | ||
4140 | - | ||
4141 | -static struct perf_event_attr wd_hw_attr = { | ||
4142 | - .type = PERF_TYPE_HARDWARE, | ||
4143 | - .config = PERF_COUNT_HW_CPU_CYCLES, | ||
4144 | - .size = sizeof(struct perf_event_attr), | ||
4145 | - .pinned = 1, | ||
4146 | - .disabled = 1, | ||
4147 | -}; | ||
4148 | - | ||
4149 | -/* Callback function for perf event subsystem */ | ||
4150 | -static void watchdog_overflow_callback(struct perf_event *event, | ||
4151 | - struct perf_sample_data *data, | ||
4152 | - struct pt_regs *regs) | ||
4153 | -{ | ||
4154 | - /* Ensure the watchdog never gets throttled */ | ||
4155 | - event->hw.interrupts = 0; | ||
4156 | - | ||
4157 | - if (__this_cpu_read(watchdog_nmi_touch) == true) { | ||
4158 | - __this_cpu_write(watchdog_nmi_touch, false); | ||
4159 | - return; | ||
4160 | - } | ||
4161 | - | ||
4162 | - /* check for a hardlockup | ||
4163 | - * This is done by making sure our timer interrupt | ||
4164 | - * is incrementing. The timer interrupt should have | ||
4165 | - * fired multiple times before we overflow'd. If it hasn't | ||
4166 | - * then this is a good indication the cpu is stuck | ||
4167 | - */ | ||
4168 | - if (is_hardlockup()) { | ||
4169 | - int this_cpu = smp_processor_id(); | ||
4170 | - | ||
4171 | - /* only print hardlockups once */ | ||
4172 | - if (__this_cpu_read(hard_watchdog_warn) == true) | ||
4173 | - return; | ||
4174 | - | ||
4175 | - pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu); | ||
4176 | - print_modules(); | ||
4177 | - print_irqtrace_events(current); | ||
4178 | - if (regs) | ||
4179 | - show_regs(regs); | ||
4180 | - else | ||
4181 | - dump_stack(); | ||
4182 | - | ||
4183 | - /* | ||
4184 | - * Perform all-CPU dump only once to avoid multiple hardlockups | ||
4185 | - * generating interleaving traces | ||
4186 | - */ | ||
4187 | - if (sysctl_hardlockup_all_cpu_backtrace && | ||
4188 | - !test_and_set_bit(0, &hardlockup_allcpu_dumped)) | ||
4189 | - trigger_allbutself_cpu_backtrace(); | ||
4190 | - | ||
4191 | - if (hardlockup_panic) | ||
4192 | - nmi_panic(regs, "Hard LOCKUP"); | ||
4193 | - | ||
4194 | - __this_cpu_write(hard_watchdog_warn, true); | ||
4195 | - return; | ||
4196 | - } | ||
4197 | - | ||
4198 | - __this_cpu_write(hard_watchdog_warn, false); | ||
4199 | - return; | ||
4200 | -} | ||
4201 | -#endif /* CONFIG_HARDLOCKUP_DETECTOR */ | ||
4202 | - | ||
4203 | static void watchdog_interrupt_count(void) | ||
4204 | { | ||
4205 | __this_cpu_inc(hrtimer_interrupts); | ||
4206 | } | ||
4207 | |||
4208 | -static int watchdog_nmi_enable(unsigned int cpu); | ||
4209 | -static void watchdog_nmi_disable(unsigned int cpu); | ||
4210 | +/* | ||
4211 | + * These two functions are mostly architecture specific | ||
4212 | + * defining them as weak here. | ||
4213 | + */ | ||
4214 | +int __weak watchdog_nmi_enable(unsigned int cpu) | ||
4215 | +{ | ||
4216 | + return 0; | ||
4217 | +} | ||
4218 | +void __weak watchdog_nmi_disable(unsigned int cpu) | ||
4219 | +{ | ||
4220 | +} | ||
4221 | |||
4222 | static int watchdog_enable_all_cpus(void); | ||
4223 | static void watchdog_disable_all_cpus(void); | ||
4224 | @@ -396,6 +262,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) | ||
4225 | int duration; | ||
4226 | int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace; | ||
4227 | |||
4228 | + if (atomic_read(&watchdog_park_in_progress) != 0) | ||
4229 | + return HRTIMER_NORESTART; | ||
4230 | + | ||
4231 | /* kick the hardlockup detector */ | ||
4232 | watchdog_interrupt_count(); | ||
4233 | |||
4234 | @@ -576,109 +445,6 @@ static void watchdog(unsigned int cpu) | ||
4235 | watchdog_nmi_disable(cpu); | ||
4236 | } | ||
4237 | |||
4238 | -#ifdef CONFIG_HARDLOCKUP_DETECTOR | ||
4239 | -/* | ||
4240 | - * People like the simple clean cpu node info on boot. | ||
4241 | - * Reduce the watchdog noise by only printing messages | ||
4242 | - * that are different from what cpu0 displayed. | ||
4243 | - */ | ||
4244 | -static unsigned long cpu0_err; | ||
4245 | - | ||
4246 | -static int watchdog_nmi_enable(unsigned int cpu) | ||
4247 | -{ | ||
4248 | - struct perf_event_attr *wd_attr; | ||
4249 | - struct perf_event *event = per_cpu(watchdog_ev, cpu); | ||
4250 | - | ||
4251 | - /* nothing to do if the hard lockup detector is disabled */ | ||
4252 | - if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) | ||
4253 | - goto out; | ||
4254 | - | ||
4255 | - /* is it already setup and enabled? */ | ||
4256 | - if (event && event->state > PERF_EVENT_STATE_OFF) | ||
4257 | - goto out; | ||
4258 | - | ||
4259 | - /* it is setup but not enabled */ | ||
4260 | - if (event != NULL) | ||
4261 | - goto out_enable; | ||
4262 | - | ||
4263 | - wd_attr = &wd_hw_attr; | ||
4264 | - wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh); | ||
4265 | - | ||
4266 | - /* Try to register using hardware perf events */ | ||
4267 | - event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); | ||
4268 | - | ||
4269 | - /* save cpu0 error for future comparision */ | ||
4270 | - if (cpu == 0 && IS_ERR(event)) | ||
4271 | - cpu0_err = PTR_ERR(event); | ||
4272 | - | ||
4273 | - if (!IS_ERR(event)) { | ||
4274 | - /* only print for cpu0 or different than cpu0 */ | ||
4275 | - if (cpu == 0 || cpu0_err) | ||
4276 | - pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n"); | ||
4277 | - goto out_save; | ||
4278 | - } | ||
4279 | - | ||
4280 | - /* | ||
4281 | - * Disable the hard lockup detector if _any_ CPU fails to set up | ||
4282 | - * set up the hardware perf event. The watchdog() function checks | ||
4283 | - * the NMI_WATCHDOG_ENABLED bit periodically. | ||
4284 | - * | ||
4285 | - * The barriers are for syncing up watchdog_enabled across all the | ||
4286 | - * cpus, as clear_bit() does not use barriers. | ||
4287 | - */ | ||
4288 | - smp_mb__before_atomic(); | ||
4289 | - clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled); | ||
4290 | - smp_mb__after_atomic(); | ||
4291 | - | ||
4292 | - /* skip displaying the same error again */ | ||
4293 | - if (cpu > 0 && (PTR_ERR(event) == cpu0_err)) | ||
4294 | - return PTR_ERR(event); | ||
4295 | - | ||
4296 | - /* vary the KERN level based on the returned errno */ | ||
4297 | - if (PTR_ERR(event) == -EOPNOTSUPP) | ||
4298 | - pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu); | ||
4299 | - else if (PTR_ERR(event) == -ENOENT) | ||
4300 | - pr_warn("disabled (cpu%i): hardware events not enabled\n", | ||
4301 | - cpu); | ||
4302 | - else | ||
4303 | - pr_err("disabled (cpu%i): unable to create perf event: %ld\n", | ||
4304 | - cpu, PTR_ERR(event)); | ||
4305 | - | ||
4306 | - pr_info("Shutting down hard lockup detector on all cpus\n"); | ||
4307 | - | ||
4308 | - return PTR_ERR(event); | ||
4309 | - | ||
4310 | - /* success path */ | ||
4311 | -out_save: | ||
4312 | - per_cpu(watchdog_ev, cpu) = event; | ||
4313 | -out_enable: | ||
4314 | - perf_event_enable(per_cpu(watchdog_ev, cpu)); | ||
4315 | -out: | ||
4316 | - return 0; | ||
4317 | -} | ||
4318 | - | ||
4319 | -static void watchdog_nmi_disable(unsigned int cpu) | ||
4320 | -{ | ||
4321 | - struct perf_event *event = per_cpu(watchdog_ev, cpu); | ||
4322 | - | ||
4323 | - if (event) { | ||
4324 | - perf_event_disable(event); | ||
4325 | - per_cpu(watchdog_ev, cpu) = NULL; | ||
4326 | - | ||
4327 | - /* should be in cleanup, but blocks oprofile */ | ||
4328 | - perf_event_release_kernel(event); | ||
4329 | - } | ||
4330 | - if (cpu == 0) { | ||
4331 | - /* watchdog_nmi_enable() expects this to be zero initially. */ | ||
4332 | - cpu0_err = 0; | ||
4333 | - } | ||
4334 | -} | ||
4335 | - | ||
4336 | -#else | ||
4337 | -static int watchdog_nmi_enable(unsigned int cpu) { return 0; } | ||
4338 | -static void watchdog_nmi_disable(unsigned int cpu) { return; } | ||
4339 | -#endif /* CONFIG_HARDLOCKUP_DETECTOR */ | ||
4340 | - | ||
4341 | static struct smp_hotplug_thread watchdog_threads = { | ||
4342 | .store = &softlockup_watchdog, | ||
4343 | .thread_should_run = watchdog_should_run, | ||
4344 | @@ -706,12 +472,16 @@ static int watchdog_park_threads(void) | ||
4345 | { | ||
4346 | int cpu, ret = 0; | ||
4347 | |||
4348 | + atomic_set(&watchdog_park_in_progress, 1); | ||
4349 | + | ||
4350 | for_each_watchdog_cpu(cpu) { | ||
4351 | ret = kthread_park(per_cpu(softlockup_watchdog, cpu)); | ||
4352 | if (ret) | ||
4353 | break; | ||
4354 | } | ||
4355 | |||
4356 | + atomic_set(&watchdog_park_in_progress, 0); | ||
4357 | + | ||
4358 | return ret; | ||
4359 | } | ||
4360 | |||
4361 | diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c | ||
4362 | new file mode 100644 | ||
4363 | index 000000000000..12b8dd640786 | ||
4364 | --- /dev/null | ||
4365 | +++ b/kernel/watchdog_hld.c | ||
4366 | @@ -0,0 +1,230 @@ | ||
4367 | +/* | ||
4368 | + * Detect hard lockups on a system | ||
4369 | + * | ||
4370 | + * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc. | ||
4371 | + * | ||
4372 | + * Note: Most of this code is borrowed heavily from the original softlockup | ||
4373 | + * detector, so thanks to Ingo for the initial implementation. | ||
4374 | + * Some chunks also taken from the old x86-specific nmi watchdog code, thanks | ||
4375 | + * to those contributors as well. | ||
4376 | + */ | ||
4377 | + | ||
4378 | +#define pr_fmt(fmt) "NMI watchdog: " fmt | ||
4379 | + | ||
4380 | +#include <linux/nmi.h> | ||
4381 | +#include <linux/module.h> | ||
4382 | +#include <asm/irq_regs.h> | ||
4383 | +#include <linux/perf_event.h> | ||
4384 | + | ||
4385 | +static DEFINE_PER_CPU(bool, hard_watchdog_warn); | ||
4386 | +static DEFINE_PER_CPU(bool, watchdog_nmi_touch); | ||
4387 | +static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); | ||
4388 | + | ||
4389 | +/* boot commands */ | ||
4390 | +/* | ||
4391 | + * Should we panic when a soft-lockup or hard-lockup occurs: | ||
4392 | + */ | ||
4393 | +unsigned int __read_mostly hardlockup_panic = | ||
4394 | + CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE; | ||
4395 | +static unsigned long hardlockup_allcpu_dumped; | ||
4396 | +/* | ||
4397 | + * We may not want to enable hard lockup detection by default in all cases, | ||
4398 | + * for example when running the kernel as a guest on a hypervisor. In these | ||
4399 | + * cases this function can be called to disable hard lockup detection. This | ||
4400 | + * function should only be executed once by the boot processor before the | ||
4401 | + * kernel command line parameters are parsed, because otherwise it is not | ||
4402 | + * possible to override this in hardlockup_panic_setup(). | ||
4403 | + */ | ||
4404 | +void hardlockup_detector_disable(void) | ||
4405 | +{ | ||
4406 | + watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; | ||
4407 | +} | ||
4408 | + | ||
4409 | +static int __init hardlockup_panic_setup(char *str) | ||
4410 | +{ | ||
4411 | + if (!strncmp(str, "panic", 5)) | ||
4412 | + hardlockup_panic = 1; | ||
4413 | + else if (!strncmp(str, "nopanic", 7)) | ||
4414 | + hardlockup_panic = 0; | ||
4415 | + else if (!strncmp(str, "0", 1)) | ||
4416 | + watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; | ||
4417 | + else if (!strncmp(str, "1", 1)) | ||
4418 | + watchdog_enabled |= NMI_WATCHDOG_ENABLED; | ||
4419 | + return 1; | ||
4420 | +} | ||
4421 | +__setup("nmi_watchdog=", hardlockup_panic_setup); | ||
4422 | + | ||
4423 | +void touch_nmi_watchdog(void) | ||
4424 | +{ | ||
4425 | + /* | ||
4426 | + * Using __raw here because some code paths have | ||
4427 | + * preemption enabled. If preemption is enabled | ||
4428 | + * then interrupts should be enabled too, in which | ||
4429 | + * case we shouldn't have to worry about the watchdog | ||
4430 | + * going off. | ||
4431 | + */ | ||
4432 | + raw_cpu_write(watchdog_nmi_touch, true); | ||
4433 | + touch_softlockup_watchdog(); | ||
4434 | +} | ||
4435 | +EXPORT_SYMBOL(touch_nmi_watchdog); | ||
4436 | + | ||
4437 | +static struct perf_event_attr wd_hw_attr = { | ||
4438 | + .type = PERF_TYPE_HARDWARE, | ||
4439 | + .config = PERF_COUNT_HW_CPU_CYCLES, | ||
4440 | + .size = sizeof(struct perf_event_attr), | ||
4441 | + .pinned = 1, | ||
4442 | + .disabled = 1, | ||
4443 | +}; | ||
4444 | + | ||
4445 | +/* Callback function for perf event subsystem */ | ||
4446 | +static void watchdog_overflow_callback(struct perf_event *event, | ||
4447 | + struct perf_sample_data *data, | ||
4448 | + struct pt_regs *regs) | ||
4449 | +{ | ||
4450 | + /* Ensure the watchdog never gets throttled */ | ||
4451 | + event->hw.interrupts = 0; | ||
4452 | + | ||
4453 | + if (atomic_read(&watchdog_park_in_progress) != 0) | ||
4454 | + return; | ||
4455 | + | ||
4456 | + if (__this_cpu_read(watchdog_nmi_touch) == true) { | ||
4457 | + __this_cpu_write(watchdog_nmi_touch, false); | ||
4458 | + return; | ||
4459 | + } | ||
4460 | + | ||
4461 | + /* check for a hardlockup | ||
4462 | + * This is done by making sure our timer interrupt | ||
4463 | + * is incrementing. The timer interrupt should have | ||
4464 | + * fired multiple times before we overflow'd. If it hasn't | ||
4465 | + * then this is a good indication the cpu is stuck | ||
4466 | + */ | ||
4467 | + if (is_hardlockup()) { | ||
4468 | + int this_cpu = smp_processor_id(); | ||
4469 | + | ||
4470 | + /* only print hardlockups once */ | ||
4471 | + if (__this_cpu_read(hard_watchdog_warn) == true) | ||
4472 | + return; | ||
4473 | + | ||
4474 | + pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu); | ||
4475 | + print_modules(); | ||
4476 | + print_irqtrace_events(current); | ||
4477 | + if (regs) | ||
4478 | + show_regs(regs); | ||
4479 | + else | ||
4480 | + dump_stack(); | ||
4481 | + | ||
4482 | + /* | ||
4483 | + * Perform all-CPU dump only once to avoid multiple hardlockups | ||
4484 | + * generating interleaving traces | ||
4485 | + */ | ||
4486 | + if (sysctl_hardlockup_all_cpu_backtrace && | ||
4487 | + !test_and_set_bit(0, &hardlockup_allcpu_dumped)) | ||
4488 | + trigger_allbutself_cpu_backtrace(); | ||
4489 | + | ||
4490 | + if (hardlockup_panic) | ||
4491 | + nmi_panic(regs, "Hard LOCKUP"); | ||
4492 | + | ||
4493 | + __this_cpu_write(hard_watchdog_warn, true); | ||
4494 | + return; | ||
4495 | + } | ||
4496 | + | ||
4497 | + __this_cpu_write(hard_watchdog_warn, false); | ||
4498 | + return; | ||
4499 | +} | ||
4500 | + | ||
4501 | +/* | ||
4502 | + * People like the simple clean cpu node info on boot. | ||
4503 | + * Reduce the watchdog noise by only printing messages | ||
4504 | + * that are different from what cpu0 displayed. | ||
4505 | + */ | ||
4506 | +static unsigned long cpu0_err; | ||
4507 | + | ||
4508 | +int watchdog_nmi_enable(unsigned int cpu) | ||
4509 | +{ | ||
4510 | + struct perf_event_attr *wd_attr; | ||
4511 | + struct perf_event *event = per_cpu(watchdog_ev, cpu); | ||
4512 | + | ||
4513 | + /* nothing to do if the hard lockup detector is disabled */ | ||
4514 | + if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) | ||
4515 | + goto out; | ||
4516 | + | ||
4517 | + /* is it already setup and enabled? */ | ||
4518 | + if (event && event->state > PERF_EVENT_STATE_OFF) | ||
4519 | + goto out; | ||
4520 | + | ||
4521 | + /* it is setup but not enabled */ | ||
4522 | + if (event != NULL) | ||
4523 | + goto out_enable; | ||
4524 | + | ||
4525 | + wd_attr = &wd_hw_attr; | ||
4526 | + wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh); | ||
4527 | + | ||
4528 | + /* Try to register using hardware perf events */ | ||
4529 | + event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); | ||
4530 | + | ||
4531 | + /* save cpu0 error for future comparison */ | ||
4532 | + if (cpu == 0 && IS_ERR(event)) | ||
4533 | + cpu0_err = PTR_ERR(event); | ||
4534 | + | ||
4535 | + if (!IS_ERR(event)) { | ||
4536 | + /* only print for cpu0 or different than cpu0 */ | ||
4537 | + if (cpu == 0 || cpu0_err) | ||
4538 | + pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n"); | ||
4539 | + goto out_save; | ||
4540 | + } | ||
4541 | + | ||
4542 | + /* | ||
4543 | + * Disable the hard lockup detector if _any_ CPU fails to set up | ||
4544 | + * the hardware perf event. The watchdog() function checks | ||
4545 | + * the NMI_WATCHDOG_ENABLED bit periodically. | ||
4546 | + * | ||
4547 | + * The barriers are for syncing up watchdog_enabled across all the | ||
4548 | + * cpus, as clear_bit() does not use barriers. | ||
4549 | + */ | ||
4550 | + smp_mb__before_atomic(); | ||
4551 | + clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled); | ||
4552 | + smp_mb__after_atomic(); | ||
4553 | + | ||
4554 | + /* skip displaying the same error again */ | ||
4555 | + if (cpu > 0 && (PTR_ERR(event) == cpu0_err)) | ||
4556 | + return PTR_ERR(event); | ||
4557 | + | ||
4558 | + /* vary the KERN level based on the returned errno */ | ||
4559 | + if (PTR_ERR(event) == -EOPNOTSUPP) | ||
4560 | + pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu); | ||
4561 | + else if (PTR_ERR(event) == -ENOENT) | ||
4562 | + pr_warn("disabled (cpu%i): hardware events not enabled\n", | ||
4563 | + cpu); | ||
4564 | + else | ||
4565 | + pr_err("disabled (cpu%i): unable to create perf event: %ld\n", | ||
4566 | + cpu, PTR_ERR(event)); | ||
4567 | + | ||
4568 | + pr_info("Shutting down hard lockup detector on all cpus\n"); | ||
4569 | + | ||
4570 | + return PTR_ERR(event); | ||
4571 | + | ||
4572 | + /* success path */ | ||
4573 | +out_save: | ||
4574 | + per_cpu(watchdog_ev, cpu) = event; | ||
4575 | +out_enable: | ||
4576 | + perf_event_enable(per_cpu(watchdog_ev, cpu)); | ||
4577 | +out: | ||
4578 | + return 0; | ||
4579 | +} | ||
4580 | + | ||
4581 | +void watchdog_nmi_disable(unsigned int cpu) | ||
4582 | +{ | ||
4583 | + struct perf_event *event = per_cpu(watchdog_ev, cpu); | ||
4584 | + | ||
4585 | + if (event) { | ||
4586 | + perf_event_disable(event); | ||
4587 | + per_cpu(watchdog_ev, cpu) = NULL; | ||
4588 | + | ||
4589 | + /* should be in cleanup, but blocks oprofile */ | ||
4590 | + perf_event_release_kernel(event); | ||
4591 | + } | ||
4592 | + if (cpu == 0) { | ||
4593 | + /* watchdog_nmi_enable() expects this to be zero initially. */ | ||
4594 | + cpu0_err = 0; | ||
4595 | + } | ||
4596 | +} | ||
4597 | diff --git a/mm/kasan/report.c b/mm/kasan/report.c | ||
4598 | index 073325aedc68..8ca412aebcf1 100644 | ||
4599 | --- a/mm/kasan/report.c | ||
4600 | +++ b/mm/kasan/report.c | ||
4601 | @@ -13,6 +13,7 @@ | ||
4602 | * | ||
4603 | */ | ||
4604 | |||
4605 | +#include <linux/ftrace.h> | ||
4606 | #include <linux/kernel.h> | ||
4607 | #include <linux/mm.h> | ||
4608 | #include <linux/printk.h> | ||
4609 | @@ -298,6 +299,8 @@ void kasan_report(unsigned long addr, size_t size, | ||
4610 | if (likely(!kasan_report_enabled())) | ||
4611 | return; | ||
4612 | |||
4613 | + disable_trace_on_warning(); | ||
4614 | + | ||
4615 | info.access_addr = (void *)addr; | ||
4616 | info.access_size = size; | ||
4617 | info.is_write = is_write; | ||
4618 | diff --git a/mm/shmem.c b/mm/shmem.c | ||
4619 | index 9d32e1cb9f38..d99cfb6eb03a 100644 | ||
4620 | --- a/mm/shmem.c | ||
4621 | +++ b/mm/shmem.c | ||
4622 | @@ -412,6 +412,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, | ||
4623 | struct shrink_control *sc, unsigned long nr_to_split) | ||
4624 | { | ||
4625 | LIST_HEAD(list), *pos, *next; | ||
4626 | + LIST_HEAD(to_remove); | ||
4627 | struct inode *inode; | ||
4628 | struct shmem_inode_info *info; | ||
4629 | struct page *page; | ||
4630 | @@ -438,9 +439,8 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, | ||
4631 | /* Check if there's anything to gain */ | ||
4632 | if (round_up(inode->i_size, PAGE_SIZE) == | ||
4633 | round_up(inode->i_size, HPAGE_PMD_SIZE)) { | ||
4634 | - list_del_init(&info->shrinklist); | ||
4635 | + list_move(&info->shrinklist, &to_remove); | ||
4636 | removed++; | ||
4637 | - iput(inode); | ||
4638 | goto next; | ||
4639 | } | ||
4640 | |||
4641 | @@ -451,6 +451,13 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, | ||
4642 | } | ||
4643 | spin_unlock(&sbinfo->shrinklist_lock); | ||
4644 | |||
4645 | + list_for_each_safe(pos, next, &to_remove) { | ||
4646 | + info = list_entry(pos, struct shmem_inode_info, shrinklist); | ||
4647 | + inode = &info->vfs_inode; | ||
4648 | + list_del_init(&info->shrinklist); | ||
4649 | + iput(inode); | ||
4650 | + } | ||
4651 | + | ||
4652 | list_for_each_safe(pos, next, &list) { | ||
4653 | int ret; | ||
4654 | |||
4655 | diff --git a/net/core/ethtool.c b/net/core/ethtool.c | ||
4656 | index 047a1752ece1..072c1f4998c9 100644 | ||
4657 | --- a/net/core/ethtool.c | ||
4658 | +++ b/net/core/ethtool.c | ||
4659 | @@ -1394,9 +1394,12 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) | ||
4660 | if (regs.len > reglen) | ||
4661 | regs.len = reglen; | ||
4662 | |||
4663 | - regbuf = vzalloc(reglen); | ||
4664 | - if (reglen && !regbuf) | ||
4665 | - return -ENOMEM; | ||
4666 | + regbuf = NULL; | ||
4667 | + if (reglen) { | ||
4668 | + regbuf = vzalloc(reglen); | ||
4669 | + if (!regbuf) | ||
4670 | + return -ENOMEM; | ||
4671 | + } | ||
4672 | |||
4673 | ops->get_regs(dev, ®s, regbuf); | ||
4674 | |||
4675 | diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c | ||
4676 | index 89a8cac4726a..51b27ae09fbd 100644 | ||
4677 | --- a/net/ipv4/arp.c | ||
4678 | +++ b/net/ipv4/arp.c | ||
4679 | @@ -1263,7 +1263,7 @@ void __init arp_init(void) | ||
4680 | /* | ||
4681 | * ax25 -> ASCII conversion | ||
4682 | */ | ||
4683 | -static char *ax2asc2(ax25_address *a, char *buf) | ||
4684 | +static void ax2asc2(ax25_address *a, char *buf) | ||
4685 | { | ||
4686 | char c, *s; | ||
4687 | int n; | ||
4688 | @@ -1285,10 +1285,10 @@ static char *ax2asc2(ax25_address *a, char *buf) | ||
4689 | *s++ = n + '0'; | ||
4690 | *s++ = '\0'; | ||
4691 | |||
4692 | - if (*buf == '\0' || *buf == '-') | ||
4693 | - return "*"; | ||
4694 | - | ||
4695 | - return buf; | ||
4696 | + if (*buf == '\0' || *buf == '-') { | ||
4697 | + buf[0] = '*'; | ||
4698 | + buf[1] = '\0'; | ||
4699 | + } | ||
4700 | } | ||
4701 | #endif /* CONFIG_AX25 */ | ||
4702 | |||
4703 | @@ -1322,7 +1322,7 @@ static void arp_format_neigh_entry(struct seq_file *seq, | ||
4704 | } | ||
4705 | #endif | ||
4706 | sprintf(tbuf, "%pI4", n->primary_key); | ||
4707 | - seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n", | ||
4708 | + seq_printf(seq, "%-16s 0x%-10x0x%-10x%-17s * %s\n", | ||
4709 | tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name); | ||
4710 | read_unlock(&n->lock); | ||
4711 | } | ||
4712 | diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c | ||
4713 | index f6c50af24a64..3d063eb37848 100644 | ||
4714 | --- a/net/ipv4/tcp_probe.c | ||
4715 | +++ b/net/ipv4/tcp_probe.c | ||
4716 | @@ -117,7 +117,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, | ||
4717 | (fwmark > 0 && skb->mark == fwmark)) && | ||
4718 | (full || tp->snd_cwnd != tcp_probe.lastcwnd)) { | ||
4719 | |||
4720 | - spin_lock(&tcp_probe.lock); | ||
4721 | + spin_lock_bh(&tcp_probe.lock); | ||
4722 | /* If log fills, just silently drop */ | ||
4723 | if (tcp_probe_avail() > 1) { | ||
4724 | struct tcp_log *p = tcp_probe.log + tcp_probe.head; | ||
4725 | @@ -157,7 +157,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, | ||
4726 | tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1); | ||
4727 | } | ||
4728 | tcp_probe.lastcwnd = tp->snd_cwnd; | ||
4729 | - spin_unlock(&tcp_probe.lock); | ||
4730 | + spin_unlock_bh(&tcp_probe.lock); | ||
4731 | |||
4732 | wake_up(&tcp_probe.wait); | ||
4733 | } | ||
4734 | diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c | ||
4735 | index f088a1d9a618..045738319e8b 100644 | ||
4736 | --- a/net/ipv6/addrconf.c | ||
4737 | +++ b/net/ipv6/addrconf.c | ||
4738 | @@ -3387,9 +3387,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | ||
4739 | } | ||
4740 | |||
4741 | if (idev) { | ||
4742 | - if (idev->if_flags & IF_READY) | ||
4743 | - /* device is already configured. */ | ||
4744 | + if (idev->if_flags & IF_READY) { | ||
4745 | + /* device is already configured - | ||
4746 | + * but resend MLD reports, we might | ||
4747 | + * have roamed and need to update | ||
4748 | + * multicast snooping switches | ||
4749 | + */ | ||
4750 | + ipv6_mc_up(idev); | ||
4751 | break; | ||
4752 | + } | ||
4753 | idev->if_flags |= IF_READY; | ||
4754 | } | ||
4755 | |||
4756 | @@ -4004,6 +4010,12 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id) | ||
4757 | |||
4758 | if (bump_id) | ||
4759 | rt_genid_bump_ipv6(dev_net(dev)); | ||
4760 | + | ||
4761 | + /* Make sure that a new temporary address will be created | ||
4762 | + * before this temporary address becomes deprecated. | ||
4763 | + */ | ||
4764 | + if (ifp->flags & IFA_F_TEMPORARY) | ||
4765 | + addrconf_verify_rtnl(); | ||
4766 | } | ||
4767 | |||
4768 | static void addrconf_dad_run(struct inet6_dev *idev) | ||
4769 | diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c | ||
4770 | index 442ec1f39ed1..38062f403ceb 100644 | ||
4771 | --- a/net/ipv6/datagram.c | ||
4772 | +++ b/net/ipv6/datagram.c | ||
4773 | @@ -166,18 +166,22 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, | ||
4774 | if (np->sndflow) | ||
4775 | fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK; | ||
4776 | |||
4777 | - addr_type = ipv6_addr_type(&usin->sin6_addr); | ||
4778 | - | ||
4779 | - if (addr_type == IPV6_ADDR_ANY) { | ||
4780 | + if (ipv6_addr_any(&usin->sin6_addr)) { | ||
4781 | /* | ||
4782 | * connect to self | ||
4783 | */ | ||
4784 | - usin->sin6_addr.s6_addr[15] = 0x01; | ||
4785 | + if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) | ||
4786 | + ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), | ||
4787 | + &usin->sin6_addr); | ||
4788 | + else | ||
4789 | + usin->sin6_addr = in6addr_loopback; | ||
4790 | } | ||
4791 | |||
4792 | + addr_type = ipv6_addr_type(&usin->sin6_addr); | ||
4793 | + | ||
4794 | daddr = &usin->sin6_addr; | ||
4795 | |||
4796 | - if (addr_type == IPV6_ADDR_MAPPED) { | ||
4797 | + if (addr_type & IPV6_ADDR_MAPPED) { | ||
4798 | struct sockaddr_in sin; | ||
4799 | |||
4800 | if (__ipv6_only_sock(sk)) { | ||
4801 | diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c | ||
4802 | index 1ac3cea49171..3ab32ac57ccd 100644 | ||
4803 | --- a/net/ipv6/ip6_output.c | ||
4804 | +++ b/net/ipv6/ip6_output.c | ||
4805 | @@ -1019,6 +1019,9 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk, | ||
4806 | } | ||
4807 | } | ||
4808 | #endif | ||
4809 | + if (ipv6_addr_v4mapped(&fl6->saddr) && | ||
4810 | + !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) | ||
4811 | + return -EAFNOSUPPORT; | ||
4812 | |||
4813 | return 0; | ||
4814 | |||
4815 | diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c | ||
4816 | index aef9b28067f4..7ac2365aa6fb 100644 | ||
4817 | --- a/net/ipv6/tcp_ipv6.c | ||
4818 | +++ b/net/ipv6/tcp_ipv6.c | ||
4819 | @@ -148,8 +148,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | ||
4820 | * connect() to INADDR_ANY means loopback (BSD'ism). | ||
4821 | */ | ||
4822 | |||
4823 | - if (ipv6_addr_any(&usin->sin6_addr)) | ||
4824 | - usin->sin6_addr.s6_addr[15] = 0x1; | ||
4825 | + if (ipv6_addr_any(&usin->sin6_addr)) { | ||
4826 | + if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) | ||
4827 | + ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), | ||
4828 | + &usin->sin6_addr); | ||
4829 | + else | ||
4830 | + usin->sin6_addr = in6addr_loopback; | ||
4831 | + } | ||
4832 | |||
4833 | addr_type = ipv6_addr_type(&usin->sin6_addr); | ||
4834 | |||
4835 | @@ -188,7 +193,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | ||
4836 | * TCP over IPv4 | ||
4837 | */ | ||
4838 | |||
4839 | - if (addr_type == IPV6_ADDR_MAPPED) { | ||
4840 | + if (addr_type & IPV6_ADDR_MAPPED) { | ||
4841 | u32 exthdrlen = icsk->icsk_ext_hdr_len; | ||
4842 | struct sockaddr_in sin; | ||
4843 | |||
4844 | diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c | ||
4845 | index 40a289f78d77..2497f62fa4c2 100644 | ||
4846 | --- a/net/ipv6/udp.c | ||
4847 | +++ b/net/ipv6/udp.c | ||
4848 | @@ -1049,6 +1049,10 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | ||
4849 | if (addr_len < SIN6_LEN_RFC2133) | ||
4850 | return -EINVAL; | ||
4851 | daddr = &sin6->sin6_addr; | ||
4852 | + if (ipv6_addr_any(daddr) && | ||
4853 | + ipv6_addr_v4mapped(&np->saddr)) | ||
4854 | + ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), | ||
4855 | + daddr); | ||
4856 | break; | ||
4857 | case AF_INET: | ||
4858 | goto do_udp_sendmsg; | ||
4859 | diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c | ||
4860 | index c3fc14e021ec..3a8dc39a9116 100644 | ||
4861 | --- a/net/netfilter/nf_conntrack_sip.c | ||
4862 | +++ b/net/netfilter/nf_conntrack_sip.c | ||
4863 | @@ -1630,8 +1630,6 @@ static int __init nf_conntrack_sip_init(void) | ||
4864 | ports[ports_c++] = SIP_PORT; | ||
4865 | |||
4866 | for (i = 0; i < ports_c; i++) { | ||
4867 | - memset(&sip[i], 0, sizeof(sip[i])); | ||
4868 | - | ||
4869 | nf_ct_helper_init(&sip[4 * i], AF_INET, IPPROTO_UDP, "sip", | ||
4870 | SIP_PORT, ports[i], i, sip_exp_policy, | ||
4871 | SIP_EXPECT_MAX, | ||
4872 | diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c | ||
4873 | index 3dca90dc24ad..ffb9e8ada899 100644 | ||
4874 | --- a/net/netfilter/nf_log.c | ||
4875 | +++ b/net/netfilter/nf_log.c | ||
4876 | @@ -13,7 +13,6 @@ | ||
4877 | /* Internal logging interface, which relies on the real | ||
4878 | LOG target modules */ | ||
4879 | |||
4880 | -#define NF_LOG_PREFIXLEN 128 | ||
4881 | #define NFLOGGER_NAME_LEN 64 | ||
4882 | |||
4883 | static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly; | ||
4884 | diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c | ||
4885 | index e5194f6f906c..778fcdb83225 100644 | ||
4886 | --- a/net/netfilter/nf_tables_api.c | ||
4887 | +++ b/net/netfilter/nf_tables_api.c | ||
4888 | @@ -3637,10 +3637,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, | ||
4889 | goto err5; | ||
4890 | } | ||
4891 | |||
4892 | + if (set->size && | ||
4893 | + !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) { | ||
4894 | + err = -ENFILE; | ||
4895 | + goto err6; | ||
4896 | + } | ||
4897 | + | ||
4898 | nft_trans_elem(trans) = elem; | ||
4899 | list_add_tail(&trans->list, &ctx->net->nft.commit_list); | ||
4900 | return 0; | ||
4901 | |||
4902 | +err6: | ||
4903 | + set->ops->remove(set, &elem); | ||
4904 | err5: | ||
4905 | kfree(trans); | ||
4906 | err4: | ||
4907 | @@ -3687,15 +3695,9 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk, | ||
4908 | return -EBUSY; | ||
4909 | |||
4910 | nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { | ||
4911 | - if (set->size && | ||
4912 | - !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) | ||
4913 | - return -ENFILE; | ||
4914 | - | ||
4915 | err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags); | ||
4916 | - if (err < 0) { | ||
4917 | - atomic_dec(&set->nelems); | ||
4918 | + if (err < 0) | ||
4919 | break; | ||
4920 | - } | ||
4921 | } | ||
4922 | return err; | ||
4923 | } | ||
4924 | diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c | ||
4925 | index 1b01404bb33f..c7704e9123ef 100644 | ||
4926 | --- a/net/netfilter/nft_log.c | ||
4927 | +++ b/net/netfilter/nft_log.c | ||
4928 | @@ -38,7 +38,8 @@ static void nft_log_eval(const struct nft_expr *expr, | ||
4929 | |||
4930 | static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = { | ||
4931 | [NFTA_LOG_GROUP] = { .type = NLA_U16 }, | ||
4932 | - [NFTA_LOG_PREFIX] = { .type = NLA_STRING }, | ||
4933 | + [NFTA_LOG_PREFIX] = { .type = NLA_STRING, | ||
4934 | + .len = NF_LOG_PREFIXLEN - 1 }, | ||
4935 | [NFTA_LOG_SNAPLEN] = { .type = NLA_U32 }, | ||
4936 | [NFTA_LOG_QTHRESHOLD] = { .type = NLA_U16 }, | ||
4937 | [NFTA_LOG_LEVEL] = { .type = NLA_U32 }, | ||
4938 | diff --git a/net/sctp/offload.c b/net/sctp/offload.c | ||
4939 | index 7e869d0cca69..4f5a2b580aa5 100644 | ||
4940 | --- a/net/sctp/offload.c | ||
4941 | +++ b/net/sctp/offload.c | ||
4942 | @@ -68,7 +68,7 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb, | ||
4943 | goto out; | ||
4944 | } | ||
4945 | |||
4946 | - segs = skb_segment(skb, features | NETIF_F_HW_CSUM); | ||
4947 | + segs = skb_segment(skb, features | NETIF_F_HW_CSUM | NETIF_F_SG); | ||
4948 | if (IS_ERR(segs)) | ||
4949 | goto out; | ||
4950 | |||
4951 | diff --git a/net/sctp/socket.c b/net/sctp/socket.c | ||
4952 | index 14346dccc4fe..e1719c695174 100644 | ||
4953 | --- a/net/sctp/socket.c | ||
4954 | +++ b/net/sctp/socket.c | ||
4955 | @@ -235,8 +235,12 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, | ||
4956 | sctp_assoc_t id) | ||
4957 | { | ||
4958 | struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; | ||
4959 | - struct sctp_transport *transport; | ||
4960 | + struct sctp_af *af = sctp_get_af_specific(addr->ss_family); | ||
4961 | union sctp_addr *laddr = (union sctp_addr *)addr; | ||
4962 | + struct sctp_transport *transport; | ||
4963 | + | ||
4964 | + if (sctp_verify_addr(sk, laddr, af->sockaddr_len)) | ||
4965 | + return NULL; | ||
4966 | |||
4967 | addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, | ||
4968 | laddr, | ||
4969 | diff --git a/net/tipc/net.c b/net/tipc/net.c | ||
4970 | index 28bf4feeb81c..ab8a2d5d1e32 100644 | ||
4971 | --- a/net/tipc/net.c | ||
4972 | +++ b/net/tipc/net.c | ||
4973 | @@ -110,6 +110,10 @@ int tipc_net_start(struct net *net, u32 addr) | ||
4974 | char addr_string[16]; | ||
4975 | |||
4976 | tn->own_addr = addr; | ||
4977 | + | ||
4978 | + /* Ensure that the new address is visible before we reinit. */ | ||
4979 | + smp_mb(); | ||
4980 | + | ||
4981 | tipc_named_reinit(net); | ||
4982 | tipc_sk_reinit(net); | ||
4983 | |||
4984 | diff --git a/net/tipc/node.c b/net/tipc/node.c | ||
4985 | index 9d2f4c2b08ab..27753325e06e 100644 | ||
4986 | --- a/net/tipc/node.c | ||
4987 | +++ b/net/tipc/node.c | ||
4988 | @@ -263,6 +263,11 @@ static void tipc_node_write_lock(struct tipc_node *n) | ||
4989 | write_lock_bh(&n->lock); | ||
4990 | } | ||
4991 | |||
4992 | +static void tipc_node_write_unlock_fast(struct tipc_node *n) | ||
4993 | +{ | ||
4994 | + write_unlock_bh(&n->lock); | ||
4995 | +} | ||
4996 | + | ||
4997 | static void tipc_node_write_unlock(struct tipc_node *n) | ||
4998 | { | ||
4999 | struct net *net = n->net; | ||
5000 | @@ -417,7 +422,7 @@ void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr) | ||
5001 | } | ||
5002 | tipc_node_write_lock(n); | ||
5003 | list_add_tail(subscr, &n->publ_list); | ||
5004 | - tipc_node_write_unlock(n); | ||
5005 | + tipc_node_write_unlock_fast(n); | ||
5006 | tipc_node_put(n); | ||
5007 | } | ||
5008 | |||
5009 | @@ -435,7 +440,7 @@ void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr) | ||
5010 | } | ||
5011 | tipc_node_write_lock(n); | ||
5012 | list_del_init(subscr); | ||
5013 | - tipc_node_write_unlock(n); | ||
5014 | + tipc_node_write_unlock_fast(n); | ||
5015 | tipc_node_put(n); | ||
5016 | } | ||
5017 | |||
5018 | diff --git a/net/tipc/server.c b/net/tipc/server.c | ||
5019 | index 215849ce453d..f89c0c2e8c16 100644 | ||
5020 | --- a/net/tipc/server.c | ||
5021 | +++ b/net/tipc/server.c | ||
5022 | @@ -91,7 +91,8 @@ static void tipc_sock_release(struct tipc_conn *con); | ||
5023 | static void tipc_conn_kref_release(struct kref *kref) | ||
5024 | { | ||
5025 | struct tipc_conn *con = container_of(kref, struct tipc_conn, kref); | ||
5026 | - struct sockaddr_tipc *saddr = con->server->saddr; | ||
5027 | + struct tipc_server *s = con->server; | ||
5028 | + struct sockaddr_tipc *saddr = s->saddr; | ||
5029 | struct socket *sock = con->sock; | ||
5030 | struct sock *sk; | ||
5031 | |||
5032 | @@ -106,6 +107,11 @@ static void tipc_conn_kref_release(struct kref *kref) | ||
5033 | tipc_sock_release(con); | ||
5034 | sock_release(sock); | ||
5035 | con->sock = NULL; | ||
5036 | + | ||
5037 | + spin_lock_bh(&s->idr_lock); | ||
5038 | + idr_remove(&s->conn_idr, con->conid); | ||
5039 | + s->idr_in_use--; | ||
5040 | + spin_unlock_bh(&s->idr_lock); | ||
5041 | } | ||
5042 | |||
5043 | tipc_clean_outqueues(con); | ||
5044 | @@ -128,8 +134,10 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid) | ||
5045 | |||
5046 | spin_lock_bh(&s->idr_lock); | ||
5047 | con = idr_find(&s->conn_idr, conid); | ||
5048 | - if (con) | ||
5049 | + if (con && test_bit(CF_CONNECTED, &con->flags)) | ||
5050 | conn_get(con); | ||
5051 | + else | ||
5052 | + con = NULL; | ||
5053 | spin_unlock_bh(&s->idr_lock); | ||
5054 | return con; | ||
5055 | } | ||
5056 | @@ -198,15 +206,8 @@ static void tipc_sock_release(struct tipc_conn *con) | ||
5057 | |||
5058 | static void tipc_close_conn(struct tipc_conn *con) | ||
5059 | { | ||
5060 | - struct tipc_server *s = con->server; | ||
5061 | - | ||
5062 | if (test_and_clear_bit(CF_CONNECTED, &con->flags)) { | ||
5063 | |||
5064 | - spin_lock_bh(&s->idr_lock); | ||
5065 | - idr_remove(&s->conn_idr, con->conid); | ||
5066 | - s->idr_in_use--; | ||
5067 | - spin_unlock_bh(&s->idr_lock); | ||
5068 | - | ||
5069 | /* We shouldn't flush pending works as we may be in the | ||
5070 | * thread. In fact the races with pending rx/tx work structs | ||
5071 | * are harmless for us here as we have already deleted this | ||
5072 | @@ -458,6 +459,11 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid, | ||
5073 | if (!con) | ||
5074 | return -EINVAL; | ||
5075 | |||
5076 | + if (!test_bit(CF_CONNECTED, &con->flags)) { | ||
5077 | + conn_put(con); | ||
5078 | + return 0; | ||
5079 | + } | ||
5080 | + | ||
5081 | e = tipc_alloc_entry(data, len); | ||
5082 | if (!e) { | ||
5083 | conn_put(con); | ||
5084 | @@ -471,12 +477,8 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid, | ||
5085 | list_add_tail(&e->list, &con->outqueue); | ||
5086 | spin_unlock_bh(&con->outqueue_lock); | ||
5087 | |||
5088 | - if (test_bit(CF_CONNECTED, &con->flags)) { | ||
5089 | - if (!queue_work(s->send_wq, &con->swork)) | ||
5090 | - conn_put(con); | ||
5091 | - } else { | ||
5092 | + if (!queue_work(s->send_wq, &con->swork)) | ||
5093 | conn_put(con); | ||
5094 | - } | ||
5095 | return 0; | ||
5096 | } | ||
5097 | |||
5098 | @@ -500,7 +502,7 @@ static void tipc_send_to_sock(struct tipc_conn *con) | ||
5099 | int ret; | ||
5100 | |||
5101 | spin_lock_bh(&con->outqueue_lock); | ||
5102 | - while (1) { | ||
5103 | + while (test_bit(CF_CONNECTED, &con->flags)) { | ||
5104 | e = list_entry(con->outqueue.next, struct outqueue_entry, | ||
5105 | list); | ||
5106 | if ((struct list_head *) e == &con->outqueue) | ||
5107 | diff --git a/net/tipc/socket.c b/net/tipc/socket.c | ||
5108 | index 41f013888f07..25bc5c30d7fb 100644 | ||
5109 | --- a/net/tipc/socket.c | ||
5110 | +++ b/net/tipc/socket.c | ||
5111 | @@ -335,8 +335,6 @@ static int tipc_sk_create(struct net *net, struct socket *sock, | ||
5112 | INIT_LIST_HEAD(&tsk->publications); | ||
5113 | msg = &tsk->phdr; | ||
5114 | tn = net_generic(sock_net(sk), tipc_net_id); | ||
5115 | - tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, | ||
5116 | - NAMED_H_SIZE, 0); | ||
5117 | |||
5118 | /* Finish initializing socket data structures */ | ||
5119 | sock->ops = ops; | ||
5120 | @@ -346,6 +344,13 @@ static int tipc_sk_create(struct net *net, struct socket *sock, | ||
5121 | pr_warn("Socket create failed; port number exhausted\n"); | ||
5122 | return -EINVAL; | ||
5123 | } | ||
5124 | + | ||
5125 | + /* Ensure tsk is visible before we read own_addr. */ | ||
5126 | + smp_mb(); | ||
5127 | + | ||
5128 | + tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, | ||
5129 | + NAMED_H_SIZE, 0); | ||
5130 | + | ||
5131 | msg_set_origport(msg, tsk->portid); | ||
5132 | setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk); | ||
5133 | sk->sk_backlog_rcv = tipc_backlog_rcv; | ||
5134 | @@ -2264,24 +2269,27 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, | ||
5135 | void tipc_sk_reinit(struct net *net) | ||
5136 | { | ||
5137 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
5138 | - const struct bucket_table *tbl; | ||
5139 | - struct rhash_head *pos; | ||
5140 | + struct rhashtable_iter iter; | ||
5141 | struct tipc_sock *tsk; | ||
5142 | struct tipc_msg *msg; | ||
5143 | - int i; | ||
5144 | |||
5145 | - rcu_read_lock(); | ||
5146 | - tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); | ||
5147 | - for (i = 0; i < tbl->size; i++) { | ||
5148 | - rht_for_each_entry_rcu(tsk, pos, tbl, i, node) { | ||
5149 | + rhashtable_walk_enter(&tn->sk_rht, &iter); | ||
5150 | + | ||
5151 | + do { | ||
5152 | + tsk = ERR_PTR(rhashtable_walk_start(&iter)); | ||
5153 | + if (tsk) | ||
5154 | + continue; | ||
5155 | + | ||
5156 | + while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) { | ||
5157 | spin_lock_bh(&tsk->sk.sk_lock.slock); | ||
5158 | msg = &tsk->phdr; | ||
5159 | msg_set_prevnode(msg, tn->own_addr); | ||
5160 | msg_set_orignode(msg, tn->own_addr); | ||
5161 | spin_unlock_bh(&tsk->sk.sk_lock.slock); | ||
5162 | } | ||
5163 | - } | ||
5164 | - rcu_read_unlock(); | ||
5165 | + | ||
5166 | + rhashtable_walk_stop(&iter); | ||
5167 | + } while (tsk == ERR_PTR(-EAGAIN)); | ||
5168 | } | ||
5169 | |||
5170 | static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) | ||
5171 | diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c | ||
5172 | index 0dd02244e21d..9d94e65d0894 100644 | ||
5173 | --- a/net/tipc/subscr.c | ||
5174 | +++ b/net/tipc/subscr.c | ||
5175 | @@ -54,6 +54,8 @@ struct tipc_subscriber { | ||
5176 | |||
5177 | static void tipc_subscrp_delete(struct tipc_subscription *sub); | ||
5178 | static void tipc_subscrb_put(struct tipc_subscriber *subscriber); | ||
5179 | +static void tipc_subscrp_put(struct tipc_subscription *subscription); | ||
5180 | +static void tipc_subscrp_get(struct tipc_subscription *subscription); | ||
5181 | |||
5182 | /** | ||
5183 | * htohl - convert value to endianness used by destination | ||
5184 | @@ -123,6 +125,7 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower, | ||
5185 | { | ||
5186 | struct tipc_name_seq seq; | ||
5187 | |||
5188 | + tipc_subscrp_get(sub); | ||
5189 | tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq); | ||
5190 | if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper)) | ||
5191 | return; | ||
5192 | @@ -132,30 +135,23 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower, | ||
5193 | |||
5194 | tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref, | ||
5195 | node); | ||
5196 | + tipc_subscrp_put(sub); | ||
5197 | } | ||
5198 | |||
5199 | static void tipc_subscrp_timeout(unsigned long data) | ||
5200 | { | ||
5201 | struct tipc_subscription *sub = (struct tipc_subscription *)data; | ||
5202 | - struct tipc_subscriber *subscriber = sub->subscriber; | ||
5203 | |||
5204 | /* Notify subscriber of timeout */ | ||
5205 | tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper, | ||
5206 | TIPC_SUBSCR_TIMEOUT, 0, 0); | ||
5207 | |||
5208 | - spin_lock_bh(&subscriber->lock); | ||
5209 | - tipc_subscrp_delete(sub); | ||
5210 | - spin_unlock_bh(&subscriber->lock); | ||
5211 | - | ||
5212 | - tipc_subscrb_put(subscriber); | ||
5213 | + tipc_subscrp_put(sub); | ||
5214 | } | ||
5215 | |||
5216 | static void tipc_subscrb_kref_release(struct kref *kref) | ||
5217 | { | ||
5218 | - struct tipc_subscriber *subcriber = container_of(kref, | ||
5219 | - struct tipc_subscriber, kref); | ||
5220 | - | ||
5221 | - kfree(subcriber); | ||
5222 | + kfree(container_of(kref,struct tipc_subscriber, kref)); | ||
5223 | } | ||
5224 | |||
5225 | static void tipc_subscrb_put(struct tipc_subscriber *subscriber) | ||
5226 | @@ -168,6 +164,59 @@ static void tipc_subscrb_get(struct tipc_subscriber *subscriber) | ||
5227 | kref_get(&subscriber->kref); | ||
5228 | } | ||
5229 | |||
5230 | +static void tipc_subscrp_kref_release(struct kref *kref) | ||
5231 | +{ | ||
5232 | + struct tipc_subscription *sub = container_of(kref, | ||
5233 | + struct tipc_subscription, | ||
5234 | + kref); | ||
5235 | + struct tipc_net *tn = net_generic(sub->net, tipc_net_id); | ||
5236 | + struct tipc_subscriber *subscriber = sub->subscriber; | ||
5237 | + | ||
5238 | + spin_lock_bh(&subscriber->lock); | ||
5239 | + tipc_nametbl_unsubscribe(sub); | ||
5240 | + list_del(&sub->subscrp_list); | ||
5241 | + atomic_dec(&tn->subscription_count); | ||
5242 | + spin_unlock_bh(&subscriber->lock); | ||
5243 | + kfree(sub); | ||
5244 | + tipc_subscrb_put(subscriber); | ||
5245 | +} | ||
5246 | + | ||
5247 | +static void tipc_subscrp_put(struct tipc_subscription *subscription) | ||
5248 | +{ | ||
5249 | + kref_put(&subscription->kref, tipc_subscrp_kref_release); | ||
5250 | +} | ||
5251 | + | ||
5252 | +static void tipc_subscrp_get(struct tipc_subscription *subscription) | ||
5253 | +{ | ||
5254 | + kref_get(&subscription->kref); | ||
5255 | +} | ||
5256 | + | ||
5257 | +/* tipc_subscrb_subscrp_delete - delete a specific subscription or all | ||
5258 | + * subscriptions for a given subscriber. | ||
5259 | + */ | ||
5260 | +static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber, | ||
5261 | + struct tipc_subscr *s) | ||
5262 | +{ | ||
5263 | + struct list_head *subscription_list = &subscriber->subscrp_list; | ||
5264 | + struct tipc_subscription *sub, *temp; | ||
5265 | + | ||
5266 | + spin_lock_bh(&subscriber->lock); | ||
5267 | + list_for_each_entry_safe(sub, temp, subscription_list, subscrp_list) { | ||
5268 | + if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) | ||
5269 | + continue; | ||
5270 | + | ||
5271 | + tipc_subscrp_get(sub); | ||
5272 | + spin_unlock_bh(&subscriber->lock); | ||
5273 | + tipc_subscrp_delete(sub); | ||
5274 | + tipc_subscrp_put(sub); | ||
5275 | + spin_lock_bh(&subscriber->lock); | ||
5276 | + | ||
5277 | + if (s) | ||
5278 | + break; | ||
5279 | + } | ||
5280 | + spin_unlock_bh(&subscriber->lock); | ||
5281 | +} | ||
5282 | + | ||
5283 | static struct tipc_subscriber *tipc_subscrb_create(int conid) | ||
5284 | { | ||
5285 | struct tipc_subscriber *subscriber; | ||
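The additions above put each subscription's lifetime under a kref: tipc_subscrp_get()/tipc_subscrp_put() wrap kref_get()/kref_put(), and the release callback performs the unsubscribe, list unlink, counter decrement and kfree() only once the last reference is gone, then drops the reference held on the owning subscriber. Below is a minimal sketch of the generic kref pattern this follows, using hypothetical names (struct item, item_alloc(), item_use()); it is illustrative only, not part of the patch.

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct item {
            struct kref kref;
            /* payload ... */
    };

    static void item_release(struct kref *kref)
    {
            struct item *it = container_of(kref, struct item, kref);

            /* undo any registration and list linkage here, then free */
            kfree(it);
    }

    static void item_put(struct item *it)
    {
            kref_put(&it->kref, item_release);      /* frees on last put */
    }

    static void item_get(struct item *it)
    {
            kref_get(&it->kref);
    }

    static struct item *item_alloc(void)
    {
            struct item *it = kzalloc(sizeof(*it), GFP_KERNEL);

            if (it)
                    kref_init(&it->kref);           /* refcount starts at 1 */
            return it;
    }

    /* Pin the object across a code path that can race with its release,
     * much like tipc_subscrp_report_overlap() now does around event
     * delivery.
     */
    static void item_use(struct item *it)
    {
            item_get(it);
            /* ... safe to dereference it here ... */
            item_put(it);
    }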
5286 | @@ -177,8 +226,8 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid) | ||
5287 | pr_warn("Subscriber rejected, no memory\n"); | ||
5288 | return NULL; | ||
5289 | } | ||
5290 | - kref_init(&subscriber->kref); | ||
5291 | INIT_LIST_HEAD(&subscriber->subscrp_list); | ||
5292 | + kref_init(&subscriber->kref); | ||
5293 | subscriber->conid = conid; | ||
5294 | spin_lock_init(&subscriber->lock); | ||
5295 | |||
5296 | @@ -187,55 +236,22 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid) | ||
5297 | |||
5298 | static void tipc_subscrb_delete(struct tipc_subscriber *subscriber) | ||
5299 | { | ||
5300 | - struct tipc_subscription *sub, *temp; | ||
5301 | - u32 timeout; | ||
5302 | - | ||
5303 | - spin_lock_bh(&subscriber->lock); | ||
5304 | - /* Destroy any existing subscriptions for subscriber */ | ||
5305 | - list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list, | ||
5306 | - subscrp_list) { | ||
5307 | - timeout = htohl(sub->evt.s.timeout, sub->swap); | ||
5308 | - if ((timeout == TIPC_WAIT_FOREVER) || del_timer(&sub->timer)) { | ||
5309 | - tipc_subscrp_delete(sub); | ||
5310 | - tipc_subscrb_put(subscriber); | ||
5311 | - } | ||
5312 | - } | ||
5313 | - spin_unlock_bh(&subscriber->lock); | ||
5314 | - | ||
5315 | + tipc_subscrb_subscrp_delete(subscriber, NULL); | ||
5316 | tipc_subscrb_put(subscriber); | ||
5317 | } | ||
5318 | |||
5319 | static void tipc_subscrp_delete(struct tipc_subscription *sub) | ||
5320 | { | ||
5321 | - struct tipc_net *tn = net_generic(sub->net, tipc_net_id); | ||
5322 | + u32 timeout = htohl(sub->evt.s.timeout, sub->swap); | ||
5323 | |||
5324 | - tipc_nametbl_unsubscribe(sub); | ||
5325 | - list_del(&sub->subscrp_list); | ||
5326 | - kfree(sub); | ||
5327 | - atomic_dec(&tn->subscription_count); | ||
5328 | + if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer)) | ||
5329 | + tipc_subscrp_put(sub); | ||
5330 | } | ||
5331 | |||
5332 | static void tipc_subscrp_cancel(struct tipc_subscr *s, | ||
5333 | struct tipc_subscriber *subscriber) | ||
5334 | { | ||
5335 | - struct tipc_subscription *sub, *temp; | ||
5336 | - u32 timeout; | ||
5337 | - | ||
5338 | - spin_lock_bh(&subscriber->lock); | ||
5339 | - /* Find first matching subscription, exit if not found */ | ||
5340 | - list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list, | ||
5341 | - subscrp_list) { | ||
5342 | - if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) { | ||
5343 | - timeout = htohl(sub->evt.s.timeout, sub->swap); | ||
5344 | - if ((timeout == TIPC_WAIT_FOREVER) || | ||
5345 | - del_timer(&sub->timer)) { | ||
5346 | - tipc_subscrp_delete(sub); | ||
5347 | - tipc_subscrb_put(subscriber); | ||
5348 | - } | ||
5349 | - break; | ||
5350 | - } | ||
5351 | - } | ||
5352 | - spin_unlock_bh(&subscriber->lock); | ||
5353 | + tipc_subscrb_subscrp_delete(subscriber, s); | ||
5354 | } | ||
5355 | |||
5356 | static struct tipc_subscription *tipc_subscrp_create(struct net *net, | ||
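With the refactor above, tipc_subscrb_delete() and tipc_subscrp_cancel() both funnel into tipc_subscrb_subscrp_delete(), and tipc_subscrp_delete() merely drops the reference held by the armed timer, and only when del_timer() actually removed a pending timer (or none was armed, i.e. TIPC_WAIT_FOREVER), because a timeout handler that has already fired drops that reference itself. The sketch below isolates that timer-versus-refcount handshake under hypothetical names (struct pinned, pinned_put(), ...); it is illustrative only.

    #include <linux/jiffies.h>
    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/timer.h>

    struct pinned {
            struct kref kref;
            struct timer_list timer;
            bool timer_armed;       /* true while the timer holds a reference */
    };

    static void pinned_release(struct kref *kref)
    {
            kfree(container_of(kref, struct pinned, kref));
    }

    static void pinned_put(struct pinned *p)
    {
            kref_put(&p->kref, pinned_release);
    }

    /* Timer handler: consumes the reference the armed timer was holding. */
    static void pinned_timeout(unsigned long data)
    {
            struct pinned *p = (struct pinned *)data;

            /* ... notify about the timeout ... */
            pinned_put(p);
    }

    /* Arm the timer; the caller transfers one reference to it. */
    static void pinned_arm(struct pinned *p, unsigned int ms)
    {
            setup_timer(&p->timer, pinned_timeout, (unsigned long)p);
            p->timer_armed = true;
            mod_timer(&p->timer, jiffies + msecs_to_jiffies(ms));
    }

    /* Tear-down: del_timer() returns nonzero only if it removed a
     * still-pending timer, i.e. pinned_timeout() will no longer run and
     * cannot drop the timer's reference, so it must be dropped here.
     */
    static void pinned_delete(struct pinned *p)
    {
            if (!p->timer_armed || del_timer(&p->timer))
                    pinned_put(p);
    }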
5357 | @@ -272,6 +288,7 @@ static struct tipc_subscription *tipc_subscrp_create(struct net *net, | ||
5358 | sub->swap = swap; | ||
5359 | memcpy(&sub->evt.s, s, sizeof(*s)); | ||
5360 | atomic_inc(&tn->subscription_count); | ||
5361 | + kref_init(&sub->kref); | ||
5362 | return sub; | ||
5363 | } | ||
5364 | |||
5365 | @@ -288,17 +305,16 @@ static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s, | ||
5366 | |||
5367 | spin_lock_bh(&subscriber->lock); | ||
5368 | list_add(&sub->subscrp_list, &subscriber->subscrp_list); | ||
5369 | - tipc_subscrb_get(subscriber); | ||
5370 | sub->subscriber = subscriber; | ||
5371 | tipc_nametbl_subscribe(sub); | ||
5372 | + tipc_subscrb_get(subscriber); | ||
5373 | spin_unlock_bh(&subscriber->lock); | ||
5374 | |||
5375 | + setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub); | ||
5376 | timeout = htohl(sub->evt.s.timeout, swap); | ||
5377 | - if (timeout == TIPC_WAIT_FOREVER) | ||
5378 | - return; | ||
5379 | |||
5380 | - setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub); | ||
5381 | - mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout)); | ||
5382 | + if (timeout != TIPC_WAIT_FOREVER) | ||
5383 | + mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout)); | ||
5384 | } | ||
5385 | |||
5386 | /* Handle one termination request for the subscriber */ | ||
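The subscribe-path hunk above also reorders the timer handling: setup_timer() now runs unconditionally, so every subscription carries an initialized timer_list that the del_timer()-based delete path can safely act on, while mod_timer() arms it only for a finite timeout. A short sketch of that "initialize always, arm conditionally" split follows; the names (sub_timeout(), WAIT_FOREVER) are hypothetical and the block is not part of the patch.

    #include <linux/jiffies.h>
    #include <linux/timer.h>
    #include <linux/types.h>

    #define WAIT_FOREVER (~0U)      /* hypothetical "no expiry" marker */

    static void sub_timeout(unsigned long data)
    {
            /* ... send the timeout event, drop the timer's reference ... */
    }

    /* Always initialise the timer so del_timer() is safe on every
     * object; only arm it when the timeout is finite.
     */
    static void sub_setup_timer(struct timer_list *t, unsigned long obj,
                                u32 timeout_ms)
    {
            setup_timer(t, sub_timeout, obj);
            if (timeout_ms != WAIT_FOREVER)
                    mod_timer(t, jiffies + msecs_to_jiffies(timeout_ms));
    }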
5387 | diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h | ||
5388 | index be60103082c9..ffdc214c117a 100644 | ||
5389 | --- a/net/tipc/subscr.h | ||
5390 | +++ b/net/tipc/subscr.h | ||
5391 | @@ -57,6 +57,7 @@ struct tipc_subscriber; | ||
5392 | * @evt: template for events generated by subscription | ||
5393 | */ | ||
5394 | struct tipc_subscription { | ||
5395 | + struct kref kref; | ||
5396 | struct tipc_subscriber *subscriber; | ||
5397 | struct net *net; | ||
5398 | struct timer_list timer; |