Contents of /trunk/kernel-magellan/patches-3.17/0103-3.17.4-all-fixes.patch
Parent Directory | Revision Log
Revision 2534 -
(show annotations)
(download)
Fri Jan 30 10:05:22 2015 UTC (9 years, 7 months ago) by niro
File size: 221247 byte(s)
-linux-3.17.4
1 | diff --git a/Documentation/devicetree/bindings/ata/sata_rcar.txt b/Documentation/devicetree/bindings/ata/sata_rcar.txt |
2 | index 1e6111333fa8..7dd32d321a34 100644 |
3 | --- a/Documentation/devicetree/bindings/ata/sata_rcar.txt |
4 | +++ b/Documentation/devicetree/bindings/ata/sata_rcar.txt |
5 | @@ -3,7 +3,8 @@ |
6 | Required properties: |
7 | - compatible : should contain one of the following: |
8 | - "renesas,sata-r8a7779" for R-Car H1 |
9 | - - "renesas,sata-r8a7790" for R-Car H2 |
10 | + - "renesas,sata-r8a7790-es1" for R-Car H2 ES1 |
11 | + - "renesas,sata-r8a7790" for R-Car H2 other than ES1 |
12 | - "renesas,sata-r8a7791" for R-Car M2 |
13 | - reg : address and length of the SATA registers; |
14 | - interrupts : must consist of one interrupt specifier. |
15 | diff --git a/Makefile b/Makefile |
16 | index 57a45b1ea2c7..b60b64d65416 100644 |
17 | --- a/Makefile |
18 | +++ b/Makefile |
19 | @@ -1,6 +1,6 @@ |
20 | VERSION = 3 |
21 | PATCHLEVEL = 17 |
22 | -SUBLEVEL = 3 |
23 | +SUBLEVEL = 4 |
24 | EXTRAVERSION = |
25 | NAME = Shuffling Zombie Juror |
26 | |
27 | diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S |
28 | index 413fd94b5301..68be9017593d 100644 |
29 | --- a/arch/arm/boot/compressed/head.S |
30 | +++ b/arch/arm/boot/compressed/head.S |
31 | @@ -397,8 +397,7 @@ dtb_check_done: |
32 | add sp, sp, r6 |
33 | #endif |
34 | |
35 | - tst r4, #1 |
36 | - bleq cache_clean_flush |
37 | + bl cache_clean_flush |
38 | |
39 | adr r0, BSYM(restart) |
40 | add r0, r0, r6 |
41 | @@ -1047,6 +1046,8 @@ cache_clean_flush: |
42 | b call_cache_fn |
43 | |
44 | __armv4_mpu_cache_flush: |
45 | + tst r4, #1 |
46 | + movne pc, lr |
47 | mov r2, #1 |
48 | mov r3, #0 |
49 | mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache |
50 | @@ -1064,6 +1065,8 @@ __armv4_mpu_cache_flush: |
51 | mov pc, lr |
52 | |
53 | __fa526_cache_flush: |
54 | + tst r4, #1 |
55 | + movne pc, lr |
56 | mov r1, #0 |
57 | mcr p15, 0, r1, c7, c14, 0 @ clean and invalidate D cache |
58 | mcr p15, 0, r1, c7, c5, 0 @ flush I cache |
59 | @@ -1072,13 +1075,16 @@ __fa526_cache_flush: |
60 | |
61 | __armv6_mmu_cache_flush: |
62 | mov r1, #0 |
63 | - mcr p15, 0, r1, c7, c14, 0 @ clean+invalidate D |
64 | + tst r4, #1 |
65 | + mcreq p15, 0, r1, c7, c14, 0 @ clean+invalidate D |
66 | mcr p15, 0, r1, c7, c5, 0 @ invalidate I+BTB |
67 | - mcr p15, 0, r1, c7, c15, 0 @ clean+invalidate unified |
68 | + mcreq p15, 0, r1, c7, c15, 0 @ clean+invalidate unified |
69 | mcr p15, 0, r1, c7, c10, 4 @ drain WB |
70 | mov pc, lr |
71 | |
72 | __armv7_mmu_cache_flush: |
73 | + tst r4, #1 |
74 | + bne iflush |
75 | mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1 |
76 | tst r10, #0xf << 16 @ hierarchical cache (ARMv7) |
77 | mov r10, #0 |
78 | @@ -1139,6 +1145,8 @@ iflush: |
79 | mov pc, lr |
80 | |
81 | __armv5tej_mmu_cache_flush: |
82 | + tst r4, #1 |
83 | + movne pc, lr |
84 | 1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache |
85 | bne 1b |
86 | mcr p15, 0, r0, c7, c5, 0 @ flush I cache |
87 | @@ -1146,6 +1154,8 @@ __armv5tej_mmu_cache_flush: |
88 | mov pc, lr |
89 | |
90 | __armv4_mmu_cache_flush: |
91 | + tst r4, #1 |
92 | + movne pc, lr |
93 | mov r2, #64*1024 @ default: 32K dcache size (*2) |
94 | mov r11, #32 @ default: 32 byte line size |
95 | mrc p15, 0, r3, c0, c0, 1 @ read cache type |
96 | @@ -1179,6 +1189,8 @@ no_cache_id: |
97 | |
98 | __armv3_mmu_cache_flush: |
99 | __armv3_mpu_cache_flush: |
100 | + tst r4, #1 |
101 | + movne pc, lr |
102 | mov r1, #0 |
103 | mcr p15, 0, r1, c7, c0, 0 @ invalidate whole cache v3 |
104 | mov pc, lr |
105 | diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts |
106 | index e2156a583de7..c4b968f0feb5 100644 |
107 | --- a/arch/arm/boot/dts/am335x-evm.dts |
108 | +++ b/arch/arm/boot/dts/am335x-evm.dts |
109 | @@ -489,7 +489,7 @@ |
110 | reg = <0x00060000 0x00020000>; |
111 | }; |
112 | partition@4 { |
113 | - label = "NAND.u-boot-spl"; |
114 | + label = "NAND.u-boot-spl-os"; |
115 | reg = <0x00080000 0x00040000>; |
116 | }; |
117 | partition@5 { |
118 | diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c |
119 | index 6478626e3ff6..d0d39f150fab 100644 |
120 | --- a/arch/arm/mach-mvebu/board-v7.c |
121 | +++ b/arch/arm/mach-mvebu/board-v7.c |
122 | @@ -188,7 +188,7 @@ static void __init thermal_quirk(void) |
123 | |
124 | static void __init mvebu_dt_init(void) |
125 | { |
126 | - if (of_machine_is_compatible("plathome,openblocks-ax3-4")) |
127 | + if (of_machine_is_compatible("marvell,armadaxp")) |
128 | i2c_quirk(); |
129 | if (of_machine_is_compatible("marvell,a375-db")) { |
130 | external_abort_quirk(); |
131 | diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig |
132 | index ae69809a9e47..7eb94e6fc376 100644 |
133 | --- a/arch/arm/mm/Kconfig |
134 | +++ b/arch/arm/mm/Kconfig |
135 | @@ -798,6 +798,7 @@ config NEED_KUSER_HELPERS |
136 | |
137 | config KUSER_HELPERS |
138 | bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS |
139 | + depends on MMU |
140 | default y |
141 | help |
142 | Warning: disabling this option may break user programs. |
143 | diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S |
144 | index 619b1dd7bcde..d18a44940968 100644 |
145 | --- a/arch/arm64/kernel/efi-entry.S |
146 | +++ b/arch/arm64/kernel/efi-entry.S |
147 | @@ -54,18 +54,17 @@ ENTRY(efi_stub_entry) |
148 | b.eq efi_load_fail |
149 | |
150 | /* |
151 | - * efi_entry() will have relocated the kernel image if necessary |
152 | - * and we return here with device tree address in x0 and the kernel |
153 | - * entry point stored at *image_addr. Save those values in registers |
154 | - * which are callee preserved. |
155 | + * efi_entry() will have copied the kernel image if necessary and we |
156 | + * return here with device tree address in x0 and the kernel entry |
157 | + * point stored at *image_addr. Save those values in registers which |
158 | + * are callee preserved. |
159 | */ |
160 | mov x20, x0 // DTB address |
161 | ldr x0, [sp, #16] // relocated _text address |
162 | mov x21, x0 |
163 | |
164 | /* |
165 | - * Flush dcache covering current runtime addresses |
166 | - * of kernel text/data. Then flush all of icache. |
167 | + * Calculate size of the kernel Image (same for original and copy). |
168 | */ |
169 | adrp x1, _text |
170 | add x1, x1, #:lo12:_text |
171 | @@ -73,9 +72,24 @@ ENTRY(efi_stub_entry) |
172 | add x2, x2, #:lo12:_edata |
173 | sub x1, x2, x1 |
174 | |
175 | + /* |
176 | + * Flush the copied Image to the PoC, and ensure it is not shadowed by |
177 | + * stale icache entries from before relocation. |
178 | + */ |
179 | bl __flush_dcache_area |
180 | ic ialluis |
181 | |
182 | + /* |
183 | + * Ensure that the rest of this function (in the original Image) is |
184 | + * visible when the caches are disabled. The I-cache can't have stale |
185 | + * entries for the VA range of the current image, so no maintenance is |
186 | + * necessary. |
187 | + */ |
188 | + adr x0, efi_stub_entry |
189 | + adr x1, efi_stub_entry_end |
190 | + sub x1, x1, x0 |
191 | + bl __flush_dcache_area |
192 | + |
193 | /* Turn off Dcache and MMU */ |
194 | mrs x0, CurrentEL |
195 | cmp x0, #CurrentEL_EL2 |
196 | @@ -105,4 +119,5 @@ efi_load_fail: |
197 | ldp x29, x30, [sp], #32 |
198 | ret |
199 | |
200 | +efi_stub_entry_end: |
201 | ENDPROC(efi_stub_entry) |
202 | diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c |
203 | index 92f36835486b..565e26f23f31 100644 |
204 | --- a/arch/arm64/kernel/insn.c |
205 | +++ b/arch/arm64/kernel/insn.c |
206 | @@ -156,9 +156,10 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg) |
207 | * which ends with "dsb; isb" pair guaranteeing global |
208 | * visibility. |
209 | */ |
210 | - atomic_set(&pp->cpu_count, -1); |
211 | + /* Notify other processors with an additional increment. */ |
212 | + atomic_inc(&pp->cpu_count); |
213 | } else { |
214 | - while (atomic_read(&pp->cpu_count) != -1) |
215 | + while (atomic_read(&pp->cpu_count) <= num_online_cpus()) |
216 | cpu_relax(); |
217 | isb(); |
218 | } |
219 | diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S |
220 | index 6e0ed93d51fe..c17967fdf5f6 100644 |
221 | --- a/arch/arm64/lib/clear_user.S |
222 | +++ b/arch/arm64/lib/clear_user.S |
223 | @@ -46,7 +46,7 @@ USER(9f, strh wzr, [x0], #2 ) |
224 | sub x1, x1, #2 |
225 | 4: adds x1, x1, #1 |
226 | b.mi 5f |
227 | - strb wzr, [x0] |
228 | +USER(9f, strb wzr, [x0] ) |
229 | 5: mov x0, #0 |
230 | ret |
231 | ENDPROC(__clear_user) |
232 | diff --git a/arch/parisc/include/uapi/asm/shmbuf.h b/arch/parisc/include/uapi/asm/shmbuf.h |
233 | index 0a3eada1863b..f395cde7b593 100644 |
234 | --- a/arch/parisc/include/uapi/asm/shmbuf.h |
235 | +++ b/arch/parisc/include/uapi/asm/shmbuf.h |
236 | @@ -36,23 +36,16 @@ struct shmid64_ds { |
237 | unsigned int __unused2; |
238 | }; |
239 | |
240 | -#ifdef CONFIG_64BIT |
241 | -/* The 'unsigned int' (formerly 'unsigned long') data types below will |
242 | - * ensure that a 32-bit app calling shmctl(*,IPC_INFO,*) will work on |
243 | - * a wide kernel, but if some of these values are meant to contain pointers |
244 | - * they may need to be 'long long' instead. -PB XXX FIXME |
245 | - */ |
246 | -#endif |
247 | struct shminfo64 { |
248 | - unsigned int shmmax; |
249 | - unsigned int shmmin; |
250 | - unsigned int shmmni; |
251 | - unsigned int shmseg; |
252 | - unsigned int shmall; |
253 | - unsigned int __unused1; |
254 | - unsigned int __unused2; |
255 | - unsigned int __unused3; |
256 | - unsigned int __unused4; |
257 | + unsigned long shmmax; |
258 | + unsigned long shmmin; |
259 | + unsigned long shmmni; |
260 | + unsigned long shmseg; |
261 | + unsigned long shmall; |
262 | + unsigned long __unused1; |
263 | + unsigned long __unused2; |
264 | + unsigned long __unused3; |
265 | + unsigned long __unused4; |
266 | }; |
267 | |
268 | #endif /* _PARISC_SHMBUF_H */ |
269 | diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S |
270 | index b563d9c8268b..d183a440b1b0 100644 |
271 | --- a/arch/parisc/kernel/syscall_table.S |
272 | +++ b/arch/parisc/kernel/syscall_table.S |
273 | @@ -286,11 +286,11 @@ |
274 | ENTRY_COMP(msgsnd) |
275 | ENTRY_COMP(msgrcv) |
276 | ENTRY_SAME(msgget) /* 190 */ |
277 | - ENTRY_SAME(msgctl) |
278 | - ENTRY_SAME(shmat) |
279 | + ENTRY_COMP(msgctl) |
280 | + ENTRY_COMP(shmat) |
281 | ENTRY_SAME(shmdt) |
282 | ENTRY_SAME(shmget) |
283 | - ENTRY_SAME(shmctl) /* 195 */ |
284 | + ENTRY_COMP(shmctl) /* 195 */ |
285 | ENTRY_SAME(ni_syscall) /* streams1 */ |
286 | ENTRY_SAME(ni_syscall) /* streams2 */ |
287 | ENTRY_SAME(lstat64) |
288 | @@ -323,7 +323,7 @@ |
289 | ENTRY_SAME(epoll_ctl) /* 225 */ |
290 | ENTRY_SAME(epoll_wait) |
291 | ENTRY_SAME(remap_file_pages) |
292 | - ENTRY_SAME(semtimedop) |
293 | + ENTRY_COMP(semtimedop) |
294 | ENTRY_COMP(mq_open) |
295 | ENTRY_SAME(mq_unlink) /* 230 */ |
296 | ENTRY_COMP(mq_timedsend) |
297 | diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h |
298 | index 7aed2be45b44..2701fb68a24f 100644 |
299 | --- a/arch/sparc/include/asm/atomic_32.h |
300 | +++ b/arch/sparc/include/asm/atomic_32.h |
301 | @@ -22,7 +22,7 @@ |
302 | |
303 | int __atomic_add_return(int, atomic_t *); |
304 | int atomic_cmpxchg(atomic_t *, int, int); |
305 | -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) |
306 | +int atomic_xchg(atomic_t *, int); |
307 | int __atomic_add_unless(atomic_t *, int, int); |
308 | void atomic_set(atomic_t *, int); |
309 | |
310 | diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h |
311 | index 32c29a133f9d..d38b52dca216 100644 |
312 | --- a/arch/sparc/include/asm/cmpxchg_32.h |
313 | +++ b/arch/sparc/include/asm/cmpxchg_32.h |
314 | @@ -11,22 +11,14 @@ |
315 | #ifndef __ARCH_SPARC_CMPXCHG__ |
316 | #define __ARCH_SPARC_CMPXCHG__ |
317 | |
318 | -static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val) |
319 | -{ |
320 | - __asm__ __volatile__("swap [%2], %0" |
321 | - : "=&r" (val) |
322 | - : "0" (val), "r" (m) |
323 | - : "memory"); |
324 | - return val; |
325 | -} |
326 | - |
327 | +unsigned long __xchg_u32(volatile u32 *m, u32 new); |
328 | void __xchg_called_with_bad_pointer(void); |
329 | |
330 | static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size) |
331 | { |
332 | switch (size) { |
333 | case 4: |
334 | - return xchg_u32(ptr, x); |
335 | + return __xchg_u32(ptr, x); |
336 | } |
337 | __xchg_called_with_bad_pointer(); |
338 | return x; |
339 | diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h |
340 | index e0f6c399f1d0..2986120acac5 100644 |
341 | --- a/arch/sparc/include/asm/vio.h |
342 | +++ b/arch/sparc/include/asm/vio.h |
343 | @@ -118,12 +118,18 @@ struct vio_disk_attr_info { |
344 | u8 vdisk_type; |
345 | #define VD_DISK_TYPE_SLICE 0x01 /* Slice in block device */ |
346 | #define VD_DISK_TYPE_DISK 0x02 /* Entire block device */ |
347 | - u16 resv1; |
348 | + u8 vdisk_mtype; /* v1.1 */ |
349 | +#define VD_MEDIA_TYPE_FIXED 0x01 /* Fixed device */ |
350 | +#define VD_MEDIA_TYPE_CD 0x02 /* CD Device */ |
351 | +#define VD_MEDIA_TYPE_DVD 0x03 /* DVD Device */ |
352 | + u8 resv1; |
353 | u32 vdisk_block_size; |
354 | u64 operations; |
355 | - u64 vdisk_size; |
356 | + u64 vdisk_size; /* v1.1 */ |
357 | u64 max_xfer_size; |
358 | - u64 resv2[2]; |
359 | + u32 phys_block_size; /* v1.2 */ |
360 | + u32 resv2; |
361 | + u64 resv3[1]; |
362 | }; |
363 | |
364 | struct vio_disk_desc { |
365 | @@ -259,7 +265,7 @@ static inline u32 vio_dring_avail(struct vio_dring_state *dr, |
366 | unsigned int ring_size) |
367 | { |
368 | return (dr->pending - |
369 | - ((dr->prod - dr->cons) & (ring_size - 1))); |
370 | + ((dr->prod - dr->cons) & (ring_size - 1)) - 1); |
371 | } |
372 | |
373 | #define VIO_MAX_TYPE_LEN 32 |
374 | diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c |
375 | index 8f76f23dac38..f9c6813c132d 100644 |
376 | --- a/arch/sparc/kernel/pci_schizo.c |
377 | +++ b/arch/sparc/kernel/pci_schizo.c |
378 | @@ -581,7 +581,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm) |
379 | { |
380 | unsigned long csr_reg, csr, csr_error_bits; |
381 | irqreturn_t ret = IRQ_NONE; |
382 | - u16 stat; |
383 | + u32 stat; |
384 | |
385 | csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL; |
386 | csr = upa_readq(csr_reg); |
387 | @@ -617,7 +617,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm) |
388 | pbm->name); |
389 | ret = IRQ_HANDLED; |
390 | } |
391 | - pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat); |
392 | + pbm->pci_ops->read(pbm->pci_bus, 0, PCI_STATUS, 2, &stat); |
393 | if (stat & (PCI_STATUS_PARITY | |
394 | PCI_STATUS_SIG_TARGET_ABORT | |
395 | PCI_STATUS_REC_TARGET_ABORT | |
396 | @@ -625,7 +625,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm) |
397 | PCI_STATUS_SIG_SYSTEM_ERROR)) { |
398 | printk("%s: PCI bus error, PCI_STATUS[%04x]\n", |
399 | pbm->name, stat); |
400 | - pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff); |
401 | + pbm->pci_ops->write(pbm->pci_bus, 0, PCI_STATUS, 2, 0xffff); |
402 | ret = IRQ_HANDLED; |
403 | } |
404 | return ret; |
405 | diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c |
406 | index c9300bfaee5a..81954ee7c47c 100644 |
407 | --- a/arch/sparc/kernel/smp_64.c |
408 | +++ b/arch/sparc/kernel/smp_64.c |
409 | @@ -816,13 +816,17 @@ void arch_send_call_function_single_ipi(int cpu) |
410 | void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs) |
411 | { |
412 | clear_softint(1 << irq); |
413 | + irq_enter(); |
414 | generic_smp_call_function_interrupt(); |
415 | + irq_exit(); |
416 | } |
417 | |
418 | void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs) |
419 | { |
420 | clear_softint(1 << irq); |
421 | + irq_enter(); |
422 | generic_smp_call_function_single_interrupt(); |
423 | + irq_exit(); |
424 | } |
425 | |
426 | static void tsb_sync(void *info) |
427 | diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c |
428 | index 1d32b54089aa..8f2f94d53434 100644 |
429 | --- a/arch/sparc/lib/atomic32.c |
430 | +++ b/arch/sparc/lib/atomic32.c |
431 | @@ -40,6 +40,19 @@ int __atomic_add_return(int i, atomic_t *v) |
432 | } |
433 | EXPORT_SYMBOL(__atomic_add_return); |
434 | |
435 | +int atomic_xchg(atomic_t *v, int new) |
436 | +{ |
437 | + int ret; |
438 | + unsigned long flags; |
439 | + |
440 | + spin_lock_irqsave(ATOMIC_HASH(v), flags); |
441 | + ret = v->counter; |
442 | + v->counter = new; |
443 | + spin_unlock_irqrestore(ATOMIC_HASH(v), flags); |
444 | + return ret; |
445 | +} |
446 | +EXPORT_SYMBOL(atomic_xchg); |
447 | + |
448 | int atomic_cmpxchg(atomic_t *v, int old, int new) |
449 | { |
450 | int ret; |
451 | @@ -132,3 +145,17 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new) |
452 | return (unsigned long)prev; |
453 | } |
454 | EXPORT_SYMBOL(__cmpxchg_u32); |
455 | + |
456 | +unsigned long __xchg_u32(volatile u32 *ptr, u32 new) |
457 | +{ |
458 | + unsigned long flags; |
459 | + u32 prev; |
460 | + |
461 | + spin_lock_irqsave(ATOMIC_HASH(ptr), flags); |
462 | + prev = *ptr; |
463 | + *ptr = new; |
464 | + spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags); |
465 | + |
466 | + return (unsigned long)prev; |
467 | +} |
468 | +EXPORT_SYMBOL(__xchg_u32); |
469 | diff --git a/arch/x86/.gitignore b/arch/x86/.gitignore |
470 | index 7cab8c08e6d1..aff152c87cf4 100644 |
471 | --- a/arch/x86/.gitignore |
472 | +++ b/arch/x86/.gitignore |
473 | @@ -1,4 +1,6 @@ |
474 | boot/compressed/vmlinux |
475 | tools/test_get_len |
476 | tools/insn_sanity |
477 | +purgatory/kexec-purgatory.c |
478 | +purgatory/purgatory.ro |
479 | |
480 | diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile |
481 | index 0fcd9133790c..14fe7cba21d1 100644 |
482 | --- a/arch/x86/boot/compressed/Makefile |
483 | +++ b/arch/x86/boot/compressed/Makefile |
484 | @@ -75,8 +75,10 @@ suffix-$(CONFIG_KERNEL_XZ) := xz |
485 | suffix-$(CONFIG_KERNEL_LZO) := lzo |
486 | suffix-$(CONFIG_KERNEL_LZ4) := lz4 |
487 | |
488 | +RUN_SIZE = $(shell objdump -h vmlinux | \ |
489 | + perl $(srctree)/arch/x86/tools/calc_run_size.pl) |
490 | quiet_cmd_mkpiggy = MKPIGGY $@ |
491 | - cmd_mkpiggy = $(obj)/mkpiggy $< > $@ || ( rm -f $@ ; false ) |
492 | + cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false ) |
493 | |
494 | targets += piggy.S |
495 | $(obj)/piggy.S: $(obj)/vmlinux.bin.$(suffix-y) $(obj)/mkpiggy FORCE |
496 | diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S |
497 | index cbed1407a5cd..1d7fbbcc196d 100644 |
498 | --- a/arch/x86/boot/compressed/head_32.S |
499 | +++ b/arch/x86/boot/compressed/head_32.S |
500 | @@ -207,7 +207,8 @@ relocated: |
501 | * Do the decompression, and jump to the new kernel.. |
502 | */ |
503 | /* push arguments for decompress_kernel: */ |
504 | - pushl $z_output_len /* decompressed length */ |
505 | + pushl $z_run_size /* size of kernel with .bss and .brk */ |
506 | + pushl $z_output_len /* decompressed length, end of relocs */ |
507 | leal z_extract_offset_negative(%ebx), %ebp |
508 | pushl %ebp /* output address */ |
509 | pushl $z_input_len /* input_len */ |
510 | @@ -217,7 +218,7 @@ relocated: |
511 | pushl %eax /* heap area */ |
512 | pushl %esi /* real mode pointer */ |
513 | call decompress_kernel /* returns kernel location in %eax */ |
514 | - addl $24, %esp |
515 | + addl $28, %esp |
516 | |
517 | /* |
518 | * Jump to the decompressed kernel. |
519 | diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S |
520 | index 2884e0c3e8a5..6b1766c6c082 100644 |
521 | --- a/arch/x86/boot/compressed/head_64.S |
522 | +++ b/arch/x86/boot/compressed/head_64.S |
523 | @@ -402,13 +402,16 @@ relocated: |
524 | * Do the decompression, and jump to the new kernel.. |
525 | */ |
526 | pushq %rsi /* Save the real mode argument */ |
527 | + movq $z_run_size, %r9 /* size of kernel with .bss and .brk */ |
528 | + pushq %r9 |
529 | movq %rsi, %rdi /* real mode address */ |
530 | leaq boot_heap(%rip), %rsi /* malloc area for uncompression */ |
531 | leaq input_data(%rip), %rdx /* input_data */ |
532 | movl $z_input_len, %ecx /* input_len */ |
533 | movq %rbp, %r8 /* output target address */ |
534 | - movq $z_output_len, %r9 /* decompressed length */ |
535 | + movq $z_output_len, %r9 /* decompressed length, end of relocs */ |
536 | call decompress_kernel /* returns kernel location in %rax */ |
537 | + popq %r9 |
538 | popq %rsi |
539 | |
540 | /* |
541 | diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c |
542 | index 57ab74df7eea..30dd59a9f0b4 100644 |
543 | --- a/arch/x86/boot/compressed/misc.c |
544 | +++ b/arch/x86/boot/compressed/misc.c |
545 | @@ -358,7 +358,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, |
546 | unsigned char *input_data, |
547 | unsigned long input_len, |
548 | unsigned char *output, |
549 | - unsigned long output_len) |
550 | + unsigned long output_len, |
551 | + unsigned long run_size) |
552 | { |
553 | real_mode = rmode; |
554 | |
555 | @@ -381,8 +382,14 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, |
556 | free_mem_ptr = heap; /* Heap */ |
557 | free_mem_end_ptr = heap + BOOT_HEAP_SIZE; |
558 | |
559 | - output = choose_kernel_location(input_data, input_len, |
560 | - output, output_len); |
561 | + /* |
562 | + * The memory hole needed for the kernel is the larger of either |
563 | + * the entire decompressed kernel plus relocation table, or the |
564 | + * entire decompressed kernel plus .bss and .brk sections. |
565 | + */ |
566 | + output = choose_kernel_location(input_data, input_len, output, |
567 | + output_len > run_size ? output_len |
568 | + : run_size); |
569 | |
570 | /* Validate memory location choices. */ |
571 | if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1)) |
572 | diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c |
573 | index b669ab65bf6c..d8222f213182 100644 |
574 | --- a/arch/x86/boot/compressed/mkpiggy.c |
575 | +++ b/arch/x86/boot/compressed/mkpiggy.c |
576 | @@ -36,11 +36,13 @@ int main(int argc, char *argv[]) |
577 | uint32_t olen; |
578 | long ilen; |
579 | unsigned long offs; |
580 | + unsigned long run_size; |
581 | FILE *f = NULL; |
582 | int retval = 1; |
583 | |
584 | - if (argc < 2) { |
585 | - fprintf(stderr, "Usage: %s compressed_file\n", argv[0]); |
586 | + if (argc < 3) { |
587 | + fprintf(stderr, "Usage: %s compressed_file run_size\n", |
588 | + argv[0]); |
589 | goto bail; |
590 | } |
591 | |
592 | @@ -74,6 +76,7 @@ int main(int argc, char *argv[]) |
593 | offs += olen >> 12; /* Add 8 bytes for each 32K block */ |
594 | offs += 64*1024 + 128; /* Add 64K + 128 bytes slack */ |
595 | offs = (offs+4095) & ~4095; /* Round to a 4K boundary */ |
596 | + run_size = atoi(argv[2]); |
597 | |
598 | printf(".section \".rodata..compressed\",\"a\",@progbits\n"); |
599 | printf(".globl z_input_len\n"); |
600 | @@ -85,6 +88,8 @@ int main(int argc, char *argv[]) |
601 | /* z_extract_offset_negative allows simplification of head_32.S */ |
602 | printf(".globl z_extract_offset_negative\n"); |
603 | printf("z_extract_offset_negative = -0x%lx\n", offs); |
604 | + printf(".globl z_run_size\n"); |
605 | + printf("z_run_size = %lu\n", run_size); |
606 | |
607 | printf(".globl input_data, input_data_end\n"); |
608 | printf("input_data:\n"); |
609 | diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c |
610 | index 617a9e284245..b63773ba1646 100644 |
611 | --- a/arch/x86/kernel/cpu/microcode/amd_early.c |
612 | +++ b/arch/x86/kernel/cpu/microcode/amd_early.c |
613 | @@ -108,12 +108,13 @@ static size_t compute_container_size(u8 *data, u32 total_size) |
614 | * load_microcode_amd() to save equivalent cpu table and microcode patches in |
615 | * kernel heap memory. |
616 | */ |
617 | -static void apply_ucode_in_initrd(void *ucode, size_t size) |
618 | +static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch) |
619 | { |
620 | struct equiv_cpu_entry *eq; |
621 | size_t *cont_sz; |
622 | u32 *header; |
623 | u8 *data, **cont; |
624 | + u8 (*patch)[PATCH_MAX_SIZE]; |
625 | u16 eq_id = 0; |
626 | int offset, left; |
627 | u32 rev, eax, ebx, ecx, edx; |
628 | @@ -123,10 +124,12 @@ static void apply_ucode_in_initrd(void *ucode, size_t size) |
629 | new_rev = (u32 *)__pa_nodebug(&ucode_new_rev); |
630 | cont_sz = (size_t *)__pa_nodebug(&container_size); |
631 | cont = (u8 **)__pa_nodebug(&container); |
632 | + patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch); |
633 | #else |
634 | new_rev = &ucode_new_rev; |
635 | cont_sz = &container_size; |
636 | cont = &container; |
637 | + patch = &amd_ucode_patch; |
638 | #endif |
639 | |
640 | data = ucode; |
641 | @@ -213,9 +216,9 @@ static void apply_ucode_in_initrd(void *ucode, size_t size) |
642 | rev = mc->hdr.patch_id; |
643 | *new_rev = rev; |
644 | |
645 | - /* save ucode patch */ |
646 | - memcpy(amd_ucode_patch, mc, |
647 | - min_t(u32, header[1], PATCH_MAX_SIZE)); |
648 | + if (save_patch) |
649 | + memcpy(patch, mc, |
650 | + min_t(u32, header[1], PATCH_MAX_SIZE)); |
651 | } |
652 | } |
653 | |
654 | @@ -246,7 +249,7 @@ void __init load_ucode_amd_bsp(void) |
655 | *data = cp.data; |
656 | *size = cp.size; |
657 | |
658 | - apply_ucode_in_initrd(cp.data, cp.size); |
659 | + apply_ucode_in_initrd(cp.data, cp.size, true); |
660 | } |
661 | |
662 | #ifdef CONFIG_X86_32 |
663 | @@ -263,7 +266,7 @@ void load_ucode_amd_ap(void) |
664 | size_t *usize; |
665 | void **ucode; |
666 | |
667 | - mc = (struct microcode_amd *)__pa(amd_ucode_patch); |
668 | + mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch); |
669 | if (mc->hdr.patch_id && mc->hdr.processor_rev_id) { |
670 | __apply_microcode_amd(mc); |
671 | return; |
672 | @@ -275,7 +278,7 @@ void load_ucode_amd_ap(void) |
673 | if (!*ucode || !*usize) |
674 | return; |
675 | |
676 | - apply_ucode_in_initrd(*ucode, *usize); |
677 | + apply_ucode_in_initrd(*ucode, *usize, false); |
678 | } |
679 | |
680 | static void __init collect_cpu_sig_on_bsp(void *arg) |
681 | @@ -339,7 +342,7 @@ void load_ucode_amd_ap(void) |
682 | * AP has a different equivalence ID than BSP, looks like |
683 | * mixed-steppings silicon so go through the ucode blob anew. |
684 | */ |
685 | - apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size); |
686 | + apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false); |
687 | } |
688 | } |
689 | #endif |
690 | @@ -347,7 +350,9 @@ void load_ucode_amd_ap(void) |
691 | int __init save_microcode_in_initrd_amd(void) |
692 | { |
693 | unsigned long cont; |
694 | + int retval = 0; |
695 | enum ucode_state ret; |
696 | + u8 *cont_va; |
697 | u32 eax; |
698 | |
699 | if (!container) |
700 | @@ -355,13 +360,15 @@ int __init save_microcode_in_initrd_amd(void) |
701 | |
702 | #ifdef CONFIG_X86_32 |
703 | get_bsp_sig(); |
704 | - cont = (unsigned long)container; |
705 | + cont = (unsigned long)container; |
706 | + cont_va = __va(container); |
707 | #else |
708 | /* |
709 | * We need the physical address of the container for both bitness since |
710 | * boot_params.hdr.ramdisk_image is a physical address. |
711 | */ |
712 | - cont = __pa(container); |
713 | + cont = __pa(container); |
714 | + cont_va = container; |
715 | #endif |
716 | |
717 | /* |
718 | @@ -372,6 +379,8 @@ int __init save_microcode_in_initrd_amd(void) |
719 | if (relocated_ramdisk) |
720 | container = (u8 *)(__va(relocated_ramdisk) + |
721 | (cont - boot_params.hdr.ramdisk_image)); |
722 | + else |
723 | + container = cont_va; |
724 | |
725 | if (ucode_new_rev) |
726 | pr_info("microcode: updated early to new patch_level=0x%08x\n", |
727 | @@ -382,7 +391,7 @@ int __init save_microcode_in_initrd_amd(void) |
728 | |
729 | ret = load_microcode_amd(eax, container, container_size); |
730 | if (ret != UCODE_OK) |
731 | - return -EINVAL; |
732 | + retval = -EINVAL; |
733 | |
734 | /* |
735 | * This will be freed any msec now, stash patches for the current |
736 | @@ -391,5 +400,5 @@ int __init save_microcode_in_initrd_amd(void) |
737 | container = NULL; |
738 | container_size = 0; |
739 | |
740 | - return 0; |
741 | + return retval; |
742 | } |
743 | diff --git a/arch/x86/kernel/cpu/microcode/core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c |
744 | index 5f28a64e71ea..2c017f242a78 100644 |
745 | --- a/arch/x86/kernel/cpu/microcode/core_early.c |
746 | +++ b/arch/x86/kernel/cpu/microcode/core_early.c |
747 | @@ -124,7 +124,7 @@ void __init load_ucode_bsp(void) |
748 | static bool check_loader_disabled_ap(void) |
749 | { |
750 | #ifdef CONFIG_X86_32 |
751 | - return __pa_nodebug(dis_ucode_ldr); |
752 | + return *((bool *)__pa_nodebug(&dis_ucode_ldr)); |
753 | #else |
754 | return dis_ucode_ldr; |
755 | #endif |
756 | diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c |
757 | index 678c0ada3b3c..b1a5dfa24789 100644 |
758 | --- a/arch/x86/kernel/ptrace.c |
759 | +++ b/arch/x86/kernel/ptrace.c |
760 | @@ -1441,15 +1441,6 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, |
761 | force_sig_info(SIGTRAP, &info, tsk); |
762 | } |
763 | |
764 | - |
765 | -#ifdef CONFIG_X86_32 |
766 | -# define IS_IA32 1 |
767 | -#elif defined CONFIG_IA32_EMULATION |
768 | -# define IS_IA32 is_compat_task() |
769 | -#else |
770 | -# define IS_IA32 0 |
771 | -#endif |
772 | - |
773 | /* |
774 | * We must return the syscall number to actually look up in the table. |
775 | * This can be -1L to skip running any syscall at all. |
776 | @@ -1487,7 +1478,7 @@ long syscall_trace_enter(struct pt_regs *regs) |
777 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) |
778 | trace_sys_enter(regs, regs->orig_ax); |
779 | |
780 | - if (IS_IA32) |
781 | + if (is_ia32_task()) |
782 | audit_syscall_entry(AUDIT_ARCH_I386, |
783 | regs->orig_ax, |
784 | regs->bx, regs->cx, |
785 | diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c |
786 | index 77c77fe84f13..9254069f0d08 100644 |
787 | --- a/arch/x86/kvm/emulate.c |
788 | +++ b/arch/x86/kvm/emulate.c |
789 | @@ -4272,6 +4272,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, |
790 | fetch_register_operand(op); |
791 | break; |
792 | case OpCL: |
793 | + op->type = OP_IMM; |
794 | op->bytes = 1; |
795 | op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff; |
796 | break; |
797 | @@ -4279,6 +4280,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, |
798 | rc = decode_imm(ctxt, op, 1, true); |
799 | break; |
800 | case OpOne: |
801 | + op->type = OP_IMM; |
802 | op->bytes = 1; |
803 | op->val = 1; |
804 | break; |
805 | @@ -4337,21 +4339,27 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, |
806 | ctxt->memop.bytes = ctxt->op_bytes + 2; |
807 | goto mem_common; |
808 | case OpES: |
809 | + op->type = OP_IMM; |
810 | op->val = VCPU_SREG_ES; |
811 | break; |
812 | case OpCS: |
813 | + op->type = OP_IMM; |
814 | op->val = VCPU_SREG_CS; |
815 | break; |
816 | case OpSS: |
817 | + op->type = OP_IMM; |
818 | op->val = VCPU_SREG_SS; |
819 | break; |
820 | case OpDS: |
821 | + op->type = OP_IMM; |
822 | op->val = VCPU_SREG_DS; |
823 | break; |
824 | case OpFS: |
825 | + op->type = OP_IMM; |
826 | op->val = VCPU_SREG_FS; |
827 | break; |
828 | case OpGS: |
829 | + op->type = OP_IMM; |
830 | op->val = VCPU_SREG_GS; |
831 | break; |
832 | case OpImplicit: |
833 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
834 | index 9d292e8372d6..d6aeccf116fa 100644 |
835 | --- a/arch/x86/kvm/x86.c |
836 | +++ b/arch/x86/kvm/x86.c |
837 | @@ -5002,7 +5002,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu) |
838 | |
839 | ++vcpu->stat.insn_emulation_fail; |
840 | trace_kvm_emulate_insn_failed(vcpu); |
841 | - if (!is_guest_mode(vcpu)) { |
842 | + if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) { |
843 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
844 | vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; |
845 | vcpu->run->internal.ndata = 0; |
846 | diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl |
847 | new file mode 100644 |
848 | index 000000000000..0b0b124d3ece |
849 | --- /dev/null |
850 | +++ b/arch/x86/tools/calc_run_size.pl |
851 | @@ -0,0 +1,30 @@ |
852 | +#!/usr/bin/perl |
853 | +# |
854 | +# Calculate the amount of space needed to run the kernel, including room for |
855 | +# the .bss and .brk sections. |
856 | +# |
857 | +# Usage: |
858 | +# objdump -h a.out | perl calc_run_size.pl |
859 | +use strict; |
860 | + |
861 | +my $mem_size = 0; |
862 | +my $file_offset = 0; |
863 | + |
864 | +my $sections=" *[0-9]+ \.(?:bss|brk) +"; |
865 | +while (<>) { |
866 | + if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) { |
867 | + my $size = hex($1); |
868 | + my $offset = hex($2); |
869 | + $mem_size += $size; |
870 | + if ($file_offset == 0) { |
871 | + $file_offset = $offset; |
872 | + } elsif ($file_offset != $offset) { |
873 | + die ".bss and .brk lack common file offset\n"; |
874 | + } |
875 | + } |
876 | +} |
877 | + |
878 | +if ($file_offset == 0) { |
879 | + die "Never found .bss or .brk file offset\n"; |
880 | +} |
881 | +printf("%d\n", $mem_size + $file_offset); |
882 | diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h |
883 | index 8883fc877c5c..b1744a3474a6 100644 |
884 | --- a/arch/xtensa/include/uapi/asm/unistd.h |
885 | +++ b/arch/xtensa/include/uapi/asm/unistd.h |
886 | @@ -384,7 +384,8 @@ __SYSCALL(174, sys_chroot, 1) |
887 | #define __NR_pivot_root 175 |
888 | __SYSCALL(175, sys_pivot_root, 2) |
889 | #define __NR_umount 176 |
890 | -__SYSCALL(176, sys_umount, 2) |
891 | +__SYSCALL(176, sys_oldumount, 1) |
892 | +#define __ARCH_WANT_SYS_OLDUMOUNT |
893 | #define __NR_swapoff 177 |
894 | __SYSCALL(177, sys_swapoff, 1) |
895 | #define __NR_sync 178 |
896 | diff --git a/block/ioprio.c b/block/ioprio.c |
897 | index e50170ca7c33..31666c92b46a 100644 |
898 | --- a/block/ioprio.c |
899 | +++ b/block/ioprio.c |
900 | @@ -157,14 +157,16 @@ out: |
901 | |
902 | int ioprio_best(unsigned short aprio, unsigned short bprio) |
903 | { |
904 | - unsigned short aclass = IOPRIO_PRIO_CLASS(aprio); |
905 | - unsigned short bclass = IOPRIO_PRIO_CLASS(bprio); |
906 | + unsigned short aclass; |
907 | + unsigned short bclass; |
908 | |
909 | - if (aclass == IOPRIO_CLASS_NONE) |
910 | - aclass = IOPRIO_CLASS_BE; |
911 | - if (bclass == IOPRIO_CLASS_NONE) |
912 | - bclass = IOPRIO_CLASS_BE; |
913 | + if (!ioprio_valid(aprio)) |
914 | + aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM); |
915 | + if (!ioprio_valid(bprio)) |
916 | + bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM); |
917 | |
918 | + aclass = IOPRIO_PRIO_CLASS(aprio); |
919 | + bclass = IOPRIO_PRIO_CLASS(bprio); |
920 | if (aclass == bclass) |
921 | return min(aprio, bprio); |
922 | if (aclass > bclass) |
923 | diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c |
924 | index a0cc0edafc78..597b15e7f6e5 100644 |
925 | --- a/drivers/ata/ahci.c |
926 | +++ b/drivers/ata/ahci.c |
927 | @@ -60,6 +60,7 @@ enum board_ids { |
928 | /* board IDs by feature in alphabetical order */ |
929 | board_ahci, |
930 | board_ahci_ign_iferr, |
931 | + board_ahci_nomsi, |
932 | board_ahci_noncq, |
933 | board_ahci_nosntf, |
934 | board_ahci_yes_fbs, |
935 | @@ -121,6 +122,13 @@ static const struct ata_port_info ahci_port_info[] = { |
936 | .udma_mask = ATA_UDMA6, |
937 | .port_ops = &ahci_ops, |
938 | }, |
939 | + [board_ahci_nomsi] = { |
940 | + AHCI_HFLAGS (AHCI_HFLAG_NO_MSI), |
941 | + .flags = AHCI_FLAG_COMMON, |
942 | + .pio_mask = ATA_PIO4, |
943 | + .udma_mask = ATA_UDMA6, |
944 | + .port_ops = &ahci_ops, |
945 | + }, |
946 | [board_ahci_noncq] = { |
947 | AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ), |
948 | .flags = AHCI_FLAG_COMMON, |
949 | @@ -313,6 +321,11 @@ static const struct pci_device_id ahci_pci_tbl[] = { |
950 | { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */ |
951 | { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */ |
952 | { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */ |
953 | + { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */ |
954 | + { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */ |
955 | + { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ |
956 | + { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ |
957 | + { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ |
958 | |
959 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ |
960 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
961 | @@ -475,10 +488,10 @@ static const struct pci_device_id ahci_pci_tbl[] = { |
962 | { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ |
963 | |
964 | /* |
965 | - * Samsung SSDs found on some macbooks. NCQ times out. |
966 | - * https://bugzilla.kernel.org/show_bug.cgi?id=60731 |
967 | + * Samsung SSDs found on some macbooks. NCQ times out if MSI is |
968 | + * enabled. https://bugzilla.kernel.org/show_bug.cgi?id=60731 |
969 | */ |
970 | - { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq }, |
971 | + { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi }, |
972 | |
973 | /* Enmotus */ |
974 | { PCI_DEVICE(0x1c44, 0x8000), board_ahci }, |
975 | @@ -514,12 +527,9 @@ MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)"); |
976 | static void ahci_pci_save_initial_config(struct pci_dev *pdev, |
977 | struct ahci_host_priv *hpriv) |
978 | { |
979 | - unsigned int force_port_map = 0; |
980 | - unsigned int mask_port_map = 0; |
981 | - |
982 | if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) { |
983 | dev_info(&pdev->dev, "JMB361 has only one port\n"); |
984 | - force_port_map = 1; |
985 | + hpriv->force_port_map = 1; |
986 | } |
987 | |
988 | /* |
989 | @@ -529,9 +539,9 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev, |
990 | */ |
991 | if (hpriv->flags & AHCI_HFLAG_MV_PATA) { |
992 | if (pdev->device == 0x6121) |
993 | - mask_port_map = 0x3; |
994 | + hpriv->mask_port_map = 0x3; |
995 | else |
996 | - mask_port_map = 0xf; |
997 | + hpriv->mask_port_map = 0xf; |
998 | dev_info(&pdev->dev, |
999 | "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n"); |
1000 | } |
1001 | diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c |
1002 | index 61eb6d77dac7..8732e42db3a9 100644 |
1003 | --- a/drivers/ata/sata_rcar.c |
1004 | +++ b/drivers/ata/sata_rcar.c |
1005 | @@ -146,6 +146,7 @@ |
1006 | enum sata_rcar_type { |
1007 | RCAR_GEN1_SATA, |
1008 | RCAR_GEN2_SATA, |
1009 | + RCAR_R8A7790_ES1_SATA, |
1010 | }; |
1011 | |
1012 | struct sata_rcar_priv { |
1013 | @@ -763,6 +764,9 @@ static void sata_rcar_setup_port(struct ata_host *host) |
1014 | ap->udma_mask = ATA_UDMA6; |
1015 | ap->flags |= ATA_FLAG_SATA; |
1016 | |
1017 | + if (priv->type == RCAR_R8A7790_ES1_SATA) |
1018 | + ap->flags |= ATA_FLAG_NO_DIPM; |
1019 | + |
1020 | ioaddr->cmd_addr = base + SDATA_REG; |
1021 | ioaddr->ctl_addr = base + SSDEVCON_REG; |
1022 | ioaddr->scr_addr = base + SCRSSTS_REG; |
1023 | @@ -792,6 +796,7 @@ static void sata_rcar_init_controller(struct ata_host *host) |
1024 | sata_rcar_gen1_phy_init(priv); |
1025 | break; |
1026 | case RCAR_GEN2_SATA: |
1027 | + case RCAR_R8A7790_ES1_SATA: |
1028 | sata_rcar_gen2_phy_init(priv); |
1029 | break; |
1030 | default: |
1031 | @@ -838,6 +843,10 @@ static struct of_device_id sata_rcar_match[] = { |
1032 | .data = (void *)RCAR_GEN2_SATA |
1033 | }, |
1034 | { |
1035 | + .compatible = "renesas,sata-r8a7790-es1", |
1036 | + .data = (void *)RCAR_R8A7790_ES1_SATA |
1037 | + }, |
1038 | + { |
1039 | .compatible = "renesas,sata-r8a7791", |
1040 | .data = (void *)RCAR_GEN2_SATA |
1041 | }, |
1042 | @@ -849,6 +858,7 @@ static const struct platform_device_id sata_rcar_id_table[] = { |
1043 | { "sata_rcar", RCAR_GEN1_SATA }, /* Deprecated by "sata-r8a7779" */ |
1044 | { "sata-r8a7779", RCAR_GEN1_SATA }, |
1045 | { "sata-r8a7790", RCAR_GEN2_SATA }, |
1046 | + { "sata-r8a7790-es1", RCAR_R8A7790_ES1_SATA }, |
1047 | { "sata-r8a7791", RCAR_GEN2_SATA }, |
1048 | { }, |
1049 | }; |
1050 | diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c |
1051 | index 5814deb6963d..0ebadf93b6c5 100644 |
1052 | --- a/drivers/block/sunvdc.c |
1053 | +++ b/drivers/block/sunvdc.c |
1054 | @@ -9,6 +9,7 @@ |
1055 | #include <linux/blkdev.h> |
1056 | #include <linux/hdreg.h> |
1057 | #include <linux/genhd.h> |
1058 | +#include <linux/cdrom.h> |
1059 | #include <linux/slab.h> |
1060 | #include <linux/spinlock.h> |
1061 | #include <linux/completion.h> |
1062 | @@ -22,8 +23,8 @@ |
1063 | |
1064 | #define DRV_MODULE_NAME "sunvdc" |
1065 | #define PFX DRV_MODULE_NAME ": " |
1066 | -#define DRV_MODULE_VERSION "1.0" |
1067 | -#define DRV_MODULE_RELDATE "June 25, 2007" |
1068 | +#define DRV_MODULE_VERSION "1.1" |
1069 | +#define DRV_MODULE_RELDATE "February 13, 2013" |
1070 | |
1071 | static char version[] = |
1072 | DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; |
1073 | @@ -32,7 +33,7 @@ MODULE_DESCRIPTION("Sun LDOM virtual disk client driver"); |
1074 | MODULE_LICENSE("GPL"); |
1075 | MODULE_VERSION(DRV_MODULE_VERSION); |
1076 | |
1077 | -#define VDC_TX_RING_SIZE 256 |
1078 | +#define VDC_TX_RING_SIZE 512 |
1079 | |
1080 | #define WAITING_FOR_LINK_UP 0x01 |
1081 | #define WAITING_FOR_TX_SPACE 0x02 |
1082 | @@ -65,11 +66,9 @@ struct vdc_port { |
1083 | u64 operations; |
1084 | u32 vdisk_size; |
1085 | u8 vdisk_type; |
1086 | + u8 vdisk_mtype; |
1087 | |
1088 | char disk_name[32]; |
1089 | - |
1090 | - struct vio_disk_geom geom; |
1091 | - struct vio_disk_vtoc label; |
1092 | }; |
1093 | |
1094 | static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio) |
1095 | @@ -79,9 +78,16 @@ static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio) |
1096 | |
1097 | /* Ordered from largest major to lowest */ |
1098 | static struct vio_version vdc_versions[] = { |
1099 | + { .major = 1, .minor = 1 }, |
1100 | { .major = 1, .minor = 0 }, |
1101 | }; |
1102 | |
1103 | +static inline int vdc_version_supported(struct vdc_port *port, |
1104 | + u16 major, u16 minor) |
1105 | +{ |
1106 | + return port->vio.ver.major == major && port->vio.ver.minor >= minor; |
1107 | +} |
1108 | + |
1109 | #define VDCBLK_NAME "vdisk" |
1110 | static int vdc_major; |
1111 | #define PARTITION_SHIFT 3 |
1112 | @@ -94,18 +100,54 @@ static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr) |
1113 | static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
1114 | { |
1115 | struct gendisk *disk = bdev->bd_disk; |
1116 | - struct vdc_port *port = disk->private_data; |
1117 | + sector_t nsect = get_capacity(disk); |
1118 | + sector_t cylinders = nsect; |
1119 | |
1120 | - geo->heads = (u8) port->geom.num_hd; |
1121 | - geo->sectors = (u8) port->geom.num_sec; |
1122 | - geo->cylinders = port->geom.num_cyl; |
1123 | + geo->heads = 0xff; |
1124 | + geo->sectors = 0x3f; |
1125 | + sector_div(cylinders, geo->heads * geo->sectors); |
1126 | + geo->cylinders = cylinders; |
1127 | + if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect) |
1128 | + geo->cylinders = 0xffff; |
1129 | |
1130 | return 0; |
1131 | } |
1132 | |
1133 | +/* Add ioctl/CDROM_GET_CAPABILITY to support cdrom_id in udev |
1134 | + * when vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD. |
1135 | + * Needed to be able to install inside an ldom from an iso image. |
1136 | + */ |
1137 | +static int vdc_ioctl(struct block_device *bdev, fmode_t mode, |
1138 | + unsigned command, unsigned long argument) |
1139 | +{ |
1140 | + int i; |
1141 | + struct gendisk *disk; |
1142 | + |
1143 | + switch (command) { |
1144 | + case CDROMMULTISESSION: |
1145 | + pr_debug(PFX "Multisession CDs not supported\n"); |
1146 | + for (i = 0; i < sizeof(struct cdrom_multisession); i++) |
1147 | + if (put_user(0, (char __user *)(argument + i))) |
1148 | + return -EFAULT; |
1149 | + return 0; |
1150 | + |
1151 | + case CDROM_GET_CAPABILITY: |
1152 | + disk = bdev->bd_disk; |
1153 | + |
1154 | + if (bdev->bd_disk && (disk->flags & GENHD_FL_CD)) |
1155 | + return 0; |
1156 | + return -EINVAL; |
1157 | + |
1158 | + default: |
1159 | + pr_debug(PFX "ioctl %08x not supported\n", command); |
1160 | + return -EINVAL; |
1161 | + } |
1162 | +} |
1163 | + |
1164 | static const struct block_device_operations vdc_fops = { |
1165 | .owner = THIS_MODULE, |
1166 | .getgeo = vdc_getgeo, |
1167 | + .ioctl = vdc_ioctl, |
1168 | }; |
1169 | |
1170 | static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for) |
1171 | @@ -165,9 +207,9 @@ static int vdc_handle_attr(struct vio_driver_state *vio, void *arg) |
1172 | struct vio_disk_attr_info *pkt = arg; |
1173 | |
1174 | viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] " |
1175 | - "xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n", |
1176 | + "mtype[0x%x] xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n", |
1177 | pkt->tag.stype, pkt->operations, |
1178 | - pkt->vdisk_size, pkt->vdisk_type, |
1179 | + pkt->vdisk_size, pkt->vdisk_type, pkt->vdisk_mtype, |
1180 | pkt->xfer_mode, pkt->vdisk_block_size, |
1181 | pkt->max_xfer_size); |
1182 | |
1183 | @@ -192,8 +234,11 @@ static int vdc_handle_attr(struct vio_driver_state *vio, void *arg) |
1184 | } |
1185 | |
1186 | port->operations = pkt->operations; |
1187 | - port->vdisk_size = pkt->vdisk_size; |
1188 | port->vdisk_type = pkt->vdisk_type; |
1189 | + if (vdc_version_supported(port, 1, 1)) { |
1190 | + port->vdisk_size = pkt->vdisk_size; |
1191 | + port->vdisk_mtype = pkt->vdisk_mtype; |
1192 | + } |
1193 | if (pkt->max_xfer_size < port->max_xfer_size) |
1194 | port->max_xfer_size = pkt->max_xfer_size; |
1195 | port->vdisk_block_size = pkt->vdisk_block_size; |
1196 | @@ -236,7 +281,9 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr, |
1197 | |
1198 | __blk_end_request(req, (desc->status ? -EIO : 0), desc->size); |
1199 | |
1200 | - if (blk_queue_stopped(port->disk->queue)) |
1201 | + /* restart blk queue when ring is half emptied */ |
1202 | + if (blk_queue_stopped(port->disk->queue) && |
1203 | + vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50) |
1204 | blk_start_queue(port->disk->queue); |
1205 | } |
1206 | |
1207 | @@ -388,12 +435,6 @@ static int __send_request(struct request *req) |
1208 | for (i = 0; i < nsg; i++) |
1209 | len += sg[i].length; |
1210 | |
1211 | - if (unlikely(vdc_tx_dring_avail(dr) < 1)) { |
1212 | - blk_stop_queue(port->disk->queue); |
1213 | - err = -ENOMEM; |
1214 | - goto out; |
1215 | - } |
1216 | - |
1217 | desc = vio_dring_cur(dr); |
1218 | |
1219 | err = ldc_map_sg(port->vio.lp, sg, nsg, |
1220 | @@ -433,21 +474,32 @@ static int __send_request(struct request *req) |
1221 | port->req_id++; |
1222 | dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1); |
1223 | } |
1224 | -out: |
1225 | |
1226 | return err; |
1227 | } |
1228 | |
1229 | -static void do_vdc_request(struct request_queue *q) |
1230 | +static void do_vdc_request(struct request_queue *rq) |
1231 | { |
1232 | - while (1) { |
1233 | - struct request *req = blk_fetch_request(q); |
1234 | + struct request *req; |
1235 | |
1236 | - if (!req) |
1237 | - break; |
1238 | + while ((req = blk_peek_request(rq)) != NULL) { |
1239 | + struct vdc_port *port; |
1240 | + struct vio_dring_state *dr; |
1241 | |
1242 | - if (__send_request(req) < 0) |
1243 | - __blk_end_request_all(req, -EIO); |
1244 | + port = req->rq_disk->private_data; |
1245 | + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; |
1246 | + if (unlikely(vdc_tx_dring_avail(dr) < 1)) |
1247 | + goto wait; |
1248 | + |
1249 | + blk_start_request(req); |
1250 | + |
1251 | + if (__send_request(req) < 0) { |
1252 | + blk_requeue_request(rq, req); |
1253 | +wait: |
1254 | + /* Avoid pointless unplugs. */ |
1255 | + blk_stop_queue(rq); |
1256 | + break; |
1257 | + } |
1258 | } |
1259 | } |
1260 | |
1261 | @@ -656,25 +708,27 @@ static int probe_disk(struct vdc_port *port) |
1262 | if (comp.err) |
1263 | return comp.err; |
1264 | |
1265 | - err = generic_request(port, VD_OP_GET_VTOC, |
1266 | - &port->label, sizeof(port->label)); |
1267 | - if (err < 0) { |
1268 | - printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err); |
1269 | - return err; |
1270 | - } |
1271 | - |
1272 | - err = generic_request(port, VD_OP_GET_DISKGEOM, |
1273 | - &port->geom, sizeof(port->geom)); |
1274 | - if (err < 0) { |
1275 | - printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns " |
1276 | - "error %d\n", err); |
1277 | - return err; |
1278 | + if (vdc_version_supported(port, 1, 1)) { |
1279 | + /* vdisk_size should be set during the handshake, if it wasn't |
1280 | + * then the underlying disk is reserved by another system |
1281 | + */ |
1282 | + if (port->vdisk_size == -1) |
1283 | + return -ENODEV; |
1284 | + } else { |
1285 | + struct vio_disk_geom geom; |
1286 | + |
1287 | + err = generic_request(port, VD_OP_GET_DISKGEOM, |
1288 | + &geom, sizeof(geom)); |
1289 | + if (err < 0) { |
1290 | + printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns " |
1291 | + "error %d\n", err); |
1292 | + return err; |
1293 | + } |
1294 | + port->vdisk_size = ((u64)geom.num_cyl * |
1295 | + (u64)geom.num_hd * |
1296 | + (u64)geom.num_sec); |
1297 | } |
1298 | |
1299 | - port->vdisk_size = ((u64)port->geom.num_cyl * |
1300 | - (u64)port->geom.num_hd * |
1301 | - (u64)port->geom.num_sec); |
1302 | - |
1303 | q = blk_init_queue(do_vdc_request, &port->vio.lock); |
1304 | if (!q) { |
1305 | printk(KERN_ERR PFX "%s: Could not allocate queue.\n", |
1306 | @@ -691,6 +745,10 @@ static int probe_disk(struct vdc_port *port) |
1307 | |
1308 | port->disk = g; |
1309 | |
1310 | + /* Each segment in a request is up to an aligned page in size. */ |
1311 | + blk_queue_segment_boundary(q, PAGE_SIZE - 1); |
1312 | + blk_queue_max_segment_size(q, PAGE_SIZE); |
1313 | + |
1314 | blk_queue_max_segments(q, port->ring_cookies); |
1315 | blk_queue_max_hw_sectors(q, port->max_xfer_size); |
1316 | g->major = vdc_major; |
1317 | @@ -704,9 +762,32 @@ static int probe_disk(struct vdc_port *port) |
1318 | |
1319 | set_capacity(g, port->vdisk_size); |
1320 | |
1321 | - printk(KERN_INFO PFX "%s: %u sectors (%u MB)\n", |
1322 | + if (vdc_version_supported(port, 1, 1)) { |
1323 | + switch (port->vdisk_mtype) { |
1324 | + case VD_MEDIA_TYPE_CD: |
1325 | + pr_info(PFX "Virtual CDROM %s\n", port->disk_name); |
1326 | + g->flags |= GENHD_FL_CD; |
1327 | + g->flags |= GENHD_FL_REMOVABLE; |
1328 | + set_disk_ro(g, 1); |
1329 | + break; |
1330 | + |
1331 | + case VD_MEDIA_TYPE_DVD: |
1332 | + pr_info(PFX "Virtual DVD %s\n", port->disk_name); |
1333 | + g->flags |= GENHD_FL_CD; |
1334 | + g->flags |= GENHD_FL_REMOVABLE; |
1335 | + set_disk_ro(g, 1); |
1336 | + break; |
1337 | + |
1338 | + case VD_MEDIA_TYPE_FIXED: |
1339 | + pr_info(PFX "Virtual Hard disk %s\n", port->disk_name); |
1340 | + break; |
1341 | + } |
1342 | + } |
1343 | + |
1344 | + pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n", |
1345 | g->disk_name, |
1346 | - port->vdisk_size, (port->vdisk_size >> (20 - 9))); |
1347 | + port->vdisk_size, (port->vdisk_size >> (20 - 9)), |
1348 | + port->vio.ver.major, port->vio.ver.minor); |
1349 | |
1350 | add_disk(g); |
1351 | |
1352 | @@ -765,6 +846,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) |
1353 | else |
1354 | snprintf(port->disk_name, sizeof(port->disk_name), |
1355 | VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26)); |
1356 | + port->vdisk_size = -1; |
1357 | |
1358 | err = vio_driver_init(&port->vio, vdev, VDEV_DISK, |
1359 | vdc_versions, ARRAY_SIZE(vdc_versions), |
1360 | diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c |
1361 | index d00831c3d731..cc1b58b82a48 100644 |
1362 | --- a/drivers/block/zram/zram_drv.c |
1363 | +++ b/drivers/block/zram/zram_drv.c |
1364 | @@ -476,7 +476,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, |
1365 | } |
1366 | |
1367 | if (page_zero_filled(uncmem)) { |
1368 | - kunmap_atomic(user_mem); |
1369 | + if (user_mem) |
1370 | + kunmap_atomic(user_mem); |
1371 | /* Free memory associated with this sector now. */ |
1372 | bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); |
1373 | zram_free_page(zram, index); |
1374 | diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c |
1375 | index ab7ffdec0ec3..f38f2c13e79c 100644 |
1376 | --- a/drivers/char/hw_random/pseries-rng.c |
1377 | +++ b/drivers/char/hw_random/pseries-rng.c |
1378 | @@ -25,18 +25,21 @@ |
1379 | #include <asm/vio.h> |
1380 | |
1381 | |
1382 | -static int pseries_rng_data_read(struct hwrng *rng, u32 *data) |
1383 | +static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) |
1384 | { |
1385 | + u64 buffer[PLPAR_HCALL_BUFSIZE]; |
1386 | + size_t size = max < 8 ? max : 8; |
1387 | int rc; |
1388 | |
1389 | - rc = plpar_hcall(H_RANDOM, (unsigned long *)data); |
1390 | + rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer); |
1391 | if (rc != H_SUCCESS) { |
1392 | pr_err_ratelimited("H_RANDOM call failed %d\n", rc); |
1393 | return -EIO; |
1394 | } |
1395 | + memcpy(data, buffer, size); |
1396 | |
1397 | /* The hypervisor interface returns 64 bits */ |
1398 | - return 8; |
1399 | + return size; |
1400 | } |
1401 | |
1402 | /** |
1403 | @@ -55,7 +58,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev) |
1404 | |
1405 | static struct hwrng pseries_rng = { |
1406 | .name = KBUILD_MODNAME, |
1407 | - .data_read = pseries_rng_data_read, |
1408 | + .read = pseries_rng_read, |
1409 | }; |
1410 | |
1411 | static int __init pseries_rng_probe(struct vio_dev *dev, |
1412 | diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c |
1413 | index c05821e8de41..07c827637b17 100644 |
1414 | --- a/drivers/cpufreq/cpufreq.c |
1415 | +++ b/drivers/cpufreq/cpufreq.c |
1416 | @@ -1022,7 +1022,8 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu) |
1417 | |
1418 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1419 | |
1420 | - policy->governor = NULL; |
1421 | + if (policy) |
1422 | + policy->governor = NULL; |
1423 | |
1424 | return policy; |
1425 | } |
1426 | diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c |
1427 | index e9cc753d5095..f347ab7eea95 100644 |
1428 | --- a/drivers/crypto/caam/caamhash.c |
1429 | +++ b/drivers/crypto/caam/caamhash.c |
1430 | @@ -836,8 +836,9 @@ static int ahash_update_ctx(struct ahash_request *req) |
1431 | edesc->sec4_sg + sec4_sg_src_index, |
1432 | chained); |
1433 | if (*next_buflen) { |
1434 | - sg_copy_part(next_buf, req->src, to_hash - |
1435 | - *buflen, req->nbytes); |
1436 | + scatterwalk_map_and_copy(next_buf, req->src, |
1437 | + to_hash - *buflen, |
1438 | + *next_buflen, 0); |
1439 | state->current_buf = !state->current_buf; |
1440 | } |
1441 | } else { |
1442 | @@ -878,7 +879,8 @@ static int ahash_update_ctx(struct ahash_request *req) |
1443 | kfree(edesc); |
1444 | } |
1445 | } else if (*next_buflen) { |
1446 | - sg_copy(buf + *buflen, req->src, req->nbytes); |
1447 | + scatterwalk_map_and_copy(buf + *buflen, req->src, 0, |
1448 | + req->nbytes, 0); |
1449 | *buflen = *next_buflen; |
1450 | *next_buflen = last_buflen; |
1451 | } |
1452 | @@ -1262,8 +1264,9 @@ static int ahash_update_no_ctx(struct ahash_request *req) |
1453 | src_map_to_sec4_sg(jrdev, req->src, src_nents, |
1454 | edesc->sec4_sg + 1, chained); |
1455 | if (*next_buflen) { |
1456 | - sg_copy_part(next_buf, req->src, to_hash - *buflen, |
1457 | - req->nbytes); |
1458 | + scatterwalk_map_and_copy(next_buf, req->src, |
1459 | + to_hash - *buflen, |
1460 | + *next_buflen, 0); |
1461 | state->current_buf = !state->current_buf; |
1462 | } |
1463 | |
1464 | @@ -1304,7 +1307,8 @@ static int ahash_update_no_ctx(struct ahash_request *req) |
1465 | kfree(edesc); |
1466 | } |
1467 | } else if (*next_buflen) { |
1468 | - sg_copy(buf + *buflen, req->src, req->nbytes); |
1469 | + scatterwalk_map_and_copy(buf + *buflen, req->src, 0, |
1470 | + req->nbytes, 0); |
1471 | *buflen = *next_buflen; |
1472 | *next_buflen = 0; |
1473 | } |
1474 | @@ -1476,7 +1480,8 @@ static int ahash_update_first(struct ahash_request *req) |
1475 | } |
1476 | |
1477 | if (*next_buflen) |
1478 | - sg_copy_part(next_buf, req->src, to_hash, req->nbytes); |
1479 | + scatterwalk_map_and_copy(next_buf, req->src, to_hash, |
1480 | + *next_buflen, 0); |
1481 | |
1482 | sh_len = desc_len(sh_desc); |
1483 | desc = edesc->hw_desc; |
1484 | @@ -1511,7 +1516,8 @@ static int ahash_update_first(struct ahash_request *req) |
1485 | state->update = ahash_update_no_ctx; |
1486 | state->finup = ahash_finup_no_ctx; |
1487 | state->final = ahash_final_no_ctx; |
1488 | - sg_copy(next_buf, req->src, req->nbytes); |
1489 | + scatterwalk_map_and_copy(next_buf, req->src, 0, |
1490 | + req->nbytes, 0); |
1491 | } |
1492 | #ifdef DEBUG |
1493 | print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", |
1494 | diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c |
1495 | index 871703c49d2c..e1eaf4ff9762 100644 |
1496 | --- a/drivers/crypto/caam/key_gen.c |
1497 | +++ b/drivers/crypto/caam/key_gen.c |
1498 | @@ -48,23 +48,29 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, |
1499 | u32 *desc; |
1500 | struct split_key_result result; |
1501 | dma_addr_t dma_addr_in, dma_addr_out; |
1502 | - int ret = 0; |
1503 | + int ret = -ENOMEM; |
1504 | |
1505 | desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); |
1506 | if (!desc) { |
1507 | dev_err(jrdev, "unable to allocate key input memory\n"); |
1508 | - return -ENOMEM; |
1509 | + return ret; |
1510 | } |
1511 | |
1512 | - init_job_desc(desc, 0); |
1513 | - |
1514 | dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen, |
1515 | DMA_TO_DEVICE); |
1516 | if (dma_mapping_error(jrdev, dma_addr_in)) { |
1517 | dev_err(jrdev, "unable to map key input memory\n"); |
1518 | - kfree(desc); |
1519 | - return -ENOMEM; |
1520 | + goto out_free; |
1521 | } |
1522 | + |
1523 | + dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len, |
1524 | + DMA_FROM_DEVICE); |
1525 | + if (dma_mapping_error(jrdev, dma_addr_out)) { |
1526 | + dev_err(jrdev, "unable to map key output memory\n"); |
1527 | + goto out_unmap_in; |
1528 | + } |
1529 | + |
1530 | + init_job_desc(desc, 0); |
1531 | append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG); |
1532 | |
1533 | /* Sets MDHA up into an HMAC-INIT */ |
1534 | @@ -81,13 +87,6 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, |
1535 | * FIFO_STORE with the explicit split-key content store |
1536 | * (0x26 output type) |
1537 | */ |
1538 | - dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len, |
1539 | - DMA_FROM_DEVICE); |
1540 | - if (dma_mapping_error(jrdev, dma_addr_out)) { |
1541 | - dev_err(jrdev, "unable to map key output memory\n"); |
1542 | - kfree(desc); |
1543 | - return -ENOMEM; |
1544 | - } |
1545 | append_fifo_store(desc, dma_addr_out, split_key_len, |
1546 | LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); |
1547 | |
1548 | @@ -115,10 +114,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, |
1549 | |
1550 | dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len, |
1551 | DMA_FROM_DEVICE); |
1552 | +out_unmap_in: |
1553 | dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE); |
1554 | - |
1555 | +out_free: |
1556 | kfree(desc); |
1557 | - |
1558 | return ret; |
1559 | } |
1560 | EXPORT_SYMBOL(gen_split_key); |
1561 | diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h |
1562 | index b12ff85f4241..ce28a563effc 100644 |
1563 | --- a/drivers/crypto/caam/sg_sw_sec4.h |
1564 | +++ b/drivers/crypto/caam/sg_sw_sec4.h |
1565 | @@ -116,57 +116,3 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg, |
1566 | } |
1567 | return nents; |
1568 | } |
1569 | - |
1570 | -/* Map SG page in kernel virtual address space and copy */ |
1571 | -static inline void sg_map_copy(u8 *dest, struct scatterlist *sg, |
1572 | - int len, int offset) |
1573 | -{ |
1574 | - u8 *mapped_addr; |
1575 | - |
1576 | - /* |
1577 | - * Page here can be user-space pinned using get_user_pages |
1578 | - * Same must be kmapped before use and kunmapped subsequently |
1579 | - */ |
1580 | - mapped_addr = kmap_atomic(sg_page(sg)); |
1581 | - memcpy(dest, mapped_addr + offset, len); |
1582 | - kunmap_atomic(mapped_addr); |
1583 | -} |
1584 | - |
1585 | -/* Copy from len bytes of sg to dest, starting from beginning */ |
1586 | -static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len) |
1587 | -{ |
1588 | - struct scatterlist *current_sg = sg; |
1589 | - int cpy_index = 0, next_cpy_index = current_sg->length; |
1590 | - |
1591 | - while (next_cpy_index < len) { |
1592 | - sg_map_copy(dest + cpy_index, current_sg, current_sg->length, |
1593 | - current_sg->offset); |
1594 | - current_sg = scatterwalk_sg_next(current_sg); |
1595 | - cpy_index = next_cpy_index; |
1596 | - next_cpy_index += current_sg->length; |
1597 | - } |
1598 | - if (cpy_index < len) |
1599 | - sg_map_copy(dest + cpy_index, current_sg, len-cpy_index, |
1600 | - current_sg->offset); |
1601 | -} |
1602 | - |
1603 | -/* Copy sg data, from to_skip to end, to dest */ |
1604 | -static inline void sg_copy_part(u8 *dest, struct scatterlist *sg, |
1605 | - int to_skip, unsigned int end) |
1606 | -{ |
1607 | - struct scatterlist *current_sg = sg; |
1608 | - int sg_index, cpy_index, offset; |
1609 | - |
1610 | - sg_index = current_sg->length; |
1611 | - while (sg_index <= to_skip) { |
1612 | - current_sg = scatterwalk_sg_next(current_sg); |
1613 | - sg_index += current_sg->length; |
1614 | - } |
1615 | - cpy_index = sg_index - to_skip; |
1616 | - offset = current_sg->offset + current_sg->length - cpy_index; |
1617 | - sg_map_copy(dest, current_sg, cpy_index, offset); |
1618 | - if (end - sg_index) { |
1619 | - current_sg = scatterwalk_sg_next(current_sg); |
1620 | - sg_copy(dest + cpy_index, current_sg, end - sg_index); |
1621 | - } |
1622 | -} |
1623 | diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h |
1624 | index 9282381b03ce..fe7b3f06f6e6 100644 |
1625 | --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h |
1626 | +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h |
1627 | @@ -198,8 +198,7 @@ struct adf_accel_dev { |
1628 | struct dentry *debugfs_dir; |
1629 | struct list_head list; |
1630 | struct module *owner; |
1631 | - uint8_t accel_id; |
1632 | - uint8_t numa_node; |
1633 | struct adf_accel_pci accel_pci_dev; |
1634 | + uint8_t accel_id; |
1635 | } __packed; |
1636 | #endif |
1637 | diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c |
1638 | index 5f3fa45348b4..9dd2cb72a4e8 100644 |
1639 | --- a/drivers/crypto/qat/qat_common/adf_transport.c |
1640 | +++ b/drivers/crypto/qat/qat_common/adf_transport.c |
1641 | @@ -419,9 +419,10 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev, |
1642 | WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0); |
1643 | ring = &bank->rings[i]; |
1644 | if (hw_data->tx_rings_mask & (1 << i)) { |
1645 | - ring->inflights = kzalloc_node(sizeof(atomic_t), |
1646 | - GFP_KERNEL, |
1647 | - accel_dev->numa_node); |
1648 | + ring->inflights = |
1649 | + kzalloc_node(sizeof(atomic_t), |
1650 | + GFP_KERNEL, |
1651 | + dev_to_node(&GET_DEV(accel_dev))); |
1652 | if (!ring->inflights) |
1653 | goto err; |
1654 | } else { |
1655 | @@ -469,13 +470,14 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev) |
1656 | int i, ret; |
1657 | |
1658 | etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL, |
1659 | - accel_dev->numa_node); |
1660 | + dev_to_node(&GET_DEV(accel_dev))); |
1661 | if (!etr_data) |
1662 | return -ENOMEM; |
1663 | |
1664 | num_banks = GET_MAX_BANKS(accel_dev); |
1665 | size = num_banks * sizeof(struct adf_etr_bank_data); |
1666 | - etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node); |
1667 | + etr_data->banks = kzalloc_node(size, GFP_KERNEL, |
1668 | + dev_to_node(&GET_DEV(accel_dev))); |
1669 | if (!etr_data->banks) { |
1670 | ret = -ENOMEM; |
1671 | goto err_bank; |
1672 | diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c |
1673 | index 59df48872955..f50db957b6b1 100644 |
1674 | --- a/drivers/crypto/qat/qat_common/qat_algs.c |
1675 | +++ b/drivers/crypto/qat/qat_common/qat_algs.c |
1676 | @@ -641,7 +641,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, |
1677 | if (unlikely(!n)) |
1678 | return -EINVAL; |
1679 | |
1680 | - bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node); |
1681 | + bufl = kmalloc_node(sz, GFP_ATOMIC, |
1682 | + dev_to_node(&GET_DEV(inst->accel_dev))); |
1683 | if (unlikely(!bufl)) |
1684 | return -ENOMEM; |
1685 | |
1686 | @@ -650,6 +651,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, |
1687 | goto err; |
1688 | |
1689 | for_each_sg(assoc, sg, assoc_n, i) { |
1690 | + if (!sg->length) |
1691 | + continue; |
1692 | bufl->bufers[bufs].addr = dma_map_single(dev, |
1693 | sg_virt(sg), |
1694 | sg->length, |
1695 | @@ -685,7 +688,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, |
1696 | struct qat_alg_buf *bufers; |
1697 | |
1698 | buflout = kmalloc_node(sz, GFP_ATOMIC, |
1699 | - inst->accel_dev->numa_node); |
1700 | + dev_to_node(&GET_DEV(inst->accel_dev))); |
1701 | if (unlikely(!buflout)) |
1702 | goto err; |
1703 | bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE); |
1704 | diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c |
1705 | index 0d59bcb50de1..828f2a686aab 100644 |
1706 | --- a/drivers/crypto/qat/qat_common/qat_crypto.c |
1707 | +++ b/drivers/crypto/qat/qat_common/qat_crypto.c |
1708 | @@ -109,12 +109,14 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) |
1709 | |
1710 | list_for_each(itr, adf_devmgr_get_head()) { |
1711 | accel_dev = list_entry(itr, struct adf_accel_dev, list); |
1712 | - if (accel_dev->numa_node == node && adf_dev_started(accel_dev)) |
1713 | + if ((node == dev_to_node(&GET_DEV(accel_dev)) || |
1714 | + dev_to_node(&GET_DEV(accel_dev)) < 0) |
1715 | + && adf_dev_started(accel_dev)) |
1716 | break; |
1717 | accel_dev = NULL; |
1718 | } |
1719 | if (!accel_dev) { |
1720 | - pr_err("QAT: Could not find device on give node\n"); |
1721 | + pr_err("QAT: Could not find device on node %d\n", node); |
1722 | accel_dev = adf_devmgr_get_first(); |
1723 | } |
1724 | if (!accel_dev || !adf_dev_started(accel_dev)) |
1725 | @@ -164,7 +166,7 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) |
1726 | |
1727 | for (i = 0; i < num_inst; i++) { |
1728 | inst = kzalloc_node(sizeof(*inst), GFP_KERNEL, |
1729 | - accel_dev->numa_node); |
1730 | + dev_to_node(&GET_DEV(accel_dev))); |
1731 | if (!inst) |
1732 | goto err; |
1733 | |
1734 | diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c |
1735 | index 978d6c56639d..53c491b59f07 100644 |
1736 | --- a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c |
1737 | +++ b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c |
1738 | @@ -108,7 +108,7 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev) |
1739 | uint64_t reg_val; |
1740 | |
1741 | admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL, |
1742 | - accel_dev->numa_node); |
1743 | + dev_to_node(&GET_DEV(accel_dev))); |
1744 | if (!admin) |
1745 | return -ENOMEM; |
1746 | admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, |
1747 | diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c |
1748 | index 0d0435a41be9..948f66be262b 100644 |
1749 | --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c |
1750 | +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c |
1751 | @@ -119,21 +119,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) |
1752 | kfree(accel_dev); |
1753 | } |
1754 | |
1755 | -static uint8_t adf_get_dev_node_id(struct pci_dev *pdev) |
1756 | -{ |
1757 | - unsigned int bus_per_cpu = 0; |
1758 | - struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1); |
1759 | - |
1760 | - if (!c->phys_proc_id) |
1761 | - return 0; |
1762 | - |
1763 | - bus_per_cpu = 256 / (c->phys_proc_id + 1); |
1764 | - |
1765 | - if (bus_per_cpu != 0) |
1766 | - return pdev->bus->number / bus_per_cpu; |
1767 | - return 0; |
1768 | -} |
1769 | - |
1770 | static int qat_dev_start(struct adf_accel_dev *accel_dev) |
1771 | { |
1772 | int cpus = num_online_cpus(); |
1773 | @@ -235,7 +220,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
1774 | void __iomem *pmisc_bar_addr = NULL; |
1775 | char name[ADF_DEVICE_NAME_LENGTH]; |
1776 | unsigned int i, bar_nr; |
1777 | - uint8_t node; |
1778 | int ret; |
1779 | |
1780 | switch (ent->device) { |
1781 | @@ -246,12 +230,19 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
1782 | return -ENODEV; |
1783 | } |
1784 | |
1785 | - node = adf_get_dev_node_id(pdev); |
1786 | - accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node); |
1787 | + if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) { |
1788 | + /* If the accelerator is connected to a node with no memory |
1789 | + * there is no point in using the accelerator since the remote |
1790 | + * memory transaction will be very slow. */ |
1791 | + dev_err(&pdev->dev, "Invalid NUMA configuration.\n"); |
1792 | + return -EINVAL; |
1793 | + } |
1794 | + |
1795 | + accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, |
1796 | + dev_to_node(&pdev->dev)); |
1797 | if (!accel_dev) |
1798 | return -ENOMEM; |
1799 | |
1800 | - accel_dev->numa_node = node; |
1801 | INIT_LIST_HEAD(&accel_dev->crypto_list); |
1802 | |
1803 | /* Add accel device to accel table. |
1804 | @@ -264,7 +255,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
1805 | |
1806 | accel_dev->owner = THIS_MODULE; |
1807 | /* Allocate and configure device configuration structure */ |
1808 | - hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node); |
1809 | + hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, |
1810 | + dev_to_node(&pdev->dev)); |
1811 | if (!hw_data) { |
1812 | ret = -ENOMEM; |
1813 | goto out_err; |
1814 | diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c |
1815 | index d4172dedf775..38b80ee4e556 100644 |
1816 | --- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c |
1817 | +++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c |
1818 | @@ -168,7 +168,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev) |
1819 | uint32_t msix_num_entries = hw_data->num_banks + 1; |
1820 | |
1821 | entries = kzalloc_node(msix_num_entries * sizeof(*entries), |
1822 | - GFP_KERNEL, accel_dev->numa_node); |
1823 | + GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); |
1824 | if (!entries) |
1825 | return -ENOMEM; |
1826 | |
1827 | diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c |
1828 | index 5d997a33907e..2a3973a7c441 100644 |
1829 | --- a/drivers/firewire/core-cdev.c |
1830 | +++ b/drivers/firewire/core-cdev.c |
1831 | @@ -1637,8 +1637,7 @@ static int dispatch_ioctl(struct client *client, |
1832 | _IOC_SIZE(cmd) > sizeof(buffer)) |
1833 | return -ENOTTY; |
1834 | |
1835 | - if (_IOC_DIR(cmd) == _IOC_READ) |
1836 | - memset(&buffer, 0, _IOC_SIZE(cmd)); |
1837 | + memset(&buffer, 0, sizeof(buffer)); |
1838 | |
1839 | if (_IOC_DIR(cmd) & _IOC_WRITE) |
1840 | if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd))) |
1841 | diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c |
1842 | index e42925f76b4b..eff83f5c3407 100644 |
1843 | --- a/drivers/gpu/drm/i915/i915_gem_gtt.c |
1844 | +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c |
1845 | @@ -1901,6 +1901,22 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv) |
1846 | GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | |
1847 | GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); |
1848 | |
1849 | + if (!USES_PPGTT(dev_priv->dev)) |
1850 | + /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, |
1851 | + * so RTL will always use the value corresponding to |
1852 | + * pat_sel = 000". |
1853 | + * So let's disable cache for GGTT to avoid screen corruptions. |
1854 | + * MOCS still can be used though. |
1855 | + * - System agent ggtt writes (i.e. cpu gtt mmaps) already work |
1856 | + * before this patch, i.e. the same uncached + snooping access |
1857 | + * like on gen6/7 seems to be in effect. |
1858 | + * - So this just fixes blitter/render access. Again it looks |
1859 | + * like it's not just uncached access, but uncached + snooping. |
1860 | + * So we can still hold onto all our assumptions wrt cpu |
1861 | + * clflushing on LLC machines. |
1862 | + */ |
1863 | + pat = GEN8_PPAT(0, GEN8_PPAT_UC); |
1864 | + |
1865 | /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b |
1866 | * write would work. */ |
1867 | I915_WRITE(GEN8_PRIVATE_PAT, pat); |
1868 | diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c |
1869 | index 9222e20e230c..4b3c09636990 100644 |
1870 | --- a/drivers/gpu/drm/i915/intel_dp.c |
1871 | +++ b/drivers/gpu/drm/i915/intel_dp.c |
1872 | @@ -3354,9 +3354,10 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) |
1873 | } |
1874 | } |
1875 | |
1876 | - /* Training Pattern 3 support */ |
1877 | + /* Training Pattern 3 support, both source and sink */ |
1878 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 && |
1879 | - intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) { |
1880 | + intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED && |
1881 | + (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)) { |
1882 | intel_dp->use_tps3 = true; |
1883 | DRM_DEBUG_KMS("Displayport TPS3 supported"); |
1884 | } else |
1885 | diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c |
1886 | index cbe8a8de85de..0971fbf133d5 100644 |
1887 | --- a/drivers/gpu/drm/i915/intel_panel.c |
1888 | +++ b/drivers/gpu/drm/i915/intel_panel.c |
1889 | @@ -1074,12 +1074,25 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector) |
1890 | struct drm_device *dev = connector->base.dev; |
1891 | struct drm_i915_private *dev_priv = dev->dev_private; |
1892 | struct intel_panel *panel = &connector->panel; |
1893 | + int min; |
1894 | |
1895 | WARN_ON(panel->backlight.max == 0); |
1896 | |
1897 | + /* |
1898 | + * XXX: If the vbt value is 255, it makes min equal to max, which leads |
1899 | + * to problems. There are such machines out there. Either our |
1900 | + * interpretation is wrong or the vbt has bogus data. Or both. Safeguard |
1901 | + * against this by letting the minimum be at most (arbitrarily chosen) |
1902 | + * 25% of the max. |
1903 | + */ |
1904 | + min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64); |
1905 | + if (min != dev_priv->vbt.backlight.min_brightness) { |
1906 | + DRM_DEBUG_KMS("clamping VBT min backlight %d/255 to %d/255\n", |
1907 | + dev_priv->vbt.backlight.min_brightness, min); |
1908 | + } |
1909 | + |
1910 | /* vbt value is a coefficient in range [0..255] */ |
1911 | - return scale(dev_priv->vbt.backlight.min_brightness, 0, 255, |
1912 | - 0, panel->backlight.max); |
1913 | + return scale(min, 0, 255, 0, panel->backlight.max); |
1914 | } |
1915 | |
1916 | static int bdw_setup_backlight(struct intel_connector *connector) |
1917 | diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c |
1918 | index 15da7ef344a4..ec1593a6a561 100644 |
1919 | --- a/drivers/gpu/drm/radeon/atom.c |
1920 | +++ b/drivers/gpu/drm/radeon/atom.c |
1921 | @@ -1217,7 +1217,7 @@ free: |
1922 | return ret; |
1923 | } |
1924 | |
1925 | -int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) |
1926 | +int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t * params) |
1927 | { |
1928 | int r; |
1929 | |
1930 | @@ -1238,6 +1238,15 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) |
1931 | return r; |
1932 | } |
1933 | |
1934 | +int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) |
1935 | +{ |
1936 | + int r; |
1937 | + mutex_lock(&ctx->scratch_mutex); |
1938 | + r = atom_execute_table_scratch_unlocked(ctx, index, params); |
1939 | + mutex_unlock(&ctx->scratch_mutex); |
1940 | + return r; |
1941 | +} |
1942 | + |
1943 | static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; |
1944 | |
1945 | static void atom_index_iio(struct atom_context *ctx, int base) |
1946 | diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h |
1947 | index feba6b8d36b3..6d014ddb6b78 100644 |
1948 | --- a/drivers/gpu/drm/radeon/atom.h |
1949 | +++ b/drivers/gpu/drm/radeon/atom.h |
1950 | @@ -125,6 +125,7 @@ struct card_info { |
1951 | struct atom_context { |
1952 | struct card_info *card; |
1953 | struct mutex mutex; |
1954 | + struct mutex scratch_mutex; |
1955 | void *bios; |
1956 | uint32_t cmd_table, data_table; |
1957 | uint16_t *iio; |
1958 | @@ -145,6 +146,7 @@ extern int atom_debug; |
1959 | |
1960 | struct atom_context *atom_parse(struct card_info *, void *); |
1961 | int atom_execute_table(struct atom_context *, int, uint32_t *); |
1962 | +int atom_execute_table_scratch_unlocked(struct atom_context *, int, uint32_t *); |
1963 | int atom_asic_init(struct atom_context *); |
1964 | void atom_destroy(struct atom_context *); |
1965 | bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, |
1966 | diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c |
1967 | index ac14b67621d3..9074662d8f89 100644 |
1968 | --- a/drivers/gpu/drm/radeon/atombios_dp.c |
1969 | +++ b/drivers/gpu/drm/radeon/atombios_dp.c |
1970 | @@ -100,6 +100,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, |
1971 | memset(&args, 0, sizeof(args)); |
1972 | |
1973 | mutex_lock(&chan->mutex); |
1974 | + mutex_lock(&rdev->mode_info.atom_context->scratch_mutex); |
1975 | |
1976 | base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); |
1977 | |
1978 | @@ -113,7 +114,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, |
1979 | if (ASIC_IS_DCE4(rdev)) |
1980 | args.v2.ucHPD_ID = chan->rec.hpd; |
1981 | |
1982 | - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
1983 | + atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
1984 | |
1985 | *ack = args.v1.ucReplyStatus; |
1986 | |
1987 | @@ -147,6 +148,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, |
1988 | |
1989 | r = recv_bytes; |
1990 | done: |
1991 | + mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex); |
1992 | mutex_unlock(&chan->mutex); |
1993 | |
1994 | return r; |
1995 | diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c |
1996 | index 9c570fb15b8c..4157780585a0 100644 |
1997 | --- a/drivers/gpu/drm/radeon/atombios_i2c.c |
1998 | +++ b/drivers/gpu/drm/radeon/atombios_i2c.c |
1999 | @@ -48,6 +48,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, |
2000 | memset(&args, 0, sizeof(args)); |
2001 | |
2002 | mutex_lock(&chan->mutex); |
2003 | + mutex_lock(&rdev->mode_info.atom_context->scratch_mutex); |
2004 | |
2005 | base = (unsigned char *)rdev->mode_info.atom_context->scratch; |
2006 | |
2007 | @@ -82,7 +83,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, |
2008 | args.ucSlaveAddr = slave_addr << 1; |
2009 | args.ucLineNumber = chan->rec.i2c_id; |
2010 | |
2011 | - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
2012 | + atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
2013 | |
2014 | /* error */ |
2015 | if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) { |
2016 | @@ -95,6 +96,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, |
2017 | radeon_atom_copy_swap(buf, base, num, false); |
2018 | |
2019 | done: |
2020 | + mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex); |
2021 | mutex_unlock(&chan->mutex); |
2022 | |
2023 | return r; |
2024 | diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c |
2025 | index 3d546c606b43..32d8cef2d230 100644 |
2026 | --- a/drivers/gpu/drm/radeon/cik.c |
2027 | +++ b/drivers/gpu/drm/radeon/cik.c |
2028 | @@ -4315,8 +4315,8 @@ static int cik_cp_gfx_start(struct radeon_device *rdev) |
2029 | /* init the CE partitions. CE only used for gfx on CIK */ |
2030 | radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); |
2031 | radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); |
2032 | - radeon_ring_write(ring, 0xc000); |
2033 | - radeon_ring_write(ring, 0xc000); |
2034 | + radeon_ring_write(ring, 0x8000); |
2035 | + radeon_ring_write(ring, 0x8000); |
2036 | |
2037 | /* setup clear context state */ |
2038 | radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); |
2039 | @@ -9447,6 +9447,9 @@ void dce8_bandwidth_update(struct radeon_device *rdev) |
2040 | u32 num_heads = 0, lb_size; |
2041 | int i; |
2042 | |
2043 | + if (!rdev->mode_info.mode_config_initialized) |
2044 | + return; |
2045 | + |
2046 | radeon_update_display_priority(rdev); |
2047 | |
2048 | for (i = 0; i < rdev->num_crtc; i++) { |
2049 | diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c |
2050 | index e8eea36b52d1..dd0c4919ea0e 100644 |
2051 | --- a/drivers/gpu/drm/radeon/cik_sdma.c |
2052 | +++ b/drivers/gpu/drm/radeon/cik_sdma.c |
2053 | @@ -666,17 +666,20 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) |
2054 | { |
2055 | struct radeon_ib ib; |
2056 | unsigned i; |
2057 | + unsigned index; |
2058 | int r; |
2059 | - void __iomem *ptr = (void *)rdev->vram_scratch.ptr; |
2060 | u32 tmp = 0; |
2061 | + u64 gpu_addr; |
2062 | |
2063 | - if (!ptr) { |
2064 | - DRM_ERROR("invalid vram scratch pointer\n"); |
2065 | - return -EINVAL; |
2066 | - } |
2067 | + if (ring->idx == R600_RING_TYPE_DMA_INDEX) |
2068 | + index = R600_WB_DMA_RING_TEST_OFFSET; |
2069 | + else |
2070 | + index = CAYMAN_WB_DMA1_RING_TEST_OFFSET; |
2071 | + |
2072 | + gpu_addr = rdev->wb.gpu_addr + index; |
2073 | |
2074 | tmp = 0xCAFEDEAD; |
2075 | - writel(tmp, ptr); |
2076 | + rdev->wb.wb[index/4] = cpu_to_le32(tmp); |
2077 | |
2078 | r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); |
2079 | if (r) { |
2080 | @@ -685,8 +688,8 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) |
2081 | } |
2082 | |
2083 | ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); |
2084 | - ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; |
2085 | - ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr); |
2086 | + ib.ptr[1] = lower_32_bits(gpu_addr); |
2087 | + ib.ptr[2] = upper_32_bits(gpu_addr); |
2088 | ib.ptr[3] = 1; |
2089 | ib.ptr[4] = 0xDEADBEEF; |
2090 | ib.length_dw = 5; |
2091 | @@ -703,7 +706,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) |
2092 | return r; |
2093 | } |
2094 | for (i = 0; i < rdev->usec_timeout; i++) { |
2095 | - tmp = readl(ptr); |
2096 | + tmp = le32_to_cpu(rdev->wb.wb[index/4]); |
2097 | if (tmp == 0xDEADBEEF) |
2098 | break; |
2099 | DRM_UDELAY(1); |
2100 | diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c |
2101 | index e50807c29f69..5d18cfe60e1f 100644 |
2102 | --- a/drivers/gpu/drm/radeon/evergreen.c |
2103 | +++ b/drivers/gpu/drm/radeon/evergreen.c |
2104 | @@ -2346,6 +2346,9 @@ void evergreen_bandwidth_update(struct radeon_device *rdev) |
2105 | u32 num_heads = 0, lb_size; |
2106 | int i; |
2107 | |
2108 | + if (!rdev->mode_info.mode_config_initialized) |
2109 | + return; |
2110 | + |
2111 | radeon_update_display_priority(rdev); |
2112 | |
2113 | for (i = 0; i < rdev->num_crtc; i++) { |
2114 | @@ -2553,6 +2556,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav |
2115 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); |
2116 | tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; |
2117 | WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); |
2118 | + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); |
2119 | } |
2120 | } else { |
2121 | tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); |
2122 | diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c |
2123 | index b0098e792e62..6a5518f98296 100644 |
2124 | --- a/drivers/gpu/drm/radeon/r100.c |
2125 | +++ b/drivers/gpu/drm/radeon/r100.c |
2126 | @@ -3204,6 +3204,9 @@ void r100_bandwidth_update(struct radeon_device *rdev) |
2127 | uint32_t pixel_bytes1 = 0; |
2128 | uint32_t pixel_bytes2 = 0; |
2129 | |
2130 | + if (!rdev->mode_info.mode_config_initialized) |
2131 | + return; |
2132 | + |
2133 | radeon_update_display_priority(rdev); |
2134 | |
2135 | if (rdev->mode_info.crtcs[0]->base.enabled) { |
2136 | diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c |
2137 | index 44379bfca61f..5ef501bc9dc6 100644 |
2138 | --- a/drivers/gpu/drm/radeon/r600_dma.c |
2139 | +++ b/drivers/gpu/drm/radeon/r600_dma.c |
2140 | @@ -338,17 +338,17 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) |
2141 | { |
2142 | struct radeon_ib ib; |
2143 | unsigned i; |
2144 | + unsigned index; |
2145 | int r; |
2146 | - void __iomem *ptr = (void *)rdev->vram_scratch.ptr; |
2147 | u32 tmp = 0; |
2148 | + u64 gpu_addr; |
2149 | |
2150 | - if (!ptr) { |
2151 | - DRM_ERROR("invalid vram scratch pointer\n"); |
2152 | - return -EINVAL; |
2153 | - } |
2154 | + if (ring->idx == R600_RING_TYPE_DMA_INDEX) |
2155 | + index = R600_WB_DMA_RING_TEST_OFFSET; |
2156 | + else |
2157 | + index = CAYMAN_WB_DMA1_RING_TEST_OFFSET; |
2158 | |
2159 | - tmp = 0xCAFEDEAD; |
2160 | - writel(tmp, ptr); |
2161 | + gpu_addr = rdev->wb.gpu_addr + index; |
2162 | |
2163 | r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); |
2164 | if (r) { |
2165 | @@ -357,8 +357,8 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) |
2166 | } |
2167 | |
2168 | ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1); |
2169 | - ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; |
2170 | - ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff; |
2171 | + ib.ptr[1] = lower_32_bits(gpu_addr); |
2172 | + ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff; |
2173 | ib.ptr[3] = 0xDEADBEEF; |
2174 | ib.length_dw = 4; |
2175 | |
2176 | @@ -374,7 +374,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) |
2177 | return r; |
2178 | } |
2179 | for (i = 0; i < rdev->usec_timeout; i++) { |
2180 | - tmp = readl(ptr); |
2181 | + tmp = le32_to_cpu(rdev->wb.wb[index/4]); |
2182 | if (tmp == 0xDEADBEEF) |
2183 | break; |
2184 | DRM_UDELAY(1); |
2185 | diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c |
2186 | index 6684fbf09929..5d4416fbf124 100644 |
2187 | --- a/drivers/gpu/drm/radeon/radeon_device.c |
2188 | +++ b/drivers/gpu/drm/radeon/radeon_device.c |
2189 | @@ -952,6 +952,7 @@ int radeon_atombios_init(struct radeon_device *rdev) |
2190 | } |
2191 | |
2192 | mutex_init(&rdev->mode_info.atom_context->mutex); |
2193 | + mutex_init(&rdev->mode_info.atom_context->scratch_mutex); |
2194 | radeon_atom_initialize_bios_scratch_regs(rdev->ddev); |
2195 | atom_allocate_fb_scratch(rdev->mode_info.atom_context); |
2196 | return 0; |
2197 | diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c |
2198 | index 5f6db4629aaa..9acb1c3c005b 100644 |
2199 | --- a/drivers/gpu/drm/radeon/rs600.c |
2200 | +++ b/drivers/gpu/drm/radeon/rs600.c |
2201 | @@ -879,6 +879,9 @@ void rs600_bandwidth_update(struct radeon_device *rdev) |
2202 | u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt; |
2203 | /* FIXME: implement full support */ |
2204 | |
2205 | + if (!rdev->mode_info.mode_config_initialized) |
2206 | + return; |
2207 | + |
2208 | radeon_update_display_priority(rdev); |
2209 | |
2210 | if (rdev->mode_info.crtcs[0]->base.enabled) |
2211 | diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c |
2212 | index 3462b64369bf..0a2d36e81108 100644 |
2213 | --- a/drivers/gpu/drm/radeon/rs690.c |
2214 | +++ b/drivers/gpu/drm/radeon/rs690.c |
2215 | @@ -579,6 +579,9 @@ void rs690_bandwidth_update(struct radeon_device *rdev) |
2216 | u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt; |
2217 | u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt; |
2218 | |
2219 | + if (!rdev->mode_info.mode_config_initialized) |
2220 | + return; |
2221 | + |
2222 | radeon_update_display_priority(rdev); |
2223 | |
2224 | if (rdev->mode_info.crtcs[0]->base.enabled) |
2225 | diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c |
2226 | index 8a477bf1fdb3..c55d653aaf5f 100644 |
2227 | --- a/drivers/gpu/drm/radeon/rv515.c |
2228 | +++ b/drivers/gpu/drm/radeon/rv515.c |
2229 | @@ -1277,6 +1277,9 @@ void rv515_bandwidth_update(struct radeon_device *rdev) |
2230 | struct drm_display_mode *mode0 = NULL; |
2231 | struct drm_display_mode *mode1 = NULL; |
2232 | |
2233 | + if (!rdev->mode_info.mode_config_initialized) |
2234 | + return; |
2235 | + |
2236 | radeon_update_display_priority(rdev); |
2237 | |
2238 | if (rdev->mode_info.crtcs[0]->base.enabled) |
2239 | diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c |
2240 | index 3a0b973e8a96..7f13a824a613 100644 |
2241 | --- a/drivers/gpu/drm/radeon/si.c |
2242 | +++ b/drivers/gpu/drm/radeon/si.c |
2243 | @@ -2384,6 +2384,9 @@ void dce6_bandwidth_update(struct radeon_device *rdev) |
2244 | u32 num_heads = 0, lb_size; |
2245 | int i; |
2246 | |
2247 | + if (!rdev->mode_info.mode_config_initialized) |
2248 | + return; |
2249 | + |
2250 | radeon_update_display_priority(rdev); |
2251 | |
2252 | for (i = 0; i < rdev->num_crtc; i++) { |
2253 | diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c |
2254 | index 0600c50e6215..5ba2a86aab6a 100644 |
2255 | --- a/drivers/infiniband/core/uverbs_cmd.c |
2256 | +++ b/drivers/infiniband/core/uverbs_cmd.c |
2257 | @@ -2518,6 +2518,8 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, |
2258 | attr.grh.sgid_index = cmd.attr.grh.sgid_index; |
2259 | attr.grh.hop_limit = cmd.attr.grh.hop_limit; |
2260 | attr.grh.traffic_class = cmd.attr.grh.traffic_class; |
2261 | + attr.vlan_id = 0; |
2262 | + memset(&attr.dmac, 0, sizeof(attr.dmac)); |
2263 | memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16); |
2264 | |
2265 | ah = ib_create_ah(pd, &attr); |
2266 | diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c |
2267 | index 2b0ae8cc8e51..d125a019383f 100644 |
2268 | --- a/drivers/input/mouse/alps.c |
2269 | +++ b/drivers/input/mouse/alps.c |
2270 | @@ -1156,7 +1156,13 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) |
2271 | { |
2272 | struct alps_data *priv = psmouse->private; |
2273 | |
2274 | - if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */ |
2275 | + /* |
2276 | + * Check if we are dealing with a bare PS/2 packet, presumably from |
2277 | + * a device connected to the external PS/2 port. Because bare PS/2 |
2278 | + * protocol does not have enough constant bits to self-synchronize |
2279 | + * properly we only do this if the device is fully synchronized. |
2280 | + */ |
2281 | + if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) { |
2282 | if (psmouse->pktcnt == 3) { |
2283 | alps_report_bare_ps2_packet(psmouse, psmouse->packet, |
2284 | true); |
2285 | @@ -1180,12 +1186,27 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) |
2286 | } |
2287 | |
2288 | /* Bytes 2 - pktsize should have 0 in the highest bit */ |
2289 | - if ((priv->proto_version < ALPS_PROTO_V5) && |
2290 | + if (priv->proto_version < ALPS_PROTO_V5 && |
2291 | psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize && |
2292 | (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) { |
2293 | psmouse_dbg(psmouse, "refusing packet[%i] = %x\n", |
2294 | psmouse->pktcnt - 1, |
2295 | psmouse->packet[psmouse->pktcnt - 1]); |
2296 | + |
2297 | + if (priv->proto_version == ALPS_PROTO_V3 && |
2298 | + psmouse->pktcnt == psmouse->pktsize) { |
2299 | + /* |
2300 | + * Some Dell boxes, such as Latitude E6440 or E7440 |
2301 | + * with closed lid, quite often smash last byte of |
2302 | + * otherwise valid packet with 0xff. Given that the |
2303 | + * next packet is very likely to be valid let's |
2304 | + * report PSMOUSE_FULL_PACKET but not process data, |
2305 | + * rather than reporting PSMOUSE_BAD_DATA and |
2306 | + * filling the logs. |
2307 | + */ |
2308 | + return PSMOUSE_FULL_PACKET; |
2309 | + } |
2310 | + |
2311 | return PSMOUSE_BAD_DATA; |
2312 | } |
2313 | |
2314 | @@ -2389,6 +2410,9 @@ int alps_init(struct psmouse *psmouse) |
2315 | /* We are having trouble resyncing ALPS touchpads so disable it for now */ |
2316 | psmouse->resync_time = 0; |
2317 | |
2318 | + /* Allow 2 invalid packets without resetting device */ |
2319 | + psmouse->resetafter = psmouse->pktsize * 2; |
2320 | + |
2321 | return 0; |
2322 | |
2323 | init_fail: |
2324 | diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c |
2325 | index b5b630c484c5..2e8f3ba7b2bd 100644 |
2326 | --- a/drivers/input/mouse/synaptics.c |
2327 | +++ b/drivers/input/mouse/synaptics.c |
2328 | @@ -135,8 +135,8 @@ static const struct min_max_quirk min_max_pnpid_table[] = { |
2329 | 1232, 5710, 1156, 4696 |
2330 | }, |
2331 | { |
2332 | - (const char * const []){"LEN0034", "LEN0036", "LEN2002", |
2333 | - "LEN2004", NULL}, |
2334 | + (const char * const []){"LEN0034", "LEN0036", "LEN0039", |
2335 | + "LEN2002", "LEN2004", NULL}, |
2336 | 1024, 5112, 2024, 4832 |
2337 | }, |
2338 | { |
2339 | @@ -163,6 +163,7 @@ static const char * const topbuttonpad_pnp_ids[] = { |
2340 | "LEN0036", /* T440 */ |
2341 | "LEN0037", |
2342 | "LEN0038", |
2343 | + "LEN0039", /* T440s */ |
2344 | "LEN0041", |
2345 | "LEN0042", /* Yoga */ |
2346 | "LEN0045", |
2347 | diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c |
2348 | index 9ea5b6041eb2..0be200b6dbf2 100644 |
2349 | --- a/drivers/md/dm-bufio.c |
2350 | +++ b/drivers/md/dm-bufio.c |
2351 | @@ -1435,9 +1435,9 @@ static void drop_buffers(struct dm_bufio_client *c) |
2352 | |
2353 | /* |
2354 | * Test if the buffer is unused and too old, and commit it. |
2355 | - * At if noio is set, we must not do any I/O because we hold |
2356 | - * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted to |
2357 | - * different bufio client. |
2358 | + * And if GFP_NOFS is used, we must not do any I/O because we hold |
2359 | + * dm_bufio_clients_lock and we would risk deadlock if the I/O gets |
2360 | + * rerouted to different bufio client. |
2361 | */ |
2362 | static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp, |
2363 | unsigned long max_jiffies) |
2364 | @@ -1445,7 +1445,7 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp, |
2365 | if (jiffies - b->last_accessed < max_jiffies) |
2366 | return 0; |
2367 | |
2368 | - if (!(gfp & __GFP_IO)) { |
2369 | + if (!(gfp & __GFP_FS)) { |
2370 | if (test_bit(B_READING, &b->state) || |
2371 | test_bit(B_WRITING, &b->state) || |
2372 | test_bit(B_DIRTY, &b->state)) |
2373 | @@ -1487,7 +1487,7 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) |
2374 | unsigned long freed; |
2375 | |
2376 | c = container_of(shrink, struct dm_bufio_client, shrinker); |
2377 | - if (sc->gfp_mask & __GFP_IO) |
2378 | + if (sc->gfp_mask & __GFP_FS) |
2379 | dm_bufio_lock(c); |
2380 | else if (!dm_bufio_trylock(c)) |
2381 | return SHRINK_STOP; |
2382 | @@ -1504,7 +1504,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) |
2383 | unsigned long count; |
2384 | |
2385 | c = container_of(shrink, struct dm_bufio_client, shrinker); |
2386 | - if (sc->gfp_mask & __GFP_IO) |
2387 | + if (sc->gfp_mask & __GFP_FS) |
2388 | dm_bufio_lock(c); |
2389 | else if (!dm_bufio_trylock(c)) |
2390 | return 0; |
2391 | diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c |
2392 | index 4880b69e2e9e..59715389b3cf 100644 |
2393 | --- a/drivers/md/dm-raid.c |
2394 | +++ b/drivers/md/dm-raid.c |
2395 | @@ -785,8 +785,7 @@ struct dm_raid_superblock { |
2396 | __le32 layout; |
2397 | __le32 stripe_sectors; |
2398 | |
2399 | - __u8 pad[452]; /* Round struct to 512 bytes. */ |
2400 | - /* Always set to 0 when writing. */ |
2401 | + /* Remainder of a logical block is zero-filled when writing (see super_sync()). */ |
2402 | } __packed; |
2403 | |
2404 | static int read_disk_sb(struct md_rdev *rdev, int size) |
2405 | @@ -823,7 +822,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev) |
2406 | test_bit(Faulty, &(rs->dev[i].rdev.flags))) |
2407 | failed_devices |= (1ULL << i); |
2408 | |
2409 | - memset(sb, 0, sizeof(*sb)); |
2410 | + memset(sb + 1, 0, rdev->sb_size - sizeof(*sb)); |
2411 | |
2412 | sb->magic = cpu_to_le32(DM_RAID_MAGIC); |
2413 | sb->features = cpu_to_le32(0); /* No features yet */ |
2414 | @@ -858,7 +857,11 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev) |
2415 | uint64_t events_sb, events_refsb; |
2416 | |
2417 | rdev->sb_start = 0; |
2418 | - rdev->sb_size = sizeof(*sb); |
2419 | + rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev); |
2420 | + if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) { |
2421 | + DMERR("superblock size of a logical block is no longer valid"); |
2422 | + return -EINVAL; |
2423 | + } |
2424 | |
2425 | ret = read_disk_sb(rdev, rdev->sb_size); |
2426 | if (ret) |
2427 | diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c |
2428 | index 4843801173fe..0f86d802b533 100644 |
2429 | --- a/drivers/md/dm-thin.c |
2430 | +++ b/drivers/md/dm-thin.c |
2431 | @@ -1936,6 +1936,14 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) |
2432 | return DM_MAPIO_SUBMITTED; |
2433 | } |
2434 | |
2435 | + /* |
2436 | + * We must hold the virtual cell before doing the lookup, otherwise |
2437 | + * there's a race with discard. |
2438 | + */ |
2439 | + build_virtual_key(tc->td, block, &key); |
2440 | + if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result)) |
2441 | + return DM_MAPIO_SUBMITTED; |
2442 | + |
2443 | r = dm_thin_find_block(td, block, 0, &result); |
2444 | |
2445 | /* |
2446 | @@ -1959,13 +1967,10 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) |
2447 | * shared flag will be set in their case. |
2448 | */ |
2449 | thin_defer_bio(tc, bio); |
2450 | + cell_defer_no_holder_no_free(tc, &cell1); |
2451 | return DM_MAPIO_SUBMITTED; |
2452 | } |
2453 | |
2454 | - build_virtual_key(tc->td, block, &key); |
2455 | - if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result)) |
2456 | - return DM_MAPIO_SUBMITTED; |
2457 | - |
2458 | build_data_key(tc->td, result.block, &key); |
2459 | if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) { |
2460 | cell_defer_no_holder_no_free(tc, &cell1); |
2461 | @@ -1986,6 +1991,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) |
2462 | * of doing so. |
2463 | */ |
2464 | handle_unserviceable_bio(tc->pool, bio); |
2465 | + cell_defer_no_holder_no_free(tc, &cell1); |
2466 | return DM_MAPIO_SUBMITTED; |
2467 | } |
2468 | /* fall through */ |
2469 | @@ -1996,6 +2002,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) |
2470 | * provide the hint to load the metadata into cache. |
2471 | */ |
2472 | thin_defer_bio(tc, bio); |
2473 | + cell_defer_no_holder_no_free(tc, &cell1); |
2474 | return DM_MAPIO_SUBMITTED; |
2475 | |
2476 | default: |
2477 | @@ -2005,6 +2012,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) |
2478 | * pool is switched to fail-io mode. |
2479 | */ |
2480 | bio_io_error(bio); |
2481 | + cell_defer_no_holder_no_free(tc, &cell1); |
2482 | return DM_MAPIO_SUBMITTED; |
2483 | } |
2484 | } |
2485 | diff --git a/drivers/md/md.c b/drivers/md/md.c |
2486 | index 1294238610df..b7f603c2a7d4 100644 |
2487 | --- a/drivers/md/md.c |
2488 | +++ b/drivers/md/md.c |
2489 | @@ -5313,6 +5313,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) |
2490 | printk("md: %s still in use.\n",mdname(mddev)); |
2491 | if (did_freeze) { |
2492 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
2493 | + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
2494 | md_wakeup_thread(mddev->thread); |
2495 | } |
2496 | err = -EBUSY; |
2497 | @@ -5327,6 +5328,8 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) |
2498 | mddev->ro = 1; |
2499 | set_disk_ro(mddev->gendisk, 1); |
2500 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
2501 | + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
2502 | + md_wakeup_thread(mddev->thread); |
2503 | sysfs_notify_dirent_safe(mddev->sysfs_state); |
2504 | err = 0; |
2505 | } |
2506 | @@ -5370,6 +5373,7 @@ static int do_md_stop(struct mddev * mddev, int mode, |
2507 | mutex_unlock(&mddev->open_mutex); |
2508 | if (did_freeze) { |
2509 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
2510 | + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
2511 | md_wakeup_thread(mddev->thread); |
2512 | } |
2513 | return -EBUSY; |
2514 | diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h |
2515 | index 37d367bb9aa8..bf2b80d5c470 100644 |
2516 | --- a/drivers/md/persistent-data/dm-btree-internal.h |
2517 | +++ b/drivers/md/persistent-data/dm-btree-internal.h |
2518 | @@ -42,6 +42,12 @@ struct btree_node { |
2519 | } __packed; |
2520 | |
2521 | |
2522 | +/* |
2523 | + * Locks a block using the btree node validator. |
2524 | + */ |
2525 | +int bn_read_lock(struct dm_btree_info *info, dm_block_t b, |
2526 | + struct dm_block **result); |
2527 | + |
2528 | void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, |
2529 | struct dm_btree_value_type *vt); |
2530 | |
2531 | diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c |
2532 | index cf9fd676ae44..1b5e13ec7f96 100644 |
2533 | --- a/drivers/md/persistent-data/dm-btree-spine.c |
2534 | +++ b/drivers/md/persistent-data/dm-btree-spine.c |
2535 | @@ -92,7 +92,7 @@ struct dm_block_validator btree_node_validator = { |
2536 | |
2537 | /*----------------------------------------------------------------*/ |
2538 | |
2539 | -static int bn_read_lock(struct dm_btree_info *info, dm_block_t b, |
2540 | +int bn_read_lock(struct dm_btree_info *info, dm_block_t b, |
2541 | struct dm_block **result) |
2542 | { |
2543 | return dm_tm_read_lock(info->tm, b, &btree_node_validator, result); |
2544 | diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c |
2545 | index 416060c25709..200ac12a1d40 100644 |
2546 | --- a/drivers/md/persistent-data/dm-btree.c |
2547 | +++ b/drivers/md/persistent-data/dm-btree.c |
2548 | @@ -847,22 +847,26 @@ EXPORT_SYMBOL_GPL(dm_btree_find_lowest_key); |
2549 | * FIXME: We shouldn't use a recursive algorithm when we have limited stack |
2550 | * space. Also this only works for single level trees. |
2551 | */ |
2552 | -static int walk_node(struct ro_spine *s, dm_block_t block, |
2553 | +static int walk_node(struct dm_btree_info *info, dm_block_t block, |
2554 | int (*fn)(void *context, uint64_t *keys, void *leaf), |
2555 | void *context) |
2556 | { |
2557 | int r; |
2558 | unsigned i, nr; |
2559 | + struct dm_block *node; |
2560 | struct btree_node *n; |
2561 | uint64_t keys; |
2562 | |
2563 | - r = ro_step(s, block); |
2564 | - n = ro_node(s); |
2565 | + r = bn_read_lock(info, block, &node); |
2566 | + if (r) |
2567 | + return r; |
2568 | + |
2569 | + n = dm_block_data(node); |
2570 | |
2571 | nr = le32_to_cpu(n->header.nr_entries); |
2572 | for (i = 0; i < nr; i++) { |
2573 | if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) { |
2574 | - r = walk_node(s, value64(n, i), fn, context); |
2575 | + r = walk_node(info, value64(n, i), fn, context); |
2576 | if (r) |
2577 | goto out; |
2578 | } else { |
2579 | @@ -874,7 +878,7 @@ static int walk_node(struct ro_spine *s, dm_block_t block, |
2580 | } |
2581 | |
2582 | out: |
2583 | - ro_pop(s); |
2584 | + dm_tm_unlock(info->tm, node); |
2585 | return r; |
2586 | } |
2587 | |
2588 | @@ -882,15 +886,7 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root, |
2589 | int (*fn)(void *context, uint64_t *keys, void *leaf), |
2590 | void *context) |
2591 | { |
2592 | - int r; |
2593 | - struct ro_spine spine; |
2594 | - |
2595 | BUG_ON(info->levels > 1); |
2596 | - |
2597 | - init_ro_spine(&spine, info); |
2598 | - r = walk_node(&spine, root, fn, context); |
2599 | - exit_ro_spine(&spine); |
2600 | - |
2601 | - return r; |
2602 | + return walk_node(info, root, fn, context); |
2603 | } |
2604 | EXPORT_SYMBOL_GPL(dm_btree_walk); |
2605 | diff --git a/drivers/media/usb/ttusb-dec/ttusbdecfe.c b/drivers/media/usb/ttusb-dec/ttusbdecfe.c |
2606 | index 5c45c9d0712d..9c29552aedec 100644 |
2607 | --- a/drivers/media/usb/ttusb-dec/ttusbdecfe.c |
2608 | +++ b/drivers/media/usb/ttusb-dec/ttusbdecfe.c |
2609 | @@ -156,6 +156,9 @@ static int ttusbdecfe_dvbs_diseqc_send_master_cmd(struct dvb_frontend* fe, struc |
2610 | 0x00, 0x00, 0x00, 0x00, |
2611 | 0x00, 0x00 }; |
2612 | |
2613 | + if (cmd->msg_len > sizeof(b) - 4) |
2614 | + return -EINVAL; |
2615 | + |
2616 | memcpy(&b[4], cmd->msg, cmd->msg_len); |
2617 | |
2618 | state->config->send_command(fe, 0x72, |
2619 | diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c |
2620 | index 249c139ef04a..dabb0241813c 100644 |
2621 | --- a/drivers/mfd/max77693.c |
2622 | +++ b/drivers/mfd/max77693.c |
2623 | @@ -237,7 +237,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c, |
2624 | goto err_irq_charger; |
2625 | } |
2626 | |
2627 | - ret = regmap_add_irq_chip(max77693->regmap, max77693->irq, |
2628 | + ret = regmap_add_irq_chip(max77693->regmap_muic, max77693->irq, |
2629 | IRQF_ONESHOT | IRQF_SHARED | |
2630 | IRQF_TRIGGER_FALLING, 0, |
2631 | &max77693_muic_irq_chip, |
2632 | @@ -247,6 +247,17 @@ static int max77693_i2c_probe(struct i2c_client *i2c, |
2633 | goto err_irq_muic; |
2634 | } |
2635 | |
2636 | + /* Unmask interrupts from all blocks in interrupt source register */ |
2637 | + ret = regmap_update_bits(max77693->regmap, |
2638 | + MAX77693_PMIC_REG_INTSRC_MASK, |
2639 | + SRC_IRQ_ALL, (unsigned int)~SRC_IRQ_ALL); |
2640 | + if (ret < 0) { |
2641 | + dev_err(max77693->dev, |
2642 | + "Could not unmask interrupts in INTSRC: %d\n", |
2643 | + ret); |
2644 | + goto err_intsrc; |
2645 | + } |
2646 | + |
2647 | pm_runtime_set_active(max77693->dev); |
2648 | |
2649 | ret = mfd_add_devices(max77693->dev, -1, max77693_devs, |
2650 | @@ -258,6 +269,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c, |
2651 | |
2652 | err_mfd: |
2653 | mfd_remove_devices(max77693->dev); |
2654 | +err_intsrc: |
2655 | regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic); |
2656 | err_irq_muic: |
2657 | regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger); |
2658 | diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c |
2659 | index 4d3ff3771491..542f1a8247f5 100644 |
2660 | --- a/drivers/mfd/twl4030-power.c |
2661 | +++ b/drivers/mfd/twl4030-power.c |
2662 | @@ -44,6 +44,15 @@ static u8 twl4030_start_script_address = 0x2b; |
2663 | #define PWR_DEVSLP BIT(1) |
2664 | #define PWR_DEVOFF BIT(0) |
2665 | |
2666 | +/* Register bits for CFG_P1_TRANSITION (also for P2 and P3) */ |
2667 | +#define STARTON_SWBUG BIT(7) /* Start on watchdog */ |
2668 | +#define STARTON_VBUS BIT(5) /* Start on VBUS */ |
2669 | +#define STARTON_VBAT BIT(4) /* Start on battery insert */ |
2670 | +#define STARTON_RTC BIT(3) /* Start on RTC */ |
2671 | +#define STARTON_USB BIT(2) /* Start on USB host */ |
2672 | +#define STARTON_CHG BIT(1) /* Start on charger */ |
2673 | +#define STARTON_PWON BIT(0) /* Start on PWRON button */ |
2674 | + |
2675 | #define SEQ_OFFSYNC (1 << 0) |
2676 | |
2677 | #define PHY_TO_OFF_PM_MASTER(p) (p - 0x36) |
2678 | @@ -606,6 +615,44 @@ twl4030_power_configure_resources(const struct twl4030_power_data *pdata) |
2679 | return 0; |
2680 | } |
2681 | |
2682 | +static int twl4030_starton_mask_and_set(u8 bitmask, u8 bitvalues) |
2683 | +{ |
2684 | + u8 regs[3] = { TWL4030_PM_MASTER_CFG_P1_TRANSITION, |
2685 | + TWL4030_PM_MASTER_CFG_P2_TRANSITION, |
2686 | + TWL4030_PM_MASTER_CFG_P3_TRANSITION, }; |
2687 | + u8 val; |
2688 | + int i, err; |
2689 | + |
2690 | + err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1, |
2691 | + TWL4030_PM_MASTER_PROTECT_KEY); |
2692 | + if (err) |
2693 | + goto relock; |
2694 | + err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, |
2695 | + TWL4030_PM_MASTER_KEY_CFG2, |
2696 | + TWL4030_PM_MASTER_PROTECT_KEY); |
2697 | + if (err) |
2698 | + goto relock; |
2699 | + |
2700 | + for (i = 0; i < sizeof(regs); i++) { |
2701 | + err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, |
2702 | + &val, regs[i]); |
2703 | + if (err) |
2704 | + break; |
2705 | + val = (~bitmask & val) | (bitmask & bitvalues); |
2706 | + err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, |
2707 | + val, regs[i]); |
2708 | + if (err) |
2709 | + break; |
2710 | + } |
2711 | + |
2712 | + if (err) |
2713 | + pr_err("TWL4030 Register access failed: %i\n", err); |
2714 | + |
2715 | +relock: |
2716 | + return twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0, |
2717 | + TWL4030_PM_MASTER_PROTECT_KEY); |
2718 | +} |
2719 | + |
2720 | /* |
2721 | * In master mode, start the power off sequence. |
2722 | * After a successful execution, TWL shuts down the power to the SoC |
2723 | @@ -615,6 +662,11 @@ void twl4030_power_off(void) |
2724 | { |
2725 | int err; |
2726 | |
2727 | + /* Disable start on charger or VBUS as it can break poweroff */ |
2728 | + err = twl4030_starton_mask_and_set(STARTON_VBUS | STARTON_CHG, 0); |
2729 | + if (err) |
2730 | + pr_err("TWL4030 Unable to configure start-up\n"); |
2731 | + |
2732 | err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, PWR_DEVOFF, |
2733 | TWL4030_PM_MASTER_P1_SW_EVENTS); |
2734 | if (err) |
2735 | diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c |
2736 | index 71068d7d930d..ce60b960cd6c 100644 |
2737 | --- a/drivers/net/ethernet/broadcom/bcmsysport.c |
2738 | +++ b/drivers/net/ethernet/broadcom/bcmsysport.c |
2739 | @@ -1384,6 +1384,9 @@ static void bcm_sysport_netif_start(struct net_device *dev) |
2740 | /* Enable NAPI */ |
2741 | napi_enable(&priv->napi); |
2742 | |
2743 | + /* Enable RX interrupt and TX ring full interrupt */ |
2744 | + intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); |
2745 | + |
2746 | phy_start(priv->phydev); |
2747 | |
2748 | /* Enable TX interrupts for the 32 TXQs */ |
2749 | @@ -1486,9 +1489,6 @@ static int bcm_sysport_open(struct net_device *dev) |
2750 | if (ret) |
2751 | goto out_free_rx_ring; |
2752 | |
2753 | - /* Enable RX interrupt and TX ring full interrupt */ |
2754 | - intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); |
2755 | - |
2756 | /* Turn on TDMA */ |
2757 | ret = tdma_enable_set(priv, 1); |
2758 | if (ret) |
2759 | @@ -1845,6 +1845,8 @@ static int bcm_sysport_resume(struct device *d) |
2760 | if (!netif_running(dev)) |
2761 | return 0; |
2762 | |
2763 | + umac_reset(priv); |
2764 | + |
2765 | /* We may have been suspended and never received a WOL event that |
2766 | * would turn off MPD detection, take care of that now |
2767 | */ |
2768 | @@ -1872,9 +1874,6 @@ static int bcm_sysport_resume(struct device *d) |
2769 | |
2770 | netif_device_attach(dev); |
2771 | |
2772 | - /* Enable RX interrupt and TX ring full interrupt */ |
2773 | - intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); |
2774 | - |
2775 | /* RX pipe enable */ |
2776 | topctrl_writel(priv, 0, RX_FLUSH_CNTL); |
2777 | |
2778 | diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c |
2779 | index 8edf0f5bd679..d4cc9b0c07f5 100644 |
2780 | --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c |
2781 | +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c |
2782 | @@ -80,7 +80,6 @@ void cxgb4_dcb_state_fsm(struct net_device *dev, |
2783 | /* we're going to use Host DCB */ |
2784 | dcb->state = CXGB4_DCB_STATE_HOST; |
2785 | dcb->supported = CXGB4_DCBX_HOST_SUPPORT; |
2786 | - dcb->enabled = 1; |
2787 | break; |
2788 | } |
2789 | |
2790 | @@ -349,6 +348,12 @@ static u8 cxgb4_setstate(struct net_device *dev, u8 enabled) |
2791 | { |
2792 | struct port_info *pi = netdev2pinfo(dev); |
2793 | |
2794 | + /* If DCBx is host-managed, dcb is enabled by outside lldp agents */ |
2795 | + if (pi->dcb.state == CXGB4_DCB_STATE_HOST) { |
2796 | + pi->dcb.enabled = enabled; |
2797 | + return 0; |
2798 | + } |
2799 | + |
2800 | /* Firmware doesn't provide any mechanism to control the DCB state. |
2801 | */ |
2802 | if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED)) |
2803 | diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |
2804 | index fac3821cef87..9f5f3c313993 100644 |
2805 | --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |
2806 | +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |
2807 | @@ -688,7 +688,11 @@ int cxgb4_dcb_enabled(const struct net_device *dev) |
2808 | #ifdef CONFIG_CHELSIO_T4_DCB |
2809 | struct port_info *pi = netdev_priv(dev); |
2810 | |
2811 | - return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED; |
2812 | + if (!pi->dcb.enabled) |
2813 | + return 0; |
2814 | + |
2815 | + return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) || |
2816 | + (pi->dcb.state == CXGB4_DCB_STATE_HOST)); |
2817 | #else |
2818 | return 0; |
2819 | #endif |
2820 | diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c |
2821 | index b151a949f352..d44560d1d268 100644 |
2822 | --- a/drivers/net/ethernet/marvell/mv643xx_eth.c |
2823 | +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c |
2824 | @@ -1047,7 +1047,6 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) |
2825 | int tx_index; |
2826 | struct tx_desc *desc; |
2827 | u32 cmd_sts; |
2828 | - struct sk_buff *skb; |
2829 | |
2830 | tx_index = txq->tx_used_desc; |
2831 | desc = &txq->tx_desc_area[tx_index]; |
2832 | @@ -1066,19 +1065,22 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) |
2833 | reclaimed++; |
2834 | txq->tx_desc_count--; |
2835 | |
2836 | - skb = NULL; |
2837 | - if (cmd_sts & TX_LAST_DESC) |
2838 | - skb = __skb_dequeue(&txq->tx_skb); |
2839 | + if (!IS_TSO_HEADER(txq, desc->buf_ptr)) |
2840 | + dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, |
2841 | + desc->byte_cnt, DMA_TO_DEVICE); |
2842 | + |
2843 | + if (cmd_sts & TX_ENABLE_INTERRUPT) { |
2844 | + struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); |
2845 | + |
2846 | + if (!WARN_ON(!skb)) |
2847 | + dev_kfree_skb(skb); |
2848 | + } |
2849 | |
2850 | if (cmd_sts & ERROR_SUMMARY) { |
2851 | netdev_info(mp->dev, "tx error\n"); |
2852 | mp->dev->stats.tx_errors++; |
2853 | } |
2854 | |
2855 | - if (!IS_TSO_HEADER(txq, desc->buf_ptr)) |
2856 | - dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, |
2857 | - desc->byte_cnt, DMA_TO_DEVICE); |
2858 | - dev_kfree_skb(skb); |
2859 | } |
2860 | |
2861 | __netif_tx_unlock_bh(nq); |
2862 | diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c |
2863 | index 5e13fa5524ae..a4400035681f 100644 |
2864 | --- a/drivers/net/ethernet/smsc/smsc911x.c |
2865 | +++ b/drivers/net/ethernet/smsc/smsc911x.c |
2866 | @@ -1342,6 +1342,42 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata) |
2867 | spin_unlock(&pdata->mac_lock); |
2868 | } |
2869 | |
2870 | +static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata) |
2871 | +{ |
2872 | + int rc = 0; |
2873 | + |
2874 | + if (!pdata->phy_dev) |
2875 | + return rc; |
2876 | + |
2877 | + /* If the internal PHY is in General Power-Down mode, all, except the |
2878 | + * management interface, is powered-down and stays in that condition as |
2879 | + * long as Phy register bit 0.11 is HIGH. |
2880 | + * |
2881 | + * In that case, clear the bit 0.11, so the PHY powers up and we can |
2882 | + * access to the phy registers. |
2883 | + */ |
2884 | + rc = phy_read(pdata->phy_dev, MII_BMCR); |
2885 | + if (rc < 0) { |
2886 | + SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); |
2887 | + return rc; |
2888 | + } |
2889 | + |
2890 | + /* If the PHY general power-down bit is not set is not necessary to |
2891 | + * disable the general power down-mode. |
2892 | + */ |
2893 | + if (rc & BMCR_PDOWN) { |
2894 | + rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN); |
2895 | + if (rc < 0) { |
2896 | + SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); |
2897 | + return rc; |
2898 | + } |
2899 | + |
2900 | + usleep_range(1000, 1500); |
2901 | + } |
2902 | + |
2903 | + return 0; |
2904 | +} |
2905 | + |
2906 | static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) |
2907 | { |
2908 | int rc = 0; |
2909 | @@ -1415,6 +1451,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata) |
2910 | int ret; |
2911 | |
2912 | /* |
2913 | + * Make sure to power-up the PHY chip before doing a reset, otherwise |
2914 | + * the reset fails. |
2915 | + */ |
2916 | + ret = smsc911x_phy_general_power_up(pdata); |
2917 | + if (ret) { |
2918 | + SMSC_WARN(pdata, drv, "Failed to power-up the PHY chip"); |
2919 | + return ret; |
2920 | + } |
2921 | + |
2922 | + /* |
2923 | * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that |
2924 | * are initialized in a Energy Detect Power-Down mode that prevents |
2925 | * the MAC chip to be software reseted. So we have to wakeup the PHY |
2926 | diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c |
2927 | index f67539650c38..993779a77b34 100644 |
2928 | --- a/drivers/net/ethernet/sun/sunvnet.c |
2929 | +++ b/drivers/net/ethernet/sun/sunvnet.c |
2930 | @@ -693,7 +693,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) |
2931 | spin_lock_irqsave(&port->vio.lock, flags); |
2932 | |
2933 | dr = &port->vio.drings[VIO_DRIVER_TX_RING]; |
2934 | - if (unlikely(vnet_tx_dring_avail(dr) < 2)) { |
2935 | + if (unlikely(vnet_tx_dring_avail(dr) < 1)) { |
2936 | if (!netif_queue_stopped(dev)) { |
2937 | netif_stop_queue(dev); |
2938 | |
2939 | @@ -749,7 +749,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) |
2940 | dev->stats.tx_bytes += skb->len; |
2941 | |
2942 | dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); |
2943 | - if (unlikely(vnet_tx_dring_avail(dr) < 2)) { |
2944 | + if (unlikely(vnet_tx_dring_avail(dr) < 1)) { |
2945 | netif_stop_queue(dev); |
2946 | if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) |
2947 | netif_wake_queue(dev); |
2948 | diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c |
2949 | index ab92f67da035..4a4388b813ac 100644 |
2950 | --- a/drivers/net/ethernet/ti/cpts.c |
2951 | +++ b/drivers/net/ethernet/ti/cpts.c |
2952 | @@ -264,7 +264,7 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, |
2953 | |
2954 | switch (ptp_class & PTP_CLASS_PMASK) { |
2955 | case PTP_CLASS_IPV4: |
2956 | - offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; |
2957 | + offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; |
2958 | break; |
2959 | case PTP_CLASS_IPV6: |
2960 | offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; |
2961 | diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c |
2962 | index 9b5481c70b4c..07c942b6ae01 100644 |
2963 | --- a/drivers/net/macvtap.c |
2964 | +++ b/drivers/net/macvtap.c |
2965 | @@ -629,6 +629,8 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb, |
2966 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2967 | vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; |
2968 | vnet_hdr->csum_start = skb_checksum_start_offset(skb); |
2969 | + if (vlan_tx_tag_present(skb)) |
2970 | + vnet_hdr->csum_start += VLAN_HLEN; |
2971 | vnet_hdr->csum_offset = skb->csum_offset; |
2972 | } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { |
2973 | vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID; |
2974 | diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c |
2975 | index c301e4cb37ca..2fa0c3d50692 100644 |
2976 | --- a/drivers/net/phy/dp83640.c |
2977 | +++ b/drivers/net/phy/dp83640.c |
2978 | @@ -784,7 +784,7 @@ static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts) |
2979 | |
2980 | switch (type & PTP_CLASS_PMASK) { |
2981 | case PTP_CLASS_IPV4: |
2982 | - offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; |
2983 | + offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; |
2984 | break; |
2985 | case PTP_CLASS_IPV6: |
2986 | offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; |
2987 | @@ -927,7 +927,7 @@ static int is_sync(struct sk_buff *skb, int type) |
2988 | |
2989 | switch (type & PTP_CLASS_PMASK) { |
2990 | case PTP_CLASS_IPV4: |
2991 | - offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; |
2992 | + offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; |
2993 | break; |
2994 | case PTP_CLASS_IPV6: |
2995 | offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; |
2996 | diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c |
2997 | index 90c639b0f18d..17ecdd60cf6c 100644 |
2998 | --- a/drivers/net/ppp/ppp_generic.c |
2999 | +++ b/drivers/net/ppp/ppp_generic.c |
3000 | @@ -755,23 +755,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
3001 | |
3002 | err = get_filter(argp, &code); |
3003 | if (err >= 0) { |
3004 | + struct bpf_prog *pass_filter = NULL; |
3005 | struct sock_fprog_kern fprog = { |
3006 | .len = err, |
3007 | .filter = code, |
3008 | }; |
3009 | |
3010 | - ppp_lock(ppp); |
3011 | - if (ppp->pass_filter) { |
3012 | - bpf_prog_destroy(ppp->pass_filter); |
3013 | - ppp->pass_filter = NULL; |
3014 | + err = 0; |
3015 | + if (fprog.filter) |
3016 | + err = bpf_prog_create(&pass_filter, &fprog); |
3017 | + if (!err) { |
3018 | + ppp_lock(ppp); |
3019 | + if (ppp->pass_filter) |
3020 | + bpf_prog_destroy(ppp->pass_filter); |
3021 | + ppp->pass_filter = pass_filter; |
3022 | + ppp_unlock(ppp); |
3023 | } |
3024 | - if (fprog.filter != NULL) |
3025 | - err = bpf_prog_create(&ppp->pass_filter, |
3026 | - &fprog); |
3027 | - else |
3028 | - err = 0; |
3029 | kfree(code); |
3030 | - ppp_unlock(ppp); |
3031 | } |
3032 | break; |
3033 | } |
3034 | @@ -781,23 +781,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
3035 | |
3036 | err = get_filter(argp, &code); |
3037 | if (err >= 0) { |
3038 | + struct bpf_prog *active_filter = NULL; |
3039 | struct sock_fprog_kern fprog = { |
3040 | .len = err, |
3041 | .filter = code, |
3042 | }; |
3043 | |
3044 | - ppp_lock(ppp); |
3045 | - if (ppp->active_filter) { |
3046 | - bpf_prog_destroy(ppp->active_filter); |
3047 | - ppp->active_filter = NULL; |
3048 | + err = 0; |
3049 | + if (fprog.filter) |
3050 | + err = bpf_prog_create(&active_filter, &fprog); |
3051 | + if (!err) { |
3052 | + ppp_lock(ppp); |
3053 | + if (ppp->active_filter) |
3054 | + bpf_prog_destroy(ppp->active_filter); |
3055 | + ppp->active_filter = active_filter; |
3056 | + ppp_unlock(ppp); |
3057 | } |
3058 | - if (fprog.filter != NULL) |
3059 | - err = bpf_prog_create(&ppp->active_filter, |
3060 | - &fprog); |
3061 | - else |
3062 | - err = 0; |
3063 | kfree(code); |
3064 | - ppp_unlock(ppp); |
3065 | } |
3066 | break; |
3067 | } |
3068 | diff --git a/drivers/net/tun.c b/drivers/net/tun.c |
3069 | index 610d1662c500..d965e8a7a675 100644 |
3070 | --- a/drivers/net/tun.c |
3071 | +++ b/drivers/net/tun.c |
3072 | @@ -1225,6 +1225,10 @@ static ssize_t tun_put_user(struct tun_struct *tun, |
3073 | struct tun_pi pi = { 0, skb->protocol }; |
3074 | ssize_t total = 0; |
3075 | int vlan_offset = 0, copied; |
3076 | + int vlan_hlen = 0; |
3077 | + |
3078 | + if (vlan_tx_tag_present(skb)) |
3079 | + vlan_hlen = VLAN_HLEN; |
3080 | |
3081 | if (!(tun->flags & TUN_NO_PI)) { |
3082 | if ((len -= sizeof(pi)) < 0) |
3083 | @@ -1276,7 +1280,8 @@ static ssize_t tun_put_user(struct tun_struct *tun, |
3084 | |
3085 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
3086 | gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; |
3087 | - gso.csum_start = skb_checksum_start_offset(skb); |
3088 | + gso.csum_start = skb_checksum_start_offset(skb) + |
3089 | + vlan_hlen; |
3090 | gso.csum_offset = skb->csum_offset; |
3091 | } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { |
3092 | gso.flags = VIRTIO_NET_HDR_F_DATA_VALID; |
3093 | @@ -1289,10 +1294,9 @@ static ssize_t tun_put_user(struct tun_struct *tun, |
3094 | } |
3095 | |
3096 | copied = total; |
3097 | - total += skb->len; |
3098 | - if (!vlan_tx_tag_present(skb)) { |
3099 | - len = min_t(int, skb->len, len); |
3100 | - } else { |
3101 | + len = min_t(int, skb->len + vlan_hlen, len); |
3102 | + total += skb->len + vlan_hlen; |
3103 | + if (vlan_hlen) { |
3104 | int copy, ret; |
3105 | struct { |
3106 | __be16 h_vlan_proto; |
3107 | @@ -1303,8 +1307,6 @@ static ssize_t tun_put_user(struct tun_struct *tun, |
3108 | veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb)); |
3109 | |
3110 | vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); |
3111 | - len = min_t(int, skb->len + VLAN_HLEN, len); |
3112 | - total += VLAN_HLEN; |
3113 | |
3114 | copy = min_t(int, vlan_offset, len); |
3115 | ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); |
3116 | diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c |
3117 | index b4831274b0ab..81a8a296a582 100644 |
3118 | --- a/drivers/net/vxlan.c |
3119 | +++ b/drivers/net/vxlan.c |
3120 | @@ -274,13 +274,15 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb) |
3121 | return list_first_entry(&fdb->remotes, struct vxlan_rdst, list); |
3122 | } |
3123 | |
3124 | -/* Find VXLAN socket based on network namespace and UDP port */ |
3125 | -static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port) |
3126 | +/* Find VXLAN socket based on network namespace, address family and UDP port */ |
3127 | +static struct vxlan_sock *vxlan_find_sock(struct net *net, |
3128 | + sa_family_t family, __be16 port) |
3129 | { |
3130 | struct vxlan_sock *vs; |
3131 | |
3132 | hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) { |
3133 | - if (inet_sk(vs->sock->sk)->inet_sport == port) |
3134 | + if (inet_sk(vs->sock->sk)->inet_sport == port && |
3135 | + inet_sk(vs->sock->sk)->sk.sk_family == family) |
3136 | return vs; |
3137 | } |
3138 | return NULL; |
3139 | @@ -299,11 +301,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id) |
3140 | } |
3141 | |
3142 | /* Look up VNI in a per net namespace table */ |
3143 | -static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port) |
3144 | +static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, |
3145 | + sa_family_t family, __be16 port) |
3146 | { |
3147 | struct vxlan_sock *vs; |
3148 | |
3149 | - vs = vxlan_find_sock(net, port); |
3150 | + vs = vxlan_find_sock(net, family, port); |
3151 | if (!vs) |
3152 | return NULL; |
3153 | |
3154 | @@ -620,6 +623,8 @@ static int vxlan_gro_complete(struct sk_buff *skb, int nhoff) |
3155 | int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr); |
3156 | int err = -ENOSYS; |
3157 | |
3158 | + udp_tunnel_gro_complete(skb, nhoff); |
3159 | + |
3160 | eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr)); |
3161 | type = eh->h_proto; |
3162 | |
3163 | @@ -1820,7 +1825,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, |
3164 | struct vxlan_dev *dst_vxlan; |
3165 | |
3166 | ip_rt_put(rt); |
3167 | - dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port); |
3168 | + dst_vxlan = vxlan_find_vni(vxlan->net, vni, |
3169 | + dst->sa.sa_family, dst_port); |
3170 | if (!dst_vxlan) |
3171 | goto tx_error; |
3172 | vxlan_encap_bypass(skb, vxlan, dst_vxlan); |
3173 | @@ -1874,7 +1880,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, |
3174 | struct vxlan_dev *dst_vxlan; |
3175 | |
3176 | dst_release(ndst); |
3177 | - dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port); |
3178 | + dst_vxlan = vxlan_find_vni(vxlan->net, vni, |
3179 | + dst->sa.sa_family, dst_port); |
3180 | if (!dst_vxlan) |
3181 | goto tx_error; |
3182 | vxlan_encap_bypass(skb, vxlan, dst_vxlan); |
3183 | @@ -2034,13 +2041,15 @@ static int vxlan_init(struct net_device *dev) |
3184 | struct vxlan_dev *vxlan = netdev_priv(dev); |
3185 | struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); |
3186 | struct vxlan_sock *vs; |
3187 | + bool ipv6 = vxlan->flags & VXLAN_F_IPV6; |
3188 | |
3189 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); |
3190 | if (!dev->tstats) |
3191 | return -ENOMEM; |
3192 | |
3193 | spin_lock(&vn->sock_lock); |
3194 | - vs = vxlan_find_sock(vxlan->net, vxlan->dst_port); |
3195 | + vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET, |
3196 | + vxlan->dst_port); |
3197 | if (vs) { |
3198 | /* If we have a socket with same port already, reuse it */ |
3199 | atomic_inc(&vs->refcnt); |
3200 | @@ -2439,6 +2448,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, |
3201 | { |
3202 | struct vxlan_net *vn = net_generic(net, vxlan_net_id); |
3203 | struct vxlan_sock *vs; |
3204 | + bool ipv6 = flags & VXLAN_F_IPV6; |
3205 | |
3206 | vs = vxlan_socket_create(net, port, rcv, data, flags); |
3207 | if (!IS_ERR(vs)) |
3208 | @@ -2448,7 +2458,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, |
3209 | return vs; |
3210 | |
3211 | spin_lock(&vn->sock_lock); |
3212 | - vs = vxlan_find_sock(net, port); |
3213 | + vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port); |
3214 | if (vs) { |
3215 | if (vs->rcv == rcv) |
3216 | atomic_inc(&vs->refcnt); |
3217 | @@ -2607,7 +2617,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, |
3218 | nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) |
3219 | vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; |
3220 | |
3221 | - if (vxlan_find_vni(net, vni, vxlan->dst_port)) { |
3222 | + if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET, |
3223 | + vxlan->dst_port)) { |
3224 | pr_info("duplicate VNI %u\n", vni); |
3225 | return -EEXIST; |
3226 | } |
3227 | diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c |
3228 | index bf720a875e6b..c77e6bcc63d5 100644 |
3229 | --- a/drivers/net/wireless/iwlwifi/mvm/fw.c |
3230 | +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c |
3231 | @@ -282,7 +282,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) |
3232 | |
3233 | lockdep_assert_held(&mvm->mutex); |
3234 | |
3235 | - if (WARN_ON_ONCE(mvm->init_ucode_complete)) |
3236 | + if (WARN_ON_ONCE(mvm->init_ucode_complete || mvm->calibrating)) |
3237 | return 0; |
3238 | |
3239 | iwl_init_notification_wait(&mvm->notif_wait, |
3240 | @@ -332,6 +332,8 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) |
3241 | goto out; |
3242 | } |
3243 | |
3244 | + mvm->calibrating = true; |
3245 | + |
3246 | /* Send TX valid antennas before triggering calibrations */ |
3247 | ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant); |
3248 | if (ret) |
3249 | @@ -356,11 +358,17 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) |
3250 | MVM_UCODE_CALIB_TIMEOUT); |
3251 | if (!ret) |
3252 | mvm->init_ucode_complete = true; |
3253 | + |
3254 | + if (ret && iwl_mvm_is_radio_killed(mvm)) { |
3255 | + IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); |
3256 | + ret = 1; |
3257 | + } |
3258 | goto out; |
3259 | |
3260 | error: |
3261 | iwl_remove_notification(&mvm->notif_wait, &calib_wait); |
3262 | out: |
3263 | + mvm->calibrating = false; |
3264 | if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) { |
3265 | /* we want to debug INIT and we have no NVM - fake */ |
3266 | mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) + |
3267 | diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c |
3268 | index cdc272d776e7..26de13bb78a8 100644 |
3269 | --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c |
3270 | +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c |
3271 | @@ -778,6 +778,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) |
3272 | iwl_trans_stop_device(mvm->trans); |
3273 | |
3274 | mvm->scan_status = IWL_MVM_SCAN_NONE; |
3275 | + mvm->calibrating = false; |
3276 | |
3277 | /* just in case one was running */ |
3278 | ieee80211_remain_on_channel_expired(mvm->hw); |
3279 | diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h |
3280 | index 2e73d3bd7757..c35f5557307e 100644 |
3281 | --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h |
3282 | +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h |
3283 | @@ -541,6 +541,7 @@ struct iwl_mvm { |
3284 | enum iwl_ucode_type cur_ucode; |
3285 | bool ucode_loaded; |
3286 | bool init_ucode_complete; |
3287 | + bool calibrating; |
3288 | u32 error_event_table; |
3289 | u32 log_event_table; |
3290 | u32 umac_error_event_table; |
3291 | diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c |
3292 | index d31a1178ae35..f9471ee9e260 100644 |
3293 | --- a/drivers/net/wireless/iwlwifi/mvm/ops.c |
3294 | +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c |
3295 | @@ -745,6 +745,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) |
3296 | static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) |
3297 | { |
3298 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
3299 | + bool calibrating = ACCESS_ONCE(mvm->calibrating); |
3300 | |
3301 | if (state) |
3302 | set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); |
3303 | @@ -753,7 +754,15 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) |
3304 | |
3305 | wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm)); |
3306 | |
3307 | - return state && mvm->cur_ucode != IWL_UCODE_INIT; |
3308 | + /* iwl_run_init_mvm_ucode is waiting for results, abort it */ |
3309 | + if (calibrating) |
3310 | + iwl_abort_notification_waits(&mvm->notif_wait); |
3311 | + |
3312 | + /* |
3313 | + * Stop the device if we run OPERATIONAL firmware or if we are in the |
3314 | + * middle of the calibrations. |
3315 | + */ |
3316 | + return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating); |
3317 | } |
3318 | |
3319 | static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) |
3320 | diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c |
3321 | index d7231a82fe42..6c02467a37c8 100644 |
3322 | --- a/drivers/net/wireless/iwlwifi/pcie/trans.c |
3323 | +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c |
3324 | @@ -913,7 +913,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) |
3325 | * restart. So don't process again if the device is |
3326 | * already dead. |
3327 | */ |
3328 | - if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) { |
3329 | + if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { |
3330 | + IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); |
3331 | iwl_pcie_tx_stop(trans); |
3332 | iwl_pcie_rx_stop(trans); |
3333 | |
3334 | @@ -943,7 +944,6 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) |
3335 | /* clear all status bits */ |
3336 | clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); |
3337 | clear_bit(STATUS_INT_ENABLED, &trans->status); |
3338 | - clear_bit(STATUS_DEVICE_ENABLED, &trans->status); |
3339 | clear_bit(STATUS_TPOWER_PMI, &trans->status); |
3340 | clear_bit(STATUS_RFKILL, &trans->status); |
3341 | |
3342 | diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c |
3343 | index 1326f6121835..6b48c865dd7a 100644 |
3344 | --- a/drivers/net/wireless/mac80211_hwsim.c |
3345 | +++ b/drivers/net/wireless/mac80211_hwsim.c |
3346 | @@ -1987,7 +1987,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2, |
3347 | if (err != 0) { |
3348 | printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n", |
3349 | err); |
3350 | - goto failed_hw; |
3351 | + goto failed_bind; |
3352 | } |
3353 | |
3354 | skb_queue_head_init(&data->pending); |
3355 | @@ -2183,6 +2183,8 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2, |
3356 | return idx; |
3357 | |
3358 | failed_hw: |
3359 | + device_release_driver(data->dev); |
3360 | +failed_bind: |
3361 | device_unregister(data->dev); |
3362 | failed_drvdata: |
3363 | ieee80211_free_hw(hw); |
3364 | diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c |
3365 | index 3a4951f46065..c1a6cd66af42 100644 |
3366 | --- a/drivers/platform/x86/asus-nb-wmi.c |
3367 | +++ b/drivers/platform/x86/asus-nb-wmi.c |
3368 | @@ -182,6 +182,15 @@ static const struct dmi_system_id asus_quirks[] = { |
3369 | }, |
3370 | { |
3371 | .callback = dmi_matched, |
3372 | + .ident = "ASUSTeK COMPUTER INC. X550VB", |
3373 | + .matches = { |
3374 | + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), |
3375 | + DMI_MATCH(DMI_PRODUCT_NAME, "X550VB"), |
3376 | + }, |
3377 | + .driver_data = &quirk_asus_wapf4, |
3378 | + }, |
3379 | + { |
3380 | + .callback = dmi_matched, |
3381 | .ident = "ASUSTeK COMPUTER INC. X55A", |
3382 | .matches = { |
3383 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), |
3384 | diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c |
3385 | index 390e8e33d5e3..25721bf20092 100644 |
3386 | --- a/drivers/platform/x86/dell-wmi.c |
3387 | +++ b/drivers/platform/x86/dell-wmi.c |
3388 | @@ -163,18 +163,24 @@ static void dell_wmi_notify(u32 value, void *context) |
3389 | const struct key_entry *key; |
3390 | int reported_key; |
3391 | u16 *buffer_entry = (u16 *)obj->buffer.pointer; |
3392 | + int buffer_size = obj->buffer.length/2; |
3393 | |
3394 | - if (dell_new_hk_type && (buffer_entry[1] != 0x10)) { |
3395 | + if (buffer_size >= 2 && dell_new_hk_type && buffer_entry[1] != 0x10) { |
3396 | pr_info("Received unknown WMI event (0x%x)\n", |
3397 | buffer_entry[1]); |
3398 | kfree(obj); |
3399 | return; |
3400 | } |
3401 | |
3402 | - if (dell_new_hk_type || buffer_entry[1] == 0x0) |
3403 | + if (buffer_size >= 3 && (dell_new_hk_type || buffer_entry[1] == 0x0)) |
3404 | reported_key = (int)buffer_entry[2]; |
3405 | - else |
3406 | + else if (buffer_size >= 2) |
3407 | reported_key = (int)buffer_entry[1] & 0xffff; |
3408 | + else { |
3409 | + pr_info("Received unknown WMI event\n"); |
3410 | + kfree(obj); |
3411 | + return; |
3412 | + } |
3413 | |
3414 | key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev, |
3415 | reported_key); |
3416 | diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c |
3417 | index 02152de135b5..ed494f37c40f 100644 |
3418 | --- a/drivers/platform/x86/ideapad-laptop.c |
3419 | +++ b/drivers/platform/x86/ideapad-laptop.c |
3420 | @@ -837,6 +837,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { |
3421 | DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2"), |
3422 | }, |
3423 | }, |
3424 | + { |
3425 | + .ident = "Lenovo Yoga 3 Pro 1370", |
3426 | + .matches = { |
3427 | + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
3428 | + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 3 Pro-1370"), |
3429 | + }, |
3430 | + }, |
3431 | {} |
3432 | }; |
3433 | |
3434 | diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c |
3435 | index e384844a1ae1..1f49986fc605 100644 |
3436 | --- a/drivers/power/bq2415x_charger.c |
3437 | +++ b/drivers/power/bq2415x_charger.c |
3438 | @@ -1579,8 +1579,15 @@ static int bq2415x_probe(struct i2c_client *client, |
3439 | if (np) { |
3440 | bq->notify_psy = power_supply_get_by_phandle(np, "ti,usb-charger-detection"); |
3441 | |
3442 | - if (!bq->notify_psy) |
3443 | - return -EPROBE_DEFER; |
3444 | + if (IS_ERR(bq->notify_psy)) { |
3445 | + dev_info(&client->dev, |
3446 | + "no 'ti,usb-charger-detection' property (err=%ld)\n", |
3447 | + PTR_ERR(bq->notify_psy)); |
3448 | + bq->notify_psy = NULL; |
3449 | + } else if (!bq->notify_psy) { |
3450 | + ret = -EPROBE_DEFER; |
3451 | + goto error_2; |
3452 | + } |
3453 | } |
3454 | else if (pdata->notify_device) |
3455 | bq->notify_psy = power_supply_get_by_name(pdata->notify_device); |
3456 | @@ -1602,27 +1609,27 @@ static int bq2415x_probe(struct i2c_client *client, |
3457 | ret = of_property_read_u32(np, "ti,current-limit", |
3458 | &bq->init_data.current_limit); |
3459 | if (ret) |
3460 | - return ret; |
3461 | + goto error_2; |
3462 | ret = of_property_read_u32(np, "ti,weak-battery-voltage", |
3463 | &bq->init_data.weak_battery_voltage); |
3464 | if (ret) |
3465 | - return ret; |
3466 | + goto error_2; |
3467 | ret = of_property_read_u32(np, "ti,battery-regulation-voltage", |
3468 | &bq->init_data.battery_regulation_voltage); |
3469 | if (ret) |
3470 | - return ret; |
3471 | + goto error_2; |
3472 | ret = of_property_read_u32(np, "ti,charge-current", |
3473 | &bq->init_data.charge_current); |
3474 | if (ret) |
3475 | - return ret; |
3476 | + goto error_2; |
3477 | ret = of_property_read_u32(np, "ti,termination-current", |
3478 | &bq->init_data.termination_current); |
3479 | if (ret) |
3480 | - return ret; |
3481 | + goto error_2; |
3482 | ret = of_property_read_u32(np, "ti,resistor-sense", |
3483 | &bq->init_data.resistor_sense); |
3484 | if (ret) |
3485 | - return ret; |
3486 | + goto error_2; |
3487 | } else { |
3488 | memcpy(&bq->init_data, pdata, sizeof(bq->init_data)); |
3489 | } |
3490 | diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c |
3491 | index ef1f4c928431..03bfac3655ef 100644 |
3492 | --- a/drivers/power/charger-manager.c |
3493 | +++ b/drivers/power/charger-manager.c |
3494 | @@ -97,6 +97,7 @@ static struct charger_global_desc *g_desc; /* init with setup_charger_manager */ |
3495 | static bool is_batt_present(struct charger_manager *cm) |
3496 | { |
3497 | union power_supply_propval val; |
3498 | + struct power_supply *psy; |
3499 | bool present = false; |
3500 | int i, ret; |
3501 | |
3502 | @@ -107,16 +108,27 @@ static bool is_batt_present(struct charger_manager *cm) |
3503 | case CM_NO_BATTERY: |
3504 | break; |
3505 | case CM_FUEL_GAUGE: |
3506 | - ret = cm->fuel_gauge->get_property(cm->fuel_gauge, |
3507 | + psy = power_supply_get_by_name(cm->desc->psy_fuel_gauge); |
3508 | + if (!psy) |
3509 | + break; |
3510 | + |
3511 | + ret = psy->get_property(psy, |
3512 | POWER_SUPPLY_PROP_PRESENT, &val); |
3513 | if (ret == 0 && val.intval) |
3514 | present = true; |
3515 | break; |
3516 | case CM_CHARGER_STAT: |
3517 | - for (i = 0; cm->charger_stat[i]; i++) { |
3518 | - ret = cm->charger_stat[i]->get_property( |
3519 | - cm->charger_stat[i], |
3520 | - POWER_SUPPLY_PROP_PRESENT, &val); |
3521 | + for (i = 0; cm->desc->psy_charger_stat[i]; i++) { |
3522 | + psy = power_supply_get_by_name( |
3523 | + cm->desc->psy_charger_stat[i]); |
3524 | + if (!psy) { |
3525 | + dev_err(cm->dev, "Cannot find power supply \"%s\"\n", |
3526 | + cm->desc->psy_charger_stat[i]); |
3527 | + continue; |
3528 | + } |
3529 | + |
3530 | + ret = psy->get_property(psy, POWER_SUPPLY_PROP_PRESENT, |
3531 | + &val); |
3532 | if (ret == 0 && val.intval) { |
3533 | present = true; |
3534 | break; |
3535 | @@ -139,14 +151,20 @@ static bool is_batt_present(struct charger_manager *cm) |
3536 | static bool is_ext_pwr_online(struct charger_manager *cm) |
3537 | { |
3538 | union power_supply_propval val; |
3539 | + struct power_supply *psy; |
3540 | bool online = false; |
3541 | int i, ret; |
3542 | |
3543 | /* If at least one of them has one, it's yes. */ |
3544 | - for (i = 0; cm->charger_stat[i]; i++) { |
3545 | - ret = cm->charger_stat[i]->get_property( |
3546 | - cm->charger_stat[i], |
3547 | - POWER_SUPPLY_PROP_ONLINE, &val); |
3548 | + for (i = 0; cm->desc->psy_charger_stat[i]; i++) { |
3549 | + psy = power_supply_get_by_name(cm->desc->psy_charger_stat[i]); |
3550 | + if (!psy) { |
3551 | + dev_err(cm->dev, "Cannot find power supply \"%s\"\n", |
3552 | + cm->desc->psy_charger_stat[i]); |
3553 | + continue; |
3554 | + } |
3555 | + |
3556 | + ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val); |
3557 | if (ret == 0 && val.intval) { |
3558 | online = true; |
3559 | break; |
3560 | @@ -167,12 +185,14 @@ static bool is_ext_pwr_online(struct charger_manager *cm) |
3561 | static int get_batt_uV(struct charger_manager *cm, int *uV) |
3562 | { |
3563 | union power_supply_propval val; |
3564 | + struct power_supply *fuel_gauge; |
3565 | int ret; |
3566 | |
3567 | - if (!cm->fuel_gauge) |
3568 | + fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); |
3569 | + if (!fuel_gauge) |
3570 | return -ENODEV; |
3571 | |
3572 | - ret = cm->fuel_gauge->get_property(cm->fuel_gauge, |
3573 | + ret = fuel_gauge->get_property(fuel_gauge, |
3574 | POWER_SUPPLY_PROP_VOLTAGE_NOW, &val); |
3575 | if (ret) |
3576 | return ret; |
3577 | @@ -189,6 +209,7 @@ static bool is_charging(struct charger_manager *cm) |
3578 | { |
3579 | int i, ret; |
3580 | bool charging = false; |
3581 | + struct power_supply *psy; |
3582 | union power_supply_propval val; |
3583 | |
3584 | /* If there is no battery, it cannot be charged */ |
3585 | @@ -196,17 +217,22 @@ static bool is_charging(struct charger_manager *cm) |
3586 | return false; |
3587 | |
3588 | /* If at least one of the charger is charging, return yes */ |
3589 | - for (i = 0; cm->charger_stat[i]; i++) { |
3590 | + for (i = 0; cm->desc->psy_charger_stat[i]; i++) { |
3591 | /* 1. The charger sholuld not be DISABLED */ |
3592 | if (cm->emergency_stop) |
3593 | continue; |
3594 | if (!cm->charger_enabled) |
3595 | continue; |
3596 | |
3597 | + psy = power_supply_get_by_name(cm->desc->psy_charger_stat[i]); |
3598 | + if (!psy) { |
3599 | + dev_err(cm->dev, "Cannot find power supply \"%s\"\n", |
3600 | + cm->desc->psy_charger_stat[i]); |
3601 | + continue; |
3602 | + } |
3603 | + |
3604 | /* 2. The charger should be online (ext-power) */ |
3605 | - ret = cm->charger_stat[i]->get_property( |
3606 | - cm->charger_stat[i], |
3607 | - POWER_SUPPLY_PROP_ONLINE, &val); |
3608 | + ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val); |
3609 | if (ret) { |
3610 | dev_warn(cm->dev, "Cannot read ONLINE value from %s\n", |
3611 | cm->desc->psy_charger_stat[i]); |
3612 | @@ -219,9 +245,7 @@ static bool is_charging(struct charger_manager *cm) |
3613 | * 3. The charger should not be FULL, DISCHARGING, |
3614 | * or NOT_CHARGING. |
3615 | */ |
3616 | - ret = cm->charger_stat[i]->get_property( |
3617 | - cm->charger_stat[i], |
3618 | - POWER_SUPPLY_PROP_STATUS, &val); |
3619 | + ret = psy->get_property(psy, POWER_SUPPLY_PROP_STATUS, &val); |
3620 | if (ret) { |
3621 | dev_warn(cm->dev, "Cannot read STATUS value from %s\n", |
3622 | cm->desc->psy_charger_stat[i]); |
3623 | @@ -248,6 +272,7 @@ static bool is_full_charged(struct charger_manager *cm) |
3624 | { |
3625 | struct charger_desc *desc = cm->desc; |
3626 | union power_supply_propval val; |
3627 | + struct power_supply *fuel_gauge; |
3628 | int ret = 0; |
3629 | int uV; |
3630 | |
3631 | @@ -255,11 +280,15 @@ static bool is_full_charged(struct charger_manager *cm) |
3632 | if (!is_batt_present(cm)) |
3633 | return false; |
3634 | |
3635 | - if (cm->fuel_gauge && desc->fullbatt_full_capacity > 0) { |
3636 | + fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); |
3637 | + if (!fuel_gauge) |
3638 | + return false; |
3639 | + |
3640 | + if (desc->fullbatt_full_capacity > 0) { |
3641 | val.intval = 0; |
3642 | |
3643 | /* Not full if capacity of fuel gauge isn't full */ |
3644 | - ret = cm->fuel_gauge->get_property(cm->fuel_gauge, |
3645 | + ret = fuel_gauge->get_property(fuel_gauge, |
3646 | POWER_SUPPLY_PROP_CHARGE_FULL, &val); |
3647 | if (!ret && val.intval > desc->fullbatt_full_capacity) |
3648 | return true; |
3649 | @@ -273,10 +302,10 @@ static bool is_full_charged(struct charger_manager *cm) |
3650 | } |
3651 | |
3652 | /* Full, if the capacity is more than fullbatt_soc */ |
3653 | - if (cm->fuel_gauge && desc->fullbatt_soc > 0) { |
3654 | + if (desc->fullbatt_soc > 0) { |
3655 | val.intval = 0; |
3656 | |
3657 | - ret = cm->fuel_gauge->get_property(cm->fuel_gauge, |
3658 | + ret = fuel_gauge->get_property(fuel_gauge, |
3659 | POWER_SUPPLY_PROP_CAPACITY, &val); |
3660 | if (!ret && val.intval >= desc->fullbatt_soc) |
3661 | return true; |
3662 | @@ -551,6 +580,20 @@ static int check_charging_duration(struct charger_manager *cm) |
3663 | return ret; |
3664 | } |
3665 | |
3666 | +static int cm_get_battery_temperature_by_psy(struct charger_manager *cm, |
3667 | + int *temp) |
3668 | +{ |
3669 | + struct power_supply *fuel_gauge; |
3670 | + |
3671 | + fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); |
3672 | + if (!fuel_gauge) |
3673 | + return -ENODEV; |
3674 | + |
3675 | + return fuel_gauge->get_property(fuel_gauge, |
3676 | + POWER_SUPPLY_PROP_TEMP, |
3677 | + (union power_supply_propval *)temp); |
3678 | +} |
3679 | + |
3680 | static int cm_get_battery_temperature(struct charger_manager *cm, |
3681 | int *temp) |
3682 | { |
3683 | @@ -560,15 +603,18 @@ static int cm_get_battery_temperature(struct charger_manager *cm, |
3684 | return -ENODEV; |
3685 | |
3686 | #ifdef CONFIG_THERMAL |
3687 | - ret = thermal_zone_get_temp(cm->tzd_batt, (unsigned long *)temp); |
3688 | - if (!ret) |
3689 | - /* Calibrate temperature unit */ |
3690 | - *temp /= 100; |
3691 | -#else |
3692 | - ret = cm->fuel_gauge->get_property(cm->fuel_gauge, |
3693 | - POWER_SUPPLY_PROP_TEMP, |
3694 | - (union power_supply_propval *)temp); |
3695 | + if (cm->tzd_batt) { |
3696 | + ret = thermal_zone_get_temp(cm->tzd_batt, (unsigned long *)temp); |
3697 | + if (!ret) |
3698 | + /* Calibrate temperature unit */ |
3699 | + *temp /= 100; |
3700 | + } else |
3701 | #endif |
3702 | + { |
3703 | + /* if-else continued from CONFIG_THERMAL */ |
3704 | + ret = cm_get_battery_temperature_by_psy(cm, temp); |
3705 | + } |
3706 | + |
3707 | return ret; |
3708 | } |
3709 | |
3710 | @@ -827,6 +873,7 @@ static int charger_get_property(struct power_supply *psy, |
3711 | struct charger_manager *cm = container_of(psy, |
3712 | struct charger_manager, charger_psy); |
3713 | struct charger_desc *desc = cm->desc; |
3714 | + struct power_supply *fuel_gauge; |
3715 | int ret = 0; |
3716 | int uV; |
3717 | |
3718 | @@ -857,14 +904,20 @@ static int charger_get_property(struct power_supply *psy, |
3719 | ret = get_batt_uV(cm, &val->intval); |
3720 | break; |
3721 | case POWER_SUPPLY_PROP_CURRENT_NOW: |
3722 | - ret = cm->fuel_gauge->get_property(cm->fuel_gauge, |
3723 | + fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); |
3724 | + if (!fuel_gauge) { |
3725 | + ret = -ENODEV; |
3726 | + break; |
3727 | + } |
3728 | + ret = fuel_gauge->get_property(fuel_gauge, |
3729 | POWER_SUPPLY_PROP_CURRENT_NOW, val); |
3730 | break; |
3731 | case POWER_SUPPLY_PROP_TEMP: |
3732 | case POWER_SUPPLY_PROP_TEMP_AMBIENT: |
3733 | return cm_get_battery_temperature(cm, &val->intval); |
3734 | case POWER_SUPPLY_PROP_CAPACITY: |
3735 | - if (!cm->fuel_gauge) { |
3736 | + fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); |
3737 | + if (!fuel_gauge) { |
3738 | ret = -ENODEV; |
3739 | break; |
3740 | } |
3741 | @@ -875,7 +928,7 @@ static int charger_get_property(struct power_supply *psy, |
3742 | break; |
3743 | } |
3744 | |
3745 | - ret = cm->fuel_gauge->get_property(cm->fuel_gauge, |
3746 | + ret = fuel_gauge->get_property(fuel_gauge, |
3747 | POWER_SUPPLY_PROP_CAPACITY, val); |
3748 | if (ret) |
3749 | break; |
3750 | @@ -924,7 +977,14 @@ static int charger_get_property(struct power_supply *psy, |
3751 | break; |
3752 | case POWER_SUPPLY_PROP_CHARGE_NOW: |
3753 | if (is_charging(cm)) { |
3754 | - ret = cm->fuel_gauge->get_property(cm->fuel_gauge, |
3755 | + fuel_gauge = power_supply_get_by_name( |
3756 | + cm->desc->psy_fuel_gauge); |
3757 | + if (!fuel_gauge) { |
3758 | + ret = -ENODEV; |
3759 | + break; |
3760 | + } |
3761 | + |
3762 | + ret = fuel_gauge->get_property(fuel_gauge, |
3763 | POWER_SUPPLY_PROP_CHARGE_NOW, |
3764 | val); |
3765 | if (ret) { |
3766 | @@ -1485,14 +1545,15 @@ err: |
3767 | return ret; |
3768 | } |
3769 | |
3770 | -static int cm_init_thermal_data(struct charger_manager *cm) |
3771 | +static int cm_init_thermal_data(struct charger_manager *cm, |
3772 | + struct power_supply *fuel_gauge) |
3773 | { |
3774 | struct charger_desc *desc = cm->desc; |
3775 | union power_supply_propval val; |
3776 | int ret; |
3777 | |
3778 | /* Verify whether fuel gauge provides battery temperature */ |
3779 | - ret = cm->fuel_gauge->get_property(cm->fuel_gauge, |
3780 | + ret = fuel_gauge->get_property(fuel_gauge, |
3781 | POWER_SUPPLY_PROP_TEMP, &val); |
3782 | |
3783 | if (!ret) { |
3784 | @@ -1502,8 +1563,6 @@ static int cm_init_thermal_data(struct charger_manager *cm) |
3785 | cm->desc->measure_battery_temp = true; |
3786 | } |
3787 | #ifdef CONFIG_THERMAL |
3788 | - cm->tzd_batt = cm->fuel_gauge->tzd; |
3789 | - |
3790 | if (ret && desc->thermal_zone) { |
3791 | cm->tzd_batt = |
3792 | thermal_zone_get_zone_by_name(desc->thermal_zone); |
3793 | @@ -1666,6 +1725,7 @@ static int charger_manager_probe(struct platform_device *pdev) |
3794 | int ret = 0, i = 0; |
3795 | int j = 0; |
3796 | union power_supply_propval val; |
3797 | + struct power_supply *fuel_gauge; |
3798 | |
3799 | if (g_desc && !rtc_dev && g_desc->rtc_name) { |
3800 | rtc_dev = rtc_class_open(g_desc->rtc_name); |
3801 | @@ -1729,23 +1789,20 @@ static int charger_manager_probe(struct platform_device *pdev) |
3802 | while (desc->psy_charger_stat[i]) |
3803 | i++; |
3804 | |
3805 | - cm->charger_stat = devm_kzalloc(&pdev->dev, |
3806 | - sizeof(struct power_supply *) * i, GFP_KERNEL); |
3807 | - if (!cm->charger_stat) |
3808 | - return -ENOMEM; |
3809 | - |
3810 | + /* Check if charger's supplies are present at probe */ |
3811 | for (i = 0; desc->psy_charger_stat[i]; i++) { |
3812 | - cm->charger_stat[i] = power_supply_get_by_name( |
3813 | - desc->psy_charger_stat[i]); |
3814 | - if (!cm->charger_stat[i]) { |
3815 | + struct power_supply *psy; |
3816 | + |
3817 | + psy = power_supply_get_by_name(desc->psy_charger_stat[i]); |
3818 | + if (!psy) { |
3819 | dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n", |
3820 | desc->psy_charger_stat[i]); |
3821 | return -ENODEV; |
3822 | } |
3823 | } |
3824 | |
3825 | - cm->fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge); |
3826 | - if (!cm->fuel_gauge) { |
3827 | + fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge); |
3828 | + if (!fuel_gauge) { |
3829 | dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n", |
3830 | desc->psy_fuel_gauge); |
3831 | return -ENODEV; |
3832 | @@ -1788,13 +1845,13 @@ static int charger_manager_probe(struct platform_device *pdev) |
3833 | cm->charger_psy.num_properties = psy_default.num_properties; |
3834 | |
3835 | /* Find which optional psy-properties are available */ |
3836 | - if (!cm->fuel_gauge->get_property(cm->fuel_gauge, |
3837 | + if (!fuel_gauge->get_property(fuel_gauge, |
3838 | POWER_SUPPLY_PROP_CHARGE_NOW, &val)) { |
3839 | cm->charger_psy.properties[cm->charger_psy.num_properties] = |
3840 | POWER_SUPPLY_PROP_CHARGE_NOW; |
3841 | cm->charger_psy.num_properties++; |
3842 | } |
3843 | - if (!cm->fuel_gauge->get_property(cm->fuel_gauge, |
3844 | + if (!fuel_gauge->get_property(fuel_gauge, |
3845 | POWER_SUPPLY_PROP_CURRENT_NOW, |
3846 | &val)) { |
3847 | cm->charger_psy.properties[cm->charger_psy.num_properties] = |
3848 | @@ -1802,7 +1859,7 @@ static int charger_manager_probe(struct platform_device *pdev) |
3849 | cm->charger_psy.num_properties++; |
3850 | } |
3851 | |
3852 | - ret = cm_init_thermal_data(cm); |
3853 | + ret = cm_init_thermal_data(cm, fuel_gauge); |
3854 | if (ret) { |
3855 | dev_err(&pdev->dev, "Failed to initialize thermal data\n"); |
3856 | cm->desc->measure_battery_temp = false; |
3857 | @@ -2059,8 +2116,8 @@ static bool find_power_supply(struct charger_manager *cm, |
3858 | int i; |
3859 | bool found = false; |
3860 | |
3861 | - for (i = 0; cm->charger_stat[i]; i++) { |
3862 | - if (psy == cm->charger_stat[i]) { |
3863 | + for (i = 0; cm->desc->psy_charger_stat[i]; i++) { |
3864 | + if (!strcmp(psy->name, cm->desc->psy_charger_stat[i])) { |
3865 | found = true; |
3866 | break; |
3867 | } |
3868 | diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c |
3869 | index d2c35920ff08..66c210e39d71 100644 |
3870 | --- a/drivers/pwm/core.c |
3871 | +++ b/drivers/pwm/core.c |
3872 | @@ -602,12 +602,9 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id) |
3873 | struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER); |
3874 | const char *dev_id = dev ? dev_name(dev) : NULL; |
3875 | struct pwm_chip *chip = NULL; |
3876 | - unsigned int index = 0; |
3877 | unsigned int best = 0; |
3878 | - struct pwm_lookup *p; |
3879 | + struct pwm_lookup *p, *chosen = NULL; |
3880 | unsigned int match; |
3881 | - unsigned int period; |
3882 | - enum pwm_polarity polarity; |
3883 | |
3884 | /* look up via DT first */ |
3885 | if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node) |
3886 | @@ -653,10 +650,7 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id) |
3887 | } |
3888 | |
3889 | if (match > best) { |
3890 | - chip = pwmchip_find_by_name(p->provider); |
3891 | - index = p->index; |
3892 | - period = p->period; |
3893 | - polarity = p->polarity; |
3894 | + chosen = p; |
3895 | |
3896 | if (match != 3) |
3897 | best = match; |
3898 | @@ -665,17 +659,22 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id) |
3899 | } |
3900 | } |
3901 | |
3902 | - mutex_unlock(&pwm_lookup_lock); |
3903 | + if (!chosen) |
3904 | + goto out; |
3905 | |
3906 | - if (chip) |
3907 | - pwm = pwm_request_from_chip(chip, index, con_id ?: dev_id); |
3908 | - if (IS_ERR(pwm)) |
3909 | - return pwm; |
3910 | + chip = pwmchip_find_by_name(chosen->provider); |
3911 | + if (!chip) |
3912 | + goto out; |
3913 | |
3914 | - pwm_set_period(pwm, period); |
3915 | - pwm_set_polarity(pwm, polarity); |
3916 | + pwm = pwm_request_from_chip(chip, chosen->index, con_id ?: dev_id); |
3917 | + if (IS_ERR(pwm)) |
3918 | + goto out; |
3919 | |
3920 | + pwm_set_period(pwm, chosen->period); |
3921 | + pwm_set_polarity(pwm, chosen->polarity); |
3922 | |
3923 | +out: |
3924 | + mutex_unlock(&pwm_lookup_lock); |
3925 | return pwm; |
3926 | } |
3927 | EXPORT_SYMBOL_GPL(pwm_get); |
3928 | diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c |
3929 | index 5db8454474ee..817d654dbc82 100644 |
3930 | --- a/drivers/scsi/scsi_error.c |
3931 | +++ b/drivers/scsi/scsi_error.c |
3932 | @@ -1998,8 +1998,10 @@ static void scsi_restart_operations(struct Scsi_Host *shost) |
3933 | * is no point trying to lock the door of an off-line device. |
3934 | */ |
3935 | shost_for_each_device(sdev, shost) { |
3936 | - if (scsi_device_online(sdev) && sdev->locked) |
3937 | + if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) { |
3938 | scsi_eh_lock_door(sdev); |
3939 | + sdev->was_reset = 0; |
3940 | + } |
3941 | } |
3942 | |
3943 | /* |
3944 | diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c |
3945 | index 1a349f9a9685..5d4261ff5d23 100644 |
3946 | --- a/fs/gfs2/dir.c |
3947 | +++ b/fs/gfs2/dir.c |
3948 | @@ -2100,8 +2100,13 @@ int gfs2_diradd_alloc_required(struct inode *inode, const struct qstr *name, |
3949 | } |
3950 | if (IS_ERR(dent)) |
3951 | return PTR_ERR(dent); |
3952 | - da->bh = bh; |
3953 | - da->dent = dent; |
3954 | + |
3955 | + if (da->save_loc) { |
3956 | + da->bh = bh; |
3957 | + da->dent = dent; |
3958 | + } else { |
3959 | + brelse(bh); |
3960 | + } |
3961 | return 0; |
3962 | } |
3963 | |
3964 | diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h |
3965 | index 126c65dda028..e1b309c24dab 100644 |
3966 | --- a/fs/gfs2/dir.h |
3967 | +++ b/fs/gfs2/dir.h |
3968 | @@ -23,6 +23,7 @@ struct gfs2_diradd { |
3969 | unsigned nr_blocks; |
3970 | struct gfs2_dirent *dent; |
3971 | struct buffer_head *bh; |
3972 | + int save_loc; |
3973 | }; |
3974 | |
3975 | extern struct inode *gfs2_dir_search(struct inode *dir, |
3976 | diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c |
3977 | index fc8ac2ee0667..7d2723ce067e 100644 |
3978 | --- a/fs/gfs2/inode.c |
3979 | +++ b/fs/gfs2/inode.c |
3980 | @@ -600,7 +600,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, |
3981 | int error, free_vfs_inode = 0; |
3982 | u32 aflags = 0; |
3983 | unsigned blocks = 1; |
3984 | - struct gfs2_diradd da = { .bh = NULL, }; |
3985 | + struct gfs2_diradd da = { .bh = NULL, .save_loc = 1, }; |
3986 | |
3987 | if (!name->len || name->len > GFS2_FNAMESIZE) |
3988 | return -ENAMETOOLONG; |
3989 | @@ -899,7 +899,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir, |
3990 | struct gfs2_inode *ip = GFS2_I(inode); |
3991 | struct gfs2_holder ghs[2]; |
3992 | struct buffer_head *dibh; |
3993 | - struct gfs2_diradd da = { .bh = NULL, }; |
3994 | + struct gfs2_diradd da = { .bh = NULL, .save_loc = 1, }; |
3995 | int error; |
3996 | |
3997 | if (S_ISDIR(inode->i_mode)) |
3998 | @@ -1337,7 +1337,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, |
3999 | struct gfs2_rgrpd *nrgd; |
4000 | unsigned int num_gh; |
4001 | int dir_rename = 0; |
4002 | - struct gfs2_diradd da = { .nr_blocks = 0, }; |
4003 | + struct gfs2_diradd da = { .nr_blocks = 0, .save_loc = 0, }; |
4004 | unsigned int x; |
4005 | int error; |
4006 | |
4007 | diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c |
4008 | index 5853f53db732..7f3f60641344 100644 |
4009 | --- a/fs/nfs/delegation.c |
4010 | +++ b/fs/nfs/delegation.c |
4011 | @@ -125,6 +125,8 @@ again: |
4012 | continue; |
4013 | if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) |
4014 | continue; |
4015 | + if (!nfs4_valid_open_stateid(state)) |
4016 | + continue; |
4017 | if (!nfs4_stateid_match(&state->stateid, stateid)) |
4018 | continue; |
4019 | get_nfs_open_context(ctx); |
4020 | @@ -193,7 +195,11 @@ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation * |
4021 | { |
4022 | int res = 0; |
4023 | |
4024 | - res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync); |
4025 | + if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) |
4026 | + res = nfs4_proc_delegreturn(inode, |
4027 | + delegation->cred, |
4028 | + &delegation->stateid, |
4029 | + issync); |
4030 | nfs_free_delegation(delegation); |
4031 | return res; |
4032 | } |
4033 | @@ -380,11 +386,13 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation |
4034 | { |
4035 | struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; |
4036 | struct nfs_inode *nfsi = NFS_I(inode); |
4037 | - int err; |
4038 | + int err = 0; |
4039 | |
4040 | if (delegation == NULL) |
4041 | return 0; |
4042 | do { |
4043 | + if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) |
4044 | + break; |
4045 | err = nfs_delegation_claim_opens(inode, &delegation->stateid); |
4046 | if (!issync || err != -EAGAIN) |
4047 | break; |
4048 | @@ -605,10 +613,23 @@ static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *cl |
4049 | rcu_read_unlock(); |
4050 | } |
4051 | |
4052 | +static void nfs_revoke_delegation(struct inode *inode) |
4053 | +{ |
4054 | + struct nfs_delegation *delegation; |
4055 | + rcu_read_lock(); |
4056 | + delegation = rcu_dereference(NFS_I(inode)->delegation); |
4057 | + if (delegation != NULL) { |
4058 | + set_bit(NFS_DELEGATION_REVOKED, &delegation->flags); |
4059 | + nfs_mark_return_delegation(NFS_SERVER(inode), delegation); |
4060 | + } |
4061 | + rcu_read_unlock(); |
4062 | +} |
4063 | + |
4064 | void nfs_remove_bad_delegation(struct inode *inode) |
4065 | { |
4066 | struct nfs_delegation *delegation; |
4067 | |
4068 | + nfs_revoke_delegation(inode); |
4069 | delegation = nfs_inode_detach_delegation(inode); |
4070 | if (delegation) { |
4071 | nfs_inode_find_state_and_recover(inode, &delegation->stateid); |
4072 | diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h |
4073 | index 5c1cce39297f..e3c20a3ccc93 100644 |
4074 | --- a/fs/nfs/delegation.h |
4075 | +++ b/fs/nfs/delegation.h |
4076 | @@ -31,6 +31,7 @@ enum { |
4077 | NFS_DELEGATION_RETURN_IF_CLOSED, |
4078 | NFS_DELEGATION_REFERENCED, |
4079 | NFS_DELEGATION_RETURNING, |
4080 | + NFS_DELEGATION_REVOKED, |
4081 | }; |
4082 | |
4083 | int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); |
4084 | diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c |
4085 | index 65ef6e00deee..c6b5eddbcac0 100644 |
4086 | --- a/fs/nfs/direct.c |
4087 | +++ b/fs/nfs/direct.c |
4088 | @@ -270,6 +270,7 @@ static void nfs_direct_req_free(struct kref *kref) |
4089 | { |
4090 | struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref); |
4091 | |
4092 | + nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo); |
4093 | if (dreq->l_ctx != NULL) |
4094 | nfs_put_lock_context(dreq->l_ctx); |
4095 | if (dreq->ctx != NULL) |
4096 | diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c |
4097 | index f59713e091a8..eb777ed34568 100644 |
4098 | --- a/fs/nfs/filelayout/filelayout.c |
4099 | +++ b/fs/nfs/filelayout/filelayout.c |
4100 | @@ -145,9 +145,6 @@ static int filelayout_async_handle_error(struct rpc_task *task, |
4101 | case -NFS4ERR_DELEG_REVOKED: |
4102 | case -NFS4ERR_ADMIN_REVOKED: |
4103 | case -NFS4ERR_BAD_STATEID: |
4104 | - if (state == NULL) |
4105 | - break; |
4106 | - nfs_remove_bad_delegation(state->inode); |
4107 | case -NFS4ERR_OPENMODE: |
4108 | if (state == NULL) |
4109 | break; |
4110 | diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c |
4111 | index 577a36f0a510..0689aa522092 100644 |
4112 | --- a/fs/nfs/inode.c |
4113 | +++ b/fs/nfs/inode.c |
4114 | @@ -624,7 +624,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) |
4115 | { |
4116 | struct inode *inode = dentry->d_inode; |
4117 | int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME; |
4118 | - int err; |
4119 | + int err = 0; |
4120 | |
4121 | trace_nfs_getattr_enter(inode); |
4122 | /* Flush out writes to the server in order to update c/mtime. */ |
4123 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
4124 | index 0422d77b73c7..d3ebdae1d9b8 100644 |
4125 | --- a/fs/nfs/nfs4proc.c |
4126 | +++ b/fs/nfs/nfs4proc.c |
4127 | @@ -360,11 +360,6 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc |
4128 | case -NFS4ERR_DELEG_REVOKED: |
4129 | case -NFS4ERR_ADMIN_REVOKED: |
4130 | case -NFS4ERR_BAD_STATEID: |
4131 | - if (inode != NULL && nfs4_have_delegation(inode, FMODE_READ)) { |
4132 | - nfs_remove_bad_delegation(inode); |
4133 | - exception->retry = 1; |
4134 | - break; |
4135 | - } |
4136 | if (state == NULL) |
4137 | break; |
4138 | ret = nfs4_schedule_stateid_recovery(server, state); |
4139 | @@ -1647,7 +1642,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct |
4140 | nfs_inode_find_state_and_recover(state->inode, |
4141 | stateid); |
4142 | nfs4_schedule_stateid_recovery(server, state); |
4143 | - return 0; |
4144 | + return -EAGAIN; |
4145 | case -NFS4ERR_DELAY: |
4146 | case -NFS4ERR_GRACE: |
4147 | set_bit(NFS_DELEGATED_STATE, &state->flags); |
4148 | @@ -2102,46 +2097,60 @@ static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *sta |
4149 | return ret; |
4150 | } |
4151 | |
4152 | +static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state) |
4153 | +{ |
4154 | + nfs_remove_bad_delegation(state->inode); |
4155 | + write_seqlock(&state->seqlock); |
4156 | + nfs4_stateid_copy(&state->stateid, &state->open_stateid); |
4157 | + write_sequnlock(&state->seqlock); |
4158 | + clear_bit(NFS_DELEGATED_STATE, &state->flags); |
4159 | +} |
4160 | + |
4161 | +static void nfs40_clear_delegation_stateid(struct nfs4_state *state) |
4162 | +{ |
4163 | + if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) |
4164 | + nfs_finish_clear_delegation_stateid(state); |
4165 | +} |
4166 | + |
4167 | +static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) |
4168 | +{ |
4169 | + /* NFSv4.0 doesn't allow for delegation recovery on open expire */ |
4170 | + nfs40_clear_delegation_stateid(state); |
4171 | + return nfs4_open_expired(sp, state); |
4172 | +} |
4173 | + |
4174 | #if defined(CONFIG_NFS_V4_1) |
4175 | -static void nfs41_clear_delegation_stateid(struct nfs4_state *state) |
4176 | +static void nfs41_check_delegation_stateid(struct nfs4_state *state) |
4177 | { |
4178 | struct nfs_server *server = NFS_SERVER(state->inode); |
4179 | - nfs4_stateid *stateid = &state->stateid; |
4180 | + nfs4_stateid stateid; |
4181 | struct nfs_delegation *delegation; |
4182 | - struct rpc_cred *cred = NULL; |
4183 | - int status = -NFS4ERR_BAD_STATEID; |
4184 | - |
4185 | - /* If a state reset has been done, test_stateid is unneeded */ |
4186 | - if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) |
4187 | - return; |
4188 | + struct rpc_cred *cred; |
4189 | + int status; |
4190 | |
4191 | /* Get the delegation credential for use by test/free_stateid */ |
4192 | rcu_read_lock(); |
4193 | delegation = rcu_dereference(NFS_I(state->inode)->delegation); |
4194 | - if (delegation != NULL && |
4195 | - nfs4_stateid_match(&delegation->stateid, stateid)) { |
4196 | - cred = get_rpccred(delegation->cred); |
4197 | - rcu_read_unlock(); |
4198 | - status = nfs41_test_stateid(server, stateid, cred); |
4199 | - trace_nfs4_test_delegation_stateid(state, NULL, status); |
4200 | - } else |
4201 | + if (delegation == NULL) { |
4202 | rcu_read_unlock(); |
4203 | + return; |
4204 | + } |
4205 | + |
4206 | + nfs4_stateid_copy(&stateid, &delegation->stateid); |
4207 | + cred = get_rpccred(delegation->cred); |
4208 | + rcu_read_unlock(); |
4209 | + status = nfs41_test_stateid(server, &stateid, cred); |
4210 | + trace_nfs4_test_delegation_stateid(state, NULL, status); |
4211 | |
4212 | if (status != NFS_OK) { |
4213 | /* Free the stateid unless the server explicitly |
4214 | * informs us the stateid is unrecognized. */ |
4215 | if (status != -NFS4ERR_BAD_STATEID) |
4216 | - nfs41_free_stateid(server, stateid, cred); |
4217 | - nfs_remove_bad_delegation(state->inode); |
4218 | - |
4219 | - write_seqlock(&state->seqlock); |
4220 | - nfs4_stateid_copy(&state->stateid, &state->open_stateid); |
4221 | - write_sequnlock(&state->seqlock); |
4222 | - clear_bit(NFS_DELEGATED_STATE, &state->flags); |
4223 | + nfs41_free_stateid(server, &stateid, cred); |
4224 | + nfs_finish_clear_delegation_stateid(state); |
4225 | } |
4226 | |
4227 | - if (cred != NULL) |
4228 | - put_rpccred(cred); |
4229 | + put_rpccred(cred); |
4230 | } |
4231 | |
4232 | /** |
4233 | @@ -2185,7 +2194,7 @@ static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st |
4234 | { |
4235 | int status; |
4236 | |
4237 | - nfs41_clear_delegation_stateid(state); |
4238 | + nfs41_check_delegation_stateid(state); |
4239 | status = nfs41_check_open_stateid(state); |
4240 | if (status != NFS_OK) |
4241 | status = nfs4_open_expired(sp, state); |
4242 | @@ -4827,9 +4836,6 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, |
4243 | case -NFS4ERR_DELEG_REVOKED: |
4244 | case -NFS4ERR_ADMIN_REVOKED: |
4245 | case -NFS4ERR_BAD_STATEID: |
4246 | - if (state == NULL) |
4247 | - break; |
4248 | - nfs_remove_bad_delegation(state->inode); |
4249 | case -NFS4ERR_OPENMODE: |
4250 | if (state == NULL) |
4251 | break; |
4252 | @@ -8366,7 +8372,7 @@ static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { |
4253 | static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { |
4254 | .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, |
4255 | .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, |
4256 | - .recover_open = nfs4_open_expired, |
4257 | + .recover_open = nfs40_open_expired, |
4258 | .recover_lock = nfs4_lock_expired, |
4259 | .establish_clid = nfs4_init_clientid, |
4260 | }; |
4261 | diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h |
4262 | index 3d33794e4f3e..7448edff4723 100644 |
4263 | --- a/include/dt-bindings/pinctrl/dra.h |
4264 | +++ b/include/dt-bindings/pinctrl/dra.h |
4265 | @@ -40,8 +40,8 @@ |
4266 | |
4267 | /* Active pin states */ |
4268 | #define PIN_OUTPUT (0 | PULL_DIS) |
4269 | -#define PIN_OUTPUT_PULLUP (PIN_OUTPUT | PULL_ENA | PULL_UP) |
4270 | -#define PIN_OUTPUT_PULLDOWN (PIN_OUTPUT | PULL_ENA) |
4271 | +#define PIN_OUTPUT_PULLUP (PULL_UP) |
4272 | +#define PIN_OUTPUT_PULLDOWN (0) |
4273 | #define PIN_INPUT (INPUT_EN | PULL_DIS) |
4274 | #define PIN_INPUT_SLEW (INPUT_EN | SLEWCONTROL) |
4275 | #define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP) |
4276 | diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h |
4277 | index 4e2bd4c95b66..0995c2de8162 100644 |
4278 | --- a/include/linux/bootmem.h |
4279 | +++ b/include/linux/bootmem.h |
4280 | @@ -46,6 +46,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat, |
4281 | extern unsigned long init_bootmem(unsigned long addr, unsigned long memend); |
4282 | |
4283 | extern unsigned long free_all_bootmem(void); |
4284 | +extern void reset_node_managed_pages(pg_data_t *pgdat); |
4285 | extern void reset_all_zones_managed_pages(void); |
4286 | |
4287 | extern void free_bootmem_node(pg_data_t *pgdat, |
4288 | diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h |
4289 | index 653f0e2b6ca9..abcafaa20b86 100644 |
4290 | --- a/include/linux/clocksource.h |
4291 | +++ b/include/linux/clocksource.h |
4292 | @@ -287,7 +287,7 @@ extern struct clocksource* clocksource_get_next(void); |
4293 | extern void clocksource_change_rating(struct clocksource *cs, int rating); |
4294 | extern void clocksource_suspend(void); |
4295 | extern void clocksource_resume(void); |
4296 | -extern struct clocksource * __init __weak clocksource_default_clock(void); |
4297 | +extern struct clocksource * __init clocksource_default_clock(void); |
4298 | extern void clocksource_mark_unstable(struct clocksource *cs); |
4299 | |
4300 | extern u64 |
4301 | diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h |
4302 | index 72ab536ad3de..3849fce7ecfe 100644 |
4303 | --- a/include/linux/crash_dump.h |
4304 | +++ b/include/linux/crash_dump.h |
4305 | @@ -14,14 +14,13 @@ |
4306 | extern unsigned long long elfcorehdr_addr; |
4307 | extern unsigned long long elfcorehdr_size; |
4308 | |
4309 | -extern int __weak elfcorehdr_alloc(unsigned long long *addr, |
4310 | - unsigned long long *size); |
4311 | -extern void __weak elfcorehdr_free(unsigned long long addr); |
4312 | -extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos); |
4313 | -extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos); |
4314 | -extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma, |
4315 | - unsigned long from, unsigned long pfn, |
4316 | - unsigned long size, pgprot_t prot); |
4317 | +extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size); |
4318 | +extern void elfcorehdr_free(unsigned long long addr); |
4319 | +extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos); |
4320 | +extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos); |
4321 | +extern int remap_oldmem_pfn_range(struct vm_area_struct *vma, |
4322 | + unsigned long from, unsigned long pfn, |
4323 | + unsigned long size, pgprot_t prot); |
4324 | |
4325 | extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, |
4326 | unsigned long, int); |
4327 | diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h |
4328 | index 6b06d378f3df..e465bb15912d 100644 |
4329 | --- a/include/linux/kgdb.h |
4330 | +++ b/include/linux/kgdb.h |
4331 | @@ -283,7 +283,7 @@ struct kgdb_io { |
4332 | |
4333 | extern struct kgdb_arch arch_kgdb_ops; |
4334 | |
4335 | -extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs); |
4336 | +extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs); |
4337 | |
4338 | #ifdef CONFIG_SERIAL_KGDB_NMI |
4339 | extern int kgdb_register_nmi_console(void); |
4340 | diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h |
4341 | index 6b394f0b5148..eeb307985715 100644 |
4342 | --- a/include/linux/khugepaged.h |
4343 | +++ b/include/linux/khugepaged.h |
4344 | @@ -6,7 +6,8 @@ |
4345 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
4346 | extern int __khugepaged_enter(struct mm_struct *mm); |
4347 | extern void __khugepaged_exit(struct mm_struct *mm); |
4348 | -extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma); |
4349 | +extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, |
4350 | + unsigned long vm_flags); |
4351 | |
4352 | #define khugepaged_enabled() \ |
4353 | (transparent_hugepage_flags & \ |
4354 | @@ -35,13 +36,13 @@ static inline void khugepaged_exit(struct mm_struct *mm) |
4355 | __khugepaged_exit(mm); |
4356 | } |
4357 | |
4358 | -static inline int khugepaged_enter(struct vm_area_struct *vma) |
4359 | +static inline int khugepaged_enter(struct vm_area_struct *vma, |
4360 | + unsigned long vm_flags) |
4361 | { |
4362 | if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) |
4363 | if ((khugepaged_always() || |
4364 | - (khugepaged_req_madv() && |
4365 | - vma->vm_flags & VM_HUGEPAGE)) && |
4366 | - !(vma->vm_flags & VM_NOHUGEPAGE)) |
4367 | + (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) && |
4368 | + !(vm_flags & VM_NOHUGEPAGE)) |
4369 | if (__khugepaged_enter(vma->vm_mm)) |
4370 | return -ENOMEM; |
4371 | return 0; |
4372 | @@ -54,11 +55,13 @@ static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm) |
4373 | static inline void khugepaged_exit(struct mm_struct *mm) |
4374 | { |
4375 | } |
4376 | -static inline int khugepaged_enter(struct vm_area_struct *vma) |
4377 | +static inline int khugepaged_enter(struct vm_area_struct *vma, |
4378 | + unsigned long vm_flags) |
4379 | { |
4380 | return 0; |
4381 | } |
4382 | -static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma) |
4383 | +static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma, |
4384 | + unsigned long vm_flags) |
4385 | { |
4386 | return 0; |
4387 | } |
4388 | diff --git a/include/linux/memory.h b/include/linux/memory.h |
4389 | index bb7384e3c3d8..8b8d8d12348e 100644 |
4390 | --- a/include/linux/memory.h |
4391 | +++ b/include/linux/memory.h |
4392 | @@ -35,7 +35,7 @@ struct memory_block { |
4393 | }; |
4394 | |
4395 | int arch_get_memory_phys_device(unsigned long start_pfn); |
4396 | -unsigned long __weak memory_block_size_bytes(void); |
4397 | +unsigned long memory_block_size_bytes(void); |
4398 | |
4399 | /* These states are exposed to userspace as text strings in sysfs */ |
4400 | #define MEM_ONLINE (1<<0) /* exposed to userspace */ |
4401 | diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h |
4402 | index c466ff3e16b8..dc558cffd5db 100644 |
4403 | --- a/include/linux/mfd/max77693-private.h |
4404 | +++ b/include/linux/mfd/max77693-private.h |
4405 | @@ -262,6 +262,13 @@ enum max77693_irq_source { |
4406 | MAX77693_IRQ_GROUP_NR, |
4407 | }; |
4408 | |
4409 | +#define SRC_IRQ_CHARGER BIT(0) |
4410 | +#define SRC_IRQ_TOP BIT(1) |
4411 | +#define SRC_IRQ_FLASH BIT(2) |
4412 | +#define SRC_IRQ_MUIC BIT(3) |
4413 | +#define SRC_IRQ_ALL (SRC_IRQ_CHARGER | SRC_IRQ_TOP \ |
4414 | + | SRC_IRQ_FLASH | SRC_IRQ_MUIC) |
4415 | + |
4416 | #define LED_IRQ_FLED2_OPEN BIT(0) |
4417 | #define LED_IRQ_FLED2_SHORT BIT(1) |
4418 | #define LED_IRQ_FLED1_OPEN BIT(2) |
4419 | diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h |
4420 | index 318df7051850..b21bac467dbe 100644 |
4421 | --- a/include/linux/mmzone.h |
4422 | +++ b/include/linux/mmzone.h |
4423 | @@ -431,6 +431,15 @@ struct zone { |
4424 | */ |
4425 | int nr_migrate_reserve_block; |
4426 | |
4427 | +#ifdef CONFIG_MEMORY_ISOLATION |
4428 | + /* |
4429 | + * Number of isolated pageblock. It is used to solve incorrect |
4430 | + * freepage counting problem due to racy retrieving migratetype |
4431 | + * of pageblock. Protected by zone->lock. |
4432 | + */ |
4433 | + unsigned long nr_isolate_pageblock; |
4434 | +#endif |
4435 | + |
4436 | #ifdef CONFIG_MEMORY_HOTPLUG |
4437 | /* see spanned/present_pages for more description */ |
4438 | seqlock_t span_seqlock; |
4439 | diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h |
4440 | index 0040629894df..cbec0ee54e10 100644 |
4441 | --- a/include/linux/nfs_xdr.h |
4442 | +++ b/include/linux/nfs_xdr.h |
4443 | @@ -1232,11 +1232,22 @@ struct nfs41_free_stateid_res { |
4444 | unsigned int status; |
4445 | }; |
4446 | |
4447 | +static inline void |
4448 | +nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) |
4449 | +{ |
4450 | + kfree(cinfo->buckets); |
4451 | +} |
4452 | + |
4453 | #else |
4454 | |
4455 | struct pnfs_ds_commit_info { |
4456 | }; |
4457 | |
4458 | +static inline void |
4459 | +nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) |
4460 | +{ |
4461 | +} |
4462 | + |
4463 | #endif /* CONFIG_NFS_V4_1 */ |
4464 | |
4465 | struct nfs_page; |
4466 | diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h |
4467 | index 3fff8e774067..2dc1e1697b45 100644 |
4468 | --- a/include/linux/page-isolation.h |
4469 | +++ b/include/linux/page-isolation.h |
4470 | @@ -2,6 +2,10 @@ |
4471 | #define __LINUX_PAGEISOLATION_H |
4472 | |
4473 | #ifdef CONFIG_MEMORY_ISOLATION |
4474 | +static inline bool has_isolate_pageblock(struct zone *zone) |
4475 | +{ |
4476 | + return zone->nr_isolate_pageblock; |
4477 | +} |
4478 | static inline bool is_migrate_isolate_page(struct page *page) |
4479 | { |
4480 | return get_pageblock_migratetype(page) == MIGRATE_ISOLATE; |
4481 | @@ -11,6 +15,10 @@ static inline bool is_migrate_isolate(int migratetype) |
4482 | return migratetype == MIGRATE_ISOLATE; |
4483 | } |
4484 | #else |
4485 | +static inline bool has_isolate_pageblock(struct zone *zone) |
4486 | +{ |
4487 | + return false; |
4488 | +} |
4489 | static inline bool is_migrate_isolate_page(struct page *page) |
4490 | { |
4491 | return false; |
4492 | diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h |
4493 | index 07e7945a1ff2..e97fc656a058 100644 |
4494 | --- a/include/linux/power/charger-manager.h |
4495 | +++ b/include/linux/power/charger-manager.h |
4496 | @@ -253,9 +253,6 @@ struct charger_manager { |
4497 | struct device *dev; |
4498 | struct charger_desc *desc; |
4499 | |
4500 | - struct power_supply *fuel_gauge; |
4501 | - struct power_supply **charger_stat; |
4502 | - |
4503 | #ifdef CONFIG_THERMAL |
4504 | struct thermal_zone_device *tzd_batt; |
4505 | #endif |
4506 | diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h |
4507 | index 49a4d6f59108..e2c13cd863bd 100644 |
4508 | --- a/include/linux/ring_buffer.h |
4509 | +++ b/include/linux/ring_buffer.h |
4510 | @@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k |
4511 | __ring_buffer_alloc((size), (flags), &__key); \ |
4512 | }) |
4513 | |
4514 | -int ring_buffer_wait(struct ring_buffer *buffer, int cpu); |
4515 | +int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full); |
4516 | int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, |
4517 | struct file *filp, poll_table *poll_table); |
4518 | |
4519 | diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h |
4520 | index 9fbd856e6713..856f01cb51dd 100644 |
4521 | --- a/include/net/sctp/sctp.h |
4522 | +++ b/include/net/sctp/sctp.h |
4523 | @@ -426,6 +426,11 @@ static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_associat |
4524 | asoc->pmtu_pending = 0; |
4525 | } |
4526 | |
4527 | +static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk) |
4528 | +{ |
4529 | + return !list_empty(&chunk->list); |
4530 | +} |
4531 | + |
4532 | /* Walk through a list of TLV parameters. Don't trust the |
4533 | * individual parameter lengths and instead depend on |
4534 | * the chunk length to indicate when to stop. Make sure |
4535 | diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h |
4536 | index 7f4eeb340a54..72a31db47ded 100644 |
4537 | --- a/include/net/sctp/sm.h |
4538 | +++ b/include/net/sctp/sm.h |
4539 | @@ -248,9 +248,9 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *, |
4540 | int, __be16); |
4541 | struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, |
4542 | union sctp_addr *addr); |
4543 | -int sctp_verify_asconf(const struct sctp_association *asoc, |
4544 | - struct sctp_paramhdr *param_hdr, void *chunk_end, |
4545 | - struct sctp_paramhdr **errp); |
4546 | +bool sctp_verify_asconf(const struct sctp_association *asoc, |
4547 | + struct sctp_chunk *chunk, bool addr_param_needed, |
4548 | + struct sctp_paramhdr **errp); |
4549 | struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, |
4550 | struct sctp_chunk *asconf); |
4551 | int sctp_process_asconf_ack(struct sctp_association *asoc, |
4552 | diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h |
4553 | index ffd69cbded35..55003b84e2e1 100644 |
4554 | --- a/include/net/udp_tunnel.h |
4555 | +++ b/include/net/udp_tunnel.h |
4556 | @@ -26,6 +26,15 @@ struct udp_port_cfg { |
4557 | use_udp6_rx_checksums:1; |
4558 | }; |
4559 | |
4560 | +static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff) |
4561 | +{ |
4562 | + struct udphdr *uh; |
4563 | + |
4564 | + uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr)); |
4565 | + skb_shinfo(skb)->gso_type |= uh->check ? |
4566 | + SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; |
4567 | +} |
4568 | + |
4569 | int udp_sock_create(struct net *net, struct udp_port_cfg *cfg, |
4570 | struct socket **sockp); |
4571 | |
4572 | diff --git a/include/uapi/linux/netfilter/xt_bpf.h b/include/uapi/linux/netfilter/xt_bpf.h |
4573 | index 1fad2c27ac32..5df03939f69f 100644 |
4574 | --- a/include/uapi/linux/netfilter/xt_bpf.h |
4575 | +++ b/include/uapi/linux/netfilter/xt_bpf.h |
4576 | @@ -8,6 +8,8 @@ |
4577 | |
4578 | struct bpf_prog; |
4579 | |
4580 | +struct sk_filter; |
4581 | + |
4582 | struct xt_bpf_info { |
4583 | __u16 bpf_program_num_elem; |
4584 | struct sock_filter bpf_program[XT_BPF_MAX_NUM_INSTR]; |
4585 | diff --git a/init/main.c b/init/main.c |
4586 | index bb1aed928f21..d0f4b5902c62 100644 |
4587 | --- a/init/main.c |
4588 | +++ b/init/main.c |
4589 | @@ -544,7 +544,7 @@ asmlinkage __visible void __init start_kernel(void) |
4590 | static_command_line, __start___param, |
4591 | __stop___param - __start___param, |
4592 | -1, -1, &unknown_bootoption); |
4593 | - if (after_dashes) |
4594 | + if (!IS_ERR_OR_NULL(after_dashes)) |
4595 | parse_args("Setting init args", after_dashes, NULL, 0, -1, -1, |
4596 | set_init_arg); |
4597 | |
4598 | diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c |
4599 | index c3f0326e98db..e8075b247497 100644 |
4600 | --- a/ipc/ipc_sysctl.c |
4601 | +++ b/ipc/ipc_sysctl.c |
4602 | @@ -123,7 +123,6 @@ static int proc_ipcauto_dointvec_minmax(struct ctl_table *table, int write, |
4603 | void __user *buffer, size_t *lenp, loff_t *ppos) |
4604 | { |
4605 | struct ctl_table ipc_table; |
4606 | - size_t lenp_bef = *lenp; |
4607 | int oldval; |
4608 | int rc; |
4609 | |
4610 | @@ -133,7 +132,7 @@ static int proc_ipcauto_dointvec_minmax(struct ctl_table *table, int write, |
4611 | |
4612 | rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos); |
4613 | |
4614 | - if (write && !rc && lenp_bef == *lenp) { |
4615 | + if (write && !rc) { |
4616 | int newval = *((int *)(ipc_table.data)); |
4617 | /* |
4618 | * The file "auto_msgmni" has correctly been set. |
4619 | diff --git a/kernel/audit.c b/kernel/audit.c |
4620 | index ba2ff5a5c600..6726aa6f82be 100644 |
4621 | --- a/kernel/audit.c |
4622 | +++ b/kernel/audit.c |
4623 | @@ -724,7 +724,7 @@ static int audit_get_feature(struct sk_buff *skb) |
4624 | |
4625 | seq = nlmsg_hdr(skb)->nlmsg_seq; |
4626 | |
4627 | - audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &af, sizeof(af)); |
4628 | + audit_send_reply(skb, seq, AUDIT_GET_FEATURE, 0, 0, &af, sizeof(af)); |
4629 | |
4630 | return 0; |
4631 | } |
4632 | @@ -739,7 +739,7 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature |
4633 | |
4634 | ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE); |
4635 | audit_log_task_info(ab, current); |
4636 | - audit_log_format(ab, "feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d", |
4637 | + audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d", |
4638 | audit_feature_names[which], !!old_feature, !!new_feature, |
4639 | !!old_lock, !!new_lock, res); |
4640 | audit_log_end(ab); |
4641 | diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c |
4642 | index 135944a7b28a..a79db03990db 100644 |
4643 | --- a/kernel/audit_tree.c |
4644 | +++ b/kernel/audit_tree.c |
4645 | @@ -154,6 +154,7 @@ static struct audit_chunk *alloc_chunk(int count) |
4646 | chunk->owners[i].index = i; |
4647 | } |
4648 | fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch); |
4649 | + chunk->mark.mask = FS_IN_IGNORED; |
4650 | return chunk; |
4651 | } |
4652 | |
4653 | diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c |
4654 | index 1b70cb6fbe3c..89a404a63ae5 100644 |
4655 | --- a/kernel/rcu/tree.c |
4656 | +++ b/kernel/rcu/tree.c |
4657 | @@ -1928,7 +1928,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) |
4658 | { |
4659 | WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); |
4660 | raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags); |
4661 | - wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ |
4662 | + rcu_gp_kthread_wake(rsp); |
4663 | } |
4664 | |
4665 | /* |
4666 | @@ -2507,7 +2507,7 @@ static void force_quiescent_state(struct rcu_state *rsp) |
4667 | } |
4668 | ACCESS_ONCE(rsp->gp_flags) |= RCU_GP_FLAG_FQS; |
4669 | raw_spin_unlock_irqrestore(&rnp_old->lock, flags); |
4670 | - wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ |
4671 | + rcu_gp_kthread_wake(rsp); |
4672 | } |
4673 | |
4674 | /* |
4675 | diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c |
4676 | index 2d75c94ae87d..a56e07c8d15b 100644 |
4677 | --- a/kernel/trace/ring_buffer.c |
4678 | +++ b/kernel/trace/ring_buffer.c |
4679 | @@ -538,16 +538,18 @@ static void rb_wake_up_waiters(struct irq_work *work) |
4680 | * ring_buffer_wait - wait for input to the ring buffer |
4681 | * @buffer: buffer to wait on |
4682 | * @cpu: the cpu buffer to wait on |
4683 | + * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS |
4684 | * |
4685 | * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon |
4686 | * as data is added to any of the @buffer's cpu buffers. Otherwise |
4687 | * it will wait for data to be added to a specific cpu buffer. |
4688 | */ |
4689 | -int ring_buffer_wait(struct ring_buffer *buffer, int cpu) |
4690 | +int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full) |
4691 | { |
4692 | - struct ring_buffer_per_cpu *cpu_buffer; |
4693 | + struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer); |
4694 | DEFINE_WAIT(wait); |
4695 | struct rb_irq_work *work; |
4696 | + int ret = 0; |
4697 | |
4698 | /* |
4699 | * Depending on what the caller is waiting for, either any |
4700 | @@ -564,36 +566,61 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu) |
4701 | } |
4702 | |
4703 | |
4704 | - prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); |
4705 | + while (true) { |
4706 | + prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); |
4707 | |
4708 | - /* |
4709 | - * The events can happen in critical sections where |
4710 | - * checking a work queue can cause deadlocks. |
4711 | - * After adding a task to the queue, this flag is set |
4712 | - * only to notify events to try to wake up the queue |
4713 | - * using irq_work. |
4714 | - * |
4715 | - * We don't clear it even if the buffer is no longer |
4716 | - * empty. The flag only causes the next event to run |
4717 | - * irq_work to do the work queue wake up. The worse |
4718 | - * that can happen if we race with !trace_empty() is that |
4719 | - * an event will cause an irq_work to try to wake up |
4720 | - * an empty queue. |
4721 | - * |
4722 | - * There's no reason to protect this flag either, as |
4723 | - * the work queue and irq_work logic will do the necessary |
4724 | - * synchronization for the wake ups. The only thing |
4725 | - * that is necessary is that the wake up happens after |
4726 | - * a task has been queued. It's OK for spurious wake ups. |
4727 | - */ |
4728 | - work->waiters_pending = true; |
4729 | + /* |
4730 | + * The events can happen in critical sections where |
4731 | + * checking a work queue can cause deadlocks. |
4732 | + * After adding a task to the queue, this flag is set |
4733 | + * only to notify events to try to wake up the queue |
4734 | + * using irq_work. |
4735 | + * |
4736 | + * We don't clear it even if the buffer is no longer |
4737 | + * empty. The flag only causes the next event to run |
4738 | + * irq_work to do the work queue wake up. The worse |
4739 | + * that can happen if we race with !trace_empty() is that |
4740 | + * an event will cause an irq_work to try to wake up |
4741 | + * an empty queue. |
4742 | + * |
4743 | + * There's no reason to protect this flag either, as |
4744 | + * the work queue and irq_work logic will do the necessary |
4745 | + * synchronization for the wake ups. The only thing |
4746 | + * that is necessary is that the wake up happens after |
4747 | + * a task has been queued. It's OK for spurious wake ups. |
4748 | + */ |
4749 | + work->waiters_pending = true; |
4750 | + |
4751 | + if (signal_pending(current)) { |
4752 | + ret = -EINTR; |
4753 | + break; |
4754 | + } |
4755 | + |
4756 | + if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) |
4757 | + break; |
4758 | + |
4759 | + if (cpu != RING_BUFFER_ALL_CPUS && |
4760 | + !ring_buffer_empty_cpu(buffer, cpu)) { |
4761 | + unsigned long flags; |
4762 | + bool pagebusy; |
4763 | + |
4764 | + if (!full) |
4765 | + break; |
4766 | + |
4767 | + raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
4768 | + pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; |
4769 | + raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
4770 | + |
4771 | + if (!pagebusy) |
4772 | + break; |
4773 | + } |
4774 | |
4775 | - if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) || |
4776 | - (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu))) |
4777 | schedule(); |
4778 | + } |
4779 | |
4780 | finish_wait(&work->waiters, &wait); |
4781 | - return 0; |
4782 | + |
4783 | + return ret; |
4784 | } |
4785 | |
4786 | /** |
4787 | diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c |
4788 | index 8a528392b1f4..15209335888d 100644 |
4789 | --- a/kernel/trace/trace.c |
4790 | +++ b/kernel/trace/trace.c |
4791 | @@ -1076,13 +1076,14 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) |
4792 | } |
4793 | #endif /* CONFIG_TRACER_MAX_TRACE */ |
4794 | |
4795 | -static int wait_on_pipe(struct trace_iterator *iter) |
4796 | +static int wait_on_pipe(struct trace_iterator *iter, bool full) |
4797 | { |
4798 | /* Iterators are static, they should be filled or empty */ |
4799 | if (trace_buffer_iter(iter, iter->cpu_file)) |
4800 | return 0; |
4801 | |
4802 | - return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file); |
4803 | + return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file, |
4804 | + full); |
4805 | } |
4806 | |
4807 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
4808 | @@ -4434,15 +4435,12 @@ static int tracing_wait_pipe(struct file *filp) |
4809 | |
4810 | mutex_unlock(&iter->mutex); |
4811 | |
4812 | - ret = wait_on_pipe(iter); |
4813 | + ret = wait_on_pipe(iter, false); |
4814 | |
4815 | mutex_lock(&iter->mutex); |
4816 | |
4817 | if (ret) |
4818 | return ret; |
4819 | - |
4820 | - if (signal_pending(current)) |
4821 | - return -EINTR; |
4822 | } |
4823 | |
4824 | return 1; |
4825 | @@ -5372,16 +5370,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, |
4826 | goto out_unlock; |
4827 | } |
4828 | mutex_unlock(&trace_types_lock); |
4829 | - ret = wait_on_pipe(iter); |
4830 | + ret = wait_on_pipe(iter, false); |
4831 | mutex_lock(&trace_types_lock); |
4832 | if (ret) { |
4833 | size = ret; |
4834 | goto out_unlock; |
4835 | } |
4836 | - if (signal_pending(current)) { |
4837 | - size = -EINTR; |
4838 | - goto out_unlock; |
4839 | - } |
4840 | goto again; |
4841 | } |
4842 | size = 0; |
4843 | @@ -5587,14 +5581,11 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, |
4844 | goto out; |
4845 | } |
4846 | mutex_unlock(&trace_types_lock); |
4847 | - ret = wait_on_pipe(iter); |
4848 | + ret = wait_on_pipe(iter, true); |
4849 | mutex_lock(&trace_types_lock); |
4850 | if (ret) |
4851 | goto out; |
4852 | - if (signal_pending(current)) { |
4853 | - ret = -EINTR; |
4854 | - goto out; |
4855 | - } |
4856 | + |
4857 | goto again; |
4858 | } |
4859 | |
4860 | diff --git a/mm/bootmem.c b/mm/bootmem.c |
4861 | index 90bd3507b413..6603a7c4b136 100644 |
4862 | --- a/mm/bootmem.c |
4863 | +++ b/mm/bootmem.c |
4864 | @@ -243,13 +243,10 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) |
4865 | |
4866 | static int reset_managed_pages_done __initdata; |
4867 | |
4868 | -static inline void __init reset_node_managed_pages(pg_data_t *pgdat) |
4869 | +void reset_node_managed_pages(pg_data_t *pgdat) |
4870 | { |
4871 | struct zone *z; |
4872 | |
4873 | - if (reset_managed_pages_done) |
4874 | - return; |
4875 | - |
4876 | for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) |
4877 | z->managed_pages = 0; |
4878 | } |
4879 | @@ -258,8 +255,12 @@ void __init reset_all_zones_managed_pages(void) |
4880 | { |
4881 | struct pglist_data *pgdat; |
4882 | |
4883 | + if (reset_managed_pages_done) |
4884 | + return; |
4885 | + |
4886 | for_each_online_pgdat(pgdat) |
4887 | reset_node_managed_pages(pgdat); |
4888 | + |
4889 | reset_managed_pages_done = 1; |
4890 | } |
4891 | |
4892 | diff --git a/mm/huge_memory.c b/mm/huge_memory.c |
4893 | index 45c6d6738dfa..23e900255205 100644 |
4894 | --- a/mm/huge_memory.c |
4895 | +++ b/mm/huge_memory.c |
4896 | @@ -803,7 +803,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, |
4897 | return VM_FAULT_FALLBACK; |
4898 | if (unlikely(anon_vma_prepare(vma))) |
4899 | return VM_FAULT_OOM; |
4900 | - if (unlikely(khugepaged_enter(vma))) |
4901 | + if (unlikely(khugepaged_enter(vma, vma->vm_flags))) |
4902 | return VM_FAULT_OOM; |
4903 | if (!(flags & FAULT_FLAG_WRITE) && |
4904 | transparent_hugepage_use_zero_page()) { |
4905 | @@ -1970,7 +1970,7 @@ int hugepage_madvise(struct vm_area_struct *vma, |
4906 | * register it here without waiting a page fault that |
4907 | * may not happen any time soon. |
4908 | */ |
4909 | - if (unlikely(khugepaged_enter_vma_merge(vma))) |
4910 | + if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags))) |
4911 | return -ENOMEM; |
4912 | break; |
4913 | case MADV_NOHUGEPAGE: |
4914 | @@ -2071,7 +2071,8 @@ int __khugepaged_enter(struct mm_struct *mm) |
4915 | return 0; |
4916 | } |
4917 | |
4918 | -int khugepaged_enter_vma_merge(struct vm_area_struct *vma) |
4919 | +int khugepaged_enter_vma_merge(struct vm_area_struct *vma, |
4920 | + unsigned long vm_flags) |
4921 | { |
4922 | unsigned long hstart, hend; |
4923 | if (!vma->anon_vma) |
4924 | @@ -2083,11 +2084,11 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma) |
4925 | if (vma->vm_ops) |
4926 | /* khugepaged not yet working on file or special mappings */ |
4927 | return 0; |
4928 | - VM_BUG_ON(vma->vm_flags & VM_NO_THP); |
4929 | + VM_BUG_ON(vm_flags & VM_NO_THP); |
4930 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; |
4931 | hend = vma->vm_end & HPAGE_PMD_MASK; |
4932 | if (hstart < hend) |
4933 | - return khugepaged_enter(vma); |
4934 | + return khugepaged_enter(vma, vm_flags); |
4935 | return 0; |
4936 | } |
4937 | |
4938 | diff --git a/mm/internal.h b/mm/internal.h |
4939 | index a1b651b11c5f..5f2772f97013 100644 |
4940 | --- a/mm/internal.h |
4941 | +++ b/mm/internal.h |
4942 | @@ -108,6 +108,31 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address); |
4943 | /* |
4944 | * in mm/page_alloc.c |
4945 | */ |
4946 | + |
4947 | +/* |
4948 | + * Locate the struct page for both the matching buddy in our |
4949 | + * pair (buddy1) and the combined O(n+1) page they form (page). |
4950 | + * |
4951 | + * 1) Any buddy B1 will have an order O twin B2 which satisfies |
4952 | + * the following equation: |
4953 | + * B2 = B1 ^ (1 << O) |
4954 | + * For example, if the starting buddy (buddy2) is #8 its order |
4955 | + * 1 buddy is #10: |
4956 | + * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10 |
4957 | + * |
4958 | + * 2) Any buddy B will have an order O+1 parent P which |
4959 | + * satisfies the following equation: |
4960 | + * P = B & ~(1 << O) |
4961 | + * |
4962 | + * Assumption: *_mem_map is contiguous at least up to MAX_ORDER |
4963 | + */ |
4964 | +static inline unsigned long |
4965 | +__find_buddy_index(unsigned long page_idx, unsigned int order) |
4966 | +{ |
4967 | + return page_idx ^ (1 << order); |
4968 | +} |
4969 | + |
4970 | +extern int __isolate_free_page(struct page *page, unsigned int order); |
4971 | extern void __free_pages_bootmem(struct page *page, unsigned int order); |
4972 | extern void prep_compound_page(struct page *page, unsigned long order); |
4973 | #ifdef CONFIG_MEMORY_FAILURE |
4974 | diff --git a/mm/iov_iter.c b/mm/iov_iter.c |
4975 | index 9a09f2034fcc..141dcf796d28 100644 |
4976 | --- a/mm/iov_iter.c |
4977 | +++ b/mm/iov_iter.c |
4978 | @@ -699,9 +699,9 @@ size_t iov_iter_single_seg_count(const struct iov_iter *i) |
4979 | if (i->nr_segs == 1) |
4980 | return i->count; |
4981 | else if (i->type & ITER_BVEC) |
4982 | - return min(i->count, i->iov->iov_len - i->iov_offset); |
4983 | - else |
4984 | return min(i->count, i->bvec->bv_len - i->iov_offset); |
4985 | + else |
4986 | + return min(i->count, i->iov->iov_len - i->iov_offset); |
4987 | } |
4988 | EXPORT_SYMBOL(iov_iter_single_seg_count); |
4989 | |
4990 | diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c |
4991 | index 2ff8c2325e96..55f94669d7c6 100644 |
4992 | --- a/mm/memory_hotplug.c |
4993 | +++ b/mm/memory_hotplug.c |
4994 | @@ -31,6 +31,7 @@ |
4995 | #include <linux/stop_machine.h> |
4996 | #include <linux/hugetlb.h> |
4997 | #include <linux/memblock.h> |
4998 | +#include <linux/bootmem.h> |
4999 | |
5000 | #include <asm/tlbflush.h> |
5001 | |
5002 | @@ -1066,6 +1067,16 @@ out: |
5003 | } |
5004 | #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ |
5005 | |
5006 | +static void reset_node_present_pages(pg_data_t *pgdat) |
5007 | +{ |
5008 | + struct zone *z; |
5009 | + |
5010 | + for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) |
5011 | + z->present_pages = 0; |
5012 | + |
5013 | + pgdat->node_present_pages = 0; |
5014 | +} |
5015 | + |
5016 | /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */ |
5017 | static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) |
5018 | { |
5019 | @@ -1096,6 +1107,21 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) |
5020 | build_all_zonelists(pgdat, NULL); |
5021 | mutex_unlock(&zonelists_mutex); |
5022 | |
5023 | + /* |
5024 | + * zone->managed_pages is set to an approximate value in |
5025 | + * free_area_init_core(), which will cause |
5026 | + * /sys/device/system/node/nodeX/meminfo has wrong data. |
5027 | + * So reset it to 0 before any memory is onlined. |
5028 | + */ |
5029 | + reset_node_managed_pages(pgdat); |
5030 | + |
5031 | + /* |
5032 | + * When memory is hot-added, all the memory is in offline state. So |
5033 | + * clear all zones' present_pages because they will be updated in |
5034 | + * online_pages() and offline_pages(). |
5035 | + */ |
5036 | + reset_node_present_pages(pgdat); |
5037 | + |
5038 | return pgdat; |
5039 | } |
5040 | |
5041 | diff --git a/mm/mmap.c b/mm/mmap.c |
5042 | index c0a3637cdb64..ebc25fab1545 100644 |
5043 | --- a/mm/mmap.c |
5044 | +++ b/mm/mmap.c |
5045 | @@ -1056,7 +1056,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, |
5046 | end, prev->vm_pgoff, NULL); |
5047 | if (err) |
5048 | return NULL; |
5049 | - khugepaged_enter_vma_merge(prev); |
5050 | + khugepaged_enter_vma_merge(prev, vm_flags); |
5051 | return prev; |
5052 | } |
5053 | |
5054 | @@ -1075,7 +1075,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, |
5055 | next->vm_pgoff - pglen, NULL); |
5056 | if (err) |
5057 | return NULL; |
5058 | - khugepaged_enter_vma_merge(area); |
5059 | + khugepaged_enter_vma_merge(area, vm_flags); |
5060 | return area; |
5061 | } |
5062 | |
5063 | @@ -2192,7 +2192,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) |
5064 | } |
5065 | } |
5066 | vma_unlock_anon_vma(vma); |
5067 | - khugepaged_enter_vma_merge(vma); |
5068 | + khugepaged_enter_vma_merge(vma, vma->vm_flags); |
5069 | validate_mm(vma->vm_mm); |
5070 | return error; |
5071 | } |
5072 | @@ -2261,7 +2261,7 @@ int expand_downwards(struct vm_area_struct *vma, |
5073 | } |
5074 | } |
5075 | vma_unlock_anon_vma(vma); |
5076 | - khugepaged_enter_vma_merge(vma); |
5077 | + khugepaged_enter_vma_merge(vma, vma->vm_flags); |
5078 | validate_mm(vma->vm_mm); |
5079 | return error; |
5080 | } |
5081 | diff --git a/mm/nobootmem.c b/mm/nobootmem.c |
5082 | index 7c7ab32ee503..90b50468333e 100644 |
5083 | --- a/mm/nobootmem.c |
5084 | +++ b/mm/nobootmem.c |
5085 | @@ -145,12 +145,10 @@ static unsigned long __init free_low_memory_core_early(void) |
5086 | |
5087 | static int reset_managed_pages_done __initdata; |
5088 | |
5089 | -static inline void __init reset_node_managed_pages(pg_data_t *pgdat) |
5090 | +void reset_node_managed_pages(pg_data_t *pgdat) |
5091 | { |
5092 | struct zone *z; |
5093 | |
5094 | - if (reset_managed_pages_done) |
5095 | - return; |
5096 | for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) |
5097 | z->managed_pages = 0; |
5098 | } |
5099 | @@ -159,8 +157,12 @@ void __init reset_all_zones_managed_pages(void) |
5100 | { |
5101 | struct pglist_data *pgdat; |
5102 | |
5103 | + if (reset_managed_pages_done) |
5104 | + return; |
5105 | + |
5106 | for_each_online_pgdat(pgdat) |
5107 | reset_node_managed_pages(pgdat); |
5108 | + |
5109 | reset_managed_pages_done = 1; |
5110 | } |
5111 | |
5112 | diff --git a/mm/page_alloc.c b/mm/page_alloc.c |
5113 | index 8c5029f22bbe..c5fe124d6cdb 100644 |
5114 | --- a/mm/page_alloc.c |
5115 | +++ b/mm/page_alloc.c |
5116 | @@ -468,29 +468,6 @@ static inline void rmv_page_order(struct page *page) |
5117 | } |
5118 | |
5119 | /* |
5120 | - * Locate the struct page for both the matching buddy in our |
5121 | - * pair (buddy1) and the combined O(n+1) page they form (page). |
5122 | - * |
5123 | - * 1) Any buddy B1 will have an order O twin B2 which satisfies |
5124 | - * the following equation: |
5125 | - * B2 = B1 ^ (1 << O) |
5126 | - * For example, if the starting buddy (buddy2) is #8 its order |
5127 | - * 1 buddy is #10: |
5128 | - * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10 |
5129 | - * |
5130 | - * 2) Any buddy B will have an order O+1 parent P which |
5131 | - * satisfies the following equation: |
5132 | - * P = B & ~(1 << O) |
5133 | - * |
5134 | - * Assumption: *_mem_map is contiguous at least up to MAX_ORDER |
5135 | - */ |
5136 | -static inline unsigned long |
5137 | -__find_buddy_index(unsigned long page_idx, unsigned int order) |
5138 | -{ |
5139 | - return page_idx ^ (1 << order); |
5140 | -} |
5141 | - |
5142 | -/* |
5143 | * This function checks whether a page is free && is the buddy |
5144 | * we can do coalesce a page and its buddy if |
5145 | * (a) the buddy is not in a hole && |
5146 | @@ -570,6 +547,7 @@ static inline void __free_one_page(struct page *page, |
5147 | unsigned long combined_idx; |
5148 | unsigned long uninitialized_var(buddy_idx); |
5149 | struct page *buddy; |
5150 | + int max_order = MAX_ORDER; |
5151 | |
5152 | VM_BUG_ON(!zone_is_initialized(zone)); |
5153 | |
5154 | @@ -578,13 +556,24 @@ static inline void __free_one_page(struct page *page, |
5155 | return; |
5156 | |
5157 | VM_BUG_ON(migratetype == -1); |
5158 | + if (is_migrate_isolate(migratetype)) { |
5159 | + /* |
5160 | + * We restrict max order of merging to prevent merge |
5161 | + * between freepages on isolate pageblock and normal |
5162 | + * pageblock. Without this, pageblock isolation |
5163 | + * could cause incorrect freepage accounting. |
5164 | + */ |
5165 | + max_order = min(MAX_ORDER, pageblock_order + 1); |
5166 | + } else { |
5167 | + __mod_zone_freepage_state(zone, 1 << order, migratetype); |
5168 | + } |
5169 | |
5170 | - page_idx = pfn & ((1 << MAX_ORDER) - 1); |
5171 | + page_idx = pfn & ((1 << max_order) - 1); |
5172 | |
5173 | VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page); |
5174 | VM_BUG_ON_PAGE(bad_range(zone, page), page); |
5175 | |
5176 | - while (order < MAX_ORDER-1) { |
5177 | + while (order < max_order - 1) { |
5178 | buddy_idx = __find_buddy_index(page_idx, order); |
5179 | buddy = page + (buddy_idx - page_idx); |
5180 | if (!page_is_buddy(page, buddy, order)) |
5181 | @@ -716,14 +705,12 @@ static void free_pcppages_bulk(struct zone *zone, int count, |
5182 | /* must delete as __free_one_page list manipulates */ |
5183 | list_del(&page->lru); |
5184 | mt = get_freepage_migratetype(page); |
5185 | + if (unlikely(has_isolate_pageblock(zone))) |
5186 | + mt = get_pageblock_migratetype(page); |
5187 | + |
5188 | /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ |
5189 | __free_one_page(page, page_to_pfn(page), zone, 0, mt); |
5190 | trace_mm_page_pcpu_drain(page, 0, mt); |
5191 | - if (likely(!is_migrate_isolate_page(page))) { |
5192 | - __mod_zone_page_state(zone, NR_FREE_PAGES, 1); |
5193 | - if (is_migrate_cma(mt)) |
5194 | - __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1); |
5195 | - } |
5196 | } while (--to_free && --batch_free && !list_empty(list)); |
5197 | } |
5198 | spin_unlock(&zone->lock); |
5199 | @@ -740,9 +727,11 @@ static void free_one_page(struct zone *zone, |
5200 | if (nr_scanned) |
5201 | __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned); |
5202 | |
5203 | + if (unlikely(has_isolate_pageblock(zone) || |
5204 | + is_migrate_isolate(migratetype))) { |
5205 | + migratetype = get_pfnblock_migratetype(page, pfn); |
5206 | + } |
5207 | __free_one_page(page, pfn, zone, order, migratetype); |
5208 | - if (unlikely(!is_migrate_isolate(migratetype))) |
5209 | - __mod_zone_freepage_state(zone, 1 << order, migratetype); |
5210 | spin_unlock(&zone->lock); |
5211 | } |
5212 | |
5213 | @@ -1485,7 +1474,7 @@ void split_page(struct page *page, unsigned int order) |
5214 | } |
5215 | EXPORT_SYMBOL_GPL(split_page); |
5216 | |
5217 | -static int __isolate_free_page(struct page *page, unsigned int order) |
5218 | +int __isolate_free_page(struct page *page, unsigned int order) |
5219 | { |
5220 | unsigned long watermark; |
5221 | struct zone *zone; |
5222 | diff --git a/mm/page_isolation.c b/mm/page_isolation.c |
5223 | index d1473b2e9481..c8778f7e208e 100644 |
5224 | --- a/mm/page_isolation.c |
5225 | +++ b/mm/page_isolation.c |
5226 | @@ -60,6 +60,7 @@ out: |
5227 | int migratetype = get_pageblock_migratetype(page); |
5228 | |
5229 | set_pageblock_migratetype(page, MIGRATE_ISOLATE); |
5230 | + zone->nr_isolate_pageblock++; |
5231 | nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE); |
5232 | |
5233 | __mod_zone_freepage_state(zone, -nr_pages, migratetype); |
5234 | @@ -75,16 +76,54 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype) |
5235 | { |
5236 | struct zone *zone; |
5237 | unsigned long flags, nr_pages; |
5238 | + struct page *isolated_page = NULL; |
5239 | + unsigned int order; |
5240 | + unsigned long page_idx, buddy_idx; |
5241 | + struct page *buddy; |
5242 | |
5243 | zone = page_zone(page); |
5244 | spin_lock_irqsave(&zone->lock, flags); |
5245 | if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) |
5246 | goto out; |
5247 | - nr_pages = move_freepages_block(zone, page, migratetype); |
5248 | - __mod_zone_freepage_state(zone, nr_pages, migratetype); |
5249 | + |
5250 | + /* |
5251 | + * Because freepage with more than pageblock_order on isolated |
5252 | + * pageblock is restricted to merge due to freepage counting problem, |
5253 | + * it is possible that there is free buddy page. |
5254 | + * move_freepages_block() doesn't care of merge so we need other |
5255 | + * approach in order to merge them. Isolation and free will make |
5256 | + * these pages to be merged. |
5257 | + */ |
5258 | + if (PageBuddy(page)) { |
5259 | + order = page_order(page); |
5260 | + if (order >= pageblock_order) { |
5261 | + page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); |
5262 | + buddy_idx = __find_buddy_index(page_idx, order); |
5263 | + buddy = page + (buddy_idx - page_idx); |
5264 | + |
5265 | + if (!is_migrate_isolate_page(buddy)) { |
5266 | + __isolate_free_page(page, order); |
5267 | + set_page_refcounted(page); |
5268 | + isolated_page = page; |
5269 | + } |
5270 | + } |
5271 | + } |
5272 | + |
5273 | + /* |
5274 | + * If we isolate freepage with more than pageblock_order, there |
5275 | + * should be no freepage in the range, so we could avoid costly |
5276 | + * pageblock scanning for freepage moving. |
5277 | + */ |
5278 | + if (!isolated_page) { |
5279 | + nr_pages = move_freepages_block(zone, page, migratetype); |
5280 | + __mod_zone_freepage_state(zone, nr_pages, migratetype); |
5281 | + } |
5282 | set_pageblock_migratetype(page, migratetype); |
5283 | + zone->nr_isolate_pageblock--; |
5284 | out: |
5285 | spin_unlock_irqrestore(&zone->lock, flags); |
5286 | + if (isolated_page) |
5287 | + __free_pages(isolated_page, order); |
5288 | } |
5289 | |
5290 | static inline struct page * |
5291 | diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c |
5292 | index ffeba8f9dda9..c0d666a993ec 100644 |
5293 | --- a/net/ceph/crypto.c |
5294 | +++ b/net/ceph/crypto.c |
5295 | @@ -90,11 +90,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void) |
5296 | |
5297 | static const u8 *aes_iv = (u8 *)CEPH_AES_IV; |
5298 | |
5299 | +/* |
5300 | + * Should be used for buffers allocated with ceph_kvmalloc(). |
5301 | + * Currently these are encrypt out-buffer (ceph_buffer) and decrypt |
5302 | + * in-buffer (msg front). |
5303 | + * |
5304 | + * Dispose of @sgt with teardown_sgtable(). |
5305 | + * |
5306 | + * @prealloc_sg is to avoid memory allocation inside sg_alloc_table() |
5307 | + * in cases where a single sg is sufficient. No attempt to reduce the |
5308 | + * number of sgs by squeezing physically contiguous pages together is |
5309 | + * made though, for simplicity. |
5310 | + */ |
5311 | +static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg, |
5312 | + const void *buf, unsigned int buf_len) |
5313 | +{ |
5314 | + struct scatterlist *sg; |
5315 | + const bool is_vmalloc = is_vmalloc_addr(buf); |
5316 | + unsigned int off = offset_in_page(buf); |
5317 | + unsigned int chunk_cnt = 1; |
5318 | + unsigned int chunk_len = PAGE_ALIGN(off + buf_len); |
5319 | + int i; |
5320 | + int ret; |
5321 | + |
5322 | + if (buf_len == 0) { |
5323 | + memset(sgt, 0, sizeof(*sgt)); |
5324 | + return -EINVAL; |
5325 | + } |
5326 | + |
5327 | + if (is_vmalloc) { |
5328 | + chunk_cnt = chunk_len >> PAGE_SHIFT; |
5329 | + chunk_len = PAGE_SIZE; |
5330 | + } |
5331 | + |
5332 | + if (chunk_cnt > 1) { |
5333 | + ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS); |
5334 | + if (ret) |
5335 | + return ret; |
5336 | + } else { |
5337 | + WARN_ON(chunk_cnt != 1); |
5338 | + sg_init_table(prealloc_sg, 1); |
5339 | + sgt->sgl = prealloc_sg; |
5340 | + sgt->nents = sgt->orig_nents = 1; |
5341 | + } |
5342 | + |
5343 | + for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) { |
5344 | + struct page *page; |
5345 | + unsigned int len = min(chunk_len - off, buf_len); |
5346 | + |
5347 | + if (is_vmalloc) |
5348 | + page = vmalloc_to_page(buf); |
5349 | + else |
5350 | + page = virt_to_page(buf); |
5351 | + |
5352 | + sg_set_page(sg, page, len, off); |
5353 | + |
5354 | + off = 0; |
5355 | + buf += len; |
5356 | + buf_len -= len; |
5357 | + } |
5358 | + WARN_ON(buf_len != 0); |
5359 | + |
5360 | + return 0; |
5361 | +} |
5362 | + |
5363 | +static void teardown_sgtable(struct sg_table *sgt) |
5364 | +{ |
5365 | + if (sgt->orig_nents > 1) |
5366 | + sg_free_table(sgt); |
5367 | +} |
5368 | + |
5369 | static int ceph_aes_encrypt(const void *key, int key_len, |
5370 | void *dst, size_t *dst_len, |
5371 | const void *src, size_t src_len) |
5372 | { |
5373 | - struct scatterlist sg_in[2], sg_out[1]; |
5374 | + struct scatterlist sg_in[2], prealloc_sg; |
5375 | + struct sg_table sg_out; |
5376 | struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); |
5377 | struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; |
5378 | int ret; |
5379 | @@ -110,16 +181,18 @@ static int ceph_aes_encrypt(const void *key, int key_len, |
5380 | |
5381 | *dst_len = src_len + zero_padding; |
5382 | |
5383 | - crypto_blkcipher_setkey((void *)tfm, key, key_len); |
5384 | sg_init_table(sg_in, 2); |
5385 | sg_set_buf(&sg_in[0], src, src_len); |
5386 | sg_set_buf(&sg_in[1], pad, zero_padding); |
5387 | - sg_init_table(sg_out, 1); |
5388 | - sg_set_buf(sg_out, dst, *dst_len); |
5389 | + ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len); |
5390 | + if (ret) |
5391 | + goto out_tfm; |
5392 | + |
5393 | + crypto_blkcipher_setkey((void *)tfm, key, key_len); |
5394 | iv = crypto_blkcipher_crt(tfm)->iv; |
5395 | ivsize = crypto_blkcipher_ivsize(tfm); |
5396 | - |
5397 | memcpy(iv, aes_iv, ivsize); |
5398 | + |
5399 | /* |
5400 | print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, |
5401 | key, key_len, 1); |
5402 | @@ -128,16 +201,22 @@ static int ceph_aes_encrypt(const void *key, int key_len, |
5403 | print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, |
5404 | pad, zero_padding, 1); |
5405 | */ |
5406 | - ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, |
5407 | + ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in, |
5408 | src_len + zero_padding); |
5409 | - crypto_free_blkcipher(tfm); |
5410 | - if (ret < 0) |
5411 | + if (ret < 0) { |
5412 | pr_err("ceph_aes_crypt failed %d\n", ret); |
5413 | + goto out_sg; |
5414 | + } |
5415 | /* |
5416 | print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, |
5417 | dst, *dst_len, 1); |
5418 | */ |
5419 | - return 0; |
5420 | + |
5421 | +out_sg: |
5422 | + teardown_sgtable(&sg_out); |
5423 | +out_tfm: |
5424 | + crypto_free_blkcipher(tfm); |
5425 | + return ret; |
5426 | } |
5427 | |
5428 | static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, |
5429 | @@ -145,7 +224,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, |
5430 | const void *src1, size_t src1_len, |
5431 | const void *src2, size_t src2_len) |
5432 | { |
5433 | - struct scatterlist sg_in[3], sg_out[1]; |
5434 | + struct scatterlist sg_in[3], prealloc_sg; |
5435 | + struct sg_table sg_out; |
5436 | struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); |
5437 | struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; |
5438 | int ret; |
5439 | @@ -161,17 +241,19 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, |
5440 | |
5441 | *dst_len = src1_len + src2_len + zero_padding; |
5442 | |
5443 | - crypto_blkcipher_setkey((void *)tfm, key, key_len); |
5444 | sg_init_table(sg_in, 3); |
5445 | sg_set_buf(&sg_in[0], src1, src1_len); |
5446 | sg_set_buf(&sg_in[1], src2, src2_len); |
5447 | sg_set_buf(&sg_in[2], pad, zero_padding); |
5448 | - sg_init_table(sg_out, 1); |
5449 | - sg_set_buf(sg_out, dst, *dst_len); |
5450 | + ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len); |
5451 | + if (ret) |
5452 | + goto out_tfm; |
5453 | + |
5454 | + crypto_blkcipher_setkey((void *)tfm, key, key_len); |
5455 | iv = crypto_blkcipher_crt(tfm)->iv; |
5456 | ivsize = crypto_blkcipher_ivsize(tfm); |
5457 | - |
5458 | memcpy(iv, aes_iv, ivsize); |
5459 | + |
5460 | /* |
5461 | print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, |
5462 | key, key_len, 1); |
5463 | @@ -182,23 +264,30 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, |
5464 | print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, |
5465 | pad, zero_padding, 1); |
5466 | */ |
5467 | - ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, |
5468 | + ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in, |
5469 | src1_len + src2_len + zero_padding); |
5470 | - crypto_free_blkcipher(tfm); |
5471 | - if (ret < 0) |
5472 | + if (ret < 0) { |
5473 | pr_err("ceph_aes_crypt2 failed %d\n", ret); |
5474 | + goto out_sg; |
5475 | + } |
5476 | /* |
5477 | print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, |
5478 | dst, *dst_len, 1); |
5479 | */ |
5480 | - return 0; |
5481 | + |
5482 | +out_sg: |
5483 | + teardown_sgtable(&sg_out); |
5484 | +out_tfm: |
5485 | + crypto_free_blkcipher(tfm); |
5486 | + return ret; |
5487 | } |
5488 | |
5489 | static int ceph_aes_decrypt(const void *key, int key_len, |
5490 | void *dst, size_t *dst_len, |
5491 | const void *src, size_t src_len) |
5492 | { |
5493 | - struct scatterlist sg_in[1], sg_out[2]; |
5494 | + struct sg_table sg_in; |
5495 | + struct scatterlist sg_out[2], prealloc_sg; |
5496 | struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); |
5497 | struct blkcipher_desc desc = { .tfm = tfm }; |
5498 | char pad[16]; |
5499 | @@ -210,16 +299,16 @@ static int ceph_aes_decrypt(const void *key, int key_len, |
5500 | if (IS_ERR(tfm)) |
5501 | return PTR_ERR(tfm); |
5502 | |
5503 | - crypto_blkcipher_setkey((void *)tfm, key, key_len); |
5504 | - sg_init_table(sg_in, 1); |
5505 | sg_init_table(sg_out, 2); |
5506 | - sg_set_buf(sg_in, src, src_len); |
5507 | sg_set_buf(&sg_out[0], dst, *dst_len); |
5508 | sg_set_buf(&sg_out[1], pad, sizeof(pad)); |
5509 | + ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len); |
5510 | + if (ret) |
5511 | + goto out_tfm; |
5512 | |
5513 | + crypto_blkcipher_setkey((void *)tfm, key, key_len); |
5514 | iv = crypto_blkcipher_crt(tfm)->iv; |
5515 | ivsize = crypto_blkcipher_ivsize(tfm); |
5516 | - |
5517 | memcpy(iv, aes_iv, ivsize); |
5518 | |
5519 | /* |
5520 | @@ -228,12 +317,10 @@ static int ceph_aes_decrypt(const void *key, int key_len, |
5521 | print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, |
5522 | src, src_len, 1); |
5523 | */ |
5524 | - |
5525 | - ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len); |
5526 | - crypto_free_blkcipher(tfm); |
5527 | + ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len); |
5528 | if (ret < 0) { |
5529 | pr_err("ceph_aes_decrypt failed %d\n", ret); |
5530 | - return ret; |
5531 | + goto out_sg; |
5532 | } |
5533 | |
5534 | if (src_len <= *dst_len) |
5535 | @@ -251,7 +338,12 @@ static int ceph_aes_decrypt(const void *key, int key_len, |
5536 | print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1, |
5537 | dst, *dst_len, 1); |
5538 | */ |
5539 | - return 0; |
5540 | + |
5541 | +out_sg: |
5542 | + teardown_sgtable(&sg_in); |
5543 | +out_tfm: |
5544 | + crypto_free_blkcipher(tfm); |
5545 | + return ret; |
5546 | } |
5547 | |
5548 | static int ceph_aes_decrypt2(const void *key, int key_len, |
5549 | @@ -259,7 +351,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len, |
5550 | void *dst2, size_t *dst2_len, |
5551 | const void *src, size_t src_len) |
5552 | { |
5553 | - struct scatterlist sg_in[1], sg_out[3]; |
5554 | + struct sg_table sg_in; |
5555 | + struct scatterlist sg_out[3], prealloc_sg; |
5556 | struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); |
5557 | struct blkcipher_desc desc = { .tfm = tfm }; |
5558 | char pad[16]; |
5559 | @@ -271,17 +364,17 @@ static int ceph_aes_decrypt2(const void *key, int key_len, |
5560 | if (IS_ERR(tfm)) |
5561 | return PTR_ERR(tfm); |
5562 | |
5563 | - sg_init_table(sg_in, 1); |
5564 | - sg_set_buf(sg_in, src, src_len); |
5565 | sg_init_table(sg_out, 3); |
5566 | sg_set_buf(&sg_out[0], dst1, *dst1_len); |
5567 | sg_set_buf(&sg_out[1], dst2, *dst2_len); |
5568 | sg_set_buf(&sg_out[2], pad, sizeof(pad)); |
5569 | + ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len); |
5570 | + if (ret) |
5571 | + goto out_tfm; |
5572 | |
5573 | crypto_blkcipher_setkey((void *)tfm, key, key_len); |
5574 | iv = crypto_blkcipher_crt(tfm)->iv; |
5575 | ivsize = crypto_blkcipher_ivsize(tfm); |
5576 | - |
5577 | memcpy(iv, aes_iv, ivsize); |
5578 | |
5579 | /* |
5580 | @@ -290,12 +383,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len, |
5581 | print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, |
5582 | src, src_len, 1); |
5583 | */ |
5584 | - |
5585 | - ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len); |
5586 | - crypto_free_blkcipher(tfm); |
5587 | + ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len); |
5588 | if (ret < 0) { |
5589 | pr_err("ceph_aes_decrypt failed %d\n", ret); |
5590 | - return ret; |
5591 | + goto out_sg; |
5592 | } |
5593 | |
5594 | if (src_len <= *dst1_len) |
5595 | @@ -325,7 +416,11 @@ static int ceph_aes_decrypt2(const void *key, int key_len, |
5596 | dst2, *dst2_len, 1); |
5597 | */ |
5598 | |
5599 | - return 0; |
5600 | +out_sg: |
5601 | + teardown_sgtable(&sg_in); |
5602 | +out_tfm: |
5603 | + crypto_free_blkcipher(tfm); |
5604 | + return ret; |
5605 | } |
5606 | |
5607 | |
5608 | diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c |
5609 | index 9eb89f3f0ee4..19419b60cb37 100644 |
5610 | --- a/net/ipv4/inet_fragment.c |
5611 | +++ b/net/ipv4/inet_fragment.c |
5612 | @@ -146,7 +146,6 @@ evict_again: |
5613 | atomic_inc(&fq->refcnt); |
5614 | spin_unlock(&hb->chain_lock); |
5615 | del_timer_sync(&fq->timer); |
5616 | - WARN_ON(atomic_read(&fq->refcnt) != 1); |
5617 | inet_frag_put(fq, f); |
5618 | goto evict_again; |
5619 | } |
5620 | @@ -285,7 +284,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f) |
5621 | struct inet_frag_bucket *hb; |
5622 | |
5623 | hb = get_frag_bucket_locked(fq, f); |
5624 | - hlist_del(&fq->list); |
5625 | + if (!(fq->flags & INET_FRAG_EVICTED)) |
5626 | + hlist_del(&fq->list); |
5627 | spin_unlock(&hb->chain_lock); |
5628 | } |
5629 | |
5630 | diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c |
5631 | index 5cb830c78990..2407e5db84ff 100644 |
5632 | --- a/net/ipv4/ip_sockglue.c |
5633 | +++ b/net/ipv4/ip_sockglue.c |
5634 | @@ -195,7 +195,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc, |
5635 | for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { |
5636 | if (!CMSG_OK(msg, cmsg)) |
5637 | return -EINVAL; |
5638 | -#if defined(CONFIG_IPV6) |
5639 | +#if IS_ENABLED(CONFIG_IPV6) |
5640 | if (allow_ipv6 && |
5641 | cmsg->cmsg_level == SOL_IPV6 && |
5642 | cmsg->cmsg_type == IPV6_PKTINFO) { |
5643 | diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c |
5644 | index 97299d76c1b0..cacb493a133d 100644 |
5645 | --- a/net/ipv6/ip6_gre.c |
5646 | +++ b/net/ipv6/ip6_gre.c |
5647 | @@ -957,8 +957,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) |
5648 | else |
5649 | dev->flags &= ~IFF_POINTOPOINT; |
5650 | |
5651 | - dev->iflink = p->link; |
5652 | - |
5653 | /* Precalculate GRE options length */ |
5654 | if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) { |
5655 | if (t->parms.o_flags&GRE_CSUM) |
5656 | @@ -1268,6 +1266,7 @@ static int ip6gre_tunnel_init(struct net_device *dev) |
5657 | u64_stats_init(&ip6gre_tunnel_stats->syncp); |
5658 | } |
5659 | |
5660 | + dev->iflink = tunnel->parms.link; |
5661 | |
5662 | return 0; |
5663 | } |
5664 | @@ -1477,6 +1476,8 @@ static int ip6gre_tap_init(struct net_device *dev) |
5665 | if (!dev->tstats) |
5666 | return -ENOMEM; |
5667 | |
5668 | + dev->iflink = tunnel->parms.link; |
5669 | + |
5670 | return 0; |
5671 | } |
5672 | |
5673 | diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c |
5674 | index 69a84b464009..d2eeb3bf8fd8 100644 |
5675 | --- a/net/ipv6/ip6_tunnel.c |
5676 | +++ b/net/ipv6/ip6_tunnel.c |
5677 | @@ -272,9 +272,6 @@ static int ip6_tnl_create2(struct net_device *dev) |
5678 | int err; |
5679 | |
5680 | t = netdev_priv(dev); |
5681 | - err = ip6_tnl_dev_init(dev); |
5682 | - if (err < 0) |
5683 | - goto out; |
5684 | |
5685 | err = register_netdevice(dev); |
5686 | if (err < 0) |
5687 | @@ -1462,6 +1459,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) |
5688 | |
5689 | |
5690 | static const struct net_device_ops ip6_tnl_netdev_ops = { |
5691 | + .ndo_init = ip6_tnl_dev_init, |
5692 | .ndo_uninit = ip6_tnl_dev_uninit, |
5693 | .ndo_start_xmit = ip6_tnl_xmit, |
5694 | .ndo_do_ioctl = ip6_tnl_ioctl, |
5695 | @@ -1546,16 +1544,10 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev) |
5696 | struct ip6_tnl *t = netdev_priv(dev); |
5697 | struct net *net = dev_net(dev); |
5698 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); |
5699 | - int err = ip6_tnl_dev_init_gen(dev); |
5700 | - |
5701 | - if (err) |
5702 | - return err; |
5703 | |
5704 | t->parms.proto = IPPROTO_IPV6; |
5705 | dev_hold(dev); |
5706 | |
5707 | - ip6_tnl_link_config(t); |
5708 | - |
5709 | rcu_assign_pointer(ip6n->tnls_wc[0], t); |
5710 | return 0; |
5711 | } |
5712 | diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c |
5713 | index 5833a2244467..99c9487f236a 100644 |
5714 | --- a/net/ipv6/ip6_vti.c |
5715 | +++ b/net/ipv6/ip6_vti.c |
5716 | @@ -172,10 +172,6 @@ static int vti6_tnl_create2(struct net_device *dev) |
5717 | struct vti6_net *ip6n = net_generic(net, vti6_net_id); |
5718 | int err; |
5719 | |
5720 | - err = vti6_dev_init(dev); |
5721 | - if (err < 0) |
5722 | - goto out; |
5723 | - |
5724 | err = register_netdevice(dev); |
5725 | if (err < 0) |
5726 | goto out; |
5727 | @@ -783,6 +779,7 @@ static int vti6_change_mtu(struct net_device *dev, int new_mtu) |
5728 | } |
5729 | |
5730 | static const struct net_device_ops vti6_netdev_ops = { |
5731 | + .ndo_init = vti6_dev_init, |
5732 | .ndo_uninit = vti6_dev_uninit, |
5733 | .ndo_start_xmit = vti6_tnl_xmit, |
5734 | .ndo_do_ioctl = vti6_ioctl, |
5735 | @@ -852,16 +849,10 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev) |
5736 | struct ip6_tnl *t = netdev_priv(dev); |
5737 | struct net *net = dev_net(dev); |
5738 | struct vti6_net *ip6n = net_generic(net, vti6_net_id); |
5739 | - int err = vti6_dev_init_gen(dev); |
5740 | - |
5741 | - if (err) |
5742 | - return err; |
5743 | |
5744 | t->parms.proto = IPPROTO_IPV6; |
5745 | dev_hold(dev); |
5746 | |
5747 | - vti6_link_config(t); |
5748 | - |
5749 | rcu_assign_pointer(ip6n->tnls_wc[0], t); |
5750 | return 0; |
5751 | } |
5752 | diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c |
5753 | index 6163f851dc01..ca1c7c4ccc88 100644 |
5754 | --- a/net/ipv6/sit.c |
5755 | +++ b/net/ipv6/sit.c |
5756 | @@ -195,10 +195,8 @@ static int ipip6_tunnel_create(struct net_device *dev) |
5757 | struct sit_net *sitn = net_generic(net, sit_net_id); |
5758 | int err; |
5759 | |
5760 | - err = ipip6_tunnel_init(dev); |
5761 | - if (err < 0) |
5762 | - goto out; |
5763 | - ipip6_tunnel_clone_6rd(dev, sitn); |
5764 | + memcpy(dev->dev_addr, &t->parms.iph.saddr, 4); |
5765 | + memcpy(dev->broadcast, &t->parms.iph.daddr, 4); |
5766 | |
5767 | if ((__force u16)t->parms.i_flags & SIT_ISATAP) |
5768 | dev->priv_flags |= IFF_ISATAP; |
5769 | @@ -207,7 +205,8 @@ static int ipip6_tunnel_create(struct net_device *dev) |
5770 | if (err < 0) |
5771 | goto out; |
5772 | |
5773 | - strcpy(t->parms.name, dev->name); |
5774 | + ipip6_tunnel_clone_6rd(dev, sitn); |
5775 | + |
5776 | dev->rtnl_link_ops = &sit_link_ops; |
5777 | |
5778 | dev_hold(dev); |
5779 | @@ -1314,6 +1313,7 @@ static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) |
5780 | } |
5781 | |
5782 | static const struct net_device_ops ipip6_netdev_ops = { |
5783 | + .ndo_init = ipip6_tunnel_init, |
5784 | .ndo_uninit = ipip6_tunnel_uninit, |
5785 | .ndo_start_xmit = sit_tunnel_xmit, |
5786 | .ndo_do_ioctl = ipip6_tunnel_ioctl, |
5787 | @@ -1359,9 +1359,7 @@ static int ipip6_tunnel_init(struct net_device *dev) |
5788 | |
5789 | tunnel->dev = dev; |
5790 | tunnel->net = dev_net(dev); |
5791 | - |
5792 | - memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); |
5793 | - memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); |
5794 | + strcpy(tunnel->parms.name, dev->name); |
5795 | |
5796 | ipip6_tunnel_bind_dev(dev); |
5797 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); |
5798 | @@ -1386,7 +1384,6 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev) |
5799 | |
5800 | tunnel->dev = dev; |
5801 | tunnel->net = dev_net(dev); |
5802 | - strcpy(tunnel->parms.name, dev->name); |
5803 | |
5804 | iph->version = 4; |
5805 | iph->protocol = IPPROTO_IPV6; |
5806 | diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c |
5807 | index 9713dc54ea4b..af526e9cfc1a 100644 |
5808 | --- a/net/mac80211/ibss.c |
5809 | +++ b/net/mac80211/ibss.c |
5810 | @@ -804,7 +804,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata, |
5811 | |
5812 | memset(¶ms, 0, sizeof(params)); |
5813 | memset(&csa_ie, 0, sizeof(csa_ie)); |
5814 | - err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, |
5815 | + err = ieee80211_parse_ch_switch_ie(sdata, elems, |
5816 | ifibss->chandef.chan->band, |
5817 | sta_flags, ifibss->bssid, &csa_ie); |
5818 | /* can't switch to destination channel, fail */ |
5819 | diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h |
5820 | index ef7a089ac546..5d102b5c6e81 100644 |
5821 | --- a/net/mac80211/ieee80211_i.h |
5822 | +++ b/net/mac80211/ieee80211_i.h |
5823 | @@ -1639,7 +1639,6 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, |
5824 | * ieee80211_parse_ch_switch_ie - parses channel switch IEs |
5825 | * @sdata: the sdata of the interface which has received the frame |
5826 | * @elems: parsed 802.11 elements received with the frame |
5827 | - * @beacon: indicates if the frame was a beacon or probe response |
5828 | * @current_band: indicates the current band |
5829 | * @sta_flags: contains information about own capabilities and restrictions |
5830 | * to decide which channel switch announcements can be accepted. Only the |
5831 | @@ -1653,7 +1652,7 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, |
5832 | * Return: 0 on success, <0 on error and >0 if there is nothing to parse. |
5833 | */ |
5834 | int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, |
5835 | - struct ieee802_11_elems *elems, bool beacon, |
5836 | + struct ieee802_11_elems *elems, |
5837 | enum ieee80211_band current_band, |
5838 | u32 sta_flags, u8 *bssid, |
5839 | struct ieee80211_csa_ie *csa_ie); |
5840 | diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c |
5841 | index f75e5f132c5a..3538e5e47b0e 100644 |
5842 | --- a/net/mac80211/iface.c |
5843 | +++ b/net/mac80211/iface.c |
5844 | @@ -765,10 +765,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, |
5845 | int i, flushed; |
5846 | struct ps_data *ps; |
5847 | struct cfg80211_chan_def chandef; |
5848 | + bool cancel_scan; |
5849 | |
5850 | clear_bit(SDATA_STATE_RUNNING, &sdata->state); |
5851 | |
5852 | - if (rcu_access_pointer(local->scan_sdata) == sdata) |
5853 | + cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata; |
5854 | + if (cancel_scan) |
5855 | ieee80211_scan_cancel(local); |
5856 | |
5857 | /* |
5858 | @@ -990,6 +992,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, |
5859 | |
5860 | ieee80211_recalc_ps(local, -1); |
5861 | |
5862 | + if (cancel_scan) |
5863 | + flush_delayed_work(&local->scan_work); |
5864 | + |
5865 | if (local->open_count == 0) { |
5866 | ieee80211_stop_device(local); |
5867 | |
5868 | diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c |
5869 | index e9f99c1e3fad..0c8b2a77d312 100644 |
5870 | --- a/net/mac80211/mesh.c |
5871 | +++ b/net/mac80211/mesh.c |
5872 | @@ -874,7 +874,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata, |
5873 | |
5874 | memset(¶ms, 0, sizeof(params)); |
5875 | memset(&csa_ie, 0, sizeof(csa_ie)); |
5876 | - err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, band, |
5877 | + err = ieee80211_parse_ch_switch_ie(sdata, elems, band, |
5878 | sta_flags, sdata->vif.addr, |
5879 | &csa_ie); |
5880 | if (err < 0) |
5881 | diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c |
5882 | index b82a12a9f0f1..86d44db5da06 100644 |
5883 | --- a/net/mac80211/mlme.c |
5884 | +++ b/net/mac80211/mlme.c |
5885 | @@ -1058,7 +1058,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, |
5886 | |
5887 | current_band = cbss->channel->band; |
5888 | memset(&csa_ie, 0, sizeof(csa_ie)); |
5889 | - res = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, current_band, |
5890 | + res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band, |
5891 | ifmgd->flags, |
5892 | ifmgd->associated->bssid, &csa_ie); |
5893 | if (res < 0) |
5894 | @@ -1154,7 +1154,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, |
5895 | ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work); |
5896 | else |
5897 | mod_timer(&ifmgd->chswitch_timer, |
5898 | - TU_TO_EXP_TIME(csa_ie.count * cbss->beacon_interval)); |
5899 | + TU_TO_EXP_TIME((csa_ie.count - 1) * |
5900 | + cbss->beacon_interval)); |
5901 | } |
5902 | |
5903 | static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, |
5904 | diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c |
5905 | index bd2c9b22c945..7e77410ca799 100644 |
5906 | --- a/net/mac80211/rx.c |
5907 | +++ b/net/mac80211/rx.c |
5908 | @@ -1667,11 +1667,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) |
5909 | sc = le16_to_cpu(hdr->seq_ctrl); |
5910 | frag = sc & IEEE80211_SCTL_FRAG; |
5911 | |
5912 | - if (likely((!ieee80211_has_morefrags(fc) && frag == 0) || |
5913 | - is_multicast_ether_addr(hdr->addr1))) { |
5914 | - /* not fragmented */ |
5915 | + if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) |
5916 | + goto out; |
5917 | + |
5918 | + if (is_multicast_ether_addr(hdr->addr1)) { |
5919 | + rx->local->dot11MulticastReceivedFrameCount++; |
5920 | goto out; |
5921 | } |
5922 | + |
5923 | I802_DEBUG_INC(rx->local->rx_handlers_fragments); |
5924 | |
5925 | if (skb_linearize(rx->skb)) |
5926 | @@ -1764,10 +1767,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) |
5927 | out: |
5928 | if (rx->sta) |
5929 | rx->sta->rx_packets++; |
5930 | - if (is_multicast_ether_addr(hdr->addr1)) |
5931 | - rx->local->dot11MulticastReceivedFrameCount++; |
5932 | - else |
5933 | - ieee80211_led_rx(rx->local); |
5934 | + ieee80211_led_rx(rx->local); |
5935 | return RX_CONTINUE; |
5936 | } |
5937 | |
5938 | diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c |
5939 | index 6ab009070084..efeba56c913b 100644 |
5940 | --- a/net/mac80211/spectmgmt.c |
5941 | +++ b/net/mac80211/spectmgmt.c |
5942 | @@ -22,7 +22,7 @@ |
5943 | #include "wme.h" |
5944 | |
5945 | int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, |
5946 | - struct ieee802_11_elems *elems, bool beacon, |
5947 | + struct ieee802_11_elems *elems, |
5948 | enum ieee80211_band current_band, |
5949 | u32 sta_flags, u8 *bssid, |
5950 | struct ieee80211_csa_ie *csa_ie) |
5951 | @@ -91,19 +91,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, |
5952 | return -EINVAL; |
5953 | } |
5954 | |
5955 | - if (!beacon && sec_chan_offs) { |
5956 | + if (sec_chan_offs) { |
5957 | secondary_channel_offset = sec_chan_offs->sec_chan_offs; |
5958 | - } else if (beacon && ht_oper) { |
5959 | - secondary_channel_offset = |
5960 | - ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET; |
5961 | } else if (!(sta_flags & IEEE80211_STA_DISABLE_HT)) { |
5962 | - /* If it's not a beacon, HT is enabled and the IE not present, |
5963 | - * it's 20 MHz, 802.11-2012 8.5.2.6: |
5964 | - * This element [the Secondary Channel Offset Element] is |
5965 | - * present when switching to a 40 MHz channel. It may be |
5966 | - * present when switching to a 20 MHz channel (in which |
5967 | - * case the secondary channel offset is set to SCN). |
5968 | - */ |
5969 | + /* If the secondary channel offset IE is not present, |
5970 | + * we can't know what's the post-CSA offset, so the |
5971 | + * best we can do is use 20MHz. |
5972 | + */ |
5973 | secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; |
5974 | } |
5975 | |
5976 | diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c |
5977 | index ec8114fae50b..6582dce828b5 100644 |
5978 | --- a/net/netfilter/ipset/ip_set_core.c |
5979 | +++ b/net/netfilter/ipset/ip_set_core.c |
5980 | @@ -635,7 +635,7 @@ ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index) |
5981 | struct ip_set *set; |
5982 | struct ip_set_net *inst = ip_set_pernet(net); |
5983 | |
5984 | - if (index > inst->ip_set_max) |
5985 | + if (index >= inst->ip_set_max) |
5986 | return IPSET_INVALID_ID; |
5987 | |
5988 | nfnl_lock(NFNL_SUBSYS_IPSET); |
5989 | diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c |
5990 | index deeb95fb7028..c62c08e0998d 100644 |
5991 | --- a/net/netfilter/nf_tables_api.c |
5992 | +++ b/net/netfilter/nf_tables_api.c |
5993 | @@ -1102,10 +1102,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, |
5994 | basechain->stats = stats; |
5995 | } else { |
5996 | stats = netdev_alloc_pcpu_stats(struct nft_stats); |
5997 | - if (IS_ERR(stats)) { |
5998 | + if (stats == NULL) { |
5999 | module_put(type->owner); |
6000 | kfree(basechain); |
6001 | - return PTR_ERR(stats); |
6002 | + return -ENOMEM; |
6003 | } |
6004 | rcu_assign_pointer(basechain->stats, stats); |
6005 | } |
6006 | diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c |
6007 | index a11c5ff2f720..32507355cc5d 100644 |
6008 | --- a/net/netfilter/nfnetlink_log.c |
6009 | +++ b/net/netfilter/nfnetlink_log.c |
6010 | @@ -43,7 +43,8 @@ |
6011 | #define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE |
6012 | #define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ |
6013 | #define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ |
6014 | -#define NFULNL_COPY_RANGE_MAX 0xFFFF /* max packet size is limited by 16-bit struct nfattr nfa_len field */ |
6015 | +/* max packet size is limited by 16-bit struct nfattr nfa_len field */ |
6016 | +#define NFULNL_COPY_RANGE_MAX (0xFFFF - NLA_HDRLEN) |
6017 | |
6018 | #define PRINTR(x, args...) do { if (net_ratelimit()) \ |
6019 | printk(x, ## args); } while (0); |
6020 | @@ -252,6 +253,8 @@ nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode, |
6021 | |
6022 | case NFULNL_COPY_PACKET: |
6023 | inst->copy_mode = mode; |
6024 | + if (range == 0) |
6025 | + range = NFULNL_COPY_RANGE_MAX; |
6026 | inst->copy_range = min_t(unsigned int, |
6027 | range, NFULNL_COPY_RANGE_MAX); |
6028 | break; |
6029 | @@ -343,26 +346,25 @@ nfulnl_alloc_skb(struct net *net, u32 peer_portid, unsigned int inst_size, |
6030 | return skb; |
6031 | } |
6032 | |
6033 | -static int |
6034 | +static void |
6035 | __nfulnl_send(struct nfulnl_instance *inst) |
6036 | { |
6037 | - int status = -1; |
6038 | - |
6039 | if (inst->qlen > 1) { |
6040 | struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0, |
6041 | NLMSG_DONE, |
6042 | sizeof(struct nfgenmsg), |
6043 | 0); |
6044 | - if (!nlh) |
6045 | + if (WARN_ONCE(!nlh, "bad nlskb size: %u, tailroom %d\n", |
6046 | + inst->skb->len, skb_tailroom(inst->skb))) { |
6047 | + kfree_skb(inst->skb); |
6048 | goto out; |
6049 | + } |
6050 | } |
6051 | - status = nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid, |
6052 | - MSG_DONTWAIT); |
6053 | - |
6054 | + nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid, |
6055 | + MSG_DONTWAIT); |
6056 | +out: |
6057 | inst->qlen = 0; |
6058 | inst->skb = NULL; |
6059 | -out: |
6060 | - return status; |
6061 | } |
6062 | |
6063 | static void |
6064 | @@ -649,7 +651,8 @@ nfulnl_log_packet(struct net *net, |
6065 | + nla_total_size(sizeof(u_int32_t)) /* gid */ |
6066 | + nla_total_size(plen) /* prefix */ |
6067 | + nla_total_size(sizeof(struct nfulnl_msg_packet_hw)) |
6068 | - + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp)); |
6069 | + + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp)) |
6070 | + + nla_total_size(sizeof(struct nfgenmsg)); /* NLMSG_DONE */ |
6071 | |
6072 | if (in && skb_mac_header_was_set(skb)) { |
6073 | size += nla_total_size(skb->dev->hard_header_len) |
6074 | @@ -678,8 +681,7 @@ nfulnl_log_packet(struct net *net, |
6075 | break; |
6076 | |
6077 | case NFULNL_COPY_PACKET: |
6078 | - if (inst->copy_range == 0 |
6079 | - || inst->copy_range > skb->len) |
6080 | + if (inst->copy_range > skb->len) |
6081 | data_len = skb->len; |
6082 | else |
6083 | data_len = inst->copy_range; |
6084 | @@ -692,8 +694,7 @@ nfulnl_log_packet(struct net *net, |
6085 | goto unlock_and_release; |
6086 | } |
6087 | |
6088 | - if (inst->skb && |
6089 | - size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) { |
6090 | + if (inst->skb && size > skb_tailroom(inst->skb)) { |
6091 | /* either the queue len is too high or we don't have |
6092 | * enough room in the skb left. flush to userspace. */ |
6093 | __nfulnl_flush(inst); |
6094 | diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c |
6095 | index 1840989092ed..5b5ab9ec1a90 100644 |
6096 | --- a/net/netfilter/nft_compat.c |
6097 | +++ b/net/netfilter/nft_compat.c |
6098 | @@ -696,7 +696,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, |
6099 | family = ctx->afi->family; |
6100 | |
6101 | /* Re-use the existing target if it's already loaded. */ |
6102 | - list_for_each_entry(nft_target, &nft_match_list, head) { |
6103 | + list_for_each_entry(nft_target, &nft_target_list, head) { |
6104 | struct xt_target *target = nft_target->ops.data; |
6105 | |
6106 | if (strcmp(target->name, tg_name) == 0 && |
6107 | diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c |
6108 | index f1de72de273e..0007b8180397 100644 |
6109 | --- a/net/netlink/af_netlink.c |
6110 | +++ b/net/netlink/af_netlink.c |
6111 | @@ -1440,7 +1440,7 @@ static void netlink_unbind(int group, long unsigned int groups, |
6112 | return; |
6113 | |
6114 | for (undo = 0; undo < group; undo++) |
6115 | - if (test_bit(group, &groups)) |
6116 | + if (test_bit(undo, &groups)) |
6117 | nlk->netlink_unbind(undo); |
6118 | } |
6119 | |
6120 | @@ -1492,7 +1492,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, |
6121 | netlink_insert(sk, net, nladdr->nl_pid) : |
6122 | netlink_autobind(sock); |
6123 | if (err) { |
6124 | - netlink_unbind(nlk->ngroups - 1, groups, nlk); |
6125 | + netlink_unbind(nlk->ngroups, groups, nlk); |
6126 | return err; |
6127 | } |
6128 | } |
6129 | @@ -2509,6 +2509,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module, |
6130 | nl_table[unit].module = module; |
6131 | if (cfg) { |
6132 | nl_table[unit].bind = cfg->bind; |
6133 | + nl_table[unit].unbind = cfg->unbind; |
6134 | nl_table[unit].flags = cfg->flags; |
6135 | if (cfg->compare) |
6136 | nl_table[unit].compare = cfg->compare; |
6137 | diff --git a/net/sctp/associola.c b/net/sctp/associola.c |
6138 | index a88b8524846e..f791edd64d6c 100644 |
6139 | --- a/net/sctp/associola.c |
6140 | +++ b/net/sctp/associola.c |
6141 | @@ -1668,6 +1668,8 @@ struct sctp_chunk *sctp_assoc_lookup_asconf_ack( |
6142 | * ack chunk whose serial number matches that of the request. |
6143 | */ |
6144 | list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) { |
6145 | + if (sctp_chunk_pending(ack)) |
6146 | + continue; |
6147 | if (ack->subh.addip_hdr->serial == serial) { |
6148 | sctp_chunk_hold(ack); |
6149 | return ack; |
6150 | diff --git a/net/sctp/auth.c b/net/sctp/auth.c |
6151 | index 0e8529113dc5..fb7976aee61c 100644 |
6152 | --- a/net/sctp/auth.c |
6153 | +++ b/net/sctp/auth.c |
6154 | @@ -862,8 +862,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep, |
6155 | list_add(&cur_key->key_list, sh_keys); |
6156 | |
6157 | cur_key->key = key; |
6158 | - sctp_auth_key_hold(key); |
6159 | - |
6160 | return 0; |
6161 | nomem: |
6162 | if (!replace) |
6163 | diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c |
6164 | index 4de12afa13d4..7e8a16c77039 100644 |
6165 | --- a/net/sctp/inqueue.c |
6166 | +++ b/net/sctp/inqueue.c |
6167 | @@ -140,18 +140,9 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) |
6168 | } else { |
6169 | /* Nothing to do. Next chunk in the packet, please. */ |
6170 | ch = (sctp_chunkhdr_t *) chunk->chunk_end; |
6171 | - |
6172 | /* Force chunk->skb->data to chunk->chunk_end. */ |
6173 | - skb_pull(chunk->skb, |
6174 | - chunk->chunk_end - chunk->skb->data); |
6175 | - |
6176 | - /* Verify that we have at least chunk headers |
6177 | - * worth of buffer left. |
6178 | - */ |
6179 | - if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) { |
6180 | - sctp_chunk_free(chunk); |
6181 | - chunk = queue->in_progress = NULL; |
6182 | - } |
6183 | + skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data); |
6184 | + /* We are guaranteed to pull a SCTP header. */ |
6185 | } |
6186 | } |
6187 | |
6188 | @@ -187,24 +178,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) |
6189 | skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t)); |
6190 | chunk->subh.v = NULL; /* Subheader is no longer valid. */ |
6191 | |
6192 | - if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) { |
6193 | + if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) < |
6194 | + skb_tail_pointer(chunk->skb)) { |
6195 | /* This is not a singleton */ |
6196 | chunk->singleton = 0; |
6197 | } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) { |
6198 | - /* RFC 2960, Section 6.10 Bundling |
6199 | - * |
6200 | - * Partial chunks MUST NOT be placed in an SCTP packet. |
6201 | - * If the receiver detects a partial chunk, it MUST drop |
6202 | - * the chunk. |
6203 | - * |
6204 | - * Since the end of the chunk is past the end of our buffer |
6205 | - * (which contains the whole packet, we can freely discard |
6206 | - * the whole packet. |
6207 | - */ |
6208 | - sctp_chunk_free(chunk); |
6209 | - chunk = queue->in_progress = NULL; |
6210 | - |
6211 | - return NULL; |
6212 | + /* Discard inside state machine. */ |
6213 | + chunk->pdiscard = 1; |
6214 | + chunk->chunk_end = skb_tail_pointer(chunk->skb); |
6215 | } else { |
6216 | /* We are at the end of the packet, so mark the chunk |
6217 | * in case we need to send a SACK. |
6218 | diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c |
6219 | index ae0e616a7ca5..9f32741abb1c 100644 |
6220 | --- a/net/sctp/sm_make_chunk.c |
6221 | +++ b/net/sctp/sm_make_chunk.c |
6222 | @@ -2609,6 +2609,9 @@ do_addr_param: |
6223 | addr_param = param.v + sizeof(sctp_addip_param_t); |
6224 | |
6225 | af = sctp_get_af_specific(param_type2af(param.p->type)); |
6226 | + if (af == NULL) |
6227 | + break; |
6228 | + |
6229 | af->from_addr_param(&addr, addr_param, |
6230 | htons(asoc->peer.port), 0); |
6231 | |
6232 | @@ -3110,50 +3113,63 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, |
6233 | return SCTP_ERROR_NO_ERROR; |
6234 | } |
6235 | |
6236 | -/* Verify the ASCONF packet before we process it. */ |
6237 | -int sctp_verify_asconf(const struct sctp_association *asoc, |
6238 | - struct sctp_paramhdr *param_hdr, void *chunk_end, |
6239 | - struct sctp_paramhdr **errp) { |
6240 | - sctp_addip_param_t *asconf_param; |
6241 | +/* Verify the ASCONF packet before we process it. */ |
6242 | +bool sctp_verify_asconf(const struct sctp_association *asoc, |
6243 | + struct sctp_chunk *chunk, bool addr_param_needed, |
6244 | + struct sctp_paramhdr **errp) |
6245 | +{ |
6246 | + sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr; |
6247 | union sctp_params param; |
6248 | - int length, plen; |
6249 | + bool addr_param_seen = false; |
6250 | |
6251 | - param.v = (sctp_paramhdr_t *) param_hdr; |
6252 | - while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) { |
6253 | - length = ntohs(param.p->length); |
6254 | - *errp = param.p; |
6255 | - |
6256 | - if (param.v > chunk_end - length || |
6257 | - length < sizeof(sctp_paramhdr_t)) |
6258 | - return 0; |
6259 | + sctp_walk_params(param, addip, addip_hdr.params) { |
6260 | + size_t length = ntohs(param.p->length); |
6261 | |
6262 | + *errp = param.p; |
6263 | switch (param.p->type) { |
6264 | + case SCTP_PARAM_ERR_CAUSE: |
6265 | + break; |
6266 | + case SCTP_PARAM_IPV4_ADDRESS: |
6267 | + if (length != sizeof(sctp_ipv4addr_param_t)) |
6268 | + return false; |
6269 | + addr_param_seen = true; |
6270 | + break; |
6271 | + case SCTP_PARAM_IPV6_ADDRESS: |
6272 | + if (length != sizeof(sctp_ipv6addr_param_t)) |
6273 | + return false; |
6274 | + addr_param_seen = true; |
6275 | + break; |
6276 | case SCTP_PARAM_ADD_IP: |
6277 | case SCTP_PARAM_DEL_IP: |
6278 | case SCTP_PARAM_SET_PRIMARY: |
6279 | - asconf_param = (sctp_addip_param_t *)param.v; |
6280 | - plen = ntohs(asconf_param->param_hdr.length); |
6281 | - if (plen < sizeof(sctp_addip_param_t) + |
6282 | - sizeof(sctp_paramhdr_t)) |
6283 | - return 0; |
6284 | + /* In ASCONF chunks, these need to be first. */ |
6285 | + if (addr_param_needed && !addr_param_seen) |
6286 | + return false; |
6287 | + length = ntohs(param.addip->param_hdr.length); |
6288 | + if (length < sizeof(sctp_addip_param_t) + |
6289 | + sizeof(sctp_paramhdr_t)) |
6290 | + return false; |
6291 | break; |
6292 | case SCTP_PARAM_SUCCESS_REPORT: |
6293 | case SCTP_PARAM_ADAPTATION_LAYER_IND: |
6294 | if (length != sizeof(sctp_addip_param_t)) |
6295 | - return 0; |
6296 | - |
6297 | + return false; |
6298 | break; |
6299 | default: |
6300 | - break; |
6301 | + /* This is unkown to us, reject! */ |
6302 | + return false; |
6303 | } |
6304 | - |
6305 | - param.v += WORD_ROUND(length); |
6306 | } |
6307 | |
6308 | - if (param.v != chunk_end) |
6309 | - return 0; |
6310 | + /* Remaining sanity checks. */ |
6311 | + if (addr_param_needed && !addr_param_seen) |
6312 | + return false; |
6313 | + if (!addr_param_needed && addr_param_seen) |
6314 | + return false; |
6315 | + if (param.v != chunk->chunk_end) |
6316 | + return false; |
6317 | |
6318 | - return 1; |
6319 | + return true; |
6320 | } |
6321 | |
6322 | /* Process an incoming ASCONF chunk with the next expected serial no. and |
6323 | @@ -3162,16 +3178,17 @@ int sctp_verify_asconf(const struct sctp_association *asoc, |
6324 | struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, |
6325 | struct sctp_chunk *asconf) |
6326 | { |
6327 | + sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr; |
6328 | + bool all_param_pass = true; |
6329 | + union sctp_params param; |
6330 | sctp_addiphdr_t *hdr; |
6331 | union sctp_addr_param *addr_param; |
6332 | sctp_addip_param_t *asconf_param; |
6333 | struct sctp_chunk *asconf_ack; |
6334 | - |
6335 | __be16 err_code; |
6336 | int length = 0; |
6337 | int chunk_len; |
6338 | __u32 serial; |
6339 | - int all_param_pass = 1; |
6340 | |
6341 | chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); |
6342 | hdr = (sctp_addiphdr_t *)asconf->skb->data; |
6343 | @@ -3199,9 +3216,14 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, |
6344 | goto done; |
6345 | |
6346 | /* Process the TLVs contained within the ASCONF chunk. */ |
6347 | - while (chunk_len > 0) { |
6348 | + sctp_walk_params(param, addip, addip_hdr.params) { |
6349 | + /* Skip preceeding address parameters. */ |
6350 | + if (param.p->type == SCTP_PARAM_IPV4_ADDRESS || |
6351 | + param.p->type == SCTP_PARAM_IPV6_ADDRESS) |
6352 | + continue; |
6353 | + |
6354 | err_code = sctp_process_asconf_param(asoc, asconf, |
6355 | - asconf_param); |
6356 | + param.addip); |
6357 | /* ADDIP 4.1 A7) |
6358 | * If an error response is received for a TLV parameter, |
6359 | * all TLVs with no response before the failed TLV are |
6360 | @@ -3209,28 +3231,20 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, |
6361 | * the failed response are considered unsuccessful unless |
6362 | * a specific success indication is present for the parameter. |
6363 | */ |
6364 | - if (SCTP_ERROR_NO_ERROR != err_code) |
6365 | - all_param_pass = 0; |
6366 | - |
6367 | + if (err_code != SCTP_ERROR_NO_ERROR) |
6368 | + all_param_pass = false; |
6369 | if (!all_param_pass) |
6370 | - sctp_add_asconf_response(asconf_ack, |
6371 | - asconf_param->crr_id, err_code, |
6372 | - asconf_param); |
6373 | + sctp_add_asconf_response(asconf_ack, param.addip->crr_id, |
6374 | + err_code, param.addip); |
6375 | |
6376 | /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add |
6377 | * an IP address sends an 'Out of Resource' in its response, it |
6378 | * MUST also fail any subsequent add or delete requests bundled |
6379 | * in the ASCONF. |
6380 | */ |
6381 | - if (SCTP_ERROR_RSRC_LOW == err_code) |
6382 | + if (err_code == SCTP_ERROR_RSRC_LOW) |
6383 | goto done; |
6384 | - |
6385 | - /* Move to the next ASCONF param. */ |
6386 | - length = ntohs(asconf_param->param_hdr.length); |
6387 | - asconf_param = (void *)asconf_param + length; |
6388 | - chunk_len -= length; |
6389 | } |
6390 | - |
6391 | done: |
6392 | asoc->peer.addip_serial++; |
6393 | |
6394 | diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c |
6395 | index c8f606324134..3ee27b7704ff 100644 |
6396 | --- a/net/sctp/sm_statefuns.c |
6397 | +++ b/net/sctp/sm_statefuns.c |
6398 | @@ -170,6 +170,9 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk, |
6399 | { |
6400 | __u16 chunk_length = ntohs(chunk->chunk_hdr->length); |
6401 | |
6402 | + /* Previously already marked? */ |
6403 | + if (unlikely(chunk->pdiscard)) |
6404 | + return 0; |
6405 | if (unlikely(chunk_length < required_length)) |
6406 | return 0; |
6407 | |
6408 | @@ -3591,9 +3594,7 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net, |
6409 | struct sctp_chunk *asconf_ack = NULL; |
6410 | struct sctp_paramhdr *err_param = NULL; |
6411 | sctp_addiphdr_t *hdr; |
6412 | - union sctp_addr_param *addr_param; |
6413 | __u32 serial; |
6414 | - int length; |
6415 | |
6416 | if (!sctp_vtag_verify(chunk, asoc)) { |
6417 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, |
6418 | @@ -3618,17 +3619,8 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net, |
6419 | hdr = (sctp_addiphdr_t *)chunk->skb->data; |
6420 | serial = ntohl(hdr->serial); |
6421 | |
6422 | - addr_param = (union sctp_addr_param *)hdr->params; |
6423 | - length = ntohs(addr_param->p.length); |
6424 | - if (length < sizeof(sctp_paramhdr_t)) |
6425 | - return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, |
6426 | - (void *)addr_param, commands); |
6427 | - |
6428 | /* Verify the ASCONF chunk before processing it. */ |
6429 | - if (!sctp_verify_asconf(asoc, |
6430 | - (sctp_paramhdr_t *)((void *)addr_param + length), |
6431 | - (void *)chunk->chunk_end, |
6432 | - &err_param)) |
6433 | + if (!sctp_verify_asconf(asoc, chunk, true, &err_param)) |
6434 | return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, |
6435 | (void *)err_param, commands); |
6436 | |
6437 | @@ -3745,10 +3737,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net, |
6438 | rcvd_serial = ntohl(addip_hdr->serial); |
6439 | |
6440 | /* Verify the ASCONF-ACK chunk before processing it. */ |
6441 | - if (!sctp_verify_asconf(asoc, |
6442 | - (sctp_paramhdr_t *)addip_hdr->params, |
6443 | - (void *)asconf_ack->chunk_end, |
6444 | - &err_param)) |
6445 | + if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param)) |
6446 | return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, |
6447 | (void *)err_param, commands); |
6448 | |
6449 | diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c |
6450 | index afb292cd797d..53ed8d3f8897 100644 |
6451 | --- a/net/sunrpc/auth_gss/auth_gss.c |
6452 | +++ b/net/sunrpc/auth_gss/auth_gss.c |
6453 | @@ -1353,6 +1353,7 @@ gss_stringify_acceptor(struct rpc_cred *cred) |
6454 | char *string = NULL; |
6455 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); |
6456 | struct gss_cl_ctx *ctx; |
6457 | + unsigned int len; |
6458 | struct xdr_netobj *acceptor; |
6459 | |
6460 | rcu_read_lock(); |
6461 | @@ -1360,15 +1361,39 @@ gss_stringify_acceptor(struct rpc_cred *cred) |
6462 | if (!ctx) |
6463 | goto out; |
6464 | |
6465 | - acceptor = &ctx->gc_acceptor; |
6466 | + len = ctx->gc_acceptor.len; |
6467 | + rcu_read_unlock(); |
6468 | |
6469 | /* no point if there's no string */ |
6470 | - if (!acceptor->len) |
6471 | - goto out; |
6472 | - |
6473 | - string = kmalloc(acceptor->len + 1, GFP_KERNEL); |
6474 | + if (!len) |
6475 | + return NULL; |
6476 | +realloc: |
6477 | + string = kmalloc(len + 1, GFP_KERNEL); |
6478 | if (!string) |
6479 | + return NULL; |
6480 | + |
6481 | + rcu_read_lock(); |
6482 | + ctx = rcu_dereference(gss_cred->gc_ctx); |
6483 | + |
6484 | + /* did the ctx disappear or was it replaced by one with no acceptor? */ |
6485 | + if (!ctx || !ctx->gc_acceptor.len) { |
6486 | + kfree(string); |
6487 | + string = NULL; |
6488 | goto out; |
6489 | + } |
6490 | + |
6491 | + acceptor = &ctx->gc_acceptor; |
6492 | + |
6493 | + /* |
6494 | + * Did we find a new acceptor that's longer than the original? Allocate |
6495 | + * a longer buffer and try again. |
6496 | + */ |
6497 | + if (len < acceptor->len) { |
6498 | + len = acceptor->len; |
6499 | + rcu_read_unlock(); |
6500 | + kfree(string); |
6501 | + goto realloc; |
6502 | + } |
6503 | |
6504 | memcpy(string, acceptor->data, acceptor->len); |
6505 | string[acceptor->len] = '\0'; |
6506 | diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl |
6507 | index 4d08b398411f..cab1691b5ad6 100755 |
6508 | --- a/scripts/checkpatch.pl |
6509 | +++ b/scripts/checkpatch.pl |
6510 | @@ -2424,7 +2424,7 @@ sub process { |
6511 | "please, no space before tabs\n" . $herevet) && |
6512 | $fix) { |
6513 | while ($fixed[$fixlinenr] =~ |
6514 | - s/(^\+.*) {8,8}+\t/$1\t\t/) {} |
6515 | + s/(^\+.*) {8,8}\t/$1\t\t/) {} |
6516 | while ($fixed[$fixlinenr] =~ |
6517 | s/(^\+.*) +\t/$1\t/) {} |
6518 | } |
6519 | diff --git a/scripts/package/builddeb b/scripts/package/builddeb |
6520 | index 35d5a5877d04..7c0e6e46905d 100644 |
6521 | --- a/scripts/package/builddeb |
6522 | +++ b/scripts/package/builddeb |
6523 | @@ -152,18 +152,16 @@ if grep -q '^CONFIG_MODULES=y' $KCONFIG_CONFIG ; then |
6524 | rmdir "$tmpdir/lib/modules/$version" |
6525 | fi |
6526 | if [ -n "$BUILD_DEBUG" ] ; then |
6527 | - ( |
6528 | - cd $tmpdir |
6529 | - for module in $(find lib/modules/ -name *.ko); do |
6530 | - mkdir -p $(dirname $dbg_dir/usr/lib/debug/$module) |
6531 | - # only keep debug symbols in the debug file |
6532 | - $OBJCOPY --only-keep-debug $module $dbg_dir/usr/lib/debug/$module |
6533 | - # strip original module from debug symbols |
6534 | - $OBJCOPY --strip-debug $module |
6535 | - # then add a link to those |
6536 | - $OBJCOPY --add-gnu-debuglink=$dbg_dir/usr/lib/debug/$module $module |
6537 | - done |
6538 | - ) |
6539 | + for module in $(find $tmpdir/lib/modules/ -name *.ko -printf '%P\n'); do |
6540 | + module=lib/modules/$module |
6541 | + mkdir -p $(dirname $dbg_dir/usr/lib/debug/$module) |
6542 | + # only keep debug symbols in the debug file |
6543 | + $OBJCOPY --only-keep-debug $tmpdir/$module $dbg_dir/usr/lib/debug/$module |
6544 | + # strip original module from debug symbols |
6545 | + $OBJCOPY --strip-debug $tmpdir/$module |
6546 | + # then add a link to those |
6547 | + $OBJCOPY --add-gnu-debuglink=$dbg_dir/usr/lib/debug/$module $tmpdir/$module |
6548 | + done |
6549 | fi |
6550 | fi |
6551 | |
6552 | diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c |
6553 | index e26f860e5f2e..eff88a5f5d40 100644 |
6554 | --- a/security/keys/keyctl.c |
6555 | +++ b/security/keys/keyctl.c |
6556 | @@ -37,6 +37,8 @@ static int key_get_type_from_user(char *type, |
6557 | return ret; |
6558 | if (ret == 0 || ret >= len) |
6559 | return -EINVAL; |
6560 | + if (type[0] == '.') |
6561 | + return -EPERM; |
6562 | type[len - 1] = '\0'; |
6563 | return 0; |
6564 | } |
6565 | diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c |
6566 | index 47ccb8f44adb..2fc3d13762c2 100644 |
6567 | --- a/sound/pci/hda/patch_conexant.c |
6568 | +++ b/sound/pci/hda/patch_conexant.c |
6569 | @@ -44,6 +44,7 @@ struct conexant_spec { |
6570 | unsigned int num_eapds; |
6571 | hda_nid_t eapds[4]; |
6572 | bool dynamic_eapd; |
6573 | + hda_nid_t mute_led_eapd; |
6574 | |
6575 | unsigned int parse_flags; /* flag for snd_hda_parse_pin_defcfg() */ |
6576 | |
6577 | @@ -164,6 +165,17 @@ static void cx_auto_vmaster_hook(void *private_data, int enabled) |
6578 | cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, enabled); |
6579 | } |
6580 | |
6581 | +/* turn on/off EAPD according to Master switch (inversely!) for mute LED */ |
6582 | +static void cx_auto_vmaster_hook_mute_led(void *private_data, int enabled) |
6583 | +{ |
6584 | + struct hda_codec *codec = private_data; |
6585 | + struct conexant_spec *spec = codec->spec; |
6586 | + |
6587 | + snd_hda_codec_write(codec, spec->mute_led_eapd, 0, |
6588 | + AC_VERB_SET_EAPD_BTLENABLE, |
6589 | + enabled ? 0x00 : 0x02); |
6590 | +} |
6591 | + |
6592 | static int cx_auto_build_controls(struct hda_codec *codec) |
6593 | { |
6594 | int err; |
6595 | @@ -224,6 +236,7 @@ enum { |
6596 | CXT_FIXUP_TOSHIBA_P105, |
6597 | CXT_FIXUP_HP_530, |
6598 | CXT_FIXUP_CAP_MIX_AMP_5047, |
6599 | + CXT_FIXUP_MUTE_LED_EAPD, |
6600 | }; |
6601 | |
6602 | /* for hda_fixup_thinkpad_acpi() */ |
6603 | @@ -557,6 +570,18 @@ static void cxt_fixup_olpc_xo(struct hda_codec *codec, |
6604 | } |
6605 | } |
6606 | |
6607 | +static void cxt_fixup_mute_led_eapd(struct hda_codec *codec, |
6608 | + const struct hda_fixup *fix, int action) |
6609 | +{ |
6610 | + struct conexant_spec *spec = codec->spec; |
6611 | + |
6612 | + if (action == HDA_FIXUP_ACT_PRE_PROBE) { |
6613 | + spec->mute_led_eapd = 0x1b; |
6614 | + spec->dynamic_eapd = 1; |
6615 | + spec->gen.vmaster_mute.hook = cx_auto_vmaster_hook_mute_led; |
6616 | + } |
6617 | +} |
6618 | + |
6619 | /* |
6620 | * Fix max input level on mixer widget to 0dB |
6621 | * (originally it has 0x2b steps with 0dB offset 0x14) |
6622 | @@ -705,6 +730,10 @@ static const struct hda_fixup cxt_fixups[] = { |
6623 | .type = HDA_FIXUP_FUNC, |
6624 | .v.func = cxt_fixup_cap_mix_amp_5047, |
6625 | }, |
6626 | + [CXT_FIXUP_MUTE_LED_EAPD] = { |
6627 | + .type = HDA_FIXUP_FUNC, |
6628 | + .v.func = cxt_fixup_mute_led_eapd, |
6629 | + }, |
6630 | }; |
6631 | |
6632 | static const struct snd_pci_quirk cxt5045_fixups[] = { |
6633 | @@ -761,6 +790,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { |
6634 | SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410), |
6635 | SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410), |
6636 | SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410), |
6637 | + SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD), |
6638 | SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC), |
6639 | SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC), |
6640 | SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC), |
6641 | @@ -779,6 +809,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = { |
6642 | { .id = CXT_PINCFG_LEMOTE_A1004, .name = "lemote-a1004" }, |
6643 | { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" }, |
6644 | { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" }, |
6645 | + { .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" }, |
6646 | {} |
6647 | }; |
6648 | |
6649 | diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c |
6650 | index f119a41ed9a9..7c83bab69dee 100644 |
6651 | --- a/sound/usb/mixer_quirks.c |
6652 | +++ b/sound/usb/mixer_quirks.c |
6653 | @@ -885,6 +885,11 @@ static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl, |
6654 | return changed; |
6655 | } |
6656 | |
6657 | +static void kctl_private_value_free(struct snd_kcontrol *kctl) |
6658 | +{ |
6659 | + kfree((void *)kctl->private_value); |
6660 | +} |
6661 | + |
6662 | static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer, |
6663 | int validx, int bUnitID) |
6664 | { |
6665 | @@ -919,6 +924,7 @@ static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer, |
6666 | return -ENOMEM; |
6667 | } |
6668 | |
6669 | + kctl->private_free = kctl_private_value_free; |
6670 | err = snd_ctl_add(mixer->chip->card, kctl); |
6671 | if (err < 0) |
6672 | return err; |