Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0241-4.9.142-all-fixes.patch

Revision 3294
Tue Mar 12 10:43:04 2019 UTC by niro
File size: 141113 bytes
-linux-4.9.142
1 diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
2 index 35e17f748ca7..af5859b2d0f9 100644
3 --- a/Documentation/sysctl/fs.txt
4 +++ b/Documentation/sysctl/fs.txt
5 @@ -34,7 +34,9 @@ Currently, these files are in /proc/sys/fs:
6 - overflowgid
7 - pipe-user-pages-hard
8 - pipe-user-pages-soft
9 +- protected_fifos
10 - protected_hardlinks
11 +- protected_regular
12 - protected_symlinks
13 - suid_dumpable
14 - super-max
15 @@ -182,6 +184,24 @@ applied.
16
17 ==============================================================
18
19 +protected_fifos:
20 +
21 +The intent of this protection is to avoid unintentional writes to
22 +an attacker-controlled FIFO, where a program expected to create a regular
23 +file.
24 +
25 +When set to "0", writing to FIFOs is unrestricted.
26 +
27 +When set to "1" don't allow O_CREAT open on FIFOs that we don't own
28 +in world writable sticky directories, unless they are owned by the
29 +owner of the directory.
30 +
31 +When set to "2" it also applies to group writable sticky directories.
32 +
33 +This protection is based on the restrictions in Openwall.
34 +
35 +==============================================================
36 +
37 protected_hardlinks:
38
39 A long-standing class of security issues is the hardlink-based
40 @@ -202,6 +222,22 @@ This protection is based on the restrictions in Openwall and grsecurity.
41
42 ==============================================================
43
44 +protected_regular:
45 +
46 +This protection is similar to protected_fifos, but it
47 +avoids writes to an attacker-controlled regular file, where a program
48 +expected to create one.
49 +
50 +When set to "0", writing to regular files is unrestricted.
51 +
52 +When set to "1" don't allow O_CREAT open on regular files that we
53 +don't own in world writable sticky directories, unless they are
54 +owned by the owner of the directory.
55 +
56 +When set to "2" it also applies to group writable sticky directories.
57 +
58 +==============================================================
59 +
60 protected_symlinks:
61
62 A long-standing class of security issues is the symlink-based
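The two new knobs documented above lend themselves to a small demonstration. The following user-space sketch is not part of the patch; it simply attempts an O_CREAT open of a file that already exists in a world-writable sticky directory and is owned by neither the caller nor the directory owner. With fs.protected_regular set to 1 or 2 the kernel is expected to reject the open (typically with EACCES) instead of writing into the attacker-controlled file; the path /tmp/shared-report is invented purely for the illustration.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/*
	 * Assumes /tmp/shared-report already exists and is owned by neither
	 * this user nor root (the owner of /tmp). With fs.protected_regular
	 * set to 1 or 2, this open is expected to fail.
	 */
	int fd = open("/tmp/shared-report", O_CREAT | O_WRONLY, 0600);

	if (fd < 0)
		printf("open blocked: %s\n", strerror(errno));
	else
		printf("open succeeded, fd=%d\n", fd);
	return 0;
}
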
63 diff --git a/MAINTAINERS b/MAINTAINERS
64 index 63cefa62324c..4f559f5b3a89 100644
65 --- a/MAINTAINERS
66 +++ b/MAINTAINERS
67 @@ -11469,6 +11469,7 @@ F: arch/alpha/kernel/srm_env.c
68
69 STABLE BRANCH
70 M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
71 +M: Sasha Levin <sashal@kernel.org>
72 L: stable@vger.kernel.org
73 S: Supported
74 F: Documentation/stable_kernel_rules.txt
75 diff --git a/Makefile b/Makefile
76 index 8eba73521a7f..72ed8ff90329 100644
77 --- a/Makefile
78 +++ b/Makefile
79 @@ -1,6 +1,6 @@
80 VERSION = 4
81 PATCHLEVEL = 9
82 -SUBLEVEL = 141
83 +SUBLEVEL = 142
84 EXTRAVERSION =
85 NAME = Roaring Lionus
86
87 diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
88 index 92110c2c6c59..ee94597773fa 100644
89 --- a/arch/arm64/Makefile
90 +++ b/arch/arm64/Makefile
91 @@ -10,7 +10,7 @@
92 #
93 # Copyright (C) 1995-2001 by Russell King
94
95 -LDFLAGS_vmlinux :=-p --no-undefined -X
96 +LDFLAGS_vmlinux :=--no-undefined -X
97 CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
98 GZFLAGS :=-9
99
100 diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
101 index f6fda8482f60..3b10f532c28e 100644
102 --- a/arch/powerpc/include/asm/io.h
103 +++ b/arch/powerpc/include/asm/io.h
104 @@ -333,19 +333,13 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
105 * their hooks, a bitfield is reserved for use by the platform near the
106 * top of MMIO addresses (not PIO, those have to cope the hard way).
107 *
108 - * This bit field is 12 bits and is at the top of the IO virtual
109 - * addresses PCI_IO_INDIRECT_TOKEN_MASK.
110 + * The highest address in the kernel virtual space are:
111 *
112 - * The kernel virtual space is thus:
113 + * d0003fffffffffff # with Hash MMU
114 + * c00fffffffffffff # with Radix MMU
115 *
116 - * 0xD000000000000000 : vmalloc
117 - * 0xD000080000000000 : PCI PHB IO space
118 - * 0xD000080080000000 : ioremap
119 - * 0xD0000fffffffffff : end of ioremap region
120 - *
121 - * Since the top 4 bits are reserved as the region ID, we use thus
122 - * the next 12 bits and keep 4 bits available for the future if the
123 - * virtual address space is ever to be extended.
124 + * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits
125 + * that can be used for the field.
126 *
127 * The direct IO mapping operations will then mask off those bits
128 * before doing the actual access, though that only happen when
129 @@ -357,8 +351,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
130 */
131
132 #ifdef CONFIG_PPC_INDIRECT_MMIO
133 -#define PCI_IO_IND_TOKEN_MASK 0x0fff000000000000ul
134 -#define PCI_IO_IND_TOKEN_SHIFT 48
135 +#define PCI_IO_IND_TOKEN_SHIFT 52
136 +#define PCI_IO_IND_TOKEN_MASK (0xfful << PCI_IO_IND_TOKEN_SHIFT)
137 #define PCI_FIX_ADDR(addr) \
138 ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK))
139 #define PCI_GET_ADDR_TOKEN(addr) \
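As a quick check of the new layout, the user-space sketch below (not kernel code) reuses the two macro definitions from the hunk above: an 8-bit token is placed in bits 52-59, extracting it with the mask recovers the token, and clearing the mask restores the original address while the top four bits remain available for the hash-MMU region ID. The address and token values are invented, and the snippet assumes a 64-bit unsigned long as on ppc64.

#include <stdio.h>

#define PCI_IO_IND_TOKEN_SHIFT	52
#define PCI_IO_IND_TOKEN_MASK	(0xfful << PCI_IO_IND_TOKEN_SHIFT)

int main(void)
{
	unsigned long addr   = 0xd000080080001000ul;	/* hypothetical ioremap address */
	unsigned long token  = 0x2b;			/* hypothetical per-PHB token */
	unsigned long tagged = addr | (token << PCI_IO_IND_TOKEN_SHIFT);

	printf("mask    = %#lx\n", PCI_IO_IND_TOKEN_MASK);
	printf("token   = %#lx\n", (tagged & PCI_IO_IND_TOKEN_MASK) >> PCI_IO_IND_TOKEN_SHIFT);
	printf("address = %#lx\n", tagged & ~PCI_IO_IND_TOKEN_MASK);
	return 0;
}
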
140 diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
141 index 2e0e67ef3544..e8cedf32345a 100644
142 --- a/arch/powerpc/kvm/trace.h
143 +++ b/arch/powerpc/kvm/trace.h
144 @@ -5,8 +5,6 @@
145
146 #undef TRACE_SYSTEM
147 #define TRACE_SYSTEM kvm
148 -#define TRACE_INCLUDE_PATH .
149 -#define TRACE_INCLUDE_FILE trace
150
151 /*
152 * Tracepoint for guest mode entry.
153 @@ -119,4 +117,10 @@ TRACE_EVENT(kvm_check_requests,
154 #endif /* _TRACE_KVM_H */
155
156 /* This part must be outside protection */
157 +#undef TRACE_INCLUDE_PATH
158 +#undef TRACE_INCLUDE_FILE
159 +
160 +#define TRACE_INCLUDE_PATH .
161 +#define TRACE_INCLUDE_FILE trace
162 +
163 #include <trace/define_trace.h>
164 diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h
165 index 7ec534d1db9f..7eadbf449a1f 100644
166 --- a/arch/powerpc/kvm/trace_booke.h
167 +++ b/arch/powerpc/kvm/trace_booke.h
168 @@ -5,8 +5,6 @@
169
170 #undef TRACE_SYSTEM
171 #define TRACE_SYSTEM kvm_booke
172 -#define TRACE_INCLUDE_PATH .
173 -#define TRACE_INCLUDE_FILE trace_booke
174
175 #define kvm_trace_symbol_exit \
176 {0, "CRITICAL"}, \
177 @@ -217,4 +215,11 @@ TRACE_EVENT(kvm_booke_queue_irqprio,
178 #endif
179
180 /* This part must be outside protection */
181 +
182 +#undef TRACE_INCLUDE_PATH
183 +#undef TRACE_INCLUDE_FILE
184 +
185 +#define TRACE_INCLUDE_PATH .
186 +#define TRACE_INCLUDE_FILE trace_booke
187 +
188 #include <trace/define_trace.h>
189 diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
190 index fb21990c0fb4..d9a21a7bd5c9 100644
191 --- a/arch/powerpc/kvm/trace_hv.h
192 +++ b/arch/powerpc/kvm/trace_hv.h
193 @@ -8,8 +8,6 @@
194
195 #undef TRACE_SYSTEM
196 #define TRACE_SYSTEM kvm_hv
197 -#define TRACE_INCLUDE_PATH .
198 -#define TRACE_INCLUDE_FILE trace_hv
199
200 #define kvm_trace_symbol_hcall \
201 {H_REMOVE, "H_REMOVE"}, \
202 @@ -496,4 +494,11 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
203 #endif /* _TRACE_KVM_HV_H */
204
205 /* This part must be outside protection */
206 +
207 +#undef TRACE_INCLUDE_PATH
208 +#undef TRACE_INCLUDE_FILE
209 +
210 +#define TRACE_INCLUDE_PATH .
211 +#define TRACE_INCLUDE_FILE trace_hv
212 +
213 #include <trace/define_trace.h>
214 diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
215 index d44f324184fb..e8e2b9ad4ac6 100644
216 --- a/arch/powerpc/kvm/trace_pr.h
217 +++ b/arch/powerpc/kvm/trace_pr.h
218 @@ -7,8 +7,6 @@
219
220 #undef TRACE_SYSTEM
221 #define TRACE_SYSTEM kvm_pr
222 -#define TRACE_INCLUDE_PATH .
223 -#define TRACE_INCLUDE_FILE trace_pr
224
225 TRACE_EVENT(kvm_book3s_reenter,
226 TP_PROTO(int r, struct kvm_vcpu *vcpu),
227 @@ -271,4 +269,11 @@ TRACE_EVENT(kvm_unmap_hva,
228 #endif /* _TRACE_KVM_H */
229
230 /* This part must be outside protection */
231 +
232 +#undef TRACE_INCLUDE_PATH
233 +#undef TRACE_INCLUDE_FILE
234 +
235 +#define TRACE_INCLUDE_PATH .
236 +#define TRACE_INCLUDE_FILE trace_pr
237 +
238 #include <trace/define_trace.h>
239 diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
240 index 6cff96e0d77b..0ef83c274019 100644
241 --- a/arch/powerpc/mm/numa.c
242 +++ b/arch/powerpc/mm/numa.c
243 @@ -1289,7 +1289,7 @@ static long vphn_get_associativity(unsigned long cpu,
244
245 switch (rc) {
246 case H_FUNCTION:
247 - printk(KERN_INFO
248 + printk_once(KERN_INFO
249 "VPHN is not supported. Disabling polling...\n");
250 stop_topology_update();
251 break;
252 diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
253 index cb2cd04e6698..b6c85b760305 100644
254 --- a/arch/s390/mm/gmap.c
255 +++ b/arch/s390/mm/gmap.c
256 @@ -686,6 +686,8 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
257 vmaddr |= gaddr & ~PMD_MASK;
258 /* Find vma in the parent mm */
259 vma = find_vma(gmap->mm, vmaddr);
260 + if (!vma)
261 + continue;
262 size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
263 zap_page_range(vma, vmaddr, size, NULL);
264 }
265 diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
266 index a3dcc12bef4a..8c700069060b 100644
267 --- a/arch/x86/events/intel/uncore_snb.c
268 +++ b/arch/x86/events/intel/uncore_snb.c
269 @@ -14,6 +14,25 @@
270 #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910
271 #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f
272 #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f
273 +#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c
274 +#define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904
275 +#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914
276 +#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC 0x590f
277 +#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC 0x591f
278 +#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC 0x3ecc
279 +#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC 0x3ed0
280 +#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC 0x3e10
281 +#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC 0x3ec4
282 +#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC 0x3e0f
283 +#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC 0x3e1f
284 +#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC 0x3ec2
285 +#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC 0x3e30
286 +#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC 0x3e18
287 +#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC 0x3ec6
288 +#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC 0x3e31
289 +#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC 0x3e33
290 +#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC 0x3eca
291 +#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC 0x3e32
292
293 /* SNB event control */
294 #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
295 @@ -631,7 +650,82 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
296 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
297 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
298 },
299 -
300 + { /* IMC */
301 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
302 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
303 + },
304 + { /* IMC */
305 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
306 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
307 + },
308 + { /* IMC */
309 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
310 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
311 + },
312 + { /* IMC */
313 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
314 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
315 + },
316 + { /* IMC */
317 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
318 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
319 + },
320 + { /* IMC */
321 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
322 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
323 + },
324 + { /* IMC */
325 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
326 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
327 + },
328 + { /* IMC */
329 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
330 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
331 + },
332 + { /* IMC */
333 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
334 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
335 + },
336 + { /* IMC */
337 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
338 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
339 + },
340 + { /* IMC */
341 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
342 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
343 + },
344 + { /* IMC */
345 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
346 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
347 + },
348 + { /* IMC */
349 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
350 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
351 + },
352 + { /* IMC */
353 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
354 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
355 + },
356 + { /* IMC */
357 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
358 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
359 + },
360 + { /* IMC */
361 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
362 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
363 + },
364 + { /* IMC */
365 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
366 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
367 + },
368 + { /* IMC */
369 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
370 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
371 + },
372 + { /* IMC */
373 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
374 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
375 + },
376 { /* end: all zeroes */ },
377 };
378
379 @@ -680,6 +774,25 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
380 IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
381 IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
382 IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
383 + IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver), /* 7th Gen Core Y */
384 + IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U */
385 + IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */
386 + IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Dual Core */
387 + IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Quad Core */
388 + IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 2 Cores */
389 + IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 4 Cores */
390 + IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 4 Cores */
391 + IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 6 Cores */
392 + IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 2 Cores Desktop */
393 + IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Desktop */
394 + IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Desktop */
395 + IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Desktop */
396 + IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Work Station */
397 + IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Work Station */
398 + IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Work Station */
399 + IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */
400 + IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */
401 + IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */
402 { /* end marker */ }
403 };
404
405 diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
406 index a321d7d849c6..326b9ba4518e 100644
407 --- a/drivers/block/floppy.c
408 +++ b/drivers/block/floppy.c
409 @@ -3823,10 +3823,11 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
410 bio.bi_end_io = floppy_rb0_cb;
411 bio_set_op_attrs(&bio, REQ_OP_READ, 0);
412
413 + init_completion(&cbdata.complete);
414 +
415 submit_bio(&bio);
416 process_fd_request();
417
418 - init_completion(&cbdata.complete);
419 wait_for_completion(&cbdata.complete);
420
421 __free_page(page);
422 diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
423 index ef1fa8145419..fa86946d12aa 100644
424 --- a/drivers/cpufreq/imx6q-cpufreq.c
425 +++ b/drivers/cpufreq/imx6q-cpufreq.c
426 @@ -130,8 +130,13 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
427 /* Ensure the arm clock divider is what we expect */
428 ret = clk_set_rate(arm_clk, new_freq * 1000);
429 if (ret) {
430 + int ret1;
431 +
432 dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
433 - regulator_set_voltage_tol(arm_reg, volt_old, 0);
434 + ret1 = regulator_set_voltage_tol(arm_reg, volt_old, 0);
435 + if (ret1)
436 + dev_warn(cpu_dev,
437 + "failed to restore vddarm voltage: %d\n", ret1);
438 return ret;
439 }
440
441 diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
442 index 1d1c9693ebfb..8ee91777abce 100644
443 --- a/drivers/firmware/efi/arm-init.c
444 +++ b/drivers/firmware/efi/arm-init.c
445 @@ -256,6 +256,10 @@ void __init efi_init(void)
446 (params.mmap & ~PAGE_MASK)));
447
448 init_screen_info();
449 +
450 + /* ARM does not permit early mappings to persist across paging_init() */
451 + if (IS_ENABLED(CONFIG_ARM))
452 + efi_memmap_unmap();
453 }
454
455 static int __init register_gop_device(void)
456 diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
457 index 4d788e0debfe..069c5a4479e6 100644
458 --- a/drivers/firmware/efi/arm-runtime.c
459 +++ b/drivers/firmware/efi/arm-runtime.c
460 @@ -118,7 +118,7 @@ static int __init arm_enable_runtime_services(void)
461 {
462 u64 mapsize;
463
464 - if (!efi_enabled(EFI_BOOT) || !efi_enabled(EFI_MEMMAP)) {
465 + if (!efi_enabled(EFI_BOOT)) {
466 pr_info("EFI services will not be available.\n");
467 return 0;
468 }
469 diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
470 index 78686443cb37..3fd2b450c649 100644
471 --- a/drivers/firmware/efi/memmap.c
472 +++ b/drivers/firmware/efi/memmap.c
473 @@ -117,6 +117,9 @@ int __init efi_memmap_init_early(struct efi_memory_map_data *data)
474
475 void __init efi_memmap_unmap(void)
476 {
477 + if (!efi_enabled(EFI_MEMMAP))
478 + return;
479 +
480 if (!efi.memmap.late) {
481 unsigned long size;
482
483 diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
484 index 2ec402ae14de..9e2fe12c2858 100644
485 --- a/drivers/gpio/gpiolib.c
486 +++ b/drivers/gpio/gpiolib.c
487 @@ -1153,7 +1153,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
488 gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
489 if (!gdev->descs) {
490 status = -ENOMEM;
491 - goto err_free_gdev;
492 + goto err_free_ida;
493 }
494
495 if (chip->ngpio == 0) {
496 @@ -1285,8 +1285,9 @@ err_free_label:
497 kfree(gdev->label);
498 err_free_descs:
499 kfree(gdev->descs);
500 -err_free_gdev:
501 +err_free_ida:
502 ida_simple_remove(&gpio_ida, gdev->id);
503 +err_free_gdev:
504 /* failures here can mean systems won't boot... */
505 pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__,
506 gdev->base, gdev->base + gdev->ngpio - 1,
507 diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
508 index f54afd2113a9..736133f5c5a9 100644
509 --- a/drivers/gpu/drm/ast/ast_drv.c
510 +++ b/drivers/gpu/drm/ast/ast_drv.c
511 @@ -60,8 +60,29 @@ static const struct pci_device_id pciidlist[] = {
512
513 MODULE_DEVICE_TABLE(pci, pciidlist);
514
515 +static void ast_kick_out_firmware_fb(struct pci_dev *pdev)
516 +{
517 + struct apertures_struct *ap;
518 + bool primary = false;
519 +
520 + ap = alloc_apertures(1);
521 + if (!ap)
522 + return;
523 +
524 + ap->ranges[0].base = pci_resource_start(pdev, 0);
525 + ap->ranges[0].size = pci_resource_len(pdev, 0);
526 +
527 +#ifdef CONFIG_X86
528 + primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
529 +#endif
530 + drm_fb_helper_remove_conflicting_framebuffers(ap, "astdrmfb", primary);
531 + kfree(ap);
532 +}
533 +
534 static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
535 {
536 + ast_kick_out_firmware_fb(pdev);
537 +
538 return drm_get_pci_dev(pdev, ent, &driver);
539 }
540
541 diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
542 index 5957c3e659fe..57205016b04a 100644
543 --- a/drivers/gpu/drm/ast/ast_mode.c
544 +++ b/drivers/gpu/drm/ast/ast_mode.c
545 @@ -545,6 +545,7 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
546 }
547 ast_bo_unreserve(bo);
548
549 + ast_set_offset_reg(crtc);
550 ast_set_start_address_crt1(crtc, (u32)gpu_addr);
551
552 return 0;
553 @@ -1235,7 +1236,7 @@ static int ast_cursor_move(struct drm_crtc *crtc,
554 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
555
556 /* dummy write to fire HWC */
557 - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00);
558 + ast_show_cursor(crtc);
559
560 return 0;
561 }
562 diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
563 index d28c4cf7c1ee..dc92dc41ef93 100644
564 --- a/drivers/infiniband/core/verbs.c
565 +++ b/drivers/infiniband/core/verbs.c
566 @@ -1522,7 +1522,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
567 */
568 if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
569 if (attr.qp_state >= IB_QPS_INIT) {
570 - if (qp->device->get_link_layer(qp->device, attr.port_num) !=
571 + if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
572 IB_LINK_LAYER_INFINIBAND)
573 return true;
574 goto lid_check;
575 @@ -1531,7 +1531,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
576
577 /* Can't get a quick answer, iterate over all ports */
578 for (port = 0; port < qp->device->phys_port_cnt; port++)
579 - if (qp->device->get_link_layer(qp->device, port) !=
580 + if (rdma_port_get_link_layer(qp->device, port) !=
581 IB_LINK_LAYER_INFINIBAND)
582 num_eth_ports++;
583
584 diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
585 index 018a41562704..619475c7d761 100644
586 --- a/drivers/infiniband/hw/hfi1/user_sdma.c
587 +++ b/drivers/infiniband/hw/hfi1/user_sdma.c
588 @@ -148,11 +148,8 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
589 #define TXREQ_FLAGS_REQ_LAST_PKT BIT(0)
590
591 /* SDMA request flag bits */
592 -#define SDMA_REQ_FOR_THREAD 1
593 -#define SDMA_REQ_SEND_DONE 2
594 -#define SDMA_REQ_HAVE_AHG 3
595 -#define SDMA_REQ_HAS_ERROR 4
596 -#define SDMA_REQ_DONE_ERROR 5
597 +#define SDMA_REQ_HAVE_AHG 1
598 +#define SDMA_REQ_HAS_ERROR 2
599
600 #define SDMA_PKT_Q_INACTIVE BIT(0)
601 #define SDMA_PKT_Q_ACTIVE BIT(1)
602 @@ -252,8 +249,6 @@ struct user_sdma_request {
603 u64 seqsubmitted;
604 struct list_head txps;
605 unsigned long flags;
606 - /* status of the last txreq completed */
607 - int status;
608 };
609
610 /*
611 @@ -546,7 +541,6 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
612 struct sdma_req_info info;
613 struct user_sdma_request *req;
614 u8 opcode, sc, vl;
615 - int req_queued = 0;
616 u16 dlid;
617 u32 selector;
618
619 @@ -611,11 +605,13 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
620 req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
621 req->pq = pq;
622 req->cq = cq;
623 - req->status = -1;
624 INIT_LIST_HEAD(&req->txps);
625
626 memcpy(&req->info, &info, sizeof(info));
627
628 + /* The request is initialized, count it */
629 + atomic_inc(&pq->n_reqs);
630 +
631 if (req_opcode(info.ctrl) == EXPECTED) {
632 /* expected must have a TID info and at least one data vector */
633 if (req->data_iovs < 2) {
634 @@ -704,7 +700,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
635 memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(struct iovec));
636 ret = pin_vector_pages(req, &req->iovs[i]);
637 if (ret) {
638 - req->status = ret;
639 + req->data_iovs = i;
640 goto free_req;
641 }
642 req->data_len += req->iovs[i].iov.iov_len;
643 @@ -772,14 +768,10 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
644 }
645
646 set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
647 - atomic_inc(&pq->n_reqs);
648 - req_queued = 1;
649 /* Send the first N packets in the request to buy us some time */
650 ret = user_sdma_send_pkts(req, pcount);
651 - if (unlikely(ret < 0 && ret != -EBUSY)) {
652 - req->status = ret;
653 + if (unlikely(ret < 0 && ret != -EBUSY))
654 goto free_req;
655 - }
656
657 /*
658 * It is possible that the SDMA engine would have processed all the
659 @@ -796,17 +788,11 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
660 * request have been submitted to the SDMA engine. However, it
661 * will not wait for send completions.
662 */
663 - while (!test_bit(SDMA_REQ_SEND_DONE, &req->flags)) {
664 + while (req->seqsubmitted != req->info.npkts) {
665 ret = user_sdma_send_pkts(req, pcount);
666 if (ret < 0) {
667 - if (ret != -EBUSY) {
668 - req->status = ret;
669 - set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
670 - if (ACCESS_ONCE(req->seqcomp) ==
671 - req->seqsubmitted - 1)
672 - goto free_req;
673 - return ret;
674 - }
675 + if (ret != -EBUSY)
676 + goto free_req;
677 wait_event_interruptible_timeout(
678 pq->busy.wait_dma,
679 (pq->state == SDMA_PKT_Q_ACTIVE),
680 @@ -817,10 +803,19 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
681 *count += idx;
682 return 0;
683 free_req:
684 - user_sdma_free_request(req, true);
685 - if (req_queued)
686 + /*
687 + * If the submitted seqsubmitted == npkts, the completion routine
688 + * controls the final state. If sequbmitted < npkts, wait for any
689 + * outstanding packets to finish before cleaning up.
690 + */
691 + if (req->seqsubmitted < req->info.npkts) {
692 + if (req->seqsubmitted)
693 + wait_event(pq->busy.wait_dma,
694 + (req->seqcomp == req->seqsubmitted - 1));
695 + user_sdma_free_request(req, true);
696 pq_update(pq);
697 - set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
698 + set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
699 + }
700 return ret;
701 }
702
703 @@ -903,10 +898,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
704 pq = req->pq;
705
706 /* If tx completion has reported an error, we are done. */
707 - if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) {
708 - set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
709 + if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags))
710 return -EFAULT;
711 - }
712
713 /*
714 * Check if we might have sent the entire request already
715 @@ -929,10 +922,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
716 * with errors. If so, we are not going to process any
717 * more packets from this request.
718 */
719 - if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) {
720 - set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
721 + if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags))
722 return -EFAULT;
723 - }
724
725 tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
726 if (!tx)
727 @@ -1090,7 +1081,6 @@ dosend:
728 ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
729 req->seqsubmitted += count;
730 if (req->seqsubmitted == req->info.npkts) {
731 - set_bit(SDMA_REQ_SEND_DONE, &req->flags);
732 /*
733 * The txreq has already been submitted to the HW queue
734 * so we can free the AHG entry now. Corruption will not
735 @@ -1489,11 +1479,15 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
736 return diff;
737 }
738
739 -/*
740 - * SDMA tx request completion callback. Called when the SDMA progress
741 - * state machine gets notification that the SDMA descriptors for this
742 - * tx request have been processed by the DMA engine. Called in
743 - * interrupt context.
744 +/**
745 + * user_sdma_txreq_cb() - SDMA tx request completion callback.
746 + * @txreq: valid sdma tx request
747 + * @status: success/failure of request
748 + *
749 + * Called when the SDMA progress state machine gets notification that
750 + * the SDMA descriptors for this tx request have been processed by the
751 + * DMA engine. Called in interrupt context.
752 + * Only do work on completed sequences.
753 */
754 static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
755 {
756 @@ -1502,7 +1496,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
757 struct user_sdma_request *req;
758 struct hfi1_user_sdma_pkt_q *pq;
759 struct hfi1_user_sdma_comp_q *cq;
760 - u16 idx;
761 + enum hfi1_sdma_comp_state state = COMPLETE;
762
763 if (!tx->req)
764 return;
765 @@ -1515,31 +1509,19 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
766 SDMA_DBG(req, "SDMA completion with error %d",
767 status);
768 set_bit(SDMA_REQ_HAS_ERROR, &req->flags);
769 + state = ERROR;
770 }
771
772 req->seqcomp = tx->seqnum;
773 kmem_cache_free(pq->txreq_cache, tx);
774 - tx = NULL;
775 -
776 - idx = req->info.comp_idx;
777 - if (req->status == -1 && status == SDMA_TXREQ_S_OK) {
778 - if (req->seqcomp == req->info.npkts - 1) {
779 - req->status = 0;
780 - user_sdma_free_request(req, false);
781 - pq_update(pq);
782 - set_comp_state(pq, cq, idx, COMPLETE, 0);
783 - }
784 - } else {
785 - if (status != SDMA_TXREQ_S_OK)
786 - req->status = status;
787 - if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) &&
788 - (test_bit(SDMA_REQ_SEND_DONE, &req->flags) ||
789 - test_bit(SDMA_REQ_DONE_ERROR, &req->flags))) {
790 - user_sdma_free_request(req, false);
791 - pq_update(pq);
792 - set_comp_state(pq, cq, idx, ERROR, req->status);
793 - }
794 - }
795 +
796 + /* sequence isn't complete? We are done */
797 + if (req->seqcomp != req->info.npkts - 1)
798 + return;
799 +
800 + user_sdma_free_request(req, false);
801 + set_comp_state(pq, cq, req->info.comp_idx, state, status);
802 + pq_update(pq);
803 }
804
805 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
806 @@ -1572,6 +1554,8 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
807 if (!node)
808 continue;
809
810 + req->iovs[i].node = NULL;
811 +
812 if (unpin)
813 hfi1_mmu_rb_remove(req->pq->handler,
814 &node->rb);
815 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
816 index f397a5b6910f..2e52015634f9 100644
817 --- a/drivers/input/joystick/xpad.c
818 +++ b/drivers/input/joystick/xpad.c
819 @@ -89,8 +89,10 @@
820
821 #define XPAD_PKT_LEN 64
822
823 -/* xbox d-pads should map to buttons, as is required for DDR pads
824 - but we map them to axes when possible to simplify things */
825 +/*
826 + * xbox d-pads should map to buttons, as is required for DDR pads
827 + * but we map them to axes when possible to simplify things
828 + */
829 #define MAP_DPAD_TO_BUTTONS (1 << 0)
830 #define MAP_TRIGGERS_TO_BUTTONS (1 << 1)
831 #define MAP_STICKS_TO_NULL (1 << 2)
832 @@ -126,45 +128,77 @@ static const struct xpad_device {
833 u8 mapping;
834 u8 xtype;
835 } xpad_device[] = {
836 + { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
837 + { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
838 + { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
839 + { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
840 + { 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX },
841 + { 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
842 { 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
843 { 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX },
844 { 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX },
845 + { 0x045e, 0x0288, "Microsoft Xbox Controller S v2", 0, XTYPE_XBOX },
846 { 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", 0, XTYPE_XBOX },
847 { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
848 + { 0x045e, 0x028f, "Microsoft X-Box 360 pad v2", 0, XTYPE_XBOX360 },
849 + { 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
850 { 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
851 { 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE },
852 { 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE },
853 - { 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
854 + { 0x045e, 0x02ea, "Microsoft X-Box One S pad", 0, XTYPE_XBOXONE },
855 { 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
856 - { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
857 - { 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
858 { 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
859 { 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 },
860 { 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 },
861 { 0x046d, 0xc242, "Logitech Chillstream Controller", 0, XTYPE_XBOX360 },
862 { 0x046d, 0xca84, "Logitech Xbox Cordless Controller", 0, XTYPE_XBOX },
863 { 0x046d, 0xca88, "Logitech Compact Controller for Xbox", 0, XTYPE_XBOX },
864 + { 0x046d, 0xca8a, "Logitech Precision Vibration Feedback Wheel", 0, XTYPE_XBOX },
865 + { 0x046d, 0xcaa3, "Logitech DriveFx Racing Wheel", 0, XTYPE_XBOX360 },
866 + { 0x056e, 0x2004, "Elecom JC-U3613M", 0, XTYPE_XBOX360 },
867 { 0x05fd, 0x1007, "Mad Catz Controller (unverified)", 0, XTYPE_XBOX },
868 { 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX },
869 + { 0x05fe, 0x3030, "Chic Controller", 0, XTYPE_XBOX },
870 + { 0x05fe, 0x3031, "Chic Controller", 0, XTYPE_XBOX },
871 + { 0x062a, 0x0020, "Logic3 Xbox GamePad", 0, XTYPE_XBOX },
872 + { 0x062a, 0x0033, "Competition Pro Steering Wheel", 0, XTYPE_XBOX },
873 + { 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX },
874 + { 0x06a3, 0x0201, "Saitek Adrenalin", 0, XTYPE_XBOX },
875 + { 0x06a3, 0xf51a, "Saitek P3600", 0, XTYPE_XBOX360 },
876 + { 0x0738, 0x4506, "Mad Catz 4506 Wireless Controller", 0, XTYPE_XBOX },
877 { 0x0738, 0x4516, "Mad Catz Control Pad", 0, XTYPE_XBOX },
878 + { 0x0738, 0x4520, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX },
879 { 0x0738, 0x4522, "Mad Catz LumiCON", 0, XTYPE_XBOX },
880 { 0x0738, 0x4526, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX },
881 + { 0x0738, 0x4530, "Mad Catz Universal MC2 Racing Wheel and Pedals", 0, XTYPE_XBOX },
882 { 0x0738, 0x4536, "Mad Catz MicroCON", 0, XTYPE_XBOX },
883 { 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
884 { 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
885 + { 0x0738, 0x4586, "Mad Catz MicroCon Wireless Controller", 0, XTYPE_XBOX },
886 + { 0x0738, 0x4588, "Mad Catz Blaster", 0, XTYPE_XBOX },
887 + { 0x0738, 0x45ff, "Mad Catz Beat Pad (w/ Handle)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
888 { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
889 { 0x0738, 0x4718, "Mad Catz Street Fighter IV FightStick SE", 0, XTYPE_XBOX360 },
890 { 0x0738, 0x4726, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
891 { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
892 + { 0x0738, 0x4736, "Mad Catz MicroCon Gamepad", 0, XTYPE_XBOX360 },
893 { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
894 { 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 },
895 + { 0x0738, 0x4743, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
896 + { 0x0738, 0x4758, "Mad Catz Arcade Game Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
897 { 0x0738, 0x4a01, "Mad Catz FightStick TE 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
898 { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
899 + { 0x0738, 0x9871, "Mad Catz Portable Drum", 0, XTYPE_XBOX360 },
900 { 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
901 + { 0x0738, 0xb738, "Mad Catz MVC2TE Stick 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
902 { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
903 { 0x0738, 0xcb02, "Saitek Cyborg Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
904 { 0x0738, 0xcb03, "Saitek P3200 Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
905 + { 0x0738, 0xcb29, "Saitek Aviator Stick AV8R02", 0, XTYPE_XBOX360 },
906 { 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 },
907 + { 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 },
908 + { 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX },
909 + { 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX },
910 { 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
911 { 0x0c12, 0x8809, "RedOctane Xbox Dance Pad", DANCEPAD_MAP_CONFIG, XTYPE_XBOX },
912 { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
913 @@ -172,35 +206,66 @@ static const struct xpad_device {
914 { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
915 { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
916 { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
917 + { 0x0e4c, 0x1103, "Radica Gamester Reflex", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX },
918 { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
919 + { 0x0e4c, 0x3510, "Radica Gamester", 0, XTYPE_XBOX },
920 { 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
921 { 0x0e6f, 0x0005, "Eclipse wireless Controller", 0, XTYPE_XBOX },
922 { 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX },
923 + { 0x0e6f, 0x0008, "After Glow Pro Controller", 0, XTYPE_XBOX },
924 { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
925 { 0x0e6f, 0x0113, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
926 + { 0x0e6f, 0x011f, "Rock Candy Gamepad Wired Controller", 0, XTYPE_XBOX360 },
927 + { 0x0e6f, 0x0131, "PDP EA Sports Controller", 0, XTYPE_XBOX360 },
928 + { 0x0e6f, 0x0133, "Xbox 360 Wired Controller", 0, XTYPE_XBOX360 },
929 { 0x0e6f, 0x0139, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
930 + { 0x0e6f, 0x013a, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
931 + { 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
932 + { 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE },
933 + { 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
934 + { 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
935 + { 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
936 + { 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
937 + { 0x0e6f, 0x0164, "PDP Battlefield One", 0, XTYPE_XBOXONE },
938 + { 0x0e6f, 0x0165, "PDP Titanfall 2", 0, XTYPE_XBOXONE },
939 { 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
940 { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
941 { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
942 - { 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
943 + { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
944 + { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
945 + { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
946 + { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
947 { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
948 + { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
949 { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
950 + { 0x0e6f, 0x0413, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
951 + { 0x0e6f, 0x0501, "PDP Xbox 360 Controller", 0, XTYPE_XBOX360 },
952 + { 0x0e6f, 0xf900, "PDP Afterglow AX.1", 0, XTYPE_XBOX360 },
953 { 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX },
954 { 0x0e8f, 0x3008, "Generic xbox control (dealextreme)", 0, XTYPE_XBOX },
955 { 0x0f0d, 0x000a, "Hori Co. DOA4 FightStick", 0, XTYPE_XBOX360 },
956 + { 0x0f0d, 0x000c, "Hori PadEX Turbo", 0, XTYPE_XBOX360 },
957 { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
958 { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
959 + { 0x0f0d, 0x001b, "Hori Real Arcade Pro VX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
960 + { 0x0f0d, 0x0063, "Hori Real Arcade Pro Hayabusa (USA) Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
961 { 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE },
962 + { 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
963 + { 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX },
964 { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
965 { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
966 { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
967 + { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
968 { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
969 { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
970 + { 0x12ab, 0x0303, "Mortal Kombat Klassic FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
971 { 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
972 { 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 },
973 { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
974 + { 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 },
975 { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
976 { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
977 + { 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
978 { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
979 { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
980 { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
981 @@ -208,27 +273,67 @@ static const struct xpad_device {
982 { 0x162e, 0xbeef, "Joytech Neo-Se Take2", 0, XTYPE_XBOX360 },
983 { 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
984 { 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
985 - { 0x24c6, 0x542a, "Xbox ONE spectra", 0, XTYPE_XBOXONE },
986 - { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
987 + { 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
988 { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
989 { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
990 + { 0x1bad, 0x0130, "Ion Drum Rocker", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
991 { 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
992 + { 0x1bad, 0xf018, "Mad Catz Street Fighter IV SE Fighting Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
993 + { 0x1bad, 0xf019, "Mad Catz Brawlstick for Xbox 360", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
994 + { 0x1bad, 0xf021, "Mad Cats Ghost Recon FS GamePad", 0, XTYPE_XBOX360 },
995 { 0x1bad, 0xf023, "MLG Pro Circuit Controller (Xbox)", 0, XTYPE_XBOX360 },
996 + { 0x1bad, 0xf025, "Mad Catz Call Of Duty", 0, XTYPE_XBOX360 },
997 + { 0x1bad, 0xf027, "Mad Catz FPS Pro", 0, XTYPE_XBOX360 },
998 { 0x1bad, 0xf028, "Street Fighter IV FightPad", 0, XTYPE_XBOX360 },
999 + { 0x1bad, 0xf02e, "Mad Catz Fightpad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
1000 + { 0x1bad, 0xf030, "Mad Catz Xbox 360 MC2 MicroCon Racing Wheel", 0, XTYPE_XBOX360 },
1001 + { 0x1bad, 0xf036, "Mad Catz MicroCon GamePad Pro", 0, XTYPE_XBOX360 },
1002 { 0x1bad, 0xf038, "Street Fighter IV FightStick TE", 0, XTYPE_XBOX360 },
1003 + { 0x1bad, 0xf039, "Mad Catz MvC2 TE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
1004 + { 0x1bad, 0xf03a, "Mad Catz SFxT Fightstick Pro", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
1005 + { 0x1bad, 0xf03d, "Street Fighter IV Arcade Stick TE - Chun Li", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
1006 + { 0x1bad, 0xf03e, "Mad Catz MLG FightStick TE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
1007 + { 0x1bad, 0xf03f, "Mad Catz FightStick SoulCaliber", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
1008 + { 0x1bad, 0xf042, "Mad Catz FightStick TES+", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
1009 + { 0x1bad, 0xf080, "Mad Catz FightStick TE2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
1010 + { 0x1bad, 0xf501, "HoriPad EX2 Turbo", 0, XTYPE_XBOX360 },
1011 + { 0x1bad, 0xf502, "Hori Real Arcade Pro.VX SA", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
1012 + { 0x1bad, 0xf503, "Hori Fighting Stick VX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
1013 + { 0x1bad, 0xf504, "Hori Real Arcade Pro. EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
1014 + { 0x1bad, 0xf505, "Hori Fighting Stick EX2B", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
1015 + { 0x1bad, 0xf506, "Hori Real Arcade Pro.EX Premium VLX", 0, XTYPE_XBOX360 },
1016 { 0x1bad, 0xf900, "Harmonix Xbox 360 Controller", 0, XTYPE_XBOX360 },
1017 { 0x1bad, 0xf901, "Gamestop Xbox 360 Controller", 0, XTYPE_XBOX360 },
1018 { 0x1bad, 0xf903, "Tron Xbox 360 controller", 0, XTYPE_XBOX360 },
1019 + { 0x1bad, 0xf904, "PDP Versus Fighting Pad", 0, XTYPE_XBOX360 },
1020 + { 0x1bad, 0xf906, "MortalKombat FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
1021 + { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
1022 + { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
1023 + { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
1024 { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
1025 { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
1026 { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
1027 + { 0x24c6, 0x530a, "Xbox 360 Pro EX Controller", 0, XTYPE_XBOX360 },
1028 + { 0x24c6, 0x531a, "PowerA Pro Ex", 0, XTYPE_XBOX360 },
1029 + { 0x24c6, 0x5397, "FUS1ON Tournament Controller", 0, XTYPE_XBOX360 },
1030 { 0x24c6, 0x541a, "PowerA Xbox One Mini Wired Controller", 0, XTYPE_XBOXONE },
1031 + { 0x24c6, 0x542a, "Xbox ONE spectra", 0, XTYPE_XBOXONE },
1032 { 0x24c6, 0x543a, "PowerA Xbox One wired controller", 0, XTYPE_XBOXONE },
1033 { 0x24c6, 0x5500, "Hori XBOX 360 EX 2 with Turbo", 0, XTYPE_XBOX360 },
1034 { 0x24c6, 0x5501, "Hori Real Arcade Pro VX-SA", 0, XTYPE_XBOX360 },
1035 + { 0x24c6, 0x5502, "Hori Fighting Stick VX Alt", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
1036 + { 0x24c6, 0x5503, "Hori Fighting Edge", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
1037 { 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 },
1038 + { 0x24c6, 0x550d, "Hori GEM Xbox controller", 0, XTYPE_XBOX360 },
1039 + { 0x24c6, 0x550e, "Hori Real Arcade Pro V Kai 360", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
1040 + { 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE },
1041 + { 0x24c6, 0x561a, "PowerA FUSION Controller", 0, XTYPE_XBOXONE },
1042 + { 0x24c6, 0x5b00, "ThrustMaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
1043 { 0x24c6, 0x5b02, "Thrustmaster, Inc. GPX Controller", 0, XTYPE_XBOX360 },
1044 { 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
1045 + { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
1046 + { 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
1047 + { 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
1048 { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
1049 { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
1050 };
1051 @@ -289,15 +394,15 @@ static const signed short xpad_abs_triggers[] = {
1052 * match against vendor id as well. Wired Xbox 360 devices have protocol 1,
1053 * wireless controllers have protocol 129.
1054 */
1055 -#define XPAD_XBOX360_VENDOR_PROTOCOL(vend,pr) \
1056 +#define XPAD_XBOX360_VENDOR_PROTOCOL(vend, pr) \
1057 .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, \
1058 .idVendor = (vend), \
1059 .bInterfaceClass = USB_CLASS_VENDOR_SPEC, \
1060 .bInterfaceSubClass = 93, \
1061 .bInterfaceProtocol = (pr)
1062 #define XPAD_XBOX360_VENDOR(vend) \
1063 - { XPAD_XBOX360_VENDOR_PROTOCOL(vend,1) }, \
1064 - { XPAD_XBOX360_VENDOR_PROTOCOL(vend,129) }
1065 + { XPAD_XBOX360_VENDOR_PROTOCOL((vend), 1) }, \
1066 + { XPAD_XBOX360_VENDOR_PROTOCOL((vend), 129) }
1067
1068 /* The Xbox One controller uses subclass 71 and protocol 208. */
1069 #define XPAD_XBOXONE_VENDOR_PROTOCOL(vend, pr) \
1070 @@ -307,37 +412,138 @@ static const signed short xpad_abs_triggers[] = {
1071 .bInterfaceSubClass = 71, \
1072 .bInterfaceProtocol = (pr)
1073 #define XPAD_XBOXONE_VENDOR(vend) \
1074 - { XPAD_XBOXONE_VENDOR_PROTOCOL(vend, 208) }
1075 + { XPAD_XBOXONE_VENDOR_PROTOCOL((vend), 208) }
1076
1077 -static struct usb_device_id xpad_table[] = {
1078 +static const struct usb_device_id xpad_table[] = {
1079 { USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */
1080 + XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 Controller */
1081 XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */
1082 XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
1083 XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */
1084 XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
1085 + XPAD_XBOX360_VENDOR(0x056e), /* Elecom JC-U3613M */
1086 + XPAD_XBOX360_VENDOR(0x06a3), /* Saitek P3600 */
1087 XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
1088 { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
1089 XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
1090 + XPAD_XBOX360_VENDOR(0x07ff), /* Mad Catz GamePad */
1091 XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
1092 XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */
1093 + XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
1094 + XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
1095 + XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */
1096 XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
1097 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
1098 XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
1099 - XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
1100 - XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
1101 - XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
1102 - XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
1103 - XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
1104 - XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
1105 XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */
1106 XPAD_XBOXONE_VENDOR(0x1532), /* Razer Wildcat */
1107 XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */
1108 XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
1109 + XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
1110 + XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
1111 + XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
1112 + XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
1113 { }
1114 };
1115
1116 MODULE_DEVICE_TABLE(usb, xpad_table);
1117
1118 +struct xboxone_init_packet {
1119 + u16 idVendor;
1120 + u16 idProduct;
1121 + const u8 *data;
1122 + u8 len;
1123 +};
1124 +
1125 +#define XBOXONE_INIT_PKT(_vid, _pid, _data) \
1126 + { \
1127 + .idVendor = (_vid), \
1128 + .idProduct = (_pid), \
1129 + .data = (_data), \
1130 + .len = ARRAY_SIZE(_data), \
1131 + }
1132 +
1133 +
1134 +/*
1135 + * This packet is required for all Xbox One pads with 2015
1136 + * or later firmware installed (or present from the factory).
1137 + */
1138 +static const u8 xboxone_fw2015_init[] = {
1139 + 0x05, 0x20, 0x00, 0x01, 0x00
1140 +};
1141 +
1142 +/*
1143 + * This packet is required for the Titanfall 2 Xbox One pads
1144 + * (0x0e6f:0x0165) to finish initialization and for Hori pads
1145 + * (0x0f0d:0x0067) to make the analog sticks work.
1146 + */
1147 +static const u8 xboxone_hori_init[] = {
1148 + 0x01, 0x20, 0x00, 0x09, 0x00, 0x04, 0x20, 0x3a,
1149 + 0x00, 0x00, 0x00, 0x80, 0x00
1150 +};
1151 +
1152 +/*
1153 + * This packet is required for some of the PDP pads to start
1154 + * sending input reports. These pads include: (0x0e6f:0x02ab),
1155 + * (0x0e6f:0x02a4).
1156 + */
1157 +static const u8 xboxone_pdp_init1[] = {
1158 + 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
1159 +};
1160 +
1161 +/*
1162 + * This packet is required for some of the PDP pads to start
1163 + * sending input reports. These pads include: (0x0e6f:0x02ab),
1164 + * (0x0e6f:0x02a4).
1165 + */
1166 +static const u8 xboxone_pdp_init2[] = {
1167 + 0x06, 0x20, 0x00, 0x02, 0x01, 0x00
1168 +};
1169 +
1170 +/*
1171 + * A specific rumble packet is required for some PowerA pads to start
1172 + * sending input reports. One of those pads is (0x24c6:0x543a).
1173 + */
1174 +static const u8 xboxone_rumblebegin_init[] = {
1175 + 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
1176 + 0x1D, 0x1D, 0xFF, 0x00, 0x00
1177 +};
1178 +
1179 +/*
1180 + * A rumble packet with zero FF intensity will immediately
1181 + * terminate the rumbling required to init PowerA pads.
1182 + * This should happen fast enough that the motors don't
1183 + * spin up to enough speed to actually vibrate the gamepad.
1184 + */
1185 +static const u8 xboxone_rumbleend_init[] = {
1186 + 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
1187 + 0x00, 0x00, 0x00, 0x00, 0x00
1188 +};
1189 +
1190 +/*
1191 + * This specifies the selection of init packets that a gamepad
1192 + * will be sent on init *and* the order in which they will be
1193 + * sent. The correct sequence number will be added when the
1194 + * packet is going to be sent.
1195 + */
1196 +static const struct xboxone_init_packet xboxone_init_packets[] = {
1197 + XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
1198 + XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
1199 + XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
1200 + XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1),
1201 + XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
1202 + XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1),
1203 + XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2),
1204 + XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1),
1205 + XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2),
1206 + XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
1207 + XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
1208 + XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
1209 + XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumbleend_init),
1210 + XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumbleend_init),
1211 + XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumbleend_init),
1212 +};
1213 +
1214 struct xpad_output_packet {
1215 u8 data[XPAD_PKT_LEN];
1216 u8 len;
1217 @@ -374,6 +580,7 @@ struct usb_xpad {
1218
1219 struct xpad_output_packet out_packets[XPAD_NUM_OUT_PACKETS];
1220 int last_out_packet;
1221 + int init_seq;
1222
1223 #if defined(CONFIG_JOYSTICK_XPAD_LEDS)
1224 struct xpad_led *led;
1225 @@ -390,6 +597,7 @@ struct usb_xpad {
1226
1227 static int xpad_init_input(struct usb_xpad *xpad);
1228 static void xpad_deinit_input(struct usb_xpad *xpad);
1229 +static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num);
1230
1231 /*
1232 * xpad_process_packet
1233 @@ -609,14 +817,36 @@ static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned cha
1234 }
1235
1236 /*
1237 - * xpadone_process_buttons
1238 + * xpadone_process_packet
1239 + *
1240 + * Completes a request by converting the data into events for the
1241 + * input subsystem. This version is for the Xbox One controller.
1242 *
1243 - * Process a button update packet from an Xbox one controller.
1244 + * The report format was gleaned from
1245 + * https://github.com/kylelemons/xbox/blob/master/xbox.go
1246 */
1247 -static void xpadone_process_buttons(struct usb_xpad *xpad,
1248 - struct input_dev *dev,
1249 - unsigned char *data)
1250 +static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data)
1251 {
1252 + struct input_dev *dev = xpad->dev;
1253 +
1254 + /* the xbox button has its own special report */
1255 + if (data[0] == 0X07) {
1256 + /*
1257 + * The Xbox One S controller requires these reports to be
1258 + * acked otherwise it continues sending them forever and
1259 + * won't report further mode button events.
1260 + */
1261 + if (data[1] == 0x30)
1262 + xpadone_ack_mode_report(xpad, data[2]);
1263 +
1264 + input_report_key(dev, BTN_MODE, data[4] & 0x01);
1265 + input_sync(dev);
1266 + return;
1267 + }
1268 + /* check invalid packet */
1269 + else if (data[0] != 0X20)
1270 + return;
1271 +
1272 /* menu/view buttons */
1273 input_report_key(dev, BTN_START, data[4] & 0x04);
1274 input_report_key(dev, BTN_SELECT, data[4] & 0x08);
1275 @@ -679,34 +909,6 @@ static void xpadone_process_buttons(struct usb_xpad *xpad,
1276 input_sync(dev);
1277 }
1278
1279 -/*
1280 - * xpadone_process_packet
1281 - *
1282 - * Completes a request by converting the data into events for the
1283 - * input subsystem. This version is for the Xbox One controller.
1284 - *
1285 - * The report format was gleaned from
1286 - * https://github.com/kylelemons/xbox/blob/master/xbox.go
1287 - */
1288 -
1289 -static void xpadone_process_packet(struct usb_xpad *xpad,
1290 - u16 cmd, unsigned char *data)
1291 -{
1292 - struct input_dev *dev = xpad->dev;
1293 -
1294 - switch (data[0]) {
1295 - case 0x20:
1296 - xpadone_process_buttons(xpad, dev, data);
1297 - break;
1298 -
1299 - case 0x07:
1300 - /* the xbox button has its own special report */
1301 - input_report_key(dev, BTN_MODE, data[4] & 0x01);
1302 - input_sync(dev);
1303 - break;
1304 - }
1305 -}
1306 -
1307 static void xpad_irq_in(struct urb *urb)
1308 {
1309 struct usb_xpad *xpad = urb->context;
1310 @@ -753,12 +955,48 @@ exit:
1311 __func__, retval);
1312 }
1313
1314 +/* Callers must hold xpad->odata_lock spinlock */
1315 +static bool xpad_prepare_next_init_packet(struct usb_xpad *xpad)
1316 +{
1317 + const struct xboxone_init_packet *init_packet;
1318 +
1319 + if (xpad->xtype != XTYPE_XBOXONE)
1320 + return false;
1321 +
1322 + /* Perform initialization sequence for Xbox One pads that require it */
1323 + while (xpad->init_seq < ARRAY_SIZE(xboxone_init_packets)) {
1324 + init_packet = &xboxone_init_packets[xpad->init_seq++];
1325 +
1326 + if (init_packet->idVendor != 0 &&
1327 + init_packet->idVendor != xpad->dev->id.vendor)
1328 + continue;
1329 +
1330 + if (init_packet->idProduct != 0 &&
1331 + init_packet->idProduct != xpad->dev->id.product)
1332 + continue;
1333 +
1334 + /* This packet applies to our device, so prepare to send it */
1335 + memcpy(xpad->odata, init_packet->data, init_packet->len);
1336 + xpad->irq_out->transfer_buffer_length = init_packet->len;
1337 +
1338 + /* Update packet with current sequence number */
1339 + xpad->odata[2] = xpad->odata_serial++;
1340 + return true;
1341 + }
1342 +
1343 + return false;
1344 +}
1345 +
1346 /* Callers must hold xpad->odata_lock spinlock */
1347 static bool xpad_prepare_next_out_packet(struct usb_xpad *xpad)
1348 {
1349 struct xpad_output_packet *pkt, *packet = NULL;
1350 int i;
1351
1352 + /* We may have init packets to send before we can send user commands */
1353 + if (xpad_prepare_next_init_packet(xpad))
1354 + return true;
1355 +
1356 for (i = 0; i < XPAD_NUM_OUT_PACKETS; i++) {
1357 if (++xpad->last_out_packet >= XPAD_NUM_OUT_PACKETS)
1358 xpad->last_out_packet = 0;
1359 @@ -851,10 +1089,9 @@ static void xpad_irq_out(struct urb *urb)
1360 spin_unlock_irqrestore(&xpad->odata_lock, flags);
1361 }
1362
1363 -static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
1364 +static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad,
1365 + struct usb_endpoint_descriptor *ep_irq_out)
1366 {
1367 - struct usb_endpoint_descriptor *ep_irq_out;
1368 - int ep_irq_out_idx;
1369 int error;
1370
1371 if (xpad->xtype == XTYPE_UNKNOWN)
1372 @@ -864,23 +1101,17 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
1373
1374 xpad->odata = usb_alloc_coherent(xpad->udev, XPAD_PKT_LEN,
1375 GFP_KERNEL, &xpad->odata_dma);
1376 - if (!xpad->odata) {
1377 - error = -ENOMEM;
1378 - goto fail1;
1379 - }
1380 + if (!xpad->odata)
1381 + return -ENOMEM;
1382
1383 spin_lock_init(&xpad->odata_lock);
1384
1385 xpad->irq_out = usb_alloc_urb(0, GFP_KERNEL);
1386 if (!xpad->irq_out) {
1387 error = -ENOMEM;
1388 - goto fail2;
1389 + goto err_free_coherent;
1390 }
1391
1392 - /* Xbox One controller has in/out endpoints swapped. */
1393 - ep_irq_out_idx = xpad->xtype == XTYPE_XBOXONE ? 0 : 1;
1394 - ep_irq_out = &intf->cur_altsetting->endpoint[ep_irq_out_idx].desc;
1395 -
1396 usb_fill_int_urb(xpad->irq_out, xpad->udev,
1397 usb_sndintpipe(xpad->udev, ep_irq_out->bEndpointAddress),
1398 xpad->odata, XPAD_PKT_LEN,
1399 @@ -890,8 +1121,9 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
1400
1401 return 0;
1402
1403 - fail2: usb_free_coherent(xpad->udev, XPAD_PKT_LEN, xpad->odata, xpad->odata_dma);
1404 - fail1: return error;
1405 +err_free_coherent:
1406 + usb_free_coherent(xpad->udev, XPAD_PKT_LEN, xpad->odata, xpad->odata_dma);
1407 + return error;
1408 }
1409
1410 static void xpad_stop_output(struct usb_xpad *xpad)
1411 @@ -950,24 +1182,17 @@ static int xpad_inquiry_pad_presence(struct usb_xpad *xpad)
1412
1413 static int xpad_start_xbox_one(struct usb_xpad *xpad)
1414 {
1415 - struct xpad_output_packet *packet =
1416 - &xpad->out_packets[XPAD_OUT_CMD_IDX];
1417 unsigned long flags;
1418 int retval;
1419
1420 spin_lock_irqsave(&xpad->odata_lock, flags);
1421
1422 - /* Xbox one controller needs to be initialized. */
1423 - packet->data[0] = 0x05;
1424 - packet->data[1] = 0x20;
1425 - packet->data[2] = xpad->odata_serial++; /* packet serial */
1426 - packet->data[3] = 0x01; /* rumble bit enable? */
1427 - packet->data[4] = 0x00;
1428 - packet->len = 5;
1429 - packet->pending = true;
1430 -
1431 - /* Reset the sequence so we send out start packet first */
1432 - xpad->last_out_packet = -1;
1433 + /*
1434 + * Begin the init sequence by attempting to send a packet.
1435 + * We will cycle through the init packet sequence before
1436 + * sending any packets from the output ring.
1437 + */
1438 + xpad->init_seq = 0;
1439 retval = xpad_try_sending_next_out_packet(xpad);
1440
1441 spin_unlock_irqrestore(&xpad->odata_lock, flags);
1442 @@ -975,6 +1200,30 @@ static int xpad_start_xbox_one(struct usb_xpad *xpad)
1443 return retval;
1444 }
1445
1446 +static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num)
1447 +{
1448 + unsigned long flags;
1449 + struct xpad_output_packet *packet =
1450 + &xpad->out_packets[XPAD_OUT_CMD_IDX];
1451 + static const u8 mode_report_ack[] = {
1452 + 0x01, 0x20, 0x00, 0x09, 0x00, 0x07, 0x20, 0x02,
1453 + 0x00, 0x00, 0x00, 0x00, 0x00
1454 + };
1455 +
1456 + spin_lock_irqsave(&xpad->odata_lock, flags);
1457 +
1458 + packet->len = sizeof(mode_report_ack);
1459 + memcpy(packet->data, mode_report_ack, packet->len);
1460 + packet->data[2] = seq_num;
1461 + packet->pending = true;
1462 +
1463 + /* Reset the sequence so we send out the ack now */
1464 + xpad->last_out_packet = -1;
1465 + xpad_try_sending_next_out_packet(xpad);
1466 +
1467 + spin_unlock_irqrestore(&xpad->odata_lock, flags);
1468 +}
1469 +
1470 #ifdef CONFIG_JOYSTICK_XPAD_FF
1471 static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect)
1472 {
1473 @@ -1046,9 +1295,9 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
1474 packet->data[7] = 0x00;
1475 packet->data[8] = strong / 512; /* left actuator */
1476 packet->data[9] = weak / 512; /* right actuator */
1477 - packet->data[10] = 0xFF;
1478 - packet->data[11] = 0x00;
1479 - packet->data[12] = 0x00;
1480 + packet->data[10] = 0xFF; /* on period */
1481 + packet->data[11] = 0x00; /* off period */
1482 + packet->data[12] = 0xFF; /* repeat count */
1483 packet->len = 13;
1484 packet->pending = true;
1485 break;
1486 @@ -1199,6 +1448,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
1487 led_cdev = &led->led_cdev;
1488 led_cdev->name = led->name;
1489 led_cdev->brightness_set = xpad_led_set;
1490 + led_cdev->flags = LED_CORE_SUSPENDRESUME;
1491
1492 error = led_classdev_register(&xpad->udev->dev, led_cdev);
1493 if (error)
1494 @@ -1333,7 +1583,6 @@ static void xpad_close(struct input_dev *dev)
1495 static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs)
1496 {
1497 struct usb_xpad *xpad = input_get_drvdata(input_dev);
1498 - set_bit(abs, input_dev->absbit);
1499
1500 switch (abs) {
1501 case ABS_X:
1502 @@ -1353,6 +1602,9 @@ static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs)
1503 	case ABS_HAT0Y:	/* the d-pad (only if dpad is mapped to axes) */
1504 input_set_abs_params(input_dev, abs, -1, 1, 0, 0);
1505 break;
1506 + default:
1507 + input_set_abs_params(input_dev, abs, 0, 0, 0, 0);
1508 + break;
1509 }
1510 }
1511
1512 @@ -1393,10 +1645,7 @@ static int xpad_init_input(struct usb_xpad *xpad)
1513 input_dev->close = xpad_close;
1514 }
1515
1516 - __set_bit(EV_KEY, input_dev->evbit);
1517 -
1518 if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
1519 - __set_bit(EV_ABS, input_dev->evbit);
1520 /* set up axes */
1521 for (i = 0; xpad_abs[i] >= 0; i++)
1522 xpad_set_up_abs(input_dev, xpad_abs[i]);
1523 @@ -1404,21 +1653,22 @@ static int xpad_init_input(struct usb_xpad *xpad)
1524
1525 /* set up standard buttons */
1526 for (i = 0; xpad_common_btn[i] >= 0; i++)
1527 - __set_bit(xpad_common_btn[i], input_dev->keybit);
1528 + input_set_capability(input_dev, EV_KEY, xpad_common_btn[i]);
1529
1530 /* set up model-specific ones */
1531 if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX360W ||
1532 xpad->xtype == XTYPE_XBOXONE) {
1533 for (i = 0; xpad360_btn[i] >= 0; i++)
1534 - __set_bit(xpad360_btn[i], input_dev->keybit);
1535 + input_set_capability(input_dev, EV_KEY, xpad360_btn[i]);
1536 } else {
1537 for (i = 0; xpad_btn[i] >= 0; i++)
1538 - __set_bit(xpad_btn[i], input_dev->keybit);
1539 + input_set_capability(input_dev, EV_KEY, xpad_btn[i]);
1540 }
1541
1542 if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
1543 for (i = 0; xpad_btn_pad[i] >= 0; i++)
1544 - __set_bit(xpad_btn_pad[i], input_dev->keybit);
1545 + input_set_capability(input_dev, EV_KEY,
1546 + xpad_btn_pad[i]);
1547 }
1548
1549 /*
1550 @@ -1435,7 +1685,8 @@ static int xpad_init_input(struct usb_xpad *xpad)
1551
1552 if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
1553 for (i = 0; xpad_btn_triggers[i] >= 0; i++)
1554 - __set_bit(xpad_btn_triggers[i], input_dev->keybit);
1555 + input_set_capability(input_dev, EV_KEY,
1556 + xpad_btn_triggers[i]);
1557 } else {
1558 for (i = 0; xpad_abs_triggers[i] >= 0; i++)
1559 xpad_set_up_abs(input_dev, xpad_abs_triggers[i]);
1560 @@ -1469,8 +1720,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
1561 {
1562 struct usb_device *udev = interface_to_usbdev(intf);
1563 struct usb_xpad *xpad;
1564 - struct usb_endpoint_descriptor *ep_irq_in;
1565 - int ep_irq_in_idx;
1566 + struct usb_endpoint_descriptor *ep_irq_in, *ep_irq_out;
1567 int i, error;
1568
1569 if (intf->cur_altsetting->desc.bNumEndpoints != 2)
1570 @@ -1540,13 +1790,28 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
1571 goto err_free_in_urb;
1572 }
1573
1574 - error = xpad_init_output(intf, xpad);
1575 - if (error)
1576 + ep_irq_in = ep_irq_out = NULL;
1577 +
1578 + for (i = 0; i < 2; i++) {
1579 + struct usb_endpoint_descriptor *ep =
1580 + &intf->cur_altsetting->endpoint[i].desc;
1581 +
1582 + if (usb_endpoint_xfer_int(ep)) {
1583 + if (usb_endpoint_dir_in(ep))
1584 + ep_irq_in = ep;
1585 + else
1586 + ep_irq_out = ep;
1587 + }
1588 + }
1589 +
1590 + if (!ep_irq_in || !ep_irq_out) {
1591 + error = -ENODEV;
1592 goto err_free_in_urb;
1593 + }
1594
1595 - /* Xbox One controller has in/out endpoints swapped. */
1596 - ep_irq_in_idx = xpad->xtype == XTYPE_XBOXONE ? 1 : 0;
1597 - ep_irq_in = &intf->cur_altsetting->endpoint[ep_irq_in_idx].desc;
1598 + error = xpad_init_output(intf, xpad, ep_irq_out);
1599 + if (error)
1600 + goto err_free_in_urb;
1601
1602 usb_fill_int_urb(xpad->irq_in, udev,
1603 usb_rcvintpipe(udev, ep_irq_in->bEndpointAddress),
1604 @@ -1663,8 +1928,16 @@ static int xpad_resume(struct usb_interface *intf)
1605 retval = xpad360w_start_input(xpad);
1606 } else {
1607 mutex_lock(&input->mutex);
1608 - if (input->users)
1609 + if (input->users) {
1610 retval = xpad_start_input(xpad);
1611 + } else if (xpad->xtype == XTYPE_XBOXONE) {
1612 + /*
1613 + * Even if there are no users, we'll send Xbox One pads
1614 + * the startup sequence so they don't sit there and
1615 + * blink until somebody opens the input device again.
1616 + */
1617 + retval = xpad_start_xbox_one(xpad);
1618 + }
1619 mutex_unlock(&input->mutex);
1620 }
1621
1622 diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
1623 index 8d6208c0b400..ff3d9fc0f1b3 100644
1624 --- a/drivers/net/can/dev.c
1625 +++ b/drivers/net/can/dev.c
1626 @@ -453,6 +453,34 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
1627 }
1628 EXPORT_SYMBOL_GPL(can_put_echo_skb);
1629
1630 +struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
1631 +{
1632 + struct can_priv *priv = netdev_priv(dev);
1633 + struct sk_buff *skb = priv->echo_skb[idx];
1634 + struct canfd_frame *cf;
1635 +
1636 + if (idx >= priv->echo_skb_max) {
1637 + netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
1638 + __func__, idx, priv->echo_skb_max);
1639 + return NULL;
1640 + }
1641 +
1642 + if (!skb) {
1643 + netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
1644 + __func__, idx);
1645 + return NULL;
1646 + }
1647 +
1648 + /* Using "struct canfd_frame::len" for the frame
1649 + * length is supported on both CAN and CANFD frames.
1650 + */
1651 + cf = (struct canfd_frame *)skb->data;
1652 + *len_ptr = cf->len;
1653 + priv->echo_skb[idx] = NULL;
1654 +
1655 + return skb;
1656 +}
1657 +
1658 /*
1659 * Get the skb from the stack and loop it back locally
1660 *
1661 @@ -462,22 +490,16 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
1662 */
1663 unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
1664 {
1665 - struct can_priv *priv = netdev_priv(dev);
1666 -
1667 - BUG_ON(idx >= priv->echo_skb_max);
1668 -
1669 - if (priv->echo_skb[idx]) {
1670 - struct sk_buff *skb = priv->echo_skb[idx];
1671 - struct can_frame *cf = (struct can_frame *)skb->data;
1672 - u8 dlc = cf->can_dlc;
1673 + struct sk_buff *skb;
1674 + u8 len;
1675
1676 - netif_rx(priv->echo_skb[idx]);
1677 - priv->echo_skb[idx] = NULL;
1678 + skb = __can_get_echo_skb(dev, idx, &len);
1679 + if (!skb)
1680 + return 0;
1681
1682 - return dlc;
1683 - }
1684 + netif_rx(skb);
1685
1686 - return 0;
1687 + return len;
1688 }
1689 EXPORT_SYMBOL_GPL(can_get_echo_skb);
1690
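Splitting out __can_get_echo_skb lets a driver fetch the echoed skb (and its length) itself while still getting the bounds and NULL checks that replaced the old BUG_ON. A hedged kernel-context sketch of a caller (hypothetical driver function, error handling trimmed):

/* Hypothetical TX-done handler in some CAN driver (kernel context). */
static void example_tx_done(struct net_device *dev, unsigned int idx)
{
	u8 len;
	struct sk_buff *skb = __can_get_echo_skb(dev, idx, &len);

	if (!skb)			/* bad index or no pending echo */
		return;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;	/* CAN or CAN FD payload length */
	netif_rx(skb);			/* loop the frame back locally */
}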
1691 diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
1692 index 3b9e1a5dce82..9bd90a7c4d40 100644
1693 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
1694 +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
1695 @@ -483,7 +483,7 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
1696 if (!compat)
1697 return -ENOMEM;
1698
1699 - priv->mdio_dn = of_find_compatible_node(dn, NULL, compat);
1700 + priv->mdio_dn = of_get_compatible_child(dn, compat);
1701 kfree(compat);
1702 if (!priv->mdio_dn) {
1703 dev_err(kdev, "unable to find MDIO bus node\n");
1704 diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
1705 index 0cbcd3f77341..6b4e38105b72 100644
1706 --- a/drivers/net/usb/lan78xx.c
1707 +++ b/drivers/net/usb/lan78xx.c
1708 @@ -31,6 +31,7 @@
1709 #include <linux/mdio.h>
1710 #include <net/ip6_checksum.h>
1711 #include <linux/microchipphy.h>
1712 +#include <linux/of_net.h>
1713 #include "lan78xx.h"
1714
1715 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
1716 @@ -1644,34 +1645,31 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1717 addr[5] = (addr_hi >> 8) & 0xFF;
1718
1719 if (!is_valid_ether_addr(addr)) {
1720 - /* reading mac address from EEPROM or OTP */
1721 - if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1722 - addr) == 0) ||
1723 - (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1724 - addr) == 0)) {
1725 - if (is_valid_ether_addr(addr)) {
1726 - /* eeprom values are valid so use them */
1727 - netif_dbg(dev, ifup, dev->net,
1728 - "MAC address read from EEPROM");
1729 - } else {
1730 - /* generate random MAC */
1731 - random_ether_addr(addr);
1732 - netif_dbg(dev, ifup, dev->net,
1733 - "MAC address set to random addr");
1734 - }
1735 -
1736 - addr_lo = addr[0] | (addr[1] << 8) |
1737 - (addr[2] << 16) | (addr[3] << 24);
1738 - addr_hi = addr[4] | (addr[5] << 8);
1739 -
1740 - ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1741 - ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1742 + if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1743 + /* valid address present in Device Tree */
1744 + netif_dbg(dev, ifup, dev->net,
1745 + "MAC address read from Device Tree");
1746 + } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1747 + ETH_ALEN, addr) == 0) ||
1748 + (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1749 + ETH_ALEN, addr) == 0)) &&
1750 + is_valid_ether_addr(addr)) {
1751 + /* eeprom values are valid so use them */
1752 + netif_dbg(dev, ifup, dev->net,
1753 + "MAC address read from EEPROM");
1754 } else {
1755 /* generate random MAC */
1756 random_ether_addr(addr);
1757 netif_dbg(dev, ifup, dev->net,
1758 "MAC address set to random addr");
1759 }
1760 +
1761 + addr_lo = addr[0] | (addr[1] << 8) |
1762 + (addr[2] << 16) | (addr[3] << 24);
1763 + addr_hi = addr[4] | (addr[5] << 8);
1764 +
1765 + ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1766 + ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1767 }
1768
1769 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1770 diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
1771 index 5fe6841b8889..fb632a454fc2 100644
1772 --- a/drivers/net/wireless/ath/ath10k/mac.c
1773 +++ b/drivers/net/wireless/ath/ath10k/mac.c
1774 @@ -4967,7 +4967,9 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
1775 }
1776
1777 ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
1778 + spin_lock_bh(&ar->data_lock);
1779 list_add(&arvif->list, &ar->arvifs);
1780 + spin_unlock_bh(&ar->data_lock);
1781
1782 /* It makes no sense to have firmware do keepalives. mac80211 already
1783 * takes care of this with idle connection polling.
1784 @@ -5118,7 +5120,9 @@ err_peer_delete:
1785 err_vdev_delete:
1786 ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
1787 ar->free_vdev_map |= 1LL << arvif->vdev_id;
1788 + spin_lock_bh(&ar->data_lock);
1789 list_del(&arvif->list);
1790 + spin_unlock_bh(&ar->data_lock);
1791
1792 err:
1793 if (arvif->beacon_buf) {
1794 @@ -5164,7 +5168,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
1795 arvif->vdev_id, ret);
1796
1797 ar->free_vdev_map |= 1LL << arvif->vdev_id;
1798 + spin_lock_bh(&ar->data_lock);
1799 list_del(&arvif->list);
1800 + spin_unlock_bh(&ar->data_lock);
1801
1802 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
1803 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
1804 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
1805 index c221597e2519..530f52120972 100644
1806 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
1807 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
1808 @@ -5990,7 +5990,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
1809 * for subsequent chanspecs.
1810 */
1811 channel->flags = IEEE80211_CHAN_NO_HT40 |
1812 - IEEE80211_CHAN_NO_80MHZ;
1813 + IEEE80211_CHAN_NO_80MHZ |
1814 + IEEE80211_CHAN_NO_160MHZ;
1815 ch.bw = BRCMU_CHAN_BW_20;
1816 cfg->d11inf.encchspec(&ch);
1817 chaninfo = ch.chspec;
1818 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1819 index 0bffade1ea5b..92557cd31a39 100644
1820 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1821 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1822 @@ -327,8 +327,12 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
1823 goto out;
1824 }
1825
1826 - if (changed)
1827 - *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
1828 + if (changed) {
1829 + u32 status = le32_to_cpu(resp->status);
1830 +
1831 + *changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
1832 + status == MCC_RESP_ILLEGAL);
1833 + }
1834
1835 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
1836 __le32_to_cpu(resp->n_channels),
1837 @@ -3976,10 +3980,6 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
1838 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
1839 }
1840
1841 - if (!fw_has_capa(&mvm->fw->ucode_capa,
1842 - IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
1843 - return;
1844 -
1845 /* if beacon filtering isn't on mac80211 does it anyway */
1846 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
1847 return;
1848 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
1849 index eade099b6dbf..e51aca87b4b0 100644
1850 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
1851 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
1852 @@ -739,9 +739,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
1853 }
1854
1855 IWL_DEBUG_LAR(mvm,
1856 - "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n",
1857 - status, mcc, mcc >> 8, mcc & 0xff,
1858 - !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels);
1859 + "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n",
1860 + status, mcc, mcc >> 8, mcc & 0xff, n_channels);
1861
1862 exit:
1863 iwl_free_resp(&cmd);
1864 diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
1865 index 48d51be11f9b..4da3541471e6 100644
1866 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
1867 +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
1868 @@ -1209,6 +1209,12 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
1869 priv->adapter->curr_iface_comb.p2p_intf--;
1870 priv->adapter->curr_iface_comb.sta_intf++;
1871 dev->ieee80211_ptr->iftype = type;
1872 + if (mwifiex_deinit_priv_params(priv))
1873 + return -1;
1874 + if (mwifiex_init_new_priv_params(priv, dev, type))
1875 + return -1;
1876 + if (mwifiex_sta_init_cmd(priv, false, false))
1877 + return -1;
1878 break;
1879 case NL80211_IFTYPE_ADHOC:
1880 if (mwifiex_cfg80211_deinit_p2p(priv))
1881 @@ -3079,8 +3085,10 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
1882
1883 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
1884
1885 - skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
1886 + skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) {
1887 + skb_unlink(skb, &priv->bypass_txq);
1888 mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
1889 + }
1890
1891 if (netif_carrier_ok(priv->netdev))
1892 netif_carrier_off(priv->netdev);
1893 diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
1894 index 1fdb86cd4734..cb681b265b10 100644
1895 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c
1896 +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
1897 @@ -101,7 +101,6 @@ static int mwifiex_pcie_suspend(struct device *dev)
1898 {
1899 struct mwifiex_adapter *adapter;
1900 struct pcie_service_card *card;
1901 - int hs_actived;
1902 struct pci_dev *pdev = to_pci_dev(dev);
1903
1904 if (pdev) {
1905 @@ -117,7 +116,15 @@ static int mwifiex_pcie_suspend(struct device *dev)
1906
1907 adapter = card->adapter;
1908
1909 - hs_actived = mwifiex_enable_hs(adapter);
1910 + /* Enable the Host Sleep */
1911 + if (!mwifiex_enable_hs(adapter)) {
1912 + mwifiex_dbg(adapter, ERROR,
1913 + "cmd: failed to suspend\n");
1914 + adapter->hs_enabling = false;
1915 + return -EFAULT;
1916 + }
1917 +
1918 + flush_workqueue(adapter->workqueue);
1919
1920 /* Indicate device suspended */
1921 adapter->is_suspended = true;
1922 @@ -1676,9 +1683,6 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
1923
1924 if (!adapter->curr_cmd) {
1925 if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
1926 - mwifiex_process_sleep_confirm_resp(adapter, skb->data,
1927 - skb->len);
1928 - mwifiex_pcie_enable_host_int(adapter);
1929 if (mwifiex_write_reg(adapter,
1930 PCIE_CPU_INT_EVENT,
1931 CPU_INTR_SLEEP_CFM_DONE)) {
1932 @@ -1691,6 +1695,9 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
1933 while (reg->sleep_cookie && (count++ < 10) &&
1934 mwifiex_pcie_ok_to_access_hw(adapter))
1935 usleep_range(50, 60);
1936 + mwifiex_pcie_enable_host_int(adapter);
1937 + mwifiex_process_sleep_confirm_resp(adapter, skb->data,
1938 + skb->len);
1939 } else {
1940 mwifiex_dbg(adapter, ERROR,
1941 "There is no command but got cmdrsp\n");
1942 @@ -2329,6 +2336,8 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter)
1943 ret = mwifiex_pcie_process_cmd_complete(adapter);
1944 if (ret)
1945 return ret;
1946 + if (adapter->hs_activated)
1947 + return ret;
1948 }
1949
1950 if (card->msi_enable) {
1951 diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
1952 index 0eb246502e1d..dea2fe671dfe 100644
1953 --- a/drivers/net/wireless/marvell/mwifiex/wmm.c
1954 +++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
1955 @@ -503,8 +503,10 @@ mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv,
1956 struct mwifiex_adapter *adapter = priv->adapter;
1957 struct sk_buff *skb, *tmp;
1958
1959 - skb_queue_walk_safe(&ra_list->skb_head, skb, tmp)
1960 + skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
1961 + skb_unlink(skb, &ra_list->skb_head);
1962 mwifiex_write_data_complete(adapter, skb, 0, -1);
1963 + }
1964 }
1965
1966 /*
1967 @@ -600,11 +602,15 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
1968 priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
1969 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
1970
1971 - skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
1972 + skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
1973 + skb_unlink(skb, &priv->tdls_txq);
1974 mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
1975 + }
1976
1977 - skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
1978 + skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) {
1979 + skb_unlink(skb, &priv->bypass_txq);
1980 mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
1981 + }
1982 atomic_set(&priv->adapter->bypass_tx_pending, 0);
1983
1984 idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);
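The mwifiex hunks above and below all apply the same drain idiom: when completing queued skbs during teardown, unlink each one from the queue before handing it off, so the queue head never keeps pointers to buffers that have already been consumed. The bare pattern, as a kernel-context sketch (kfree_skb stands in for mwifiex_write_data_complete):

	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&q, skb, tmp) {
		skb_unlink(skb, &q);	/* detach first ... */
		kfree_skb(skb);		/* ... then release; the queue stays consistent */
	}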
1985 diff --git a/drivers/net/wireless/st/cw1200/wsm.c b/drivers/net/wireless/st/cw1200/wsm.c
1986 index ed93bf3474ec..be4c22e0d902 100644
1987 --- a/drivers/net/wireless/st/cw1200/wsm.c
1988 +++ b/drivers/net/wireless/st/cw1200/wsm.c
1989 @@ -1805,16 +1805,18 @@ static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size)
1990 {
1991 size_t pos = buf->data - buf->begin;
1992 size_t size = pos + extra_size;
1993 + u8 *tmp;
1994
1995 size = round_up(size, FWLOAD_BLOCK_SIZE);
1996
1997 - buf->begin = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA);
1998 - if (buf->begin) {
1999 - buf->data = &buf->begin[pos];
2000 - buf->end = &buf->begin[size];
2001 - return 0;
2002 - } else {
2003 - buf->end = buf->data = buf->begin;
2004 + tmp = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA);
2005 + if (!tmp) {
2006 + wsm_buf_deinit(buf);
2007 return -ENOMEM;
2008 }
2009 +
2010 + buf->begin = tmp;
2011 + buf->data = &buf->begin[pos];
2012 + buf->end = &buf->begin[size];
2013 + return 0;
2014 }
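wsm_buf_reserve now uses the standard grow-through-a-temporary idiom: if krealloc fails, the original buffer must not be overwritten (and leaked) by a NULL result. The same idiom in plain userspace C, as a compilable sketch with illustrative names:

#include <stdlib.h>

struct buf {
	char *begin, *data, *end;
};

/* Ensure at least 'extra' bytes fit after 'data'; returns 0 on success. */
static int buf_reserve(struct buf *b, size_t extra)
{
	size_t pos = b->begin ? (size_t)(b->data - b->begin) : 0;
	size_t size = pos + extra;
	char *tmp;

	tmp = realloc(b->begin, size);	/* never assign straight to b->begin */
	if (!tmp)
		return -1;		/* old allocation is still intact */

	b->begin = tmp;
	b->data = b->begin + pos;
	b->end = b->begin + size;
	return 0;
}

int main(void)
{
	struct buf b = { 0 };		/* realloc(NULL, n) behaves like malloc */
	int ret = buf_reserve(&b, 64);

	free(b.begin);
	return ret ? 1 : 0;
}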
2015 diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
2016 index 6c0c301611c4..1b11ded79c4f 100644
2017 --- a/drivers/nfc/nfcmrvl/uart.c
2018 +++ b/drivers/nfc/nfcmrvl/uart.c
2019 @@ -73,10 +73,9 @@ static int nfcmrvl_uart_parse_dt(struct device_node *node,
2020 struct device_node *matched_node;
2021 int ret;
2022
2023 - matched_node = of_find_compatible_node(node, NULL, "marvell,nfc-uart");
2024 + matched_node = of_get_compatible_child(node, "marvell,nfc-uart");
2025 if (!matched_node) {
2026 - matched_node = of_find_compatible_node(node, NULL,
2027 - "mrvl,nfc-uart");
2028 + matched_node = of_get_compatible_child(node, "mrvl,nfc-uart");
2029 if (!matched_node)
2030 return -ENODEV;
2031 }
2032 diff --git a/drivers/of/base.c b/drivers/of/base.c
2033 index 466b285cef3e..f366af135d5b 100644
2034 --- a/drivers/of/base.c
2035 +++ b/drivers/of/base.c
2036 @@ -738,6 +738,31 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
2037 }
2038 EXPORT_SYMBOL(of_get_next_available_child);
2039
2040 +/**
2041 + * of_get_compatible_child - Find compatible child node
2042 + * @parent: parent node
2043 + * @compatible: compatible string
2044 + *
2045 + * Lookup child node whose compatible property contains the given compatible
2046 + * string.
2047 + *
2048 + * Returns a node pointer with refcount incremented, use of_node_put() on it
2049 + * when done; or NULL if not found.
2050 + */
2051 +struct device_node *of_get_compatible_child(const struct device_node *parent,
2052 + const char *compatible)
2053 +{
2054 + struct device_node *child;
2055 +
2056 + for_each_child_of_node(parent, child) {
2057 + if (of_device_is_compatible(child, compatible))
2058 + break;
2059 + }
2060 +
2061 + return child;
2062 +}
2063 +EXPORT_SYMBOL(of_get_compatible_child);
2064 +
2065 /**
2066 * of_get_child_by_name - Find the child node by name for a given parent
2067 * @node: parent node
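The call sites converted above (genet, nfcmrvl) use the new helper so the lookup is limited to direct children of the given parent instead of continuing across the whole tree, which is what of_find_compatible_node did. As with the other child accessors, the returned node carries a reference. A kernel-context usage sketch (the compatible string is hypothetical):

	struct device_node *child;

	child = of_get_compatible_child(parent, "vendor,example-subnode");
	if (!child)
		return -ENODEV;		/* no matching direct child */

	/* ... read properties, set up the sub-device ... */

	of_node_put(child);		/* drop the reference taken by the helper */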
2068 diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
2069 index 9443c9d408c6..df61a71420b1 100644
2070 --- a/drivers/pinctrl/meson/pinctrl-meson.c
2071 +++ b/drivers/pinctrl/meson/pinctrl-meson.c
2072 @@ -275,7 +275,7 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
2073 dev_dbg(pc->dev, "pin %u: disable bias\n", pin);
2074
2075 meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
2076 - ret = regmap_update_bits(pc->reg_pull, reg,
2077 + ret = regmap_update_bits(pc->reg_pullen, reg,
2078 BIT(bit), 0);
2079 if (ret)
2080 return ret;
2081 diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
2082 index 2bfdf638b673..8a3667e761dd 100644
2083 --- a/drivers/rtc/rtc-pcf2127.c
2084 +++ b/drivers/rtc/rtc-pcf2127.c
2085 @@ -237,6 +237,9 @@ static int pcf2127_i2c_gather_write(void *context,
2086 memcpy(buf + 1, val, val_size);
2087
2088 ret = i2c_master_send(client, buf, val_size + 1);
2089 +
2090 + kfree(buf);
2091 +
2092 if (ret != val_size + 1)
2093 return ret < 0 ? ret : -EIO;
2094
2095 diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
2096 index 5bb2316f60bf..54deeb754db5 100644
2097 --- a/drivers/scsi/ufs/ufs.h
2098 +++ b/drivers/scsi/ufs/ufs.h
2099 @@ -46,6 +46,7 @@
2100 #define QUERY_DESC_HDR_SIZE 2
2101 #define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
2102 (sizeof(struct utp_upiu_header)))
2103 +#define RESPONSE_UPIU_SENSE_DATA_LENGTH 18
2104
2105 #define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
2106 cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
2107 @@ -410,7 +411,7 @@ struct utp_cmd_rsp {
2108 __be32 residual_transfer_count;
2109 __be32 reserved[4];
2110 __be16 sense_data_len;
2111 - u8 sense_data[18];
2112 + u8 sense_data[RESPONSE_UPIU_SENSE_DATA_LENGTH];
2113 };
2114
2115 /**
2116 diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
2117 index d15eaa466c59..52b546fb509b 100644
2118 --- a/drivers/scsi/ufs/ufshcd-pci.c
2119 +++ b/drivers/scsi/ufs/ufshcd-pci.c
2120 @@ -104,6 +104,7 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
2121 pm_runtime_forbid(&pdev->dev);
2122 pm_runtime_get_noresume(&pdev->dev);
2123 ufshcd_remove(hba);
2124 + ufshcd_dealloc_host(hba);
2125 }
2126
2127 /**
2128 @@ -147,6 +148,7 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2129 err = ufshcd_init(hba, mmio_base, pdev->irq);
2130 if (err) {
2131 dev_err(&pdev->dev, "Initialization failed\n");
2132 + ufshcd_dealloc_host(hba);
2133 return err;
2134 }
2135
2136 diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
2137 index db53f38da864..a72a4ba78125 100644
2138 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c
2139 +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
2140 @@ -163,7 +163,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
2141 if (ret) {
2142 dev_err(dev, "%s: unable to find %s err %d\n",
2143 __func__, prop_name, ret);
2144 - goto out_free;
2145 + goto out;
2146 }
2147
2148 vreg->min_uA = 0;
2149 @@ -185,9 +185,6 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
2150
2151 goto out;
2152
2153 -out_free:
2154 - devm_kfree(dev, vreg);
2155 - vreg = NULL;
2156 out:
2157 if (!ret)
2158 *out_vreg = vreg;
2159 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
2160 index f857086ce2fa..5cfd56f08ffb 100644
2161 --- a/drivers/scsi/ufs/ufshcd.c
2162 +++ b/drivers/scsi/ufs/ufshcd.c
2163 @@ -672,6 +672,21 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
2164 start:
2165 switch (hba->clk_gating.state) {
2166 case CLKS_ON:
2167 + /*
2168 + * Wait for the ungate work to complete if in progress.
2169 + * Though the clocks may be in ON state, the link could
2170 + * still be in hibern8 state if hibern8 is allowed
2171 + * during clock gating.
2172 + * Make sure we exit hibern8 state also in addition to
2173 + * clocks being ON.
2174 + */
2175 + if (ufshcd_can_hibern8_during_gating(hba) &&
2176 + ufshcd_is_link_hibern8(hba)) {
2177 + spin_unlock_irqrestore(hba->host->host_lock, flags);
2178 + flush_work(&hba->clk_gating.ungate_work);
2179 + spin_lock_irqsave(hba->host->host_lock, flags);
2180 + goto start;
2181 + }
2182 break;
2183 case REQ_CLKS_OFF:
2184 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
2185 @@ -901,10 +916,14 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2186 int len;
2187 if (lrbp->sense_buffer &&
2188 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
2189 + int len_to_copy;
2190 +
2191 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
2192 + len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
2193 +
2194 memcpy(lrbp->sense_buffer,
2195 lrbp->ucd_rsp_ptr->sr.sense_data,
2196 - min_t(int, len, SCSI_SENSE_BUFFERSIZE));
2197 + min_t(int, len_to_copy, SCSI_SENSE_BUFFERSIZE));
2198 }
2199 }
2200
2201 @@ -6373,7 +6392,10 @@ EXPORT_SYMBOL(ufshcd_system_suspend);
2202
2203 int ufshcd_system_resume(struct ufs_hba *hba)
2204 {
2205 - if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
2206 + if (!hba)
2207 + return -EINVAL;
2208 +
2209 + if (!hba->is_powered || pm_runtime_suspended(hba->dev))
2210 /*
2211 * Let the runtime resume take care of resuming
2212 * if runtime suspended.
2213 @@ -6394,7 +6416,10 @@ EXPORT_SYMBOL(ufshcd_system_resume);
2214 */
2215 int ufshcd_runtime_suspend(struct ufs_hba *hba)
2216 {
2217 - if (!hba || !hba->is_powered)
2218 + if (!hba)
2219 + return -EINVAL;
2220 +
2221 + if (!hba->is_powered)
2222 return 0;
2223
2224 return ufshcd_suspend(hba, UFS_RUNTIME_PM);
2225 @@ -6424,10 +6449,13 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
2226 */
2227 int ufshcd_runtime_resume(struct ufs_hba *hba)
2228 {
2229 - if (!hba || !hba->is_powered)
2230 + if (!hba)
2231 + return -EINVAL;
2232 +
2233 + if (!hba->is_powered)
2234 return 0;
2235 - else
2236 - return ufshcd_resume(hba, UFS_RUNTIME_PM);
2237 +
2238 + return ufshcd_resume(hba, UFS_RUNTIME_PM);
2239 }
2240 EXPORT_SYMBOL(ufshcd_runtime_resume);
2241
2242 @@ -6479,8 +6507,6 @@ void ufshcd_remove(struct ufs_hba *hba)
2243 ufshcd_disable_intr(hba, hba->intr_mask);
2244 ufshcd_hba_stop(hba, true);
2245
2246 - scsi_host_put(hba->host);
2247 -
2248 ufshcd_exit_clk_gating(hba);
2249 if (ufshcd_is_clkscaling_enabled(hba))
2250 devfreq_remove_device(hba->devfreq);
2251 @@ -6605,15 +6631,47 @@ static int ufshcd_devfreq_target(struct device *dev,
2252 {
2253 int err = 0;
2254 struct ufs_hba *hba = dev_get_drvdata(dev);
2255 + bool release_clk_hold = false;
2256 + unsigned long irq_flags;
2257
2258 if (!ufshcd_is_clkscaling_enabled(hba))
2259 return -EINVAL;
2260
2261 + spin_lock_irqsave(hba->host->host_lock, irq_flags);
2262 + if (ufshcd_eh_in_progress(hba)) {
2263 + spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
2264 + return 0;
2265 + }
2266 +
2267 + if (ufshcd_is_clkgating_allowed(hba) &&
2268 + (hba->clk_gating.state != CLKS_ON)) {
2269 + if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
2270 + /* hold the vote until the scaling work is completed */
2271 + hba->clk_gating.active_reqs++;
2272 + release_clk_hold = true;
2273 + hba->clk_gating.state = CLKS_ON;
2274 + } else {
2275 + /*
2276 + * Clock gating work seems to be running in parallel
2277 + * hence skip scaling work to avoid deadlock between
2278 + * current scaling work and gating work.
2279 + */
2280 + spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
2281 + return 0;
2282 + }
2283 + }
2284 + spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
2285 +
2286 if (*freq == UINT_MAX)
2287 err = ufshcd_scale_clks(hba, true);
2288 else if (*freq == 0)
2289 err = ufshcd_scale_clks(hba, false);
2290
2291 + spin_lock_irqsave(hba->host->host_lock, irq_flags);
2292 + if (release_clk_hold)
2293 + __ufshcd_release(hba);
2294 + spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
2295 +
2296 return err;
2297 }
2298
2299 @@ -6816,7 +6874,6 @@ exit_gating:
2300 ufshcd_exit_clk_gating(hba);
2301 out_disable:
2302 hba->is_irq_enabled = false;
2303 - scsi_host_put(host);
2304 ufshcd_hba_exit(hba);
2305 out_error:
2306 return err;
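The CLKS_ON branch added to ufshcd_hold illustrates a common locking shape: the work item has to be flushed with the spinlock dropped (flush_work may sleep), and because the state can change while the lock is released, the code loops back and re-evaluates instead of assuming it still holds. Schematically, a sketch with a hypothetical predicate, not the UFS code:

	spin_lock_irqsave(&lock, flags);
again:
	if (must_wait_for_work()) {		/* hypothetical condition */
		spin_unlock_irqrestore(&lock, flags);
		flush_work(&work);		/* sleeps; lock must be dropped */
		spin_lock_irqsave(&lock, flags);
		goto again;			/* state may have changed meanwhile */
	}
	/* ... proceed while still holding the lock ... */
	spin_unlock_irqrestore(&lock, flags);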
2307 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
2308 index 0475f9685a41..904fc9c37fde 100644
2309 --- a/drivers/tty/n_tty.c
2310 +++ b/drivers/tty/n_tty.c
2311 @@ -154,17 +154,28 @@ static inline unsigned char *echo_buf_addr(struct n_tty_data *ldata, size_t i)
2312 return &ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
2313 }
2314
2315 +/* If we are not echoing the data, perhaps this is a secret so erase it */
2316 +static void zero_buffer(struct tty_struct *tty, u8 *buffer, int size)
2317 +{
2318 + bool icanon = !!L_ICANON(tty);
2319 + bool no_echo = !L_ECHO(tty);
2320 +
2321 + if (icanon && no_echo)
2322 + memset(buffer, 0x00, size);
2323 +}
2324 +
2325 static int tty_copy_to_user(struct tty_struct *tty, void __user *to,
2326 size_t tail, size_t n)
2327 {
2328 struct n_tty_data *ldata = tty->disc_data;
2329 size_t size = N_TTY_BUF_SIZE - tail;
2330 - const void *from = read_buf_addr(ldata, tail);
2331 + void *from = read_buf_addr(ldata, tail);
2332 int uncopied;
2333
2334 if (n > size) {
2335 tty_audit_add_data(tty, from, size);
2336 uncopied = copy_to_user(to, from, size);
2337 + zero_buffer(tty, from, size - uncopied);
2338 if (uncopied)
2339 return uncopied;
2340 to += size;
2341 @@ -173,7 +184,9 @@ static int tty_copy_to_user(struct tty_struct *tty, void __user *to,
2342 }
2343
2344 tty_audit_add_data(tty, from, n);
2345 - return copy_to_user(to, from, n);
2346 + uncopied = copy_to_user(to, from, n);
2347 + zero_buffer(tty, from, n - uncopied);
2348 + return uncopied;
2349 }
2350
2351 /**
2352 @@ -1962,11 +1975,12 @@ static int copy_from_read_buf(struct tty_struct *tty,
2353 n = min(head - ldata->read_tail, N_TTY_BUF_SIZE - tail);
2354 n = min(*nr, n);
2355 if (n) {
2356 - const unsigned char *from = read_buf_addr(ldata, tail);
2357 + unsigned char *from = read_buf_addr(ldata, tail);
2358 retval = copy_to_user(*b, from, n);
2359 n -= retval;
2360 is_eof = n == 1 && *from == EOF_CHAR(tty);
2361 tty_audit_add_data(tty, from, n);
2362 + zero_buffer(tty, from, n);
2363 smp_store_release(&ldata->read_tail, ldata->read_tail + n);
2364 /* Turn single EOF into zero-length read */
2365 if (L_EXTPROC(tty) && ldata->icanon && is_eof &&
2366 diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
2367 index e99f1c5b1df6..41b9a7ccce08 100644
2368 --- a/drivers/tty/tty_buffer.c
2369 +++ b/drivers/tty/tty_buffer.c
2370 @@ -458,6 +458,8 @@ int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
2371 if (count && ld->ops->receive_buf)
2372 ld->ops->receive_buf(ld->tty, p, f, count);
2373 }
2374 + if (count > 0)
2375 + memset(p, 0, count);
2376 return count;
2377 }
2378 EXPORT_SYMBOL_GPL(tty_ldisc_receive_buf);
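Both tty hunks enforce one rule: once bytes that may be secret (a password typed with echo off, for instance) have been delivered to their consumer, the staging copy is wiped so it cannot be recovered from memory later. The userspace analogue of that habit, as a small runnable sketch (explicit_bzero is the glibc/BSD scrubber intended to survive dead-store elimination, unlike a plain memset on a buffer about to go out of scope):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char secret[64];

	if (fgets(secret, sizeof(secret), stdin) == NULL)
		return 1;

	/* ... hand the secret to whoever needs it ... */
	if (write(STDOUT_FILENO, secret, strlen(secret)) < 0)
		return 1;

	/* Scrub the staging copy once it has been consumed. */
	explicit_bzero(secret, sizeof(secret));
	return 0;
}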
2379 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2380 index 7aee55244b4a..851f5a553de2 100644
2381 --- a/drivers/usb/core/hub.c
2382 +++ b/drivers/usb/core/hub.c
2383 @@ -2809,7 +2809,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
2384 USB_PORT_FEAT_C_BH_PORT_RESET);
2385 usb_clear_port_feature(hub->hdev, port1,
2386 USB_PORT_FEAT_C_PORT_LINK_STATE);
2387 - usb_clear_port_feature(hub->hdev, port1,
2388 +
2389 + if (udev)
2390 + usb_clear_port_feature(hub->hdev, port1,
2391 USB_PORT_FEAT_C_CONNECTION);
2392
2393 /*
2394 diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
2395 index 53b26e978d90..1e91b803ee4e 100644
2396 --- a/drivers/usb/dwc3/core.c
2397 +++ b/drivers/usb/dwc3/core.c
2398 @@ -1145,6 +1145,7 @@ static int dwc3_probe(struct platform_device *pdev)
2399
2400 err5:
2401 dwc3_event_buffers_cleanup(dwc);
2402 + dwc3_ulpi_exit(dwc);
2403
2404 err4:
2405 dwc3_free_scratch_buffers(dwc);
2406 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
2407 index 0f09ab5399f4..00d10660ff14 100644
2408 --- a/drivers/usb/host/xhci-hub.c
2409 +++ b/drivers/usb/host/xhci-hub.c
2410 @@ -768,7 +768,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
2411 status |= USB_PORT_STAT_SUSPEND;
2412 }
2413 if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME &&
2414 - !DEV_SUPERSPEED_ANY(raw_port_status)) {
2415 + !DEV_SUPERSPEED_ANY(raw_port_status) && hcd->speed < HCD_USB3) {
2416 if ((raw_port_status & PORT_RESET) ||
2417 !(raw_port_status & PORT_PE))
2418 return 0xffffffff;
2419 @@ -814,7 +814,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
2420 time_left = wait_for_completion_timeout(
2421 &bus_state->rexit_done[wIndex],
2422 msecs_to_jiffies(
2423 - XHCI_MAX_REXIT_TIMEOUT));
2424 + XHCI_MAX_REXIT_TIMEOUT_MS));
2425 spin_lock_irqsave(&xhci->lock, flags);
2426
2427 if (time_left) {
2428 @@ -828,7 +828,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
2429 } else {
2430 int port_status = readl(port_array[wIndex]);
2431 xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
2432 - XHCI_MAX_REXIT_TIMEOUT,
2433 + XHCI_MAX_REXIT_TIMEOUT_MS,
2434 port_status);
2435 status |= USB_PORT_STAT_SUSPEND;
2436 clear_bit(wIndex, &bus_state->rexit_ports);
2437 @@ -1322,13 +1322,16 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
2438 __le32 __iomem **port_array;
2439 struct xhci_bus_state *bus_state;
2440 unsigned long flags;
2441 + u32 portsc_buf[USB_MAXCHILDREN];
2442 + bool wake_enabled;
2443
2444 max_ports = xhci_get_ports(hcd, &port_array);
2445 bus_state = &xhci->bus_state[hcd_index(hcd)];
2446 + wake_enabled = hcd->self.root_hub->do_remote_wakeup;
2447
2448 spin_lock_irqsave(&xhci->lock, flags);
2449
2450 - if (hcd->self.root_hub->do_remote_wakeup) {
2451 + if (wake_enabled) {
2452 if (bus_state->resuming_ports || /* USB2 */
2453 bus_state->port_remote_wakeup) { /* USB3 */
2454 spin_unlock_irqrestore(&xhci->lock, flags);
2455 @@ -1336,26 +1339,36 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
2456 return -EBUSY;
2457 }
2458 }
2459 -
2460 - port_index = max_ports;
2461 + /*
2462 + * Prepare ports for suspend, but don't write anything before all ports
2463 + * are checked and we know bus suspend can proceed
2464 + */
2465 bus_state->bus_suspended = 0;
2466 + port_index = max_ports;
2467 while (port_index--) {
2468 - /* suspend the port if the port is not suspended */
2469 u32 t1, t2;
2470 - int slot_id;
2471
2472 t1 = readl(port_array[port_index]);
2473 t2 = xhci_port_state_to_neutral(t1);
2474 + portsc_buf[port_index] = 0;
2475
2476 - if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) {
2477 - xhci_dbg(xhci, "port %d not suspended\n", port_index);
2478 - slot_id = xhci_find_slot_id_by_port(hcd, xhci,
2479 - port_index + 1);
2480 - if (slot_id) {
2481 + /* Bail out if a USB3 port has a new device in link training */
2482 + if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) {
2483 + bus_state->bus_suspended = 0;
2484 + spin_unlock_irqrestore(&xhci->lock, flags);
2485 + xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
2486 + return -EBUSY;
2487 + }
2488 +
2489 + /* suspend ports in U0, or bail out for new connect changes */
2490 + if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
2491 + if ((t1 & PORT_CSC) && wake_enabled) {
2492 + bus_state->bus_suspended = 0;
2493 spin_unlock_irqrestore(&xhci->lock, flags);
2494 - xhci_stop_device(xhci, slot_id, 1);
2495 - spin_lock_irqsave(&xhci->lock, flags);
2496 + xhci_dbg(xhci, "Bus suspend bailout, port connect change\n");
2497 + return -EBUSY;
2498 }
2499 + xhci_dbg(xhci, "port %d not suspended\n", port_index);
2500 t2 &= ~PORT_PLS_MASK;
2501 t2 |= PORT_LINK_STROBE | XDEV_U3;
2502 set_bit(port_index, &bus_state->bus_suspended);
2503 @@ -1364,7 +1377,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
2504 * including the USB 3.0 roothub, but only if CONFIG_PM
2505 * is enabled, so also enable remote wake here.
2506 */
2507 - if (hcd->self.root_hub->do_remote_wakeup) {
2508 + if (wake_enabled) {
2509 if (t1 & PORT_CONNECT) {
2510 t2 |= PORT_WKOC_E | PORT_WKDISC_E;
2511 t2 &= ~PORT_WKCONN_E;
2512 @@ -1377,7 +1390,26 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
2513
2514 t1 = xhci_port_state_to_neutral(t1);
2515 if (t1 != t2)
2516 - writel(t2, port_array[port_index]);
2517 + portsc_buf[port_index] = t2;
2518 + }
2519 +
2520 + /* write port settings, stopping and suspending ports if needed */
2521 + port_index = max_ports;
2522 + while (port_index--) {
2523 + if (!portsc_buf[port_index])
2524 + continue;
2525 + if (test_bit(port_index, &bus_state->bus_suspended)) {
2526 + int slot_id;
2527 +
2528 + slot_id = xhci_find_slot_id_by_port(hcd, xhci,
2529 + port_index + 1);
2530 + if (slot_id) {
2531 + spin_unlock_irqrestore(&xhci->lock, flags);
2532 + xhci_stop_device(xhci, slot_id, 1);
2533 + spin_lock_irqsave(&xhci->lock, flags);
2534 + }
2535 + }
2536 + writel(portsc_buf[port_index], port_array[port_index]);
2537 }
2538 hcd->state = HC_STATE_SUSPENDED;
2539 bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
2540 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
2541 index 89a14d5f6ad8..f4e34a75d413 100644
2542 --- a/drivers/usb/host/xhci-ring.c
2543 +++ b/drivers/usb/host/xhci-ring.c
2544 @@ -1676,7 +1676,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
2545 * RExit to a disconnect state). If so, let the driver know it's
2546 * out of the RExit state.
2547 */
2548 - if (!DEV_SUPERSPEED_ANY(temp) &&
2549 + if (!DEV_SUPERSPEED_ANY(temp) && hcd->speed < HCD_USB3 &&
2550 test_and_clear_bit(faked_port_index,
2551 &bus_state->rexit_ports)) {
2552 complete(&bus_state->rexit_done[faked_port_index]);
2553 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
2554 index b9181281aa9e..e679fec9ce3a 100644
2555 --- a/drivers/usb/host/xhci.h
2556 +++ b/drivers/usb/host/xhci.h
2557 @@ -1509,7 +1509,7 @@ struct xhci_bus_state {
2558 * It can take up to 20 ms to transition from RExit to U0 on the
2559 * Intel Lynx Point LP xHCI host.
2560 */
2561 -#define XHCI_MAX_REXIT_TIMEOUT (20 * 1000)
2562 +#define XHCI_MAX_REXIT_TIMEOUT_MS 20
2563
2564 static inline unsigned int hcd_index(struct usb_hcd *hcd)
2565 {
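Worth noting: renaming the constant also corrects its unit. The old XHCI_MAX_REXIT_TIMEOUT of 20 * 1000 was passed to msecs_to_jiffies() in the hub code above, so the resume-exit wait was effectively 20000 ms = 20 seconds, while the comment budgets 20 ms for the RExit-to-U0 transition; the new value of 20, interpreted as milliseconds, matches that intent.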
2566 diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
2567 index b0405d6aac85..48db9a9f13f9 100644
2568 --- a/fs/9p/vfs_dir.c
2569 +++ b/fs/9p/vfs_dir.c
2570 @@ -76,15 +76,6 @@ static inline int dt_type(struct p9_wstat *mistat)
2571 return rettype;
2572 }
2573
2574 -static void p9stat_init(struct p9_wstat *stbuf)
2575 -{
2576 - stbuf->name = NULL;
2577 - stbuf->uid = NULL;
2578 - stbuf->gid = NULL;
2579 - stbuf->muid = NULL;
2580 - stbuf->extension = NULL;
2581 -}
2582 -
2583 /**
2584 * v9fs_alloc_rdir_buf - Allocate buffer used for read and readdir
2585 * @filp: opened file structure
2586 @@ -145,12 +136,10 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
2587 rdir->tail = n;
2588 }
2589 while (rdir->head < rdir->tail) {
2590 - p9stat_init(&st);
2591 err = p9stat_read(fid->clnt, rdir->buf + rdir->head,
2592 rdir->tail - rdir->head, &st);
2593 if (err) {
2594 p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
2595 - p9stat_free(&st);
2596 return -EIO;
2597 }
2598 reclen = st.size+2;
2599 diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
2600 index 1e5c896f6b79..0acb83efedea 100644
2601 --- a/fs/bfs/inode.c
2602 +++ b/fs/bfs/inode.c
2603 @@ -350,7 +350,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
2604
2605 s->s_magic = BFS_MAGIC;
2606
2607 - if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) {
2608 + if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end) ||
2609 + le32_to_cpu(bfs_sb->s_start) < BFS_BSIZE) {
2610 printf("Superblock is corrupted\n");
2611 goto out1;
2612 }
2613 @@ -359,9 +360,11 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
2614 sizeof(struct bfs_inode)
2615 + BFS_ROOT_INO - 1;
2616 imap_len = (info->si_lasti / 8) + 1;
2617 - info->si_imap = kzalloc(imap_len, GFP_KERNEL);
2618 - if (!info->si_imap)
2619 + info->si_imap = kzalloc(imap_len, GFP_KERNEL | __GFP_NOWARN);
2620 + if (!info->si_imap) {
2621 + printf("Cannot allocate %u bytes\n", imap_len);
2622 goto out1;
2623 + }
2624 for (i = 0; i < BFS_ROOT_INO; i++)
2625 set_bit(i, info->si_imap);
2626
2627 diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
2628 index 6d7f66816319..84e5ac061b17 100644
2629 --- a/fs/gfs2/ops_fstype.c
2630 +++ b/fs/gfs2/ops_fstype.c
2631 @@ -71,13 +71,13 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
2632 if (!sdp)
2633 return NULL;
2634
2635 - sb->s_fs_info = sdp;
2636 sdp->sd_vfs = sb;
2637 sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats);
2638 if (!sdp->sd_lkstats) {
2639 kfree(sdp);
2640 return NULL;
2641 }
2642 + sb->s_fs_info = sdp;
2643
2644 set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
2645 gfs2_tune_init(&sdp->sd_tune);
2646 diff --git a/fs/namei.c b/fs/namei.c
2647 index 85ac38b99065..eb4626bad88a 100644
2648 --- a/fs/namei.c
2649 +++ b/fs/namei.c
2650 @@ -892,6 +892,8 @@ static inline void put_link(struct nameidata *nd)
2651
2652 int sysctl_protected_symlinks __read_mostly = 0;
2653 int sysctl_protected_hardlinks __read_mostly = 0;
2654 +int sysctl_protected_fifos __read_mostly;
2655 +int sysctl_protected_regular __read_mostly;
2656
2657 /**
2658 * may_follow_link - Check symlink following for unsafe situations
2659 @@ -1005,6 +1007,45 @@ static int may_linkat(struct path *link)
2660 return -EPERM;
2661 }
2662
2663 +/**
2664 + * may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
2665 + * should be allowed, or not, on files that already
2666 + * exist.
2667 + * @dir: the sticky parent directory
2668 + * @inode: the inode of the file to open
2669 + *
2670 + * Block an O_CREAT open of a FIFO (or a regular file) when:
2671 + * - sysctl_protected_fifos (or sysctl_protected_regular) is enabled
2672 + * - the file already exists
2673 + * - we are in a sticky directory
2674 + * - we don't own the file
2675 + * - the owner of the directory doesn't own the file
2676 + * - the directory is world writable
2677 + * If the sysctl_protected_fifos (or sysctl_protected_regular) is set to 2
2678 + * the directory doesn't have to be world writable: being group writable will
2679 + * be enough.
2680 + *
2681 + * Returns 0 if the open is allowed, -ve on error.
2682 + */
2683 +static int may_create_in_sticky(struct dentry * const dir,
2684 + struct inode * const inode)
2685 +{
2686 + if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
2687 + (!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
2688 + likely(!(dir->d_inode->i_mode & S_ISVTX)) ||
2689 + uid_eq(inode->i_uid, dir->d_inode->i_uid) ||
2690 + uid_eq(current_fsuid(), inode->i_uid))
2691 + return 0;
2692 +
2693 + if (likely(dir->d_inode->i_mode & 0002) ||
2694 + (dir->d_inode->i_mode & 0020 &&
2695 + ((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
2696 + (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
2697 + return -EACCES;
2698 + }
2699 + return 0;
2700 +}
2701 +
2702 static __always_inline
2703 const char *get_link(struct nameidata *nd)
2704 {
2705 @@ -3356,9 +3397,15 @@ finish_open:
2706 if (error)
2707 return error;
2708 audit_inode(nd->name, nd->path.dentry, 0);
2709 - error = -EISDIR;
2710 - if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
2711 - goto out;
2712 + if (open_flag & O_CREAT) {
2713 + error = -EISDIR;
2714 + if (d_is_dir(nd->path.dentry))
2715 + goto out;
2716 + error = may_create_in_sticky(dir,
2717 + d_backing_inode(nd->path.dentry));
2718 + if (unlikely(error))
2719 + goto out;
2720 + }
2721 error = -ENOTDIR;
2722 if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
2723 goto out;
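From userspace, the new restrictions change what a failed O_CREAT open can mean. With fs.protected_regular / fs.protected_fifos enabled, opening a path in a world-writable sticky directory that already exists as someone else's file or FIFO fails with EACCES instead of writing into the attacker-controlled object. A small runnable sketch of a caller handling that case (the path is illustrative):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Illustrative path in a world-writable sticky directory. */
	const char *path = "/tmp/example-report.txt";
	int fd = open(path, O_CREAT | O_WRONLY, 0600);

	if (fd < 0) {
		/*
		 * With protected_regular/protected_fifos set, EACCES here
		 * can mean "pre-existing file or FIFO owned by another
		 * user in a sticky directory", not just a permission bit.
		 */
		fprintf(stderr, "open %s: %s\n", path, strerror(errno));
		return 1;
	}

	if (write(fd, "ok\n", 3) != 3)
		perror("write");
	close(fd);
	return 0;
}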
2724 diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
2725 index 5f5270941ba0..f7178f44825b 100644
2726 --- a/include/linux/can/dev.h
2727 +++ b/include/linux/can/dev.h
2728 @@ -154,6 +154,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
2729
2730 void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
2731 unsigned int idx);
2732 +struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr);
2733 unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
2734 void can_free_echo_skb(struct net_device *dev, unsigned int idx);
2735
2736 diff --git a/include/linux/fs.h b/include/linux/fs.h
2737 index e9867aff53d8..bcad2b963296 100644
2738 --- a/include/linux/fs.h
2739 +++ b/include/linux/fs.h
2740 @@ -69,6 +69,8 @@ extern struct inodes_stat_t inodes_stat;
2741 extern int leases_enable, lease_break_time;
2742 extern int sysctl_protected_symlinks;
2743 extern int sysctl_protected_hardlinks;
2744 +extern int sysctl_protected_fifos;
2745 +extern int sysctl_protected_regular;
2746
2747 struct buffer_head;
2748 typedef int (get_block_t)(struct inode *inode, sector_t iblock,
2749 diff --git a/include/linux/integrity.h b/include/linux/integrity.h
2750 index c2d6082a1a4c..858d3f4a2241 100644
2751 --- a/include/linux/integrity.h
2752 +++ b/include/linux/integrity.h
2753 @@ -14,6 +14,7 @@
2754
2755 enum integrity_status {
2756 INTEGRITY_PASS = 0,
2757 + INTEGRITY_PASS_IMMUTABLE,
2758 INTEGRITY_FAIL,
2759 INTEGRITY_NOLABEL,
2760 INTEGRITY_NOXATTRS,
2761 diff --git a/include/linux/of.h b/include/linux/of.h
2762 index 299aeb192727..a19cc85b9373 100644
2763 --- a/include/linux/of.h
2764 +++ b/include/linux/of.h
2765 @@ -275,6 +275,8 @@ extern struct device_node *of_get_next_child(const struct device_node *node,
2766 extern struct device_node *of_get_next_available_child(
2767 const struct device_node *node, struct device_node *prev);
2768
2769 +extern struct device_node *of_get_compatible_child(const struct device_node *parent,
2770 + const char *compatible);
2771 extern struct device_node *of_get_child_by_name(const struct device_node *node,
2772 const char *name);
2773
2774 @@ -606,6 +608,12 @@ static inline bool of_have_populated_dt(void)
2775 return false;
2776 }
2777
2778 +static inline struct device_node *of_get_compatible_child(const struct device_node *parent,
2779 + const char *compatible)
2780 +{
2781 + return NULL;
2782 +}
2783 +
2784 static inline struct device_node *of_get_child_by_name(
2785 const struct device_node *node,
2786 const char *name)
2787 diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
2788 index a3d90b9da18d..407874535fd3 100644
2789 --- a/include/linux/pfn_t.h
2790 +++ b/include/linux/pfn_t.h
2791 @@ -9,7 +9,7 @@
2792 * PFN_DEV - pfn is not covered by system memmap by default
2793 * PFN_MAP - pfn has a dynamic page mapping established by a device driver
2794 */
2795 -#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
2796 +#define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
2797 #define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1))
2798 #define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2))
2799 #define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
2800 diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
2801 index 77777d918676..cc892a9e109d 100644
2802 --- a/kernel/debug/kdb/kdb_io.c
2803 +++ b/kernel/debug/kdb/kdb_io.c
2804 @@ -215,7 +215,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
2805 int count;
2806 int i;
2807 int diag, dtab_count;
2808 - int key;
2809 + int key, buf_size, ret;
2810
2811
2812 diag = kdbgetintenv("DTABCOUNT", &dtab_count);
2813 @@ -335,9 +335,8 @@ poll_again:
2814 else
2815 p_tmp = tmpbuffer;
2816 len = strlen(p_tmp);
2817 - count = kallsyms_symbol_complete(p_tmp,
2818 - sizeof(tmpbuffer) -
2819 - (p_tmp - tmpbuffer));
2820 + buf_size = sizeof(tmpbuffer) - (p_tmp - tmpbuffer);
2821 + count = kallsyms_symbol_complete(p_tmp, buf_size);
2822 if (tab == 2 && count > 0) {
2823 kdb_printf("\n%d symbols are found.", count);
2824 if (count > dtab_count) {
2825 @@ -349,9 +348,13 @@ poll_again:
2826 }
2827 kdb_printf("\n");
2828 for (i = 0; i < count; i++) {
2829 - if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
2830 + ret = kallsyms_symbol_next(p_tmp, i, buf_size);
2831 + if (WARN_ON(!ret))
2832 break;
2833 - kdb_printf("%s ", p_tmp);
2834 + if (ret != -E2BIG)
2835 + kdb_printf("%s ", p_tmp);
2836 + else
2837 + kdb_printf("%s... ", p_tmp);
2838 *(p_tmp + len) = '\0';
2839 }
2840 if (i >= dtab_count)
2841 diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
2842 index 75014d7f4568..533e04e75a9c 100644
2843 --- a/kernel/debug/kdb/kdb_private.h
2844 +++ b/kernel/debug/kdb/kdb_private.h
2845 @@ -83,7 +83,7 @@ typedef struct __ksymtab {
2846 unsigned long sym_start;
2847 unsigned long sym_end;
2848 } kdb_symtab_t;
2849 -extern int kallsyms_symbol_next(char *prefix_name, int flag);
2850 +extern int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size);
2851 extern int kallsyms_symbol_complete(char *prefix_name, int max_len);
2852
2853 /* Exported Symbols for kernel loadable modules to use. */
2854 diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
2855 index d35cc2d3a4cc..2aed4a33521b 100644
2856 --- a/kernel/debug/kdb/kdb_support.c
2857 +++ b/kernel/debug/kdb/kdb_support.c
2858 @@ -221,11 +221,13 @@ int kallsyms_symbol_complete(char *prefix_name, int max_len)
2859 * Parameters:
2860 * prefix_name prefix of a symbol name to lookup
2861 * flag 0 means search from the head, 1 means continue search.
2862 + * buf_size maximum length that can be written to prefix_name
2863 + * buffer
2864 * Returns:
2865 * 1 if a symbol matches the given prefix.
2866 * 0 if no string found
2867 */
2868 -int kallsyms_symbol_next(char *prefix_name, int flag)
2869 +int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size)
2870 {
2871 int prefix_len = strlen(prefix_name);
2872 static loff_t pos;
2873 @@ -235,10 +237,8 @@ int kallsyms_symbol_next(char *prefix_name, int flag)
2874 pos = 0;
2875
2876 while ((name = kdb_walk_kallsyms(&pos))) {
2877 - if (strncmp(name, prefix_name, prefix_len) == 0) {
2878 - strncpy(prefix_name, name, strlen(name)+1);
2879 - return 1;
2880 - }
2881 + if (!strncmp(name, prefix_name, prefix_len))
2882 + return strscpy(prefix_name, name, buf_size);
2883 }
2884 return 0;
2885 }
2886 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
2887 index 917be221438b..6b3fff6a6437 100644
2888 --- a/kernel/sched/core.c
2889 +++ b/kernel/sched/core.c
2890 @@ -4087,8 +4087,8 @@ static int __sched_setscheduler(struct task_struct *p,
2891 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
2892 struct rq *rq;
2893
2894 - /* may grab non-irq protected spin_locks */
2895 - BUG_ON(in_interrupt());
2896 + /* The pi code expects interrupts enabled */
2897 + BUG_ON(pi && in_interrupt());
2898 recheck:
2899 /* double check policy once rq lock held */
2900 if (policy < 0) {
2901 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
2902 index 7df6be31be36..23f658d311c0 100644
2903 --- a/kernel/sysctl.c
2904 +++ b/kernel/sysctl.c
2905 @@ -1794,6 +1794,24 @@ static struct ctl_table fs_table[] = {
2906 .extra1 = &zero,
2907 .extra2 = &one,
2908 },
2909 + {
2910 + .procname = "protected_fifos",
2911 + .data = &sysctl_protected_fifos,
2912 + .maxlen = sizeof(int),
2913 + .mode = 0600,
2914 + .proc_handler = proc_dointvec_minmax,
2915 + .extra1 = &zero,
2916 + .extra2 = &two,
2917 + },
2918 + {
2919 + .procname = "protected_regular",
2920 + .data = &sysctl_protected_regular,
2921 + .maxlen = sizeof(int),
2922 + .mode = 0600,
2923 + .proc_handler = proc_dointvec_minmax,
2924 + .extra1 = &zero,
2925 + .extra2 = &two,
2926 + },
2927 {
2928 .procname = "suid_dumpable",
2929 .data = &suid_dumpable,
2930 diff --git a/mm/shmem.c b/mm/shmem.c
2931 index 4b5cca167baf..358a92be43eb 100644
2932 --- a/mm/shmem.c
2933 +++ b/mm/shmem.c
2934 @@ -2414,9 +2414,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2935 inode_lock(inode);
2936 /* We're holding i_mutex so we can access i_size directly */
2937
2938 - if (offset < 0)
2939 - offset = -EINVAL;
2940 - else if (offset >= inode->i_size)
2941 + if (offset < 0 || offset >= inode->i_size)
2942 offset = -ENXIO;
2943 else {
2944 start = offset >> PAGE_SHIFT;
2945 diff --git a/mm/slab.c b/mm/slab.c
2946 index c59844dbd034..263dcda6897b 100644
2947 --- a/mm/slab.c
2948 +++ b/mm/slab.c
2949 @@ -3690,6 +3690,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
2950 struct kmem_cache *cachep;
2951 void *ret;
2952
2953 + if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
2954 + return NULL;
2955 cachep = kmalloc_slab(size, flags);
2956 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
2957 return cachep;
2958 @@ -3725,6 +3727,8 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
2959 struct kmem_cache *cachep;
2960 void *ret;
2961
2962 + if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
2963 + return NULL;
2964 cachep = kmalloc_slab(size, flags);
2965 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
2966 return cachep;
2967 diff --git a/mm/slab_common.c b/mm/slab_common.c
2968 index 622f6b6ae844..13f1926f8fcd 100644
2969 --- a/mm/slab_common.c
2970 +++ b/mm/slab_common.c
2971 @@ -883,18 +883,18 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
2972 {
2973 int index;
2974
2975 - if (unlikely(size > KMALLOC_MAX_SIZE)) {
2976 - WARN_ON_ONCE(!(flags & __GFP_NOWARN));
2977 - return NULL;
2978 - }
2979 -
2980 if (size <= 192) {
2981 if (!size)
2982 return ZERO_SIZE_PTR;
2983
2984 index = size_index[size_index_elem(size)];
2985 - } else
2986 + } else {
2987 + if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
2988 + WARN_ON(1);
2989 + return NULL;
2990 + }
2991 index = fls(size - 1);
2992 + }
2993
2994 #ifdef CONFIG_ZONE_DMA
2995 if (unlikely((flags & GFP_DMA)))
2996 diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h
2997 index 3bfec472734a..78916c510d9a 100644
2998 --- a/net/ieee802154/6lowpan/6lowpan_i.h
2999 +++ b/net/ieee802154/6lowpan/6lowpan_i.h
3000 @@ -19,8 +19,8 @@ typedef unsigned __bitwise__ lowpan_rx_result;
3001 struct frag_lowpan_compare_key {
3002 u16 tag;
3003 u16 d_size;
3004 - const struct ieee802154_addr src;
3005 - const struct ieee802154_addr dst;
3006 + struct ieee802154_addr src;
3007 + struct ieee802154_addr dst;
3008 };
3009
3010 /* Equivalent of ipv4 struct ipq
3011 diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
3012 index 6fca75581e13..aab1e2dfdfca 100644
3013 --- a/net/ieee802154/6lowpan/reassembly.c
3014 +++ b/net/ieee802154/6lowpan/reassembly.c
3015 @@ -74,14 +74,14 @@ fq_find(struct net *net, const struct lowpan_802154_cb *cb,
3016 {
3017 struct netns_ieee802154_lowpan *ieee802154_lowpan =
3018 net_ieee802154_lowpan(net);
3019 - struct frag_lowpan_compare_key key = {
3020 - .tag = cb->d_tag,
3021 - .d_size = cb->d_size,
3022 - .src = *src,
3023 - .dst = *dst,
3024 - };
3025 + struct frag_lowpan_compare_key key = {};
3026 struct inet_frag_queue *q;
3027
3028 + key.tag = cb->d_tag;
3029 + key.d_size = cb->d_size;
3030 + key.src = *src;
3031 + key.dst = *dst;
3032 +
3033 q = inet_frag_find(&ieee802154_lowpan->frags, &key);
3034 if (!q)
3035 return NULL;
3036 @@ -371,7 +371,7 @@ int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
3037 struct lowpan_frag_queue *fq;
3038 struct net *net = dev_net(skb->dev);
3039 struct lowpan_802154_cb *cb = lowpan_802154_cb(skb);
3040 - struct ieee802154_hdr hdr;
3041 + struct ieee802154_hdr hdr = {};
3042 int err;
3043
3044 if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
3045 diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
3046 index 85aae8c84aeb..789e66b0187a 100644
3047 --- a/net/llc/af_llc.c
3048 +++ b/net/llc/af_llc.c
3049 @@ -726,7 +726,6 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3050 struct sk_buff *skb = NULL;
3051 struct sock *sk = sock->sk;
3052 struct llc_sock *llc = llc_sk(sk);
3053 - unsigned long cpu_flags;
3054 size_t copied = 0;
3055 u32 peek_seq = 0;
3056 u32 *seq, skb_len;
3057 @@ -851,9 +850,8 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3058 goto copy_uaddr;
3059
3060 if (!(flags & MSG_PEEK)) {
3061 - spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
3062 - sk_eat_skb(sk, skb);
3063 - spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
3064 + skb_unlink(skb, &sk->sk_receive_queue);
3065 + kfree_skb(skb);
3066 *seq = 0;
3067 }
3068
3069 @@ -874,9 +872,8 @@ copy_uaddr:
3070 llc_cmsg_rcv(msg, skb);
3071
3072 if (!(flags & MSG_PEEK)) {
3073 - spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
3074 - sk_eat_skb(sk, skb);
3075 - spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
3076 + skb_unlink(skb, &sk->sk_receive_queue);
3077 + kfree_skb(skb);
3078 *seq = 0;
3079 }
3080
3081 diff --git a/net/sctp/associola.c b/net/sctp/associola.c
3082 index 738c55e994c4..7e127cde1ccc 100644
3083 --- a/net/sctp/associola.c
3084 +++ b/net/sctp/associola.c
3085 @@ -488,8 +488,9 @@ void sctp_assoc_set_primary(struct sctp_association *asoc,
3086 void sctp_assoc_rm_peer(struct sctp_association *asoc,
3087 struct sctp_transport *peer)
3088 {
3089 - struct list_head *pos;
3090 - struct sctp_transport *transport;
3091 + struct sctp_transport *transport;
3092 + struct list_head *pos;
3093 + struct sctp_chunk *ch;
3094
3095 pr_debug("%s: association:%p addr:%pISpc\n",
3096 __func__, asoc, &peer->ipaddr.sa);
3097 @@ -547,7 +548,6 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
3098 */
3099 if (!list_empty(&peer->transmitted)) {
3100 struct sctp_transport *active = asoc->peer.active_path;
3101 - struct sctp_chunk *ch;
3102
3103 /* Reset the transport of each chunk on this list */
3104 list_for_each_entry(ch, &peer->transmitted,
3105 @@ -569,6 +569,10 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
3106 sctp_transport_hold(active);
3107 }
3108
3109 + list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list)
3110 + if (ch->transport == peer)
3111 + ch->transport = NULL;
3112 +
3113 asoc->peer.transport_count--;
3114
3115 sctp_transport_free(peer);
3116 diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
3117 index f1df9837f1ac..1ac08dcbf85d 100644
3118 --- a/net/sunrpc/auth_generic.c
3119 +++ b/net/sunrpc/auth_generic.c
3120 @@ -281,13 +281,7 @@ static bool generic_key_to_expire(struct rpc_cred *cred)
3121 {
3122 struct auth_cred *acred = &container_of(cred, struct generic_cred,
3123 gc_base)->acred;
3124 - bool ret;
3125 -
3126 - get_rpccred(cred);
3127 - ret = test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
3128 - put_rpccred(cred);
3129 -
3130 - return ret;
3131 + return test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
3132 }
3133
3134 static const struct rpc_credops generic_credops = {
3135 diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h
3136 index f5f12727771a..2ff02459fcfd 100644
3137 --- a/security/integrity/evm/evm.h
3138 +++ b/security/integrity/evm/evm.h
3139 @@ -48,7 +48,7 @@ int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
3140 size_t req_xattr_value_len, char *digest);
3141 int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
3142 const char *req_xattr_value,
3143 - size_t req_xattr_value_len, char *digest);
3144 + size_t req_xattr_value_len, char type, char *digest);
3145 int evm_init_hmac(struct inode *inode, const struct xattr *xattr,
3146 char *hmac_val);
3147 int evm_init_secfs(void);
3148 diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
3149 index 6fcbd8e99baf..c783fefa558a 100644
3150 --- a/security/integrity/evm/evm_crypto.c
3151 +++ b/security/integrity/evm/evm_crypto.c
3152 @@ -139,7 +139,7 @@ out:
3153 * protection.)
3154 */
3155 static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
3156 - char *digest)
3157 + char type, char *digest)
3158 {
3159 struct h_misc {
3160 unsigned long ino;
3161 @@ -150,13 +150,27 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
3162 } hmac_misc;
3163
3164 memset(&hmac_misc, 0, sizeof(hmac_misc));
3165 - hmac_misc.ino = inode->i_ino;
3166 - hmac_misc.generation = inode->i_generation;
3167 - hmac_misc.uid = from_kuid(inode->i_sb->s_user_ns, inode->i_uid);
3168 - hmac_misc.gid = from_kgid(inode->i_sb->s_user_ns, inode->i_gid);
3169 + /* Don't include the inode or generation number in portable
3170 + * signatures
3171 + */
3172 + if (type != EVM_XATTR_PORTABLE_DIGSIG) {
3173 + hmac_misc.ino = inode->i_ino;
3174 + hmac_misc.generation = inode->i_generation;
3175 + }
3176 + /* The hmac uid and gid must be encoded in the initial user
3177 +	 * namespace (not the filesystem's user namespace) as encoding
3178 +	 * them in the filesystem's user namespace allows an attack
3179 + * where first they are written in an unprivileged fuse mount
3180 + * of a filesystem and then the system is tricked to mount the
3181 + * filesystem for real on next boot and trust it because
3182 + * everything is signed.
3183 + */
3184 + hmac_misc.uid = from_kuid(&init_user_ns, inode->i_uid);
3185 + hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid);
3186 hmac_misc.mode = inode->i_mode;
3187 crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc));
3188 - if (evm_hmac_attrs & EVM_ATTR_FSUUID)
3189 + if ((evm_hmac_attrs & EVM_ATTR_FSUUID) &&
3190 + type != EVM_XATTR_PORTABLE_DIGSIG)
3191 crypto_shash_update(desc, inode->i_sb->s_uuid,
3192 sizeof(inode->i_sb->s_uuid));
3193 crypto_shash_final(desc, digest);
3194 @@ -182,6 +196,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
3195 char *xattr_value = NULL;
3196 int error;
3197 int size;
3198 + bool ima_present = false;
3199
3200 if (!(inode->i_opflags & IOP_XATTR))
3201 return -EOPNOTSUPP;
3202 @@ -192,11 +207,18 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
3203
3204 error = -ENODATA;
3205 for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
3206 + bool is_ima = false;
3207 +
3208 + if (strcmp(*xattrname, XATTR_NAME_IMA) == 0)
3209 + is_ima = true;
3210 +
3211 if ((req_xattr_name && req_xattr_value)
3212 && !strcmp(*xattrname, req_xattr_name)) {
3213 error = 0;
3214 crypto_shash_update(desc, (const u8 *)req_xattr_value,
3215 req_xattr_value_len);
3216 + if (is_ima)
3217 + ima_present = true;
3218 continue;
3219 }
3220 size = vfs_getxattr_alloc(dentry, *xattrname,
3221 @@ -211,9 +233,14 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
3222 error = 0;
3223 xattr_size = size;
3224 crypto_shash_update(desc, (const u8 *)xattr_value, xattr_size);
3225 + if (is_ima)
3226 + ima_present = true;
3227 }
3228 - hmac_add_misc(desc, inode, digest);
3229 + hmac_add_misc(desc, inode, type, digest);
3230
3231 + /* Portable EVM signatures must include an IMA hash */
3232 + if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present)
3233 + return -EPERM;
3234 out:
3235 kfree(xattr_value);
3236 kfree(desc);
3237 @@ -225,17 +252,45 @@ int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
3238 char *digest)
3239 {
3240 return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value,
3241 - req_xattr_value_len, EVM_XATTR_HMAC, digest);
3242 + req_xattr_value_len, EVM_XATTR_HMAC, digest);
3243 }
3244
3245 int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
3246 const char *req_xattr_value, size_t req_xattr_value_len,
3247 - char *digest)
3248 + char type, char *digest)
3249 {
3250 return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value,
3251 - req_xattr_value_len, IMA_XATTR_DIGEST, digest);
3252 + req_xattr_value_len, type, digest);
3253 +}
3254 +
3255 +static int evm_is_immutable(struct dentry *dentry, struct inode *inode)
3256 +{
3257 + const struct evm_ima_xattr_data *xattr_data = NULL;
3258 + struct integrity_iint_cache *iint;
3259 + int rc = 0;
3260 +
3261 + iint = integrity_iint_find(inode);
3262 + if (iint && (iint->flags & EVM_IMMUTABLE_DIGSIG))
3263 + return 1;
3264 +
3265 + /* Do this the hard way */
3266 + rc = vfs_getxattr_alloc(dentry, XATTR_NAME_EVM, (char **)&xattr_data, 0,
3267 + GFP_NOFS);
3268 + if (rc <= 0) {
3269 + if (rc == -ENODATA)
3270 + return 0;
3271 + return rc;
3272 + }
3273 + if (xattr_data->type == EVM_XATTR_PORTABLE_DIGSIG)
3274 + rc = 1;
3275 + else
3276 + rc = 0;
3277 +
3278 + kfree(xattr_data);
3279 + return rc;
3280 }
3281
3282 +
3283 /*
3284 * Calculate the hmac and update security.evm xattr
3285 *
3286 @@ -248,6 +303,16 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
3287 struct evm_ima_xattr_data xattr_data;
3288 int rc = 0;
3289
3290 + /*
3291 + * Don't permit any transformation of the EVM xattr if the signature
3292 + * is of an immutable type
3293 + */
3294 + rc = evm_is_immutable(dentry, inode);
3295 + if (rc < 0)
3296 + return rc;
3297 + if (rc)
3298 + return -EPERM;
3299 +
3300 rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
3301 xattr_value_len, xattr_data.digest);
3302 if (rc == 0) {
3303 @@ -273,7 +338,7 @@ int evm_init_hmac(struct inode *inode, const struct xattr *lsm_xattr,
3304 }
3305
3306 crypto_shash_update(desc, lsm_xattr->value, lsm_xattr->value_len);
3307 - hmac_add_misc(desc, inode, hmac_val);
3308 + hmac_add_misc(desc, inode, EVM_XATTR_HMAC, hmac_val);
3309 kfree(desc);
3310 return 0;
3311 }
3312 diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
3313 index ba8615576d4d..976b8dce6496 100644
3314 --- a/security/integrity/evm/evm_main.c
3315 +++ b/security/integrity/evm/evm_main.c
3316 @@ -29,7 +29,7 @@
3317 int evm_initialized;
3318
3319 static char *integrity_status_msg[] = {
3320 - "pass", "fail", "no_label", "no_xattrs", "unknown"
3321 + "pass", "pass_immutable", "fail", "no_label", "no_xattrs", "unknown"
3322 };
3323 char *evm_hmac = "hmac(sha1)";
3324 char *evm_hash = "sha1";
3325 @@ -118,7 +118,8 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
3326 enum integrity_status evm_status = INTEGRITY_PASS;
3327 int rc, xattr_len;
3328
3329 - if (iint && iint->evm_status == INTEGRITY_PASS)
3330 + if (iint && (iint->evm_status == INTEGRITY_PASS ||
3331 + iint->evm_status == INTEGRITY_PASS_IMMUTABLE))
3332 return iint->evm_status;
3333
3334 /* if status is not PASS, try to check again - against -ENOMEM */
3335 @@ -155,22 +156,26 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
3336 rc = -EINVAL;
3337 break;
3338 case EVM_IMA_XATTR_DIGSIG:
3339 + case EVM_XATTR_PORTABLE_DIGSIG:
3340 rc = evm_calc_hash(dentry, xattr_name, xattr_value,
3341 - xattr_value_len, calc.digest);
3342 + xattr_value_len, xattr_data->type,
3343 + calc.digest);
3344 if (rc)
3345 break;
3346 rc = integrity_digsig_verify(INTEGRITY_KEYRING_EVM,
3347 (const char *)xattr_data, xattr_len,
3348 calc.digest, sizeof(calc.digest));
3349 if (!rc) {
3350 - /* Replace RSA with HMAC if not mounted readonly and
3351 - * not immutable
3352 - */
3353 - if (!IS_RDONLY(d_backing_inode(dentry)) &&
3354 - !IS_IMMUTABLE(d_backing_inode(dentry)))
3355 + if (xattr_data->type == EVM_XATTR_PORTABLE_DIGSIG) {
3356 + if (iint)
3357 + iint->flags |= EVM_IMMUTABLE_DIGSIG;
3358 + evm_status = INTEGRITY_PASS_IMMUTABLE;
3359 + } else if (!IS_RDONLY(d_backing_inode(dentry)) &&
3360 + !IS_IMMUTABLE(d_backing_inode(dentry))) {
3361 evm_update_evmxattr(dentry, xattr_name,
3362 xattr_value,
3363 xattr_value_len);
3364 + }
3365 }
3366 break;
3367 default:
3368 @@ -271,7 +276,7 @@ static enum integrity_status evm_verify_current_integrity(struct dentry *dentry)
3369 * affect security.evm. An interesting side affect of writing posix xattr
3370 * acls is their modifying of the i_mode, which is included in security.evm.
3371 * For posix xattr acls only, permit security.evm, even if it currently
3372 - * doesn't exist, to be updated.
3373 + * doesn't exist, to be updated unless the EVM signature is immutable.
3374 */
3375 static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
3376 const void *xattr_value, size_t xattr_value_len)
3377 @@ -339,7 +344,8 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
3378 if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
3379 if (!xattr_value_len)
3380 return -EINVAL;
3381 - if (xattr_data->type != EVM_IMA_XATTR_DIGSIG)
3382 + if (xattr_data->type != EVM_IMA_XATTR_DIGSIG &&
3383 + xattr_data->type != EVM_XATTR_PORTABLE_DIGSIG)
3384 return -EPERM;
3385 }
3386 return evm_protect_xattr(dentry, xattr_name, xattr_value,
3387 @@ -416,6 +422,9 @@ void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name)
3388 /**
3389 * evm_inode_setattr - prevent updating an invalid EVM extended attribute
3390 * @dentry: pointer to the affected dentry
3391 + *
3392 + * Permit update of file attributes when files have a valid EVM signature,
3393 + * except in the case of them having an immutable portable signature.
3394 */
3395 int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
3396 {
3397 diff --git a/security/integrity/iint.c b/security/integrity/iint.c
3398 index c710d22042f9..7ea39b19e8ad 100644
3399 --- a/security/integrity/iint.c
3400 +++ b/security/integrity/iint.c
3401 @@ -74,6 +74,7 @@ static void iint_free(struct integrity_iint_cache *iint)
3402 iint->ima_hash = NULL;
3403 iint->version = 0;
3404 iint->flags = 0UL;
3405 + iint->atomic_flags = 0UL;
3406 iint->ima_file_status = INTEGRITY_UNKNOWN;
3407 iint->ima_mmap_status = INTEGRITY_UNKNOWN;
3408 iint->ima_bprm_status = INTEGRITY_UNKNOWN;
3409 @@ -155,12 +156,14 @@ static void init_once(void *foo)
3410 memset(iint, 0, sizeof(*iint));
3411 iint->version = 0;
3412 iint->flags = 0UL;
3413 + iint->atomic_flags = 0;
3414 iint->ima_file_status = INTEGRITY_UNKNOWN;
3415 iint->ima_mmap_status = INTEGRITY_UNKNOWN;
3416 iint->ima_bprm_status = INTEGRITY_UNKNOWN;
3417 iint->ima_read_status = INTEGRITY_UNKNOWN;
3418 iint->evm_status = INTEGRITY_UNKNOWN;
3419 iint->measured_pcrs = 0;
3420 + mutex_init(&iint->mutex);
3421 }
3422
3423 static int __init integrity_iintcache_init(void)
3424 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
3425 index d01a52f8f708..3b43057bf949 100644
3426 --- a/security/integrity/ima/ima_api.c
3427 +++ b/security/integrity/ima/ima_api.c
3428 @@ -198,42 +198,59 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
3429 struct inode *inode = file_inode(file);
3430 const char *filename = file->f_path.dentry->d_name.name;
3431 int result = 0;
3432 + int length;
3433 + void *tmpbuf;
3434 + u64 i_version;
3435 struct {
3436 struct ima_digest_data hdr;
3437 char digest[IMA_MAX_DIGEST_SIZE];
3438 } hash;
3439
3440 - if (!(iint->flags & IMA_COLLECTED)) {
3441 - u64 i_version = file_inode(file)->i_version;
3442 + if (iint->flags & IMA_COLLECTED)
3443 + goto out;
3444
3445 - if (file->f_flags & O_DIRECT) {
3446 - audit_cause = "failed(directio)";
3447 - result = -EACCES;
3448 - goto out;
3449 - }
3450 + /*
3451 +	 * Detecting file change is based on i_version. On filesystems
3452 + * which do not support i_version, support is limited to an initial
3453 + * measurement/appraisal/audit.
3454 + */
3455 + i_version = file_inode(file)->i_version;
3456 + hash.hdr.algo = algo;
3457
3458 - hash.hdr.algo = algo;
3459 -
3460 - result = (!buf) ? ima_calc_file_hash(file, &hash.hdr) :
3461 - ima_calc_buffer_hash(buf, size, &hash.hdr);
3462 - if (!result) {
3463 - int length = sizeof(hash.hdr) + hash.hdr.length;
3464 - void *tmpbuf = krealloc(iint->ima_hash, length,
3465 - GFP_NOFS);
3466 - if (tmpbuf) {
3467 - iint->ima_hash = tmpbuf;
3468 - memcpy(iint->ima_hash, &hash, length);
3469 - iint->version = i_version;
3470 - iint->flags |= IMA_COLLECTED;
3471 - } else
3472 - result = -ENOMEM;
3473 - }
3474 + /* Initialize hash digest to 0's in case of failure */
3475 + memset(&hash.digest, 0, sizeof(hash.digest));
3476 +
3477 + if (buf)
3478 + result = ima_calc_buffer_hash(buf, size, &hash.hdr);
3479 + else
3480 + result = ima_calc_file_hash(file, &hash.hdr);
3481 +
3482 + if (result && result != -EBADF && result != -EINVAL)
3483 + goto out;
3484 +
3485 + length = sizeof(hash.hdr) + hash.hdr.length;
3486 + tmpbuf = krealloc(iint->ima_hash, length, GFP_NOFS);
3487 + if (!tmpbuf) {
3488 + result = -ENOMEM;
3489 + goto out;
3490 }
3491 +
3492 + iint->ima_hash = tmpbuf;
3493 + memcpy(iint->ima_hash, &hash, length);
3494 + iint->version = i_version;
3495 +
3496 + /* Possibly temporary failure due to type of read (eg. O_DIRECT) */
3497 + if (!result)
3498 + iint->flags |= IMA_COLLECTED;
3499 out:
3500 - if (result)
3501 + if (result) {
3502 + if (file->f_flags & O_DIRECT)
3503 + audit_cause = "failed(directio)";
3504 +
3505 integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
3506 filename, "collect_data", audit_cause,
3507 result, 0);
3508 + }
3509 return result;
3510 }
3511
3512 @@ -277,7 +294,7 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
3513 }
3514
3515 result = ima_store_template(entry, violation, inode, filename, pcr);
3516 - if (!result || result == -EEXIST) {
3517 + if ((!result || result == -EEXIST) && !(file->f_flags & O_DIRECT)) {
3518 iint->flags |= IMA_MEASURED;
3519 iint->measured_pcrs |= (0x1 << pcr);
3520 }
3521 diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
3522 index 1e6f23f77f15..af55c31754a4 100644
3523 --- a/security/integrity/ima/ima_appraise.c
3524 +++ b/security/integrity/ima/ima_appraise.c
3525 @@ -214,7 +214,9 @@ int ima_appraise_measurement(enum ima_hooks func,
3526 }
3527
3528 status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, rc, iint);
3529 - if ((status != INTEGRITY_PASS) && (status != INTEGRITY_UNKNOWN)) {
3530 + if ((status != INTEGRITY_PASS) &&
3531 + (status != INTEGRITY_PASS_IMMUTABLE) &&
3532 + (status != INTEGRITY_UNKNOWN)) {
3533 if ((status == INTEGRITY_NOLABEL)
3534 || (status == INTEGRITY_NOXATTRS))
3535 cause = "missing-HMAC";
3536 @@ -232,6 +234,7 @@ int ima_appraise_measurement(enum ima_hooks func,
3537 status = INTEGRITY_FAIL;
3538 break;
3539 }
3540 + clear_bit(IMA_DIGSIG, &iint->atomic_flags);
3541 if (xattr_len - sizeof(xattr_value->type) - hash_start >=
3542 iint->ima_hash->length)
3543 /* xattr length may be longer. md5 hash in previous
3544 @@ -250,7 +253,7 @@ int ima_appraise_measurement(enum ima_hooks func,
3545 status = INTEGRITY_PASS;
3546 break;
3547 case EVM_IMA_XATTR_DIGSIG:
3548 - iint->flags |= IMA_DIGSIG;
3549 + set_bit(IMA_DIGSIG, &iint->atomic_flags);
3550 rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
3551 (const char *)xattr_value, rc,
3552 iint->ima_hash->digest,
3553 @@ -301,7 +304,7 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
3554 int rc = 0;
3555
3556 /* do not collect and update hash for digital signatures */
3557 - if (iint->flags & IMA_DIGSIG)
3558 + if (test_bit(IMA_DIGSIG, &iint->atomic_flags))
3559 return;
3560
3561 if (iint->ima_file_status != INTEGRITY_PASS)
3562 @@ -311,7 +314,9 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
3563 if (rc < 0)
3564 return;
3565
3566 + inode_lock(file_inode(file));
3567 ima_fix_xattr(dentry, iint);
3568 + inode_unlock(file_inode(file));
3569 }
3570
3571 /**
3572 @@ -334,16 +339,14 @@ void ima_inode_post_setattr(struct dentry *dentry)
3573 return;
3574
3575 must_appraise = ima_must_appraise(inode, MAY_ACCESS, POST_SETATTR);
3576 + if (!must_appraise)
3577 + __vfs_removexattr(dentry, XATTR_NAME_IMA);
3578 iint = integrity_iint_find(inode);
3579 if (iint) {
3580 - iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
3581 - IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
3582 - IMA_ACTION_RULE_FLAGS);
3583 - if (must_appraise)
3584 - iint->flags |= IMA_APPRAISE;
3585 + set_bit(IMA_CHANGE_ATTR, &iint->atomic_flags);
3586 + if (!must_appraise)
3587 + clear_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
3588 }
3589 - if (!must_appraise)
3590 - __vfs_removexattr(dentry, XATTR_NAME_IMA);
3591 }
3592
3593 /*
3594 @@ -372,12 +375,12 @@ static void ima_reset_appraise_flags(struct inode *inode, int digsig)
3595 iint = integrity_iint_find(inode);
3596 if (!iint)
3597 return;
3598 -
3599 - iint->flags &= ~IMA_DONE_MASK;
3600 iint->measured_pcrs = 0;
3601 + set_bit(IMA_CHANGE_XATTR, &iint->atomic_flags);
3602 if (digsig)
3603 - iint->flags |= IMA_DIGSIG;
3604 - return;
3605 + set_bit(IMA_DIGSIG, &iint->atomic_flags);
3606 + else
3607 + clear_bit(IMA_DIGSIG, &iint->atomic_flags);
3608 }
3609
3610 int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
3611 diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
3612 index 93f09173cc49..20e66291ca99 100644
3613 --- a/security/integrity/ima/ima_crypto.c
3614 +++ b/security/integrity/ima/ima_crypto.c
3615 @@ -443,6 +443,16 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
3616 loff_t i_size;
3617 int rc;
3618
3619 + /*
3620 +	 * For consistency, fail files opened with the O_DIRECT flag on
3621 + * filesystems mounted with/without DAX option.
3622 + */
3623 + if (file->f_flags & O_DIRECT) {
3624 + hash->length = hash_digest_size[ima_hash_algo];
3625 + hash->algo = ima_hash_algo;
3626 + return -EINVAL;
3627 + }
3628 +
3629 i_size = i_size_read(file_inode(file));
3630
3631 if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
3632 diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
3633 index 9652541c4d43..ea1e629a5d4c 100644
3634 --- a/security/integrity/ima/ima_main.c
3635 +++ b/security/integrity/ima/ima_main.c
3636 @@ -99,10 +99,13 @@ static void ima_rdwr_violation_check(struct file *file,
3637 if (!iint)
3638 iint = integrity_iint_find(inode);
3639 /* IMA_MEASURE is set from reader side */
3640 - if (iint && (iint->flags & IMA_MEASURE))
3641 + if (iint && test_bit(IMA_MUST_MEASURE,
3642 + &iint->atomic_flags))
3643 send_tomtou = true;
3644 }
3645 } else {
3646 + if (must_measure)
3647 + set_bit(IMA_MUST_MEASURE, &iint->atomic_flags);
3648 if ((atomic_read(&inode->i_writecount) > 0) && must_measure)
3649 send_writers = true;
3650 }
3651 @@ -124,21 +127,24 @@ static void ima_check_last_writer(struct integrity_iint_cache *iint,
3652 struct inode *inode, struct file *file)
3653 {
3654 fmode_t mode = file->f_mode;
3655 + bool update;
3656
3657 if (!(mode & FMODE_WRITE))
3658 return;
3659
3660 - inode_lock(inode);
3661 + mutex_lock(&iint->mutex);
3662 if (atomic_read(&inode->i_writecount) == 1) {
3663 + update = test_and_clear_bit(IMA_UPDATE_XATTR,
3664 + &iint->atomic_flags);
3665 if ((iint->version != inode->i_version) ||
3666 (iint->flags & IMA_NEW_FILE)) {
3667 iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE);
3668 iint->measured_pcrs = 0;
3669 - if (iint->flags & IMA_APPRAISE)
3670 + if (update)
3671 ima_update_xattr(iint, file);
3672 }
3673 }
3674 - inode_unlock(inode);
3675 + mutex_unlock(&iint->mutex);
3676 }
3677
3678 /**
3679 @@ -171,7 +177,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
3680 char *pathbuf = NULL;
3681 char filename[NAME_MAX];
3682 const char *pathname = NULL;
3683 - int rc = -ENOMEM, action, must_appraise;
3684 + int rc = 0, action, must_appraise = 0;
3685 int pcr = CONFIG_IMA_MEASURE_PCR_IDX;
3686 struct evm_ima_xattr_data *xattr_value = NULL;
3687 int xattr_len = 0;
3688 @@ -202,17 +208,31 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
3689 if (action) {
3690 iint = integrity_inode_get(inode);
3691 if (!iint)
3692 - goto out;
3693 + rc = -ENOMEM;
3694 }
3695
3696 - if (violation_check) {
3697 + if (!rc && violation_check)
3698 ima_rdwr_violation_check(file, iint, action & IMA_MEASURE,
3699 &pathbuf, &pathname);
3700 - if (!action) {
3701 - rc = 0;
3702 - goto out_free;
3703 - }
3704 - }
3705 +
3706 + inode_unlock(inode);
3707 +
3708 + if (rc)
3709 + goto out;
3710 + if (!action)
3711 + goto out;
3712 +
3713 + mutex_lock(&iint->mutex);
3714 +
3715 + if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags))
3716 + /* reset appraisal flags if ima_inode_post_setattr was called */
3717 + iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
3718 + IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
3719 + IMA_ACTION_FLAGS);
3720 +
3721 + if (test_and_clear_bit(IMA_CHANGE_XATTR, &iint->atomic_flags))
3722 + /* reset all flags if ima_inode_setxattr was called */
3723 + iint->flags &= ~IMA_DONE_MASK;
3724
3725 /* Determine if already appraised/measured based on bitmask
3726 * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
3727 @@ -230,7 +250,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
3728 if (!action) {
3729 if (must_appraise)
3730 rc = ima_get_cache_status(iint, func);
3731 - goto out_digsig;
3732 + goto out_locked;
3733 }
3734
3735 template_desc = ima_template_desc_current();
3736 @@ -242,11 +262,8 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
3737 hash_algo = ima_get_hash_algo(xattr_value, xattr_len);
3738
3739 rc = ima_collect_measurement(iint, file, buf, size, hash_algo);
3740 - if (rc != 0) {
3741 - if (file->f_flags & O_DIRECT)
3742 - rc = (iint->flags & IMA_PERMIT_DIRECTIO) ? 0 : -EACCES;
3743 - goto out_digsig;
3744 - }
3745 + if (rc != 0 && rc != -EBADF && rc != -EINVAL)
3746 + goto out_locked;
3747
3748 if (!pathbuf) /* ima_rdwr_violation possibly pre-fetched */
3749 pathname = ima_d_path(&file->f_path, &pathbuf, filename);
3750 @@ -254,24 +271,32 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
3751 if (action & IMA_MEASURE)
3752 ima_store_measurement(iint, file, pathname,
3753 xattr_value, xattr_len, pcr);
3754 - if (action & IMA_APPRAISE_SUBMASK)
3755 + if (rc == 0 && (action & IMA_APPRAISE_SUBMASK)) {
3756 + inode_lock(inode);
3757 rc = ima_appraise_measurement(func, iint, file, pathname,
3758 xattr_value, xattr_len, opened);
3759 + inode_unlock(inode);
3760 + }
3761 if (action & IMA_AUDIT)
3762 ima_audit_measurement(iint, pathname);
3763
3764 -out_digsig:
3765 - if ((mask & MAY_WRITE) && (iint->flags & IMA_DIGSIG) &&
3766 + if ((file->f_flags & O_DIRECT) && (iint->flags & IMA_PERMIT_DIRECTIO))
3767 + rc = 0;
3768 +out_locked:
3769 + if ((mask & MAY_WRITE) && test_bit(IMA_DIGSIG, &iint->atomic_flags) &&
3770 !(iint->flags & IMA_NEW_FILE))
3771 rc = -EACCES;
3772 + mutex_unlock(&iint->mutex);
3773 kfree(xattr_value);
3774 -out_free:
3775 +out:
3776 if (pathbuf)
3777 __putname(pathbuf);
3778 -out:
3779 - inode_unlock(inode);
3780 - if ((rc && must_appraise) && (ima_appraise & IMA_APPRAISE_ENFORCE))
3781 - return -EACCES;
3782 + if (must_appraise) {
3783 + if (rc && (ima_appraise & IMA_APPRAISE_ENFORCE))
3784 + return -EACCES;
3785 + if (file->f_mode & FMODE_WRITE)
3786 + set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
3787 + }
3788 return 0;
3789 }
3790
3791 diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
3792 index 24520b4ef3b0..2f7e236b931c 100644
3793 --- a/security/integrity/integrity.h
3794 +++ b/security/integrity/integrity.h
3795 @@ -29,10 +29,10 @@
3796 /* iint cache flags */
3797 #define IMA_ACTION_FLAGS 0xff000000
3798 #define IMA_ACTION_RULE_FLAGS 0x06000000
3799 -#define IMA_DIGSIG 0x01000000
3800 -#define IMA_DIGSIG_REQUIRED 0x02000000
3801 -#define IMA_PERMIT_DIRECTIO 0x04000000
3802 -#define IMA_NEW_FILE 0x08000000
3803 +#define IMA_DIGSIG_REQUIRED 0x01000000
3804 +#define IMA_PERMIT_DIRECTIO 0x02000000
3805 +#define IMA_NEW_FILE 0x04000000
3806 +#define EVM_IMMUTABLE_DIGSIG 0x08000000
3807
3808 #define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
3809 IMA_APPRAISE_SUBMASK)
3810 @@ -53,11 +53,19 @@
3811 #define IMA_APPRAISED_SUBMASK (IMA_FILE_APPRAISED | IMA_MMAP_APPRAISED | \
3812 IMA_BPRM_APPRAISED | IMA_READ_APPRAISED)
3813
3814 +/* iint cache atomic_flags */
3815 +#define IMA_CHANGE_XATTR 0
3816 +#define IMA_UPDATE_XATTR 1
3817 +#define IMA_CHANGE_ATTR 2
3818 +#define IMA_DIGSIG 3
3819 +#define IMA_MUST_MEASURE 4
3820 +
3821 enum evm_ima_xattr_type {
3822 IMA_XATTR_DIGEST = 0x01,
3823 EVM_XATTR_HMAC,
3824 EVM_IMA_XATTR_DIGSIG,
3825 IMA_XATTR_DIGEST_NG,
3826 + EVM_XATTR_PORTABLE_DIGSIG,
3827 IMA_XATTR_LAST
3828 };
3829
3830 @@ -100,10 +108,12 @@ struct signature_v2_hdr {
3831 /* integrity data associated with an inode */
3832 struct integrity_iint_cache {
3833 struct rb_node rb_node; /* rooted in integrity_iint_tree */
3834 + struct mutex mutex; /* protects: version, flags, digest */
3835 struct inode *inode; /* back pointer to inode in question */
3836 u64 version; /* track inode changes */
3837 unsigned long flags;
3838 unsigned long measured_pcrs;
3839 + unsigned long atomic_flags;
3840 enum integrity_status ima_file_status:4;
3841 enum integrity_status ima_mmap_status:4;
3842 enum integrity_status ima_bprm_status:4;
3843 diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
3844 index d719db4219cd..175e4dce58df 100644
3845 --- a/security/selinux/ss/policydb.c
3846 +++ b/security/selinux/ss/policydb.c
3847 @@ -1097,7 +1097,7 @@ static int str_read(char **strp, gfp_t flags, void *fp, u32 len)
3848 if ((len == 0) || (len == (u32)-1))
3849 return -EINVAL;
3850
3851 - str = kmalloc(len + 1, flags);
3852 + str = kmalloc(len + 1, flags | __GFP_NOWARN);
3853 if (!str)
3854 return -ENOMEM;
3855
3856 diff --git a/tools/power/cpupower/bench/Makefile b/tools/power/cpupower/bench/Makefile
3857 index 3e59f1aa3947..8a285bca8e6c 100644
3858 --- a/tools/power/cpupower/bench/Makefile
3859 +++ b/tools/power/cpupower/bench/Makefile
3860 @@ -8,7 +8,7 @@ endif
3861 ifeq ($(strip $(STATIC)),true)
3862 LIBS = -L../ -L$(OUTPUT) -lm
3863 OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o \
3864 - $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/sysfs.o
3865 + $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/cpupower.o
3866 else
3867 LIBS = -L../ -L$(OUTPUT) -lm -lcpupower
3868 OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o
3869 diff --git a/tools/power/cpupower/lib/cpufreq.c b/tools/power/cpupower/lib/cpufreq.c
3870 index 1b993fe1ce23..0c0f3e3f0d80 100644
3871 --- a/tools/power/cpupower/lib/cpufreq.c
3872 +++ b/tools/power/cpupower/lib/cpufreq.c
3873 @@ -28,7 +28,7 @@ static unsigned int sysfs_cpufreq_read_file(unsigned int cpu, const char *fname,
3874
3875 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s",
3876 cpu, fname);
3877 - return sysfs_read_file(path, buf, buflen);
3878 + return cpupower_read_sysfs(path, buf, buflen);
3879 }
3880
3881 /* helper function to write a new value to a /sys file */
3882 diff --git a/tools/power/cpupower/lib/cpuidle.c b/tools/power/cpupower/lib/cpuidle.c
3883 index 9bd4c7655fdb..852d25462388 100644
3884 --- a/tools/power/cpupower/lib/cpuidle.c
3885 +++ b/tools/power/cpupower/lib/cpuidle.c
3886 @@ -319,7 +319,7 @@ static unsigned int sysfs_cpuidle_read_file(const char *fname, char *buf,
3887
3888 snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname);
3889
3890 - return sysfs_read_file(path, buf, buflen);
3891 + return cpupower_read_sysfs(path, buf, buflen);
3892 }
3893
3894
3895 diff --git a/tools/power/cpupower/lib/cpupower.c b/tools/power/cpupower/lib/cpupower.c
3896 index 9c395ec924de..9711d628b0f4 100644
3897 --- a/tools/power/cpupower/lib/cpupower.c
3898 +++ b/tools/power/cpupower/lib/cpupower.c
3899 @@ -15,7 +15,7 @@
3900 #include "cpupower.h"
3901 #include "cpupower_intern.h"
3902
3903 -unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen)
3904 +unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen)
3905 {
3906 int fd;
3907 ssize_t numread;
3908 @@ -95,7 +95,7 @@ static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *re
3909
3910 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s",
3911 cpu, fname);
3912 - if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0)
3913 + if (cpupower_read_sysfs(path, linebuf, MAX_LINE_LEN) == 0)
3914 return -1;
3915 *result = strtol(linebuf, &endp, 0);
3916 if (endp == linebuf || errno == ERANGE)
3917 diff --git a/tools/power/cpupower/lib/cpupower_intern.h b/tools/power/cpupower/lib/cpupower_intern.h
3918 index f8ec4009621c..433fa8619679 100644
3919 --- a/tools/power/cpupower/lib/cpupower_intern.h
3920 +++ b/tools/power/cpupower/lib/cpupower_intern.h
3921 @@ -2,4 +2,4 @@
3922 #define MAX_LINE_LEN 4096
3923 #define SYSFS_PATH_MAX 255
3924
3925 -unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen);
3926 +unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen);