Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.19/0135-4.19.36-all-fixes.patch



Revision 3414
Fri Aug 2 11:47:45 2019 UTC by niro
File size: 251959 bytes
-linux-4.19.36
1 niro 3414 diff --git a/MAINTAINERS b/MAINTAINERS
2     index 9e9b19ecf6f7..11a59e82d92e 100644
3     --- a/MAINTAINERS
4     +++ b/MAINTAINERS
5     @@ -7320,6 +7320,12 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers)
6     S: Supported
7     F: sound/soc/intel/
8    
9     +INTEL ATOMISP2 DUMMY / POWER-MANAGEMENT DRIVER
10     +M: Hans de Goede <hdegoede@redhat.com>
11     +L: platform-driver-x86@vger.kernel.org
12     +S: Maintained
13     +F: drivers/platform/x86/intel_atomisp2_pm.c
14     +
15     INTEL C600 SERIES SAS CONTROLLER DRIVER
16     M: Intel SCU Linux support <intel-linux-scu@intel.com>
17     M: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
18     diff --git a/Makefile b/Makefile
19     index f4229975b48c..3fac08f6a11e 100644
20     --- a/Makefile
21     +++ b/Makefile
22     @@ -1,7 +1,7 @@
23     # SPDX-License-Identifier: GPL-2.0
24     VERSION = 4
25     PATCHLEVEL = 19
26     -SUBLEVEL = 35
27     +SUBLEVEL = 36
28     EXTRAVERSION =
29     NAME = "People's Front"
30    
31     diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
32     index 2a1d2cbfee1a..651fa7978e51 100644
33     --- a/arch/arc/configs/hsdk_defconfig
34     +++ b/arch/arc/configs/hsdk_defconfig
35     @@ -8,6 +8,7 @@ CONFIG_NAMESPACES=y
36     # CONFIG_UTS_NS is not set
37     # CONFIG_PID_NS is not set
38     CONFIG_BLK_DEV_INITRD=y
39     +CONFIG_BLK_DEV_RAM=y
40     CONFIG_EMBEDDED=y
41     CONFIG_PERF_EVENTS=y
42     # CONFIG_VM_EVENT_COUNTERS is not set
43     diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
44     index 1f945d0f40da..208bf2c9e7b0 100644
45     --- a/arch/arc/kernel/head.S
46     +++ b/arch/arc/kernel/head.S
47     @@ -107,6 +107,7 @@ ENTRY(stext)
48     ; r2 = pointer to uboot provided cmdline or external DTB in mem
49     ; These are handled later in handle_uboot_args()
50     st r0, [@uboot_tag]
51     + st r1, [@uboot_magic]
52     st r2, [@uboot_arg]
53     #endif
54    
55     diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
56     index 3320ca2fe20f..a1218937abd6 100644
57     --- a/arch/arc/kernel/setup.c
58     +++ b/arch/arc/kernel/setup.c
59     @@ -35,6 +35,7 @@ unsigned int intr_to_DE_cnt;
60    
61     /* Part of U-boot ABI: see head.S */
62     int __initdata uboot_tag;
63     +int __initdata uboot_magic;
64     char __initdata *uboot_arg;
65    
66     const struct machine_desc *machine_desc;
67     @@ -484,6 +485,8 @@ static inline bool uboot_arg_invalid(unsigned long addr)
68     #define UBOOT_TAG_NONE 0
69     #define UBOOT_TAG_CMDLINE 1
70     #define UBOOT_TAG_DTB 2
71     +/* We always pass 0 as magic from U-boot */
72     +#define UBOOT_MAGIC_VALUE 0
73    
74     void __init handle_uboot_args(void)
75     {
76     @@ -499,6 +502,11 @@ void __init handle_uboot_args(void)
77     goto ignore_uboot_args;
78     }
79    
80     + if (uboot_magic != UBOOT_MAGIC_VALUE) {
81     + pr_warn(IGNORE_ARGS "non zero uboot magic\n");
82     + goto ignore_uboot_args;
83     + }
84     +
85     if (uboot_tag != UBOOT_TAG_NONE &&
86     uboot_arg_invalid((unsigned long)uboot_arg)) {
87     pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
88     diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl
89     index b9ec44060ed3..a03cf4dfb781 100644
90     --- a/arch/arm/crypto/sha256-armv4.pl
91     +++ b/arch/arm/crypto/sha256-armv4.pl
92     @@ -212,10 +212,11 @@ K256:
93     .global sha256_block_data_order
94     .type sha256_block_data_order,%function
95     sha256_block_data_order:
96     +.Lsha256_block_data_order:
97     #if __ARM_ARCH__<7
98     sub r3,pc,#8 @ sha256_block_data_order
99     #else
100     - adr r3,sha256_block_data_order
101     + adr r3,.Lsha256_block_data_order
102     #endif
103     #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
104     ldr r12,.LOPENSSL_armcap
105     diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped
106     index 3b58300d611c..054aae0edfce 100644
107     --- a/arch/arm/crypto/sha256-core.S_shipped
108     +++ b/arch/arm/crypto/sha256-core.S_shipped
109     @@ -93,10 +93,11 @@ K256:
110     .global sha256_block_data_order
111     .type sha256_block_data_order,%function
112     sha256_block_data_order:
113     +.Lsha256_block_data_order:
114     #if __ARM_ARCH__<7
115     sub r3,pc,#8 @ sha256_block_data_order
116     #else
117     - adr r3,sha256_block_data_order
118     + adr r3,.Lsha256_block_data_order
119     #endif
120     #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
121     ldr r12,.LOPENSSL_armcap
122     diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl
123     index fb5d15048c0b..788c17b56ecc 100644
124     --- a/arch/arm/crypto/sha512-armv4.pl
125     +++ b/arch/arm/crypto/sha512-armv4.pl
126     @@ -274,10 +274,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
127     .global sha512_block_data_order
128     .type sha512_block_data_order,%function
129     sha512_block_data_order:
130     +.Lsha512_block_data_order:
131     #if __ARM_ARCH__<7
132     sub r3,pc,#8 @ sha512_block_data_order
133     #else
134     - adr r3,sha512_block_data_order
135     + adr r3,.Lsha512_block_data_order
136     #endif
137     #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
138     ldr r12,.LOPENSSL_armcap
139     diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped
140     index b1c334a49cda..710ea309769e 100644
141     --- a/arch/arm/crypto/sha512-core.S_shipped
142     +++ b/arch/arm/crypto/sha512-core.S_shipped
143     @@ -141,10 +141,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
144     .global sha512_block_data_order
145     .type sha512_block_data_order,%function
146     sha512_block_data_order:
147     +.Lsha512_block_data_order:
148     #if __ARM_ARCH__<7
149     sub r3,pc,#8 @ sha512_block_data_order
150     #else
151     - adr r3,sha512_block_data_order
152     + adr r3,.Lsha512_block_data_order
153     #endif
154     #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
155     ldr r12,.LOPENSSL_armcap
156     diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
157     index a50dc00d79a2..d0a05a3bdb96 100644
158     --- a/arch/arm/kernel/patch.c
159     +++ b/arch/arm/kernel/patch.c
160     @@ -16,7 +16,7 @@ struct patch {
161     unsigned int insn;
162     };
163    
164     -static DEFINE_SPINLOCK(patch_lock);
165     +static DEFINE_RAW_SPINLOCK(patch_lock);
166    
167     static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
168     __acquires(&patch_lock)
169     @@ -33,7 +33,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
170     return addr;
171    
172     if (flags)
173     - spin_lock_irqsave(&patch_lock, *flags);
174     + raw_spin_lock_irqsave(&patch_lock, *flags);
175     else
176     __acquire(&patch_lock);
177    
178     @@ -48,7 +48,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
179     clear_fixmap(fixmap);
180    
181     if (flags)
182     - spin_unlock_irqrestore(&patch_lock, *flags);
183     + raw_spin_unlock_irqrestore(&patch_lock, *flags);
184     else
185     __release(&patch_lock);
186     }
187     diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
188     index b600e38364eb..377ff9cda667 100644
189     --- a/arch/arm/plat-samsung/Kconfig
190     +++ b/arch/arm/plat-samsung/Kconfig
191     @@ -256,7 +256,7 @@ config S3C_PM_DEBUG_LED_SMDK
192    
193     config SAMSUNG_PM_CHECK
194     bool "S3C2410 PM Suspend Memory CRC"
195     - depends on PM
196     + depends on PM && (PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210)
197     select CRC32
198     help
199     Enable the PM code's memory area checksum over sleep. This option
200     diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
201     index 44d66c33d59d..23b88b923f06 100644
202     --- a/arch/powerpc/kernel/rtasd.c
203     +++ b/arch/powerpc/kernel/rtasd.c
204     @@ -274,27 +274,16 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
205     }
206    
207     #ifdef CONFIG_PPC_PSERIES
208     -static s32 prrn_update_scope;
209     -
210     -static void prrn_work_fn(struct work_struct *work)
211     +static void handle_prrn_event(s32 scope)
212     {
213     /*
214     * For PRRN, we must pass the negative of the scope value in
215     * the RTAS event.
216     */
217     - pseries_devicetree_update(-prrn_update_scope);
218     + pseries_devicetree_update(-scope);
219     numa_update_cpu_topology(false);
220     }
221    
222     -static DECLARE_WORK(prrn_work, prrn_work_fn);
223     -
224     -static void prrn_schedule_update(u32 scope)
225     -{
226     - flush_work(&prrn_work);
227     - prrn_update_scope = scope;
228     - schedule_work(&prrn_work);
229     -}
230     -
231     static void handle_rtas_event(const struct rtas_error_log *log)
232     {
233     if (rtas_error_type(log) != RTAS_TYPE_PRRN || !prrn_is_enabled())
234     @@ -303,7 +292,7 @@ static void handle_rtas_event(const struct rtas_error_log *log)
235     /* For PRRN Events the extended log length is used to denote
236     * the scope for calling rtas update-nodes.
237     */
238     - prrn_schedule_update(rtas_error_extended_log_length(log));
239     + handle_prrn_event(rtas_error_extended_log_length(log));
240     }
241    
242     #else
243     diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
244     index 87abd5145cc9..3fb855155286 100644
245     --- a/arch/x86/hyperv/hv_init.c
246     +++ b/arch/x86/hyperv/hv_init.c
247     @@ -101,9 +101,13 @@ static int hv_cpu_init(unsigned int cpu)
248     u64 msr_vp_index;
249     struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()];
250     void **input_arg;
251     + struct page *pg;
252    
253     input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
254     - *input_arg = page_address(alloc_page(GFP_KERNEL));
255     + pg = alloc_page(GFP_KERNEL);
256     + if (unlikely(!pg))
257     + return -ENOMEM;
258     + *input_arg = page_address(pg);
259    
260     hv_get_vp_index(msr_vp_index);
261    
262     diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
263     index 2c4d5ece7456..93426c5fc70f 100644
264     --- a/arch/x86/kernel/aperture_64.c
265     +++ b/arch/x86/kernel/aperture_64.c
266     @@ -14,6 +14,7 @@
267     #define pr_fmt(fmt) "AGP: " fmt
268    
269     #include <linux/kernel.h>
270     +#include <linux/kcore.h>
271     #include <linux/types.h>
272     #include <linux/init.h>
273     #include <linux/memblock.h>
274     @@ -57,7 +58,7 @@ int fallback_aper_force __initdata;
275    
276     int fix_aperture __initdata = 1;
277    
278     -#ifdef CONFIG_PROC_VMCORE
279     +#if defined(CONFIG_PROC_VMCORE) || defined(CONFIG_PROC_KCORE)
280     /*
281     * If the first kernel maps the aperture over e820 RAM, the kdump kernel will
282     * use the same range because it will remain configured in the northbridge.
283     @@ -66,20 +67,25 @@ int fix_aperture __initdata = 1;
284     */
285     static unsigned long aperture_pfn_start, aperture_page_count;
286    
287     -static int gart_oldmem_pfn_is_ram(unsigned long pfn)
288     +static int gart_mem_pfn_is_ram(unsigned long pfn)
289     {
290     return likely((pfn < aperture_pfn_start) ||
291     (pfn >= aperture_pfn_start + aperture_page_count));
292     }
293    
294     -static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
295     +static void __init exclude_from_core(u64 aper_base, u32 aper_order)
296     {
297     aperture_pfn_start = aper_base >> PAGE_SHIFT;
298     aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT;
299     - WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram));
300     +#ifdef CONFIG_PROC_VMCORE
301     + WARN_ON(register_oldmem_pfn_is_ram(&gart_mem_pfn_is_ram));
302     +#endif
303     +#ifdef CONFIG_PROC_KCORE
304     + WARN_ON(register_mem_pfn_is_ram(&gart_mem_pfn_is_ram));
305     +#endif
306     }
307     #else
308     -static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
309     +static void exclude_from_core(u64 aper_base, u32 aper_order)
310     {
311     }
312     #endif
313     @@ -469,7 +475,7 @@ out:
314     * may have allocated the range over its e820 RAM
315     * and fixed up the northbridge
316     */
317     - exclude_from_vmcore(last_aper_base, last_aper_order);
318     + exclude_from_core(last_aper_base, last_aper_order);
319    
320     return 1;
321     }
322     @@ -515,7 +521,7 @@ out:
323     * overlap with the first kernel's memory. We can't access the
324     * range through vmcore even though it should be part of the dump.
325     */
326     - exclude_from_vmcore(aper_alloc, aper_order);
327     + exclude_from_core(aper_alloc, aper_order);
328    
329     /* Fix up the north bridges */
330     for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
331     diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
332     index 8949b7ae6d92..fa61c870ada9 100644
333     --- a/arch/x86/kernel/cpu/cyrix.c
334     +++ b/arch/x86/kernel/cpu/cyrix.c
335     @@ -124,7 +124,7 @@ static void set_cx86_reorder(void)
336     setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
337    
338     /* Load/Store Serialize to mem access disable (=reorder it) */
339     - setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80);
340     + setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
341     /* set load/store serialize from 1GB to 4GB */
342     ccr3 |= 0xe0;
343     setCx86(CX86_CCR3, ccr3);
344     @@ -135,11 +135,11 @@ static void set_cx86_memwb(void)
345     pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
346    
347     /* CCR2 bit 2: unlock NW bit */
348     - setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
349     + setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
350     /* set 'Not Write-through' */
351     write_cr0(read_cr0() | X86_CR0_NW);
352     /* CCR2 bit 2: lock NW bit and set WT1 */
353     - setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14);
354     + setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
355     }
356    
357     /*
358     @@ -153,14 +153,14 @@ static void geode_configure(void)
359     local_irq_save(flags);
360    
361     /* Suspend on halt power saving and enable #SUSP pin */
362     - setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88);
363     + setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
364    
365     ccr3 = getCx86(CX86_CCR3);
366     setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
367    
368    
369     /* FPU fast, DTE cache, Mem bypass */
370     - setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38);
371     + setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
372     setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
373    
374     set_cx86_memwb();
375     @@ -296,7 +296,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
376     /* GXm supports extended cpuid levels 'ala' AMD */
377     if (c->cpuid_level == 2) {
378     /* Enable cxMMX extensions (GX1 Datasheet 54) */
379     - setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1);
380     + setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
381    
382     /*
383     * GXm : 0x30 ... 0x5f GXm datasheet 51
384     @@ -319,7 +319,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
385     if (dir1 > 7) {
386     dir0_msn++; /* M II */
387     /* Enable MMX extensions (App note 108) */
388     - setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
389     + setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
390     } else {
391     /* A 6x86MX - it has the bug. */
392     set_cpu_bug(c, X86_BUG_COMA);
393     diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
394     index b0acb22e5a46..1e3f1f140ffb 100644
395     --- a/arch/x86/kernel/hpet.c
396     +++ b/arch/x86/kernel/hpet.c
397     @@ -909,6 +909,8 @@ int __init hpet_enable(void)
398     return 0;
399    
400     hpet_set_mapping();
401     + if (!hpet_virt_address)
402     + return 0;
403    
404     /*
405     * Read the period and check for a sane value:
406     diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
407     index 34a5c1715148..2882fe1d2a78 100644
408     --- a/arch/x86/kernel/hw_breakpoint.c
409     +++ b/arch/x86/kernel/hw_breakpoint.c
410     @@ -357,6 +357,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
411     #endif
412     default:
413     WARN_ON_ONCE(1);
414     + return -EINVAL;
415     }
416    
417     /*
418     diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
419     index f1c5eb99d445..ddb1ca6923b1 100644
420     --- a/arch/x86/kernel/mpparse.c
421     +++ b/arch/x86/kernel/mpparse.c
422     @@ -599,8 +599,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
423     mpf_base = base;
424     mpf_found = true;
425    
426     - pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n",
427     - base, base + sizeof(*mpf) - 1, mpf);
428     + pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
429     + base, base + sizeof(*mpf) - 1);
430    
431     memblock_reserve(base, sizeof(*mpf));
432     if (mpf->physptr)
433     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
434     index 6b6bcafd1d2c..3380a312d186 100644
435     --- a/arch/x86/kvm/vmx.c
436     +++ b/arch/x86/kvm/vmx.c
437     @@ -13181,24 +13181,6 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
438     kvm_clear_interrupt_queue(vcpu);
439     }
440    
441     -static void load_vmcs12_mmu_host_state(struct kvm_vcpu *vcpu,
442     - struct vmcs12 *vmcs12)
443     -{
444     - u32 entry_failure_code;
445     -
446     - nested_ept_uninit_mmu_context(vcpu);
447     -
448     - /*
449     - * Only PDPTE load can fail as the value of cr3 was checked on entry and
450     - * couldn't have changed.
451     - */
452     - if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
453     - nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
454     -
455     - if (!enable_ept)
456     - vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
457     -}
458     -
459     /*
460     * A part of what we need to when the nested L2 guest exits and we want to
461     * run its L1 parent, is to reset L1's guest state to the host state specified
462     @@ -13212,6 +13194,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
463     struct vmcs12 *vmcs12)
464     {
465     struct kvm_segment seg;
466     + u32 entry_failure_code;
467    
468     if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
469     vcpu->arch.efer = vmcs12->host_ia32_efer;
470     @@ -13238,7 +13221,17 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
471     vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
472     vmx_set_cr4(vcpu, vmcs12->host_cr4);
473    
474     - load_vmcs12_mmu_host_state(vcpu, vmcs12);
475     + nested_ept_uninit_mmu_context(vcpu);
476     +
477     + /*
478     + * Only PDPTE load can fail as the value of cr3 was checked on entry and
479     + * couldn't have changed.
480     + */
481     + if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
482     + nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
483     +
484     + if (!enable_ept)
485     + vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
486    
487     /*
488     * If vmcs01 don't use VPID, CPU flushes TLB on every
489     @@ -13334,6 +13327,140 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
490     nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
491     }
492    
493     +static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
494     +{
495     + struct shared_msr_entry *efer_msr;
496     + unsigned int i;
497     +
498     + if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
499     + return vmcs_read64(GUEST_IA32_EFER);
500     +
501     + if (cpu_has_load_ia32_efer)
502     + return host_efer;
503     +
504     + for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
505     + if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
506     + return vmx->msr_autoload.guest.val[i].value;
507     + }
508     +
509     + efer_msr = find_msr_entry(vmx, MSR_EFER);
510     + if (efer_msr)
511     + return efer_msr->data;
512     +
513     + return host_efer;
514     +}
515     +
516     +static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
517     +{
518     + struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
519     + struct vcpu_vmx *vmx = to_vmx(vcpu);
520     + struct vmx_msr_entry g, h;
521     + struct msr_data msr;
522     + gpa_t gpa;
523     + u32 i, j;
524     +
525     + vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
526     +
527     + if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
528     + /*
529     + * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
530     + * as vmcs01.GUEST_DR7 contains a userspace defined value
531     + * and vcpu->arch.dr7 is not squirreled away before the
532     + * nested VMENTER (not worth adding a variable in nested_vmx).
533     + */
534     + if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
535     + kvm_set_dr(vcpu, 7, DR7_FIXED_1);
536     + else
537     + WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
538     + }
539     +
540     + /*
541     + * Note that calling vmx_set_{efer,cr0,cr4} is important as they
542     + * handle a variety of side effects to KVM's software model.
543     + */
544     + vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
545     +
546     + vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
547     + vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
548     +
549     + vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
550     + vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
551     +
552     + nested_ept_uninit_mmu_context(vcpu);
553     + vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
554     + __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
555     +
556     + /*
557     + * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
558     + * from vmcs01 (if necessary). The PDPTRs are not loaded on
559     + * VMFail, like everything else we just need to ensure our
560     + * software model is up-to-date.
561     + */
562     + ept_save_pdptrs(vcpu);
563     +
564     + kvm_mmu_reset_context(vcpu);
565     +
566     + if (cpu_has_vmx_msr_bitmap())
567     + vmx_update_msr_bitmap(vcpu);
568     +
569     + /*
570     + * This nasty bit of open coding is a compromise between blindly
571     + * loading L1's MSRs using the exit load lists (incorrect emulation
572     + * of VMFail), leaving the nested VM's MSRs in the software model
573     + * (incorrect behavior) and snapshotting the modified MSRs (too
574     + * expensive since the lists are unbound by hardware). For each
575     + * MSR that was (prematurely) loaded from the nested VMEntry load
576     + * list, reload it from the exit load list if it exists and differs
577     + * from the guest value. The intent is to stuff host state as
578     + * silently as possible, not to fully process the exit load list.
579     + */
580     + msr.host_initiated = false;
581     + for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
582     + gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
583     + if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
584     + pr_debug_ratelimited(
585     + "%s read MSR index failed (%u, 0x%08llx)\n",
586     + __func__, i, gpa);
587     + goto vmabort;
588     + }
589     +
590     + for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
591     + gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
592     + if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
593     + pr_debug_ratelimited(
594     + "%s read MSR failed (%u, 0x%08llx)\n",
595     + __func__, j, gpa);
596     + goto vmabort;
597     + }
598     + if (h.index != g.index)
599     + continue;
600     + if (h.value == g.value)
601     + break;
602     +
603     + if (nested_vmx_load_msr_check(vcpu, &h)) {
604     + pr_debug_ratelimited(
605     + "%s check failed (%u, 0x%x, 0x%x)\n",
606     + __func__, j, h.index, h.reserved);
607     + goto vmabort;
608     + }
609     +
610     + msr.index = h.index;
611     + msr.data = h.value;
612     + if (kvm_set_msr(vcpu, &msr)) {
613     + pr_debug_ratelimited(
614     + "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
615     + __func__, j, h.index, h.value);
616     + goto vmabort;
617     + }
618     + }
619     + }
620     +
621     + return;
622     +
623     +vmabort:
624     + nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
625     +}
626     +
627     /*
628     * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
629     * and modify vmcs12 to make it see what it would expect to see there if
630     @@ -13478,7 +13605,13 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
631     */
632     nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
633    
634     - load_vmcs12_mmu_host_state(vcpu, vmcs12);
635     + /*
636     + * Restore L1's host state to KVM's software model. We're here
637     + * because a consistency check was caught by hardware, which
638     + * means some amount of guest state has been propagated to KVM's
639     + * model and needs to be unwound to the host's state.
640     + */
641     + nested_vmx_restore_host_state(vcpu);
642    
643     /*
644     * The emulated instruction was already skipped in
645     diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
646     index b154e057ca67..6b8396ccb5c4 100644
647     --- a/block/blk-iolatency.c
648     +++ b/block/blk-iolatency.c
649     @@ -75,6 +75,7 @@
650     #include <linux/blk-mq.h>
651     #include "blk-rq-qos.h"
652     #include "blk-stat.h"
653     +#include "blk.h"
654    
655     #define DEFAULT_SCALE_COOKIE 1000000U
656    
657     diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
658     index d4e5610e09c5..49e16f009095 100644
659     --- a/drivers/acpi/ec.c
660     +++ b/drivers/acpi/ec.c
661     @@ -194,6 +194,7 @@ static struct workqueue_struct *ec_query_wq;
662     static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
663     static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
664     static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
665     +static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
666    
667     /* --------------------------------------------------------------------------
668     * Logging/Debugging
669     @@ -499,6 +500,26 @@ static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
670     ec_log_drv("event blocked");
671     }
672    
673     +/*
674     + * Process _Q events that might have accumulated in the EC.
675     + * Run with locked ec mutex.
676     + */
677     +static void acpi_ec_clear(struct acpi_ec *ec)
678     +{
679     + int i, status;
680     + u8 value = 0;
681     +
682     + for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
683     + status = acpi_ec_query(ec, &value);
684     + if (status || !value)
685     + break;
686     + }
687     + if (unlikely(i == ACPI_EC_CLEAR_MAX))
688     + pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
689     + else
690     + pr_info("%d stale EC events cleared\n", i);
691     +}
692     +
693     static void acpi_ec_enable_event(struct acpi_ec *ec)
694     {
695     unsigned long flags;
696     @@ -507,6 +528,10 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
697     if (acpi_ec_started(ec))
698     __acpi_ec_enable_event(ec);
699     spin_unlock_irqrestore(&ec->lock, flags);
700     +
701     + /* Drain additional events if hardware requires that */
702     + if (EC_FLAGS_CLEAR_ON_RESUME)
703     + acpi_ec_clear(ec);
704     }
705    
706     #ifdef CONFIG_PM_SLEEP
707     @@ -1034,6 +1059,18 @@ void acpi_ec_unblock_transactions(void)
708     acpi_ec_start(first_ec, true);
709     }
710    
711     +void acpi_ec_mark_gpe_for_wake(void)
712     +{
713     + if (first_ec && !ec_no_wakeup)
714     + acpi_mark_gpe_for_wake(NULL, first_ec->gpe);
715     +}
716     +
717     +void acpi_ec_set_gpe_wake_mask(u8 action)
718     +{
719     + if (first_ec && !ec_no_wakeup)
720     + acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
721     +}
722     +
723     void acpi_ec_dispatch_gpe(void)
724     {
725     if (first_ec)
726     @@ -1808,6 +1845,31 @@ static int ec_flag_query_handshake(const struct dmi_system_id *id)
727     }
728     #endif
729    
730     +/*
731     + * On some hardware it is necessary to clear events accumulated by the EC during
732     + * sleep. These ECs stop reporting GPEs until they are manually polled, if too
733     + * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
734     + *
735     + * https://bugzilla.kernel.org/show_bug.cgi?id=44161
736     + *
737     + * Ideally, the EC should also be instructed NOT to accumulate events during
738     + * sleep (which Windows seems to do somehow), but the interface to control this
739     + * behaviour is not known at this time.
740     + *
741     + * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
742     + * however it is very likely that other Samsung models are affected.
743     + *
744     + * On systems which don't accumulate _Q events during sleep, this extra check
745     + * should be harmless.
746     + */
747     +static int ec_clear_on_resume(const struct dmi_system_id *id)
748     +{
749     + pr_debug("Detected system needing EC poll on resume.\n");
750     + EC_FLAGS_CLEAR_ON_RESUME = 1;
751     + ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
752     + return 0;
753     +}
754     +
755     /*
756     * Some ECDTs contain wrong register addresses.
757     * MSI MS-171F
758     @@ -1857,6 +1919,9 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
759     ec_honor_ecdt_gpe, "ASUS X580VD", {
760     DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
761     DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
762     + {
763     + ec_clear_on_resume, "Samsung hardware", {
764     + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
765     {},
766     };
767    
768     diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
769     index 530a3f675490..f59d0b9e2683 100644
770     --- a/drivers/acpi/internal.h
771     +++ b/drivers/acpi/internal.h
772     @@ -188,6 +188,8 @@ int acpi_ec_ecdt_probe(void);
773     int acpi_ec_dsdt_probe(void);
774     void acpi_ec_block_transactions(void);
775     void acpi_ec_unblock_transactions(void);
776     +void acpi_ec_mark_gpe_for_wake(void);
777     +void acpi_ec_set_gpe_wake_mask(u8 action);
778     void acpi_ec_dispatch_gpe(void);
779     int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
780     acpi_handle handle, acpi_ec_query_func func,
781     diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
782     index 295b59271189..96c5e27967f4 100644
783     --- a/drivers/acpi/sbs.c
784     +++ b/drivers/acpi/sbs.c
785     @@ -441,9 +441,13 @@ static int acpi_ac_get_present(struct acpi_sbs *sbs)
786    
787     /*
788     * The spec requires that bit 4 always be 1. If it's not set, assume
789     - * that the implementation doesn't support an SBS charger
790     + * that the implementation doesn't support an SBS charger.
791     + *
792     + * And on some MacBooks a status of 0xffff is always returned, no
793     + * matter whether the charger is plugged in or not, which is also
794     + * wrong, so ignore the SBS charger for those too.
795     */
796     - if (!((status >> 4) & 0x1))
797     + if (!((status >> 4) & 0x1) || status == 0xffff)
798     return -ENODEV;
799    
800     sbs->charger_present = (status >> 15) & 0x1;
801     diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
802     index 754d59f95500..74c489047f57 100644
803     --- a/drivers/acpi/sleep.c
804     +++ b/drivers/acpi/sleep.c
805     @@ -940,6 +940,8 @@ static int lps0_device_attach(struct acpi_device *adev,
806    
807     acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
808     bitmask);
809     +
810     + acpi_ec_mark_gpe_for_wake();
811     } else {
812     acpi_handle_debug(adev->handle,
813     "_DSM function 0 evaluation failed\n");
814     @@ -968,11 +970,16 @@ static int acpi_s2idle_prepare(void)
815     if (lps0_device_handle) {
816     acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
817     acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
818     +
819     + acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE);
820     }
821    
822     if (acpi_sci_irq_valid())
823     enable_irq_wake(acpi_sci_irq);
824    
825     + /* Change the configuration of GPEs to avoid spurious wakeup. */
826     + acpi_enable_all_wakeup_gpes();
827     + acpi_os_wait_events_complete();
828     return 0;
829     }
830    
831     @@ -1017,10 +1024,14 @@ static void acpi_s2idle_sync(void)
832    
833     static void acpi_s2idle_restore(void)
834     {
835     + acpi_enable_all_runtime_gpes();
836     +
837     if (acpi_sci_irq_valid())
838     disable_irq_wake(acpi_sci_irq);
839    
840     if (lps0_device_handle) {
841     + acpi_ec_set_gpe_wake_mask(ACPI_GPE_DISABLE);
842     +
843     acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
844     acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
845     }
846     diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
847     index 78db97687f26..c4b06cc075f9 100644
848     --- a/drivers/acpi/utils.c
849     +++ b/drivers/acpi/utils.c
850     @@ -800,6 +800,7 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
851     match.hrv = hrv;
852    
853     dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
854     + put_device(dev);
855     return !!dev;
856     }
857     EXPORT_SYMBOL(acpi_dev_present);
858     diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
859     index f1a42f0f1ded..df3da49ff9e8 100644
860     --- a/drivers/auxdisplay/hd44780.c
861     +++ b/drivers/auxdisplay/hd44780.c
862     @@ -299,6 +299,8 @@ static int hd44780_remove(struct platform_device *pdev)
863     struct charlcd *lcd = platform_get_drvdata(pdev);
864    
865     charlcd_unregister(lcd);
866     +
867     + kfree(lcd);
868     return 0;
869     }
870    
871     diff --git a/drivers/base/node.c b/drivers/base/node.c
872     index 1ac4c36e13bb..c3968e2d0a98 100644
873     --- a/drivers/base/node.c
874     +++ b/drivers/base/node.c
875     @@ -197,11 +197,16 @@ static ssize_t node_read_vmstat(struct device *dev,
876     sum_zone_numa_state(nid, i));
877     #endif
878    
879     - for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
880     + for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
881     + /* Skip hidden vmstat items. */
882     + if (*vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
883     + NR_VM_NUMA_STAT_ITEMS] == '\0')
884     + continue;
885     n += sprintf(buf+n, "%s %lu\n",
886     vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
887     NR_VM_NUMA_STAT_ITEMS],
888     node_page_state(pgdat, i));
889     + }
890    
891     return n;
892     }
893     diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
894     index 4b5714199490..bf5be0bfaf77 100644
895     --- a/drivers/base/power/domain.c
896     +++ b/drivers/base/power/domain.c
897     @@ -1388,12 +1388,12 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
898     if (IS_ERR(gpd_data))
899     return PTR_ERR(gpd_data);
900    
901     - genpd_lock(genpd);
902     -
903     ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
904     if (ret)
905     goto out;
906    
907     + genpd_lock(genpd);
908     +
909     dev_pm_domain_set(dev, &genpd->domain);
910    
911     genpd->device_count++;
912     @@ -1401,9 +1401,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
913    
914     list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
915    
916     - out:
917     genpd_unlock(genpd);
918     -
919     + out:
920     if (ret)
921     genpd_free_dev_data(dev, gpd_data);
922     else
923     @@ -1452,15 +1451,15 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
924     genpd->device_count--;
925     genpd->max_off_time_changed = true;
926    
927     - if (genpd->detach_dev)
928     - genpd->detach_dev(genpd, dev);
929     -
930     dev_pm_domain_set(dev, NULL);
931    
932     list_del_init(&pdd->list_node);
933    
934     genpd_unlock(genpd);
935    
936     + if (genpd->detach_dev)
937     + genpd->detach_dev(genpd, dev);
938     +
939     genpd_free_dev_data(dev, gpd_data);
940    
941     return 0;
942     diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
943     index 7f07a5085e9b..fdcdc751d03b 100644
944     --- a/drivers/crypto/axis/artpec6_crypto.c
945     +++ b/drivers/crypto/axis/artpec6_crypto.c
946     @@ -284,6 +284,7 @@ enum artpec6_crypto_hash_flags {
947    
948     struct artpec6_crypto_req_common {
949     struct list_head list;
950     + struct list_head complete_in_progress;
951     struct artpec6_crypto_dma_descriptors *dma;
952     struct crypto_async_request *req;
953     void (*complete)(struct crypto_async_request *req);
954     @@ -2046,7 +2047,8 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
955     return artpec6_crypto_dma_map_descs(common);
956     }
957    
958     -static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
959     +static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
960     + struct list_head *completions)
961     {
962     struct artpec6_crypto_req_common *req;
963    
964     @@ -2057,7 +2059,7 @@ static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
965     list_move_tail(&req->list, &ac->pending);
966     artpec6_crypto_start_dma(req);
967    
968     - req->req->complete(req->req, -EINPROGRESS);
969     + list_add_tail(&req->complete_in_progress, completions);
970     }
971    
972     /*
973     @@ -2087,6 +2089,11 @@ static void artpec6_crypto_task(unsigned long data)
974     struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
975     struct artpec6_crypto_req_common *req;
976     struct artpec6_crypto_req_common *n;
977     + struct list_head complete_done;
978     + struct list_head complete_in_progress;
979     +
980     + INIT_LIST_HEAD(&complete_done);
981     + INIT_LIST_HEAD(&complete_in_progress);
982    
983     if (list_empty(&ac->pending)) {
984     pr_debug("Spurious IRQ\n");
985     @@ -2120,19 +2127,30 @@ static void artpec6_crypto_task(unsigned long data)
986    
987     pr_debug("Completing request %p\n", req);
988    
989     - list_del(&req->list);
990     + list_move_tail(&req->list, &complete_done);
991    
992     artpec6_crypto_dma_unmap_all(req);
993     artpec6_crypto_copy_bounce_buffers(req);
994    
995     ac->pending_count--;
996     artpec6_crypto_common_destroy(req);
997     - req->complete(req->req);
998     }
999    
1000     - artpec6_crypto_process_queue(ac);
1001     + artpec6_crypto_process_queue(ac, &complete_in_progress);
1002    
1003     spin_unlock_bh(&ac->queue_lock);
1004     +
1005     + /* Perform the completion callbacks without holding the queue lock
1006     + * to allow new request submissions from the callbacks.
1007     + */
1008     + list_for_each_entry_safe(req, n, &complete_done, list) {
1009     + req->complete(req->req);
1010     + }
1011     +
1012     + list_for_each_entry_safe(req, n, &complete_in_progress,
1013     + complete_in_progress) {
1014     + req->req->complete(req->req, -EINPROGRESS);
1015     + }
1016     }
1017    
1018     static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
1019     diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
1020     index eb27fa76e8fc..bcc6be4a5cb2 100644
1021     --- a/drivers/gpio/gpio-pxa.c
1022     +++ b/drivers/gpio/gpio-pxa.c
1023     @@ -777,6 +777,9 @@ static int pxa_gpio_suspend(void)
1024     struct pxa_gpio_bank *c;
1025     int gpio;
1026    
1027     + if (!pchip)
1028     + return 0;
1029     +
1030     for_each_gpio_bank(gpio, c, pchip) {
1031     c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET);
1032     c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
1033     @@ -795,6 +798,9 @@ static void pxa_gpio_resume(void)
1034     struct pxa_gpio_bank *c;
1035     int gpio;
1036    
1037     + if (!pchip)
1038     + return;
1039     +
1040     for_each_gpio_bank(gpio, c, pchip) {
1041     /* restore level with set/clear */
1042     writel_relaxed(c->saved_gplr, c->regbase + GPSR_OFFSET);
1043     diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
1044     index 47243165a082..ae90a99909ef 100644
1045     --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
1046     +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
1047     @@ -323,57 +323,7 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
1048     struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
1049     struct queue_properties *q)
1050     {
1051     - uint64_t addr;
1052     - struct cik_mqd *m;
1053     - int retval;
1054     -
1055     - retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
1056     - mqd_mem_obj);
1057     -
1058     - if (retval != 0)
1059     - return -ENOMEM;
1060     -
1061     - m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
1062     - addr = (*mqd_mem_obj)->gpu_addr;
1063     -
1064     - memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
1065     -
1066     - m->header = 0xC0310800;
1067     - m->compute_pipelinestat_enable = 1;
1068     - m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
1069     - m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
1070     - m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
1071     - m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
1072     -
1073     - m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE |
1074     - PRELOAD_REQ;
1075     - m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
1076     - QUANTUM_DURATION(10);
1077     -
1078     - m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
1079     - m->cp_mqd_base_addr_lo = lower_32_bits(addr);
1080     - m->cp_mqd_base_addr_hi = upper_32_bits(addr);
1081     -
1082     - m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
1083     -
1084     - /*
1085     - * Pipe Priority
1086     - * Identifies the pipe relative priority when this queue is connected
1087     - * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
1088     - * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
1089     - * 0 = CS_LOW (typically below GFX)
1090     - * 1 = CS_MEDIUM (typically between HP3D and GFX
1091     - * 2 = CS_HIGH (typically above HP3D)
1092     - */
1093     - m->cp_hqd_pipe_priority = 1;
1094     - m->cp_hqd_queue_priority = 15;
1095     -
1096     - *mqd = m;
1097     - if (gart_addr)
1098     - *gart_addr = addr;
1099     - retval = mm->update_mqd(mm, m, q);
1100     -
1101     - return retval;
1102     + return init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
1103     }
1104    
1105     static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
1106     diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
1107     index ce9db7aab225..a29f87e98d9d 100644
1108     --- a/drivers/gpu/drm/cirrus/cirrus_drv.h
1109     +++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
1110     @@ -146,7 +146,7 @@ struct cirrus_device {
1111    
1112     struct cirrus_fbdev {
1113     struct drm_fb_helper helper;
1114     - struct drm_framebuffer gfb;
1115     + struct drm_framebuffer *gfb;
1116     void *sysram;
1117     int size;
1118     int x1, y1, x2, y2; /* dirty rect */
1119     diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
1120     index b643ac92801c..82cc82e0bd80 100644
1121     --- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
1122     +++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
1123     @@ -22,14 +22,14 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
1124     struct drm_gem_object *obj;
1125     struct cirrus_bo *bo;
1126     int src_offset, dst_offset;
1127     - int bpp = afbdev->gfb.format->cpp[0];
1128     + int bpp = afbdev->gfb->format->cpp[0];
1129     int ret = -EBUSY;
1130     bool unmap = false;
1131     bool store_for_later = false;
1132     int x2, y2;
1133     unsigned long flags;
1134    
1135     - obj = afbdev->gfb.obj[0];
1136     + obj = afbdev->gfb->obj[0];
1137     bo = gem_to_cirrus_bo(obj);
1138    
1139     /*
1140     @@ -82,7 +82,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
1141     }
1142     for (i = y; i < y + height; i++) {
1143     /* assume equal stride for now */
1144     - src_offset = dst_offset = i * afbdev->gfb.pitches[0] + (x * bpp);
1145     + src_offset = dst_offset = i * afbdev->gfb->pitches[0] + (x * bpp);
1146     memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
1147    
1148     }
1149     @@ -192,23 +192,26 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
1150     return -ENOMEM;
1151    
1152     info = drm_fb_helper_alloc_fbi(helper);
1153     - if (IS_ERR(info))
1154     - return PTR_ERR(info);
1155     + if (IS_ERR(info)) {
1156     + ret = PTR_ERR(info);
1157     + goto err_vfree;
1158     + }
1159    
1160     info->par = gfbdev;
1161    
1162     - ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj);
1163     + fb = kzalloc(sizeof(*fb), GFP_KERNEL);
1164     + if (!fb) {
1165     + ret = -ENOMEM;
1166     + goto err_drm_gem_object_put_unlocked;
1167     + }
1168     +
1169     + ret = cirrus_framebuffer_init(cdev->dev, fb, &mode_cmd, gobj);
1170     if (ret)
1171     - return ret;
1172     + goto err_kfree;
1173    
1174     gfbdev->sysram = sysram;
1175     gfbdev->size = size;
1176     -
1177     - fb = &gfbdev->gfb;
1178     - if (!fb) {
1179     - DRM_INFO("fb is NULL\n");
1180     - return -EINVAL;
1181     - }
1182     + gfbdev->gfb = fb;
1183    
1184     /* setup helper */
1185     gfbdev->helper.fb = fb;
1186     @@ -241,24 +244,27 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
1187     DRM_INFO(" pitch is %d\n", fb->pitches[0]);
1188    
1189     return 0;
1190     +
1191     +err_kfree:
1192     + kfree(fb);
1193     +err_drm_gem_object_put_unlocked:
1194     + drm_gem_object_put_unlocked(gobj);
1195     +err_vfree:
1196     + vfree(sysram);
1197     + return ret;
1198     }
1199    
1200     static int cirrus_fbdev_destroy(struct drm_device *dev,
1201     struct cirrus_fbdev *gfbdev)
1202     {
1203     - struct drm_framebuffer *gfb = &gfbdev->gfb;
1204     + struct drm_framebuffer *gfb = gfbdev->gfb;
1205    
1206     drm_fb_helper_unregister_fbi(&gfbdev->helper);
1207    
1208     - if (gfb->obj[0]) {
1209     - drm_gem_object_put_unlocked(gfb->obj[0]);
1210     - gfb->obj[0] = NULL;
1211     - }
1212     -
1213     vfree(gfbdev->sysram);
1214     drm_fb_helper_fini(&gfbdev->helper);
1215     - drm_framebuffer_unregister_private(gfb);
1216     - drm_framebuffer_cleanup(gfb);
1217     + if (gfb)
1218     + drm_framebuffer_put(gfb);
1219    
1220     return 0;
1221     }
1222     diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
1223     index 336bfda40125..90a4e641d3fb 100644
1224     --- a/drivers/gpu/drm/cirrus/cirrus_mode.c
1225     +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
1226     @@ -127,7 +127,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
1227     return ret;
1228     }
1229    
1230     - if (&cdev->mode_info.gfbdev->gfb == crtc->primary->fb) {
1231     + if (cdev->mode_info.gfbdev->gfb == crtc->primary->fb) {
1232     /* if pushing console in kmap it */
1233     ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
1234     if (ret)
1235     diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
1236     index ffbf4a950f69..522d6c46d7b1 100644
1237     --- a/drivers/gpu/drm/exynos/exynos_mixer.c
1238     +++ b/drivers/gpu/drm/exynos/exynos_mixer.c
1239     @@ -20,6 +20,7 @@
1240     #include "regs-vp.h"
1241    
1242     #include <linux/kernel.h>
1243     +#include <linux/ktime.h>
1244     #include <linux/spinlock.h>
1245     #include <linux/wait.h>
1246     #include <linux/i2c.h>
1247     @@ -337,15 +338,62 @@ static void mixer_cfg_vp_blend(struct mixer_context *ctx)
1248     mixer_reg_write(ctx, MXR_VIDEO_CFG, val);
1249     }
1250    
1251     -static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
1252     +static bool mixer_is_synced(struct mixer_context *ctx)
1253     {
1254     - /* block update on vsync */
1255     - mixer_reg_writemask(ctx, MXR_STATUS, enable ?
1256     - MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
1257     + u32 base, shadow;
1258    
1259     + if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
1260     + ctx->mxr_ver == MXR_VER_128_0_0_184)
1261     + return !(mixer_reg_read(ctx, MXR_CFG) &
1262     + MXR_CFG_LAYER_UPDATE_COUNT_MASK);
1263     +
1264     + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
1265     + vp_reg_read(ctx, VP_SHADOW_UPDATE))
1266     + return false;
1267     +
1268     + base = mixer_reg_read(ctx, MXR_CFG);
1269     + shadow = mixer_reg_read(ctx, MXR_CFG_S);
1270     + if (base != shadow)
1271     + return false;
1272     +
1273     + base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
1274     + shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
1275     + if (base != shadow)
1276     + return false;
1277     +
1278     + base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
1279     + shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
1280     + if (base != shadow)
1281     + return false;
1282     +
1283     + return true;
1284     +}
1285     +
1286     +static int mixer_wait_for_sync(struct mixer_context *ctx)
1287     +{
1288     + ktime_t timeout = ktime_add_us(ktime_get(), 100000);
1289     +
1290     + while (!mixer_is_synced(ctx)) {
1291     + usleep_range(1000, 2000);
1292     + if (ktime_compare(ktime_get(), timeout) > 0)
1293     + return -ETIMEDOUT;
1294     + }
1295     + return 0;
1296     +}
1297     +
1298     +static void mixer_disable_sync(struct mixer_context *ctx)
1299     +{
1300     + mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE);
1301     +}
1302     +
1303     +static void mixer_enable_sync(struct mixer_context *ctx)
1304     +{
1305     + if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
1306     + ctx->mxr_ver == MXR_VER_128_0_0_184)
1307     + mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
1308     + mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE);
1309     if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
1310     - vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ?
1311     - VP_SHADOW_UPDATE_ENABLE : 0);
1312     + vp_reg_write(ctx, VP_SHADOW_UPDATE, VP_SHADOW_UPDATE_ENABLE);
1313     }
1314    
1315     static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
1316     @@ -482,7 +530,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
1317    
1318     spin_lock_irqsave(&ctx->reg_slock, flags);
1319    
1320     - vp_reg_write(ctx, VP_SHADOW_UPDATE, 1);
1321     /* interlace or progressive scan mode */
1322     val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
1323     vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP);
1324     @@ -537,11 +584,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
1325     vp_regs_dump(ctx);
1326     }
1327    
1328     -static void mixer_layer_update(struct mixer_context *ctx)
1329     -{
1330     - mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
1331     -}
1332     -
1333     static void mixer_graph_buffer(struct mixer_context *ctx,
1334     struct exynos_drm_plane *plane)
1335     {
1336     @@ -618,11 +660,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
1337     mixer_cfg_layer(ctx, win, priority, true);
1338     mixer_cfg_gfx_blend(ctx, win, fb->format->has_alpha);
1339    
1340     - /* layer update mandatory for mixer 16.0.33.0 */
1341     - if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
1342     - ctx->mxr_ver == MXR_VER_128_0_0_184)
1343     - mixer_layer_update(ctx);
1344     -
1345     spin_unlock_irqrestore(&ctx->reg_slock, flags);
1346    
1347     mixer_regs_dump(ctx);
1348     @@ -687,7 +724,7 @@ static void mixer_win_reset(struct mixer_context *ctx)
1349     static irqreturn_t mixer_irq_handler(int irq, void *arg)
1350     {
1351     struct mixer_context *ctx = arg;
1352     - u32 val, base, shadow;
1353     + u32 val;
1354    
1355     spin_lock(&ctx->reg_slock);
1356    
1357     @@ -701,26 +738,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
1358     val &= ~MXR_INT_STATUS_VSYNC;
1359    
1360     /* interlace scan need to check shadow register */
1361     - if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
1362     - if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
1363     - vp_reg_read(ctx, VP_SHADOW_UPDATE))
1364     - goto out;
1365     -
1366     - base = mixer_reg_read(ctx, MXR_CFG);
1367     - shadow = mixer_reg_read(ctx, MXR_CFG_S);
1368     - if (base != shadow)
1369     - goto out;
1370     -
1371     - base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
1372     - shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
1373     - if (base != shadow)
1374     - goto out;
1375     -
1376     - base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
1377     - shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
1378     - if (base != shadow)
1379     - goto out;
1380     - }
1381     + if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)
1382     + && !mixer_is_synced(ctx))
1383     + goto out;
1384    
1385     drm_crtc_handle_vblank(&ctx->crtc->base);
1386     }
1387     @@ -895,12 +915,14 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
1388    
1389     static void mixer_atomic_begin(struct exynos_drm_crtc *crtc)
1390     {
1391     - struct mixer_context *mixer_ctx = crtc->ctx;
1392     + struct mixer_context *ctx = crtc->ctx;
1393    
1394     - if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
1395     + if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
1396     return;
1397    
1398     - mixer_vsync_set_update(mixer_ctx, false);
1399     + if (mixer_wait_for_sync(ctx))
1400     + dev_err(ctx->dev, "timeout waiting for VSYNC\n");
1401     + mixer_disable_sync(ctx);
1402     }
1403    
1404     static void mixer_update_plane(struct exynos_drm_crtc *crtc,
1405     @@ -942,7 +964,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
1406     if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
1407     return;
1408    
1409     - mixer_vsync_set_update(mixer_ctx, true);
1410     + mixer_enable_sync(mixer_ctx);
1411     exynos_crtc_handle_event(crtc);
1412     }
1413    
1414     @@ -957,7 +979,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
1415    
1416     exynos_drm_pipe_clk_enable(crtc, true);
1417    
1418     - mixer_vsync_set_update(ctx, false);
1419     + mixer_disable_sync(ctx);
1420    
1421     mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
1422    
1423     @@ -970,7 +992,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
1424    
1425     mixer_commit(ctx);
1426    
1427     - mixer_vsync_set_update(ctx, true);
1428     + mixer_enable_sync(ctx);
1429    
1430     set_bit(MXR_BIT_POWERED, &ctx->flags);
1431     }
1432     diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
1433     index 8a0f85f5fc1a..6a765682fbfa 100644
1434     --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
1435     +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
1436     @@ -38,6 +38,7 @@ int nvkm_volt_set_id(struct nvkm_volt *, u8 id, u8 min_id, u8 temp,
1437    
1438     int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
1439     int gf100_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
1440     +int gf117_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
1441     int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
1442     int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
1443     int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
1444     diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
1445     index 9109b69cd052..9635704a1d86 100644
1446     --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
1447     +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
1448     @@ -161,7 +161,7 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
1449     }
1450    
1451     ret = pm_runtime_get_sync(drm->dev);
1452     - if (IS_ERR_VALUE(ret) && ret != -EACCES)
1453     + if (ret < 0 && ret != -EACCES)
1454     return ret;
1455     ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
1456     pm_runtime_put_autosuspend(drm->dev);
1457     diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
1458     index e294013426ce..347a6a4cb339 100644
1459     --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
1460     +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
1461     @@ -1613,7 +1613,7 @@ nvd7_chipset = {
1462     .pci = gf106_pci_new,
1463     .therm = gf119_therm_new,
1464     .timer = nv41_timer_new,
1465     - .volt = gf100_volt_new,
1466     + .volt = gf117_volt_new,
1467     .ce[0] = gf100_ce_new,
1468     .disp = gf119_disp_new,
1469     .dma = gf119_dma_new,
1470     diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
1471     index bcd179ba11d0..146adcdd316a 100644
1472     --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
1473     +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
1474     @@ -2,6 +2,7 @@ nvkm-y += nvkm/subdev/volt/base.o
1475     nvkm-y += nvkm/subdev/volt/gpio.o
1476     nvkm-y += nvkm/subdev/volt/nv40.o
1477     nvkm-y += nvkm/subdev/volt/gf100.o
1478     +nvkm-y += nvkm/subdev/volt/gf117.o
1479     nvkm-y += nvkm/subdev/volt/gk104.o
1480     nvkm-y += nvkm/subdev/volt/gk20a.o
1481     nvkm-y += nvkm/subdev/volt/gm20b.o
1482     diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
1483     new file mode 100644
1484     index 000000000000..547a58f0aeac
1485     --- /dev/null
1486     +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
1487     @@ -0,0 +1,60 @@
1488     +/*
1489     + * Copyright 2019 Ilia Mirkin
1490     + *
1491     + * Permission is hereby granted, free of charge, to any person obtaining a
1492     + * copy of this software and associated documentation files (the "Software"),
1493     + * to deal in the Software without restriction, including without limitation
1494     + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
1495     + * and/or sell copies of the Software, and to permit persons to whom the
1496     + * Software is furnished to do so, subject to the following conditions:
1497     + *
1498     + * The above copyright notice and this permission notice shall be included in
1499     + * all copies or substantial portions of the Software.
1500     + *
1501     + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1502     + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1503     + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
1504     + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
1505     + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
1506     + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
1507     + * OTHER DEALINGS IN THE SOFTWARE.
1508     + *
1509     + * Authors: Ilia Mirkin
1510     + */
1511     +#include "priv.h"
1512     +
1513     +#include <subdev/fuse.h>
1514     +
1515     +static int
1516     +gf117_volt_speedo_read(struct nvkm_volt *volt)
1517     +{
1518     + struct nvkm_device *device = volt->subdev.device;
1519     + struct nvkm_fuse *fuse = device->fuse;
1520     +
1521     + if (!fuse)
1522     + return -EINVAL;
1523     +
1524     + return nvkm_fuse_read(fuse, 0x3a8);
1525     +}
1526     +
1527     +static const struct nvkm_volt_func
1528     +gf117_volt = {
1529     + .oneinit = gf100_volt_oneinit,
1530     + .vid_get = nvkm_voltgpio_get,
1531     + .vid_set = nvkm_voltgpio_set,
1532     + .speedo_read = gf117_volt_speedo_read,
1533     +};
1534     +
1535     +int
1536     +gf117_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
1537     +{
1538     + struct nvkm_volt *volt;
1539     + int ret;
1540     +
1541     + ret = nvkm_volt_new_(&gf117_volt, device, index, &volt);
1542     + *pvolt = volt;
1543     + if (ret)
1544     + return ret;
1545     +
1546     + return nvkm_voltgpio_init(volt);
1547     +}
1548     diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
1549     index 72edb334d997..88c7d035ace6 100644
1550     --- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
1551     +++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
1552     @@ -70,18 +70,12 @@ static inline struct innolux_panel *to_innolux_panel(struct drm_panel *panel)
1553     static int innolux_panel_disable(struct drm_panel *panel)
1554     {
1555     struct innolux_panel *innolux = to_innolux_panel(panel);
1556     - int err;
1557    
1558     if (!innolux->enabled)
1559     return 0;
1560    
1561     backlight_disable(innolux->backlight);
1562    
1563     - err = mipi_dsi_dcs_set_display_off(innolux->link);
1564     - if (err < 0)
1565     - DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
1566     - err);
1567     -
1568     innolux->enabled = false;
1569    
1570     return 0;
1571     @@ -95,6 +89,11 @@ static int innolux_panel_unprepare(struct drm_panel *panel)
1572     if (!innolux->prepared)
1573     return 0;
1574    
1575     + err = mipi_dsi_dcs_set_display_off(innolux->link);
1576     + if (err < 0)
1577     + DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
1578     + err);
1579     +
1580     err = mipi_dsi_dcs_enter_sleep_mode(innolux->link);
1581     if (err < 0) {
1582     DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
1583     diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
1584     index 7c484729f9b2..268f5a3b3122 100644
1585     --- a/drivers/gpu/drm/ttm/ttm_bo.c
1586     +++ b/drivers/gpu/drm/ttm/ttm_bo.c
1587     @@ -1445,7 +1445,6 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj)
1588     container_of(kobj, struct ttm_bo_global, kobj);
1589    
1590     __free_page(glob->dummy_read_page);
1591     - kfree(glob);
1592     }
1593    
1594     void ttm_bo_global_release(struct drm_global_reference *ref)
1595     diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
1596     index 450387c92b63..df73d5ff84a8 100644
1597     --- a/drivers/gpu/drm/ttm/ttm_memory.c
1598     +++ b/drivers/gpu/drm/ttm/ttm_memory.c
1599     @@ -216,14 +216,6 @@ static ssize_t ttm_mem_global_store(struct kobject *kobj,
1600     return size;
1601     }
1602    
1603     -static void ttm_mem_global_kobj_release(struct kobject *kobj)
1604     -{
1605     - struct ttm_mem_global *glob =
1606     - container_of(kobj, struct ttm_mem_global, kobj);
1607     -
1608     - kfree(glob);
1609     -}
1610     -
1611     static struct attribute *ttm_mem_global_attrs[] = {
1612     &ttm_mem_global_lower_mem_limit,
1613     NULL
1614     @@ -235,7 +227,6 @@ static const struct sysfs_ops ttm_mem_global_ops = {
1615     };
1616    
1617     static struct kobj_type ttm_mem_glob_kobj_type = {
1618     - .release = &ttm_mem_global_kobj_release,
1619     .sysfs_ops = &ttm_mem_global_ops,
1620     .default_attrs = ttm_mem_global_attrs,
1621     };
1622     diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
1623     index d5a23295dd80..bb7b58407039 100644
1624     --- a/drivers/gpu/drm/udl/udl_gem.c
1625     +++ b/drivers/gpu/drm/udl/udl_gem.c
1626     @@ -224,7 +224,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
1627     *offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
1628    
1629     out:
1630     - drm_gem_object_put(&gobj->base);
1631     + drm_gem_object_put_unlocked(&gobj->base);
1632     unlock:
1633     mutex_unlock(&udl->gem_lock);
1634     return ret;
1635     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1636     index b7870e7e41d4..97d33b8ed36c 100644
1637     --- a/drivers/hid/hid-ids.h
1638     +++ b/drivers/hid/hid-ids.h
1639     @@ -348,6 +348,7 @@
1640     #define USB_DEVICE_ID_DMI_ENC 0x5fab
1641    
1642     #define USB_VENDOR_ID_DRAGONRISE 0x0079
1643     +#define USB_DEVICE_ID_REDRAGON_SEYMUR2 0x0006
1644     #define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
1645     #define USB_DEVICE_ID_DRAGONRISE_PS3 0x1801
1646     #define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR 0x1803
1647     diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
1648     index 77316f022c5a..94088c0ed68a 100644
1649     --- a/drivers/hid/hid-quirks.c
1650     +++ b/drivers/hid/hid-quirks.c
1651     @@ -70,6 +70,7 @@ static const struct hid_device_id hid_quirks[] = {
1652     { HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET },
1653     { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT },
1654     { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT },
1655     + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_REDRAGON_SEYMUR2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
1656     { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR), HID_QUIRK_MULTI_INPUT },
1657     { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1), HID_QUIRK_MULTI_INPUT },
1658     { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), HID_QUIRK_MULTI_INPUT },
1659     diff --git a/drivers/hid/i2c-hid/Makefile b/drivers/hid/i2c-hid/Makefile
1660     index 832d8f9aaba2..099e1ce2f234 100644
1661     --- a/drivers/hid/i2c-hid/Makefile
1662     +++ b/drivers/hid/i2c-hid/Makefile
1663     @@ -3,3 +3,6 @@
1664     #
1665    
1666     obj-$(CONFIG_I2C_HID) += i2c-hid.o
1667     +
1668     +i2c-hid-objs = i2c-hid-core.o
1669     +i2c-hid-$(CONFIG_DMI) += i2c-hid-dmi-quirks.o
1670     diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
1671     new file mode 100644
1672     index 000000000000..3cde7c1b9c33
1673     --- /dev/null
1674     +++ b/drivers/hid/i2c-hid/i2c-hid-core.c
1675     @@ -0,0 +1,1348 @@
1676     +/*
1677     + * HID over I2C protocol implementation
1678     + *
1679     + * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
1680     + * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
1681     + * Copyright (c) 2012 Red Hat, Inc
1682     + *
1683     + * This code is partly based on "USB HID support for Linux":
1684     + *
1685     + * Copyright (c) 1999 Andreas Gal
1686     + * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
1687     + * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
1688     + * Copyright (c) 2007-2008 Oliver Neukum
1689     + * Copyright (c) 2006-2010 Jiri Kosina
1690     + *
1691     + * This file is subject to the terms and conditions of the GNU General Public
1692     + * License. See the file COPYING in the main directory of this archive for
1693     + * more details.
1694     + */
1695     +
1696     +#include <linux/module.h>
1697     +#include <linux/i2c.h>
1698     +#include <linux/interrupt.h>
1699     +#include <linux/input.h>
1700     +#include <linux/irq.h>
1701     +#include <linux/delay.h>
1702     +#include <linux/slab.h>
1703     +#include <linux/pm.h>
1704     +#include <linux/pm_runtime.h>
1705     +#include <linux/device.h>
1706     +#include <linux/wait.h>
1707     +#include <linux/err.h>
1708     +#include <linux/string.h>
1709     +#include <linux/list.h>
1710     +#include <linux/jiffies.h>
1711     +#include <linux/kernel.h>
1712     +#include <linux/hid.h>
1713     +#include <linux/mutex.h>
1714     +#include <linux/acpi.h>
1715     +#include <linux/of.h>
1716     +#include <linux/regulator/consumer.h>
1717     +
1718     +#include <linux/platform_data/i2c-hid.h>
1719     +
1720     +#include "../hid-ids.h"
1721     +#include "i2c-hid.h"
1722     +
1723     +/* quirks to control the device */
1724     +#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
1725     +#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
1726     +#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2)
1727     +#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP BIT(3)
1728     +
1729     +/* flags */
1730     +#define I2C_HID_STARTED 0
1731     +#define I2C_HID_RESET_PENDING 1
1732     +#define I2C_HID_READ_PENDING 2
1733     +
1734     +#define I2C_HID_PWR_ON 0x00
1735     +#define I2C_HID_PWR_SLEEP 0x01
1736     +
1737     +/* debug option */
1738     +static bool debug;
1739     +module_param(debug, bool, 0444);
1740     +MODULE_PARM_DESC(debug, "print a lot of debug information");
1741     +
1742     +#define i2c_hid_dbg(ihid, fmt, arg...) \
1743     +do { \
1744     + if (debug) \
1745     + dev_printk(KERN_DEBUG, &(ihid)->client->dev, fmt, ##arg); \
1746     +} while (0)
1747     +
1748     +struct i2c_hid_desc {
1749     + __le16 wHIDDescLength;
1750     + __le16 bcdVersion;
1751     + __le16 wReportDescLength;
1752     + __le16 wReportDescRegister;
1753     + __le16 wInputRegister;
1754     + __le16 wMaxInputLength;
1755     + __le16 wOutputRegister;
1756     + __le16 wMaxOutputLength;
1757     + __le16 wCommandRegister;
1758     + __le16 wDataRegister;
1759     + __le16 wVendorID;
1760     + __le16 wProductID;
1761     + __le16 wVersionID;
1762     + __le32 reserved;
1763     +} __packed;
1764     +
1765     +struct i2c_hid_cmd {
1766     + unsigned int registerIndex;
1767     + __u8 opcode;
1768     + unsigned int length;
1769     + bool wait;
1770     +};
1771     +
1772     +union command {
1773     + u8 data[0];
1774     + struct cmd {
1775     + __le16 reg;
1776     + __u8 reportTypeID;
1777     + __u8 opcode;
1778     + } __packed c;
1779     +};
1780     +
1781     +#define I2C_HID_CMD(opcode_) \
1782     + .opcode = opcode_, .length = 4, \
1783     + .registerIndex = offsetof(struct i2c_hid_desc, wCommandRegister)
1784     +
1785     +/* fetch HID descriptor */
1786     +static const struct i2c_hid_cmd hid_descr_cmd = { .length = 2 };
1787     +/* fetch report descriptors */
1788     +static const struct i2c_hid_cmd hid_report_descr_cmd = {
1789     + .registerIndex = offsetof(struct i2c_hid_desc,
1790     + wReportDescRegister),
1791     + .opcode = 0x00,
1792     + .length = 2 };
1793     +/* commands */
1794     +static const struct i2c_hid_cmd hid_reset_cmd = { I2C_HID_CMD(0x01),
1795     + .wait = true };
1796     +static const struct i2c_hid_cmd hid_get_report_cmd = { I2C_HID_CMD(0x02) };
1797     +static const struct i2c_hid_cmd hid_set_report_cmd = { I2C_HID_CMD(0x03) };
1798     +static const struct i2c_hid_cmd hid_set_power_cmd = { I2C_HID_CMD(0x08) };
1799     +static const struct i2c_hid_cmd hid_no_cmd = { .length = 0 };
1800     +
1801     +/*
1802     + * These definitions are not used here, but are defined by the spec.
1803     + * Keeping them here for documentation purposes.
1804     + *
1805     + * static const struct i2c_hid_cmd hid_get_idle_cmd = { I2C_HID_CMD(0x04) };
1806     + * static const struct i2c_hid_cmd hid_set_idle_cmd = { I2C_HID_CMD(0x05) };
1807     + * static const struct i2c_hid_cmd hid_get_protocol_cmd = { I2C_HID_CMD(0x06) };
1808     + * static const struct i2c_hid_cmd hid_set_protocol_cmd = { I2C_HID_CMD(0x07) };
1809     + */
1810     +
1811     +/* The main device structure */
1812     +struct i2c_hid {
1813     + struct i2c_client *client; /* i2c client */
1814     + struct hid_device *hid; /* pointer to corresponding HID dev */
1815     + union {
1816     + __u8 hdesc_buffer[sizeof(struct i2c_hid_desc)];
1817     + struct i2c_hid_desc hdesc; /* the HID Descriptor */
1818     + };
1819     + __le16 wHIDDescRegister; /* location of the i2c
1820     + * register of the HID
1821     + * descriptor. */
1822     + unsigned int bufsize; /* i2c buffer size */
1823     + u8 *inbuf; /* Input buffer */
1824     + u8 *rawbuf; /* Raw Input buffer */
1825     + u8 *cmdbuf; /* Command buffer */
1826     + u8 *argsbuf; /* Command arguments buffer */
1827     +
1828     + unsigned long flags; /* device flags */
1829     + unsigned long quirks; /* Various quirks */
1830     +
1831     + wait_queue_head_t wait; /* For waiting the interrupt */
1832     +
1833     + struct i2c_hid_platform_data pdata;
1834     +
1835     + bool irq_wake_enabled;
1836     + struct mutex reset_lock;
1837     +
1838     + unsigned long sleep_delay;
1839     +};
1840     +
1841     +static const struct i2c_hid_quirks {
1842     + __u16 idVendor;
1843     + __u16 idProduct;
1844     + __u32 quirks;
1845     +} i2c_hid_quirks[] = {
1846     + { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8752,
1847     + I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
1848     + { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
1849     + I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
1850     + { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
1851     + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
1852     + I2C_HID_QUIRK_NO_RUNTIME_PM },
1853     + { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33,
1854     + I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
1855     + { 0, 0 }
1856     +};
1857     +
1858     +/*
1859     + * i2c_hid_lookup_quirk: return any quirks associated with a I2C HID device
1860     + * @idVendor: the 16-bit vendor ID
1861     + * @idProduct: the 16-bit product ID
1862     + *
1863     + * Returns: a u32 quirks value.
1864     + */
1865     +static u32 i2c_hid_lookup_quirk(const u16 idVendor, const u16 idProduct)
1866     +{
1867     + u32 quirks = 0;
1868     + int n;
1869     +
1870     + for (n = 0; i2c_hid_quirks[n].idVendor; n++)
1871     + if (i2c_hid_quirks[n].idVendor == idVendor &&
1872     + (i2c_hid_quirks[n].idProduct == (__u16)HID_ANY_ID ||
1873     + i2c_hid_quirks[n].idProduct == idProduct))
1874     + quirks = i2c_hid_quirks[n].quirks;
1875     +
1876     + return quirks;
1877     +}
1878     +
1879     +static int __i2c_hid_command(struct i2c_client *client,
1880     + const struct i2c_hid_cmd *command, u8 reportID,
1881     + u8 reportType, u8 *args, int args_len,
1882     + unsigned char *buf_recv, int data_len)
1883     +{
1884     + struct i2c_hid *ihid = i2c_get_clientdata(client);
1885     + union command *cmd = (union command *)ihid->cmdbuf;
1886     + int ret;
1887     + struct i2c_msg msg[2];
1888     + int msg_num = 1;
1889     +
1890     + int length = command->length;
1891     + bool wait = command->wait;
1892     + unsigned int registerIndex = command->registerIndex;
1893     +
1894     + /* special case for hid_descr_cmd */
1895     + if (command == &hid_descr_cmd) {
1896     + cmd->c.reg = ihid->wHIDDescRegister;
1897     + } else {
1898     + cmd->data[0] = ihid->hdesc_buffer[registerIndex];
1899     + cmd->data[1] = ihid->hdesc_buffer[registerIndex + 1];
1900     + }
1901     +
1902     + if (length > 2) {
1903     + cmd->c.opcode = command->opcode;
1904     + cmd->c.reportTypeID = reportID | reportType << 4;
1905     + }
1906     +
1907     + memcpy(cmd->data + length, args, args_len);
1908     + length += args_len;
1909     +
1910     + i2c_hid_dbg(ihid, "%s: cmd=%*ph\n", __func__, length, cmd->data);
1911     +
1912     + msg[0].addr = client->addr;
1913     + msg[0].flags = client->flags & I2C_M_TEN;
1914     + msg[0].len = length;
1915     + msg[0].buf = cmd->data;
1916     + if (data_len > 0) {
1917     + msg[1].addr = client->addr;
1918     + msg[1].flags = client->flags & I2C_M_TEN;
1919     + msg[1].flags |= I2C_M_RD;
1920     + msg[1].len = data_len;
1921     + msg[1].buf = buf_recv;
1922     + msg_num = 2;
1923     + set_bit(I2C_HID_READ_PENDING, &ihid->flags);
1924     + }
1925     +
1926     + if (wait)
1927     + set_bit(I2C_HID_RESET_PENDING, &ihid->flags);
1928     +
1929     + ret = i2c_transfer(client->adapter, msg, msg_num);
1930     +
1931     + if (data_len > 0)
1932     + clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
1933     +
1934     + if (ret != msg_num)
1935     + return ret < 0 ? ret : -EIO;
1936     +
1937     + ret = 0;
1938     +
1939     + if (wait && (ihid->quirks & I2C_HID_QUIRK_NO_IRQ_AFTER_RESET)) {
1940     + msleep(100);
1941     + } else if (wait) {
1942     + i2c_hid_dbg(ihid, "%s: waiting...\n", __func__);
1943     + if (!wait_event_timeout(ihid->wait,
1944     + !test_bit(I2C_HID_RESET_PENDING, &ihid->flags),
1945     + msecs_to_jiffies(5000)))
1946     + ret = -ENODATA;
1947     + i2c_hid_dbg(ihid, "%s: finished.\n", __func__);
1948     + }
1949     +
1950     + return ret;
1951     +}
1952     +
1953     +static int i2c_hid_command(struct i2c_client *client,
1954     + const struct i2c_hid_cmd *command,
1955     + unsigned char *buf_recv, int data_len)
1956     +{
1957     + return __i2c_hid_command(client, command, 0, 0, NULL, 0,
1958     + buf_recv, data_len);
1959     +}
1960     +
1961     +static int i2c_hid_get_report(struct i2c_client *client, u8 reportType,
1962     + u8 reportID, unsigned char *buf_recv, int data_len)
1963     +{
1964     + struct i2c_hid *ihid = i2c_get_clientdata(client);
1965     + u8 args[3];
1966     + int ret;
1967     + int args_len = 0;
1968     + u16 readRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
1969     +
1970     + i2c_hid_dbg(ihid, "%s\n", __func__);
1971     +
1972     + if (reportID >= 0x0F) {
1973     + args[args_len++] = reportID;
1974     + reportID = 0x0F;
1975     + }
1976     +
1977     + args[args_len++] = readRegister & 0xFF;
1978     + args[args_len++] = readRegister >> 8;
1979     +
1980     + ret = __i2c_hid_command(client, &hid_get_report_cmd, reportID,
1981     + reportType, args, args_len, buf_recv, data_len);
1982     + if (ret) {
1983     + dev_err(&client->dev,
1984     + "failed to retrieve report from device.\n");
1985     + return ret;
1986     + }
1987     +
1988     + return 0;
1989     +}
1990     +
1991     +/**
1992     + * i2c_hid_set_or_send_report: forward an incoming report to the device
1993     + * @client: the i2c_client of the device
1994     + * @reportType: 0x03 for HID_FEATURE_REPORT ; 0x02 for HID_OUTPUT_REPORT
1995     + * @reportID: the report ID
1996     + * @buf: the actual data to transfer, without the report ID
1997     + * @len: size of buf
1998     + * @use_data: true: use SET_REPORT HID command, false: send plain OUTPUT report
1999     + */
2000     +static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
2001     + u8 reportID, unsigned char *buf, size_t data_len, bool use_data)
2002     +{
2003     + struct i2c_hid *ihid = i2c_get_clientdata(client);
2004     + u8 *args = ihid->argsbuf;
2005     + const struct i2c_hid_cmd *hidcmd;
2006     + int ret;
2007     + u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
2008     + u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
2009     + u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
2010     + u16 size;
2011     + int args_len;
2012     + int index = 0;
2013     +
2014     + i2c_hid_dbg(ihid, "%s\n", __func__);
2015     +
2016     + if (data_len > ihid->bufsize)
2017     + return -EINVAL;
2018     +
2019     + size = 2 /* size */ +
2020     + (reportID ? 1 : 0) /* reportID */ +
2021     + data_len /* buf */;
2022     + args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
2023     + 2 /* dataRegister */ +
2024     + size /* args */;
2025     +
2026     + if (!use_data && maxOutputLength == 0)
2027     + return -ENOSYS;
2028     +
2029     + if (reportID >= 0x0F) {
2030     + args[index++] = reportID;
2031     + reportID = 0x0F;
2032     + }
2033     +
2034     + /*
2035     + * use the data register for feature reports or if the device does not
2036     + * support the output register
2037     + */
2038     + if (use_data) {
2039     + args[index++] = dataRegister & 0xFF;
2040     + args[index++] = dataRegister >> 8;
2041     + hidcmd = &hid_set_report_cmd;
2042     + } else {
2043     + args[index++] = outputRegister & 0xFF;
2044     + args[index++] = outputRegister >> 8;
2045     + hidcmd = &hid_no_cmd;
2046     + }
2047     +
2048     + args[index++] = size & 0xFF;
2049     + args[index++] = size >> 8;
2050     +
2051     + if (reportID)
2052     + args[index++] = reportID;
2053     +
2054     + memcpy(&args[index], buf, data_len);
2055     +
2056     + ret = __i2c_hid_command(client, hidcmd, reportID,
2057     + reportType, args, args_len, NULL, 0);
2058     + if (ret) {
2059     + dev_err(&client->dev, "failed to set a report to device.\n");
2060     + return ret;
2061     + }
2062     +
2063     + return data_len;
2064     +}
2065     +
2066     +static int i2c_hid_set_power(struct i2c_client *client, int power_state)
2067     +{
2068     + struct i2c_hid *ihid = i2c_get_clientdata(client);
2069     + int ret;
2070     + unsigned long now, delay;
2071     +
2072     + i2c_hid_dbg(ihid, "%s\n", __func__);
2073     +
2074     + /*
2075     + * Some devices require to send a command to wakeup before power on.
2076     + * The call will get a return value (EREMOTEIO) but device will be
2077     + * triggered and activated. After that, it goes like a normal device.
2078     + */
2079     + if (power_state == I2C_HID_PWR_ON &&
2080     + ihid->quirks & I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV) {
2081     + ret = i2c_hid_command(client, &hid_set_power_cmd, NULL, 0);
2082     +
2083     + /* Device was already activated */
2084     + if (!ret)
2085     + goto set_pwr_exit;
2086     + }
2087     +
2088     + if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
2089     + power_state == I2C_HID_PWR_ON) {
2090     + now = jiffies;
2091     + if (time_after(ihid->sleep_delay, now)) {
2092     + delay = jiffies_to_usecs(ihid->sleep_delay - now);
2093     + usleep_range(delay, delay + 1);
2094     + }
2095     + }
2096     +
2097     + ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
2098     + 0, NULL, 0, NULL, 0);
2099     +
2100     + if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
2101     + power_state == I2C_HID_PWR_SLEEP)
2102     + ihid->sleep_delay = jiffies + msecs_to_jiffies(20);
2103     +
2104     + if (ret)
2105     + dev_err(&client->dev, "failed to change power setting.\n");
2106     +
2107     +set_pwr_exit:
2108     + return ret;
2109     +}
2110     +
2111     +static int i2c_hid_hwreset(struct i2c_client *client)
2112     +{
2113     + struct i2c_hid *ihid = i2c_get_clientdata(client);
2114     + int ret;
2115     +
2116     + i2c_hid_dbg(ihid, "%s\n", __func__);
2117     +
2118     + /*
2119     + * This prevents sending feature reports while the device is
2120     + * being reset. Otherwise we may lose the reset complete
2121     + * interrupt.
2122     + */
2123     + mutex_lock(&ihid->reset_lock);
2124     +
2125     + ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
2126     + if (ret)
2127     + goto out_unlock;
2128     +
2129     + /*
2130     + * The HID over I2C specification states that if a DEVICE needs time
2131     + * after the PWR_ON request, it should utilise CLOCK stretching.
2132     + * However, it has been observered that the Windows driver provides a
2133     + * 1ms sleep between the PWR_ON and RESET requests and that some devices
2134     + * rely on this.
2135     + */
2136     + usleep_range(1000, 5000);
2137     +
2138     + i2c_hid_dbg(ihid, "resetting...\n");
2139     +
2140     + ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
2141     + if (ret) {
2142     + dev_err(&client->dev, "failed to reset device.\n");
2143     + i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
2144     + }
2145     +
2146     +out_unlock:
2147     + mutex_unlock(&ihid->reset_lock);
2148     + return ret;
2149     +}
2150     +
2151     +static void i2c_hid_get_input(struct i2c_hid *ihid)
2152     +{
2153     + int ret;
2154     + u32 ret_size;
2155     + int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
2156     +
2157     + if (size > ihid->bufsize)
2158     + size = ihid->bufsize;
2159     +
2160     + ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
2161     + if (ret != size) {
2162     + if (ret < 0)
2163     + return;
2164     +
2165     + dev_err(&ihid->client->dev, "%s: got %d data instead of %d\n",
2166     + __func__, ret, size);
2167     + return;
2168     + }
2169     +
2170     + ret_size = ihid->inbuf[0] | ihid->inbuf[1] << 8;
2171     +
2172     + if (!ret_size) {
2173     + /* host or device initiated RESET completed */
2174     + if (test_and_clear_bit(I2C_HID_RESET_PENDING, &ihid->flags))
2175     + wake_up(&ihid->wait);
2176     + return;
2177     + }
2178     +
2179     + if ((ret_size > size) || (ret_size < 2)) {
2180     + dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
2181     + __func__, size, ret_size);
2182     + return;
2183     + }
2184     +
2185     + i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf);
2186     +
2187     + if (test_bit(I2C_HID_STARTED, &ihid->flags))
2188     + hid_input_report(ihid->hid, HID_INPUT_REPORT, ihid->inbuf + 2,
2189     + ret_size - 2, 1);
2190     +
2191     + return;
2192     +}
2193     +
2194     +static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
2195     +{
2196     + struct i2c_hid *ihid = dev_id;
2197     +
2198     + if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
2199     + return IRQ_HANDLED;
2200     +
2201     + i2c_hid_get_input(ihid);
2202     +
2203     + return IRQ_HANDLED;
2204     +}
2205     +
2206     +static int i2c_hid_get_report_length(struct hid_report *report)
2207     +{
2208     + return ((report->size - 1) >> 3) + 1 +
2209     + report->device->report_enum[report->type].numbered + 2;
2210     +}
2211     +
2212     +/*
2213     + * Traverse the supplied list of reports and find the longest
2214     + */
2215     +static void i2c_hid_find_max_report(struct hid_device *hid, unsigned int type,
2216     + unsigned int *max)
2217     +{
2218     + struct hid_report *report;
2219     + unsigned int size;
2220     +
2221     + /* We should not rely on wMaxInputLength, as some devices may set it to
2222     + * a wrong length. */
2223     + list_for_each_entry(report, &hid->report_enum[type].report_list, list) {
2224     + size = i2c_hid_get_report_length(report);
2225     + if (*max < size)
2226     + *max = size;
2227     + }
2228     +}
2229     +
2230     +static void i2c_hid_free_buffers(struct i2c_hid *ihid)
2231     +{
2232     + kfree(ihid->inbuf);
2233     + kfree(ihid->rawbuf);
2234     + kfree(ihid->argsbuf);
2235     + kfree(ihid->cmdbuf);
2236     + ihid->inbuf = NULL;
2237     + ihid->rawbuf = NULL;
2238     + ihid->cmdbuf = NULL;
2239     + ihid->argsbuf = NULL;
2240     + ihid->bufsize = 0;
2241     +}
2242     +
2243     +static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
2244     +{
2245     + /* the worst case is computed from the set_report command with a
2246     + * reportID > 15 and the maximum report length */
2247     + int args_len = sizeof(__u8) + /* ReportID */
2248     + sizeof(__u8) + /* optional ReportID byte */
2249     + sizeof(__u16) + /* data register */
2250     + sizeof(__u16) + /* size of the report */
2251     + report_size; /* report */
2252     +
2253     + ihid->inbuf = kzalloc(report_size, GFP_KERNEL);
2254     + ihid->rawbuf = kzalloc(report_size, GFP_KERNEL);
2255     + ihid->argsbuf = kzalloc(args_len, GFP_KERNEL);
2256     + ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL);
2257     +
2258     + if (!ihid->inbuf || !ihid->rawbuf || !ihid->argsbuf || !ihid->cmdbuf) {
2259     + i2c_hid_free_buffers(ihid);
2260     + return -ENOMEM;
2261     + }
2262     +
2263     + ihid->bufsize = report_size;
2264     +
2265     + return 0;
2266     +}
2267     +
2268     +static int i2c_hid_get_raw_report(struct hid_device *hid,
2269     + unsigned char report_number, __u8 *buf, size_t count,
2270     + unsigned char report_type)
2271     +{
2272     + struct i2c_client *client = hid->driver_data;
2273     + struct i2c_hid *ihid = i2c_get_clientdata(client);
2274     + size_t ret_count, ask_count;
2275     + int ret;
2276     +
2277     + if (report_type == HID_OUTPUT_REPORT)
2278     + return -EINVAL;
2279     +
2280     + /* +2 bytes to include the size of the reply in the query buffer */
2281     + ask_count = min(count + 2, (size_t)ihid->bufsize);
2282     +
2283     + ret = i2c_hid_get_report(client,
2284     + report_type == HID_FEATURE_REPORT ? 0x03 : 0x01,
2285     + report_number, ihid->rawbuf, ask_count);
2286     +
2287     + if (ret < 0)
2288     + return ret;
2289     +
2290     + ret_count = ihid->rawbuf[0] | (ihid->rawbuf[1] << 8);
2291     +
2292     + if (ret_count <= 2)
2293     + return 0;
2294     +
2295     + ret_count = min(ret_count, ask_count);
2296     +
2297     + /* The query buffer contains the size, dropping it in the reply */
2298     + count = min(count, ret_count - 2);
2299     + memcpy(buf, ihid->rawbuf + 2, count);
2300     +
2301     + return count;
2302     +}
2303     +
2304     +static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
2305     + size_t count, unsigned char report_type, bool use_data)
2306     +{
2307     + struct i2c_client *client = hid->driver_data;
2308     + struct i2c_hid *ihid = i2c_get_clientdata(client);
2309     + int report_id = buf[0];
2310     + int ret;
2311     +
2312     + if (report_type == HID_INPUT_REPORT)
2313     + return -EINVAL;
2314     +
2315     + mutex_lock(&ihid->reset_lock);
2316     +
2317     + if (report_id) {
2318     + buf++;
2319     + count--;
2320     + }
2321     +
2322     + ret = i2c_hid_set_or_send_report(client,
2323     + report_type == HID_FEATURE_REPORT ? 0x03 : 0x02,
2324     + report_id, buf, count, use_data);
2325     +
2326     + if (report_id && ret >= 0)
2327     + ret++; /* add report_id to the number of transfered bytes */
2328     +
2329     + mutex_unlock(&ihid->reset_lock);
2330     +
2331     + return ret;
2332     +}
2333     +
2334     +static int i2c_hid_output_report(struct hid_device *hid, __u8 *buf,
2335     + size_t count)
2336     +{
2337     + return i2c_hid_output_raw_report(hid, buf, count, HID_OUTPUT_REPORT,
2338     + false);
2339     +}
2340     +
2341     +static int i2c_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
2342     + __u8 *buf, size_t len, unsigned char rtype,
2343     + int reqtype)
2344     +{
2345     + switch (reqtype) {
2346     + case HID_REQ_GET_REPORT:
2347     + return i2c_hid_get_raw_report(hid, reportnum, buf, len, rtype);
2348     + case HID_REQ_SET_REPORT:
2349     + if (buf[0] != reportnum)
2350     + return -EINVAL;
2351     + return i2c_hid_output_raw_report(hid, buf, len, rtype, true);
2352     + default:
2353     + return -EIO;
2354     + }
2355     +}
2356     +
2357     +static int i2c_hid_parse(struct hid_device *hid)
2358     +{
2359     + struct i2c_client *client = hid->driver_data;
2360     + struct i2c_hid *ihid = i2c_get_clientdata(client);
2361     + struct i2c_hid_desc *hdesc = &ihid->hdesc;
2362     + unsigned int rsize;
2363     + char *rdesc;
2364     + int ret;
2365     + int tries = 3;
2366     + char *use_override;
2367     +
2368     + i2c_hid_dbg(ihid, "entering %s\n", __func__);
2369     +
2370     + rsize = le16_to_cpu(hdesc->wReportDescLength);
2371     + if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
2372     + dbg_hid("weird size of report descriptor (%u)\n", rsize);
2373     + return -EINVAL;
2374     + }
2375     +
2376     + do {
2377     + ret = i2c_hid_hwreset(client);
2378     + if (ret)
2379     + msleep(1000);
2380     + } while (tries-- > 0 && ret);
2381     +
2382     + if (ret)
2383     + return ret;
2384     +
2385     + use_override = i2c_hid_get_dmi_hid_report_desc_override(client->name,
2386     + &rsize);
2387     +
2388     + if (use_override) {
2389     + rdesc = use_override;
2390     + i2c_hid_dbg(ihid, "Using a HID report descriptor override\n");
2391     + } else {
2392     + rdesc = kzalloc(rsize, GFP_KERNEL);
2393     +
2394     + if (!rdesc) {
2395     + dbg_hid("couldn't allocate rdesc memory\n");
2396     + return -ENOMEM;
2397     + }
2398     +
2399     + i2c_hid_dbg(ihid, "asking HID report descriptor\n");
2400     +
2401     + ret = i2c_hid_command(client, &hid_report_descr_cmd,
2402     + rdesc, rsize);
2403     + if (ret) {
2404     + hid_err(hid, "reading report descriptor failed\n");
2405     + kfree(rdesc);
2406     + return -EIO;
2407     + }
2408     + }
2409     +
2410     + i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
2411     +
2412     + ret = hid_parse_report(hid, rdesc, rsize);
2413     + if (!use_override)
2414     + kfree(rdesc);
2415     +
2416     + if (ret) {
2417     + dbg_hid("parsing report descriptor failed\n");
2418     + return ret;
2419     + }
2420     +
2421     + return 0;
2422     +}
2423     +
2424     +static int i2c_hid_start(struct hid_device *hid)
2425     +{
2426     + struct i2c_client *client = hid->driver_data;
2427     + struct i2c_hid *ihid = i2c_get_clientdata(client);
2428     + int ret;
2429     + unsigned int bufsize = HID_MIN_BUFFER_SIZE;
2430     +
2431     + i2c_hid_find_max_report(hid, HID_INPUT_REPORT, &bufsize);
2432     + i2c_hid_find_max_report(hid, HID_OUTPUT_REPORT, &bufsize);
2433     + i2c_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize);
2434     +
2435     + if (bufsize > ihid->bufsize) {
2436     + disable_irq(client->irq);
2437     + i2c_hid_free_buffers(ihid);
2438     +
2439     + ret = i2c_hid_alloc_buffers(ihid, bufsize);
2440     + enable_irq(client->irq);
2441     +
2442     + if (ret)
2443     + return ret;
2444     + }
2445     +
2446     + return 0;
2447     +}
2448     +
2449     +static void i2c_hid_stop(struct hid_device *hid)
2450     +{
2451     + hid->claimed = 0;
2452     +}
2453     +
2454     +static int i2c_hid_open(struct hid_device *hid)
2455     +{
2456     + struct i2c_client *client = hid->driver_data;
2457     + struct i2c_hid *ihid = i2c_get_clientdata(client);
2458     + int ret = 0;
2459     +
2460     + ret = pm_runtime_get_sync(&client->dev);
2461     + if (ret < 0)
2462     + return ret;
2463     +
2464     + set_bit(I2C_HID_STARTED, &ihid->flags);
2465     + return 0;
2466     +}
2467     +
2468     +static void i2c_hid_close(struct hid_device *hid)
2469     +{
2470     + struct i2c_client *client = hid->driver_data;
2471     + struct i2c_hid *ihid = i2c_get_clientdata(client);
2472     +
2473     + clear_bit(I2C_HID_STARTED, &ihid->flags);
2474     +
2475     + /* Save some power */
2476     + pm_runtime_put(&client->dev);
2477     +}
2478     +
2479     +static int i2c_hid_power(struct hid_device *hid, int lvl)
2480     +{
2481     + struct i2c_client *client = hid->driver_data;
2482     + struct i2c_hid *ihid = i2c_get_clientdata(client);
2483     +
2484     + i2c_hid_dbg(ihid, "%s lvl:%d\n", __func__, lvl);
2485     +
2486     + switch (lvl) {
2487     + case PM_HINT_FULLON:
2488     + pm_runtime_get_sync(&client->dev);
2489     + break;
2490     + case PM_HINT_NORMAL:
2491     + pm_runtime_put(&client->dev);
2492     + break;
2493     + }
2494     + return 0;
2495     +}
2496     +
2497     +struct hid_ll_driver i2c_hid_ll_driver = {
2498     + .parse = i2c_hid_parse,
2499     + .start = i2c_hid_start,
2500     + .stop = i2c_hid_stop,
2501     + .open = i2c_hid_open,
2502     + .close = i2c_hid_close,
2503     + .power = i2c_hid_power,
2504     + .output_report = i2c_hid_output_report,
2505     + .raw_request = i2c_hid_raw_request,
2506     +};
2507     +EXPORT_SYMBOL_GPL(i2c_hid_ll_driver);
2508     +
2509     +static int i2c_hid_init_irq(struct i2c_client *client)
2510     +{
2511     + struct i2c_hid *ihid = i2c_get_clientdata(client);
2512     + unsigned long irqflags = 0;
2513     + int ret;
2514     +
2515     + dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
2516     +
2517     + if (!irq_get_trigger_type(client->irq))
2518     + irqflags = IRQF_TRIGGER_LOW;
2519     +
2520     + ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
2521     + irqflags | IRQF_ONESHOT, client->name, ihid);
2522     + if (ret < 0) {
2523     + dev_warn(&client->dev,
2524     + "Could not register for %s interrupt, irq = %d,"
2525     + " ret = %d\n",
2526     + client->name, client->irq, ret);
2527     +
2528     + return ret;
2529     + }
2530     +
2531     + return 0;
2532     +}
2533     +
2534     +static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
2535     +{
2536     + struct i2c_client *client = ihid->client;
2537     + struct i2c_hid_desc *hdesc = &ihid->hdesc;
2538     + unsigned int dsize;
2539     + int ret;
2540     +
2541     + /* i2c hid fetch using a fixed descriptor size (30 bytes) */
2542     + if (i2c_hid_get_dmi_i2c_hid_desc_override(client->name)) {
2543     + i2c_hid_dbg(ihid, "Using a HID descriptor override\n");
2544     + ihid->hdesc =
2545     + *i2c_hid_get_dmi_i2c_hid_desc_override(client->name);
2546     + } else {
2547     + i2c_hid_dbg(ihid, "Fetching the HID descriptor\n");
2548     + ret = i2c_hid_command(client, &hid_descr_cmd,
2549     + ihid->hdesc_buffer,
2550     + sizeof(struct i2c_hid_desc));
2551     + if (ret) {
2552     + dev_err(&client->dev, "hid_descr_cmd failed\n");
2553     + return -ENODEV;
2554     + }
2555     + }
2556     +
2557     + /* Validate the length of HID descriptor, the 4 first bytes:
2558     + * bytes 0-1 -> length
2559     + * bytes 2-3 -> bcdVersion (has to be 1.00) */
2560     + /* check bcdVersion == 1.0 */
2561     + if (le16_to_cpu(hdesc->bcdVersion) != 0x0100) {
2562     + dev_err(&client->dev,
2563     + "unexpected HID descriptor bcdVersion (0x%04hx)\n",
2564     + le16_to_cpu(hdesc->bcdVersion));
2565     + return -ENODEV;
2566     + }
2567     +
2568     + /* Descriptor length should be 30 bytes as per the specification */
2569     + dsize = le16_to_cpu(hdesc->wHIDDescLength);
2570     + if (dsize != sizeof(struct i2c_hid_desc)) {
2571     + dev_err(&client->dev, "weird size of HID descriptor (%u)\n",
2572     + dsize);
2573     + return -ENODEV;
2574     + }
2575     + i2c_hid_dbg(ihid, "HID Descriptor: %*ph\n", dsize, ihid->hdesc_buffer);
2576     + return 0;
2577     +}
2578     +
2579     +#ifdef CONFIG_ACPI
2580     +static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
2581     + /*
2582     + * The CHPN0001 ACPI device, which is used to describe the Chipone
2583     + * ICN8505 controller, has a _CID of PNP0C50 but is not HID compatible.
2584     + */
2585     + {"CHPN0001", 0 },
2586     + { },
2587     +};
2588     +
2589     +static int i2c_hid_acpi_pdata(struct i2c_client *client,
2590     + struct i2c_hid_platform_data *pdata)
2591     +{
2592     + static guid_t i2c_hid_guid =
2593     + GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
2594     + 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
2595     + union acpi_object *obj;
2596     + struct acpi_device *adev;
2597     + acpi_handle handle;
2598     +
2599     + handle = ACPI_HANDLE(&client->dev);
2600     + if (!handle || acpi_bus_get_device(handle, &adev)) {
2601     + dev_err(&client->dev, "Error could not get ACPI device\n");
2602     + return -ENODEV;
2603     + }
2604     +
2605     + if (acpi_match_device_ids(adev, i2c_hid_acpi_blacklist) == 0)
2606     + return -ENODEV;
2607     +
2608     + obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL,
2609     + ACPI_TYPE_INTEGER);
2610     + if (!obj) {
2611     + dev_err(&client->dev, "Error _DSM call to get HID descriptor address failed\n");
2612     + return -ENODEV;
2613     + }
2614     +
2615     + pdata->hid_descriptor_address = obj->integer.value;
2616     + ACPI_FREE(obj);
2617     +
2618     + return 0;
2619     +}
2620     +
2621     +static void i2c_hid_acpi_fix_up_power(struct device *dev)
2622     +{
2623     + struct acpi_device *adev;
2624     +
2625     + adev = ACPI_COMPANION(dev);
2626     + if (adev)
2627     + acpi_device_fix_up_power(adev);
2628     +}
2629     +
2630     +static const struct acpi_device_id i2c_hid_acpi_match[] = {
2631     + {"ACPI0C50", 0 },
2632     + {"PNP0C50", 0 },
2633     + { },
2634     +};
2635     +MODULE_DEVICE_TABLE(acpi, i2c_hid_acpi_match);
2636     +#else
2637     +static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
2638     + struct i2c_hid_platform_data *pdata)
2639     +{
2640     + return -ENODEV;
2641     +}
2642     +
2643     +static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {}
2644     +#endif
2645     +
2646     +#ifdef CONFIG_OF
2647     +static int i2c_hid_of_probe(struct i2c_client *client,
2648     + struct i2c_hid_platform_data *pdata)
2649     +{
2650     + struct device *dev = &client->dev;
2651     + u32 val;
2652     + int ret;
2653     +
2654     + ret = of_property_read_u32(dev->of_node, "hid-descr-addr", &val);
2655     + if (ret) {
2656     + dev_err(&client->dev, "HID register address not provided\n");
2657     + return -ENODEV;
2658     + }
2659     + if (val >> 16) {
2660     + dev_err(&client->dev, "Bad HID register address: 0x%08x\n",
2661     + val);
2662     + return -EINVAL;
2663     + }
2664     + pdata->hid_descriptor_address = val;
2665     +
2666     + return 0;
2667     +}
2668     +
2669     +static const struct of_device_id i2c_hid_of_match[] = {
2670     + { .compatible = "hid-over-i2c" },
2671     + {},
2672     +};
2673     +MODULE_DEVICE_TABLE(of, i2c_hid_of_match);
2674     +#else
2675     +static inline int i2c_hid_of_probe(struct i2c_client *client,
2676     + struct i2c_hid_platform_data *pdata)
2677     +{
2678     + return -ENODEV;
2679     +}
2680     +#endif
2681     +
2682     +static void i2c_hid_fwnode_probe(struct i2c_client *client,
2683     + struct i2c_hid_platform_data *pdata)
2684     +{
2685     + u32 val;
2686     +
2687     + if (!device_property_read_u32(&client->dev, "post-power-on-delay-ms",
2688     + &val))
2689     + pdata->post_power_delay_ms = val;
2690     +}
2691     +
2692     +static int i2c_hid_probe(struct i2c_client *client,
2693     + const struct i2c_device_id *dev_id)
2694     +{
2695     + int ret;
2696     + struct i2c_hid *ihid;
2697     + struct hid_device *hid;
2698     + __u16 hidRegister;
2699     + struct i2c_hid_platform_data *platform_data = client->dev.platform_data;
2700     +
2701     + dbg_hid("HID probe called for i2c 0x%02x\n", client->addr);
2702     +
2703     + if (!client->irq) {
2704     + dev_err(&client->dev,
2705     + "HID over i2c has not been provided an Int IRQ\n");
2706     + return -EINVAL;
2707     + }
2708     +
2709     + if (client->irq < 0) {
2710     + if (client->irq != -EPROBE_DEFER)
2711     + dev_err(&client->dev,
2712     + "HID over i2c doesn't have a valid IRQ\n");
2713     + return client->irq;
2714     + }
2715     +
2716     + ihid = devm_kzalloc(&client->dev, sizeof(*ihid), GFP_KERNEL);
2717     + if (!ihid)
2718     + return -ENOMEM;
2719     +
2720     + if (client->dev.of_node) {
2721     + ret = i2c_hid_of_probe(client, &ihid->pdata);
2722     + if (ret)
2723     + return ret;
2724     + } else if (!platform_data) {
2725     + ret = i2c_hid_acpi_pdata(client, &ihid->pdata);
2726     + if (ret)
2727     + return ret;
2728     + } else {
2729     + ihid->pdata = *platform_data;
2730     + }
2731     +
2732     + /* Parse platform agnostic common properties from ACPI / device tree */
2733     + i2c_hid_fwnode_probe(client, &ihid->pdata);
2734     +
2735     + ihid->pdata.supplies[0].supply = "vdd";
2736     + ihid->pdata.supplies[1].supply = "vddl";
2737     +
2738     + ret = devm_regulator_bulk_get(&client->dev,
2739     + ARRAY_SIZE(ihid->pdata.supplies),
2740     + ihid->pdata.supplies);
2741     + if (ret)
2742     + return ret;
2743     +
2744     + ret = regulator_bulk_enable(ARRAY_SIZE(ihid->pdata.supplies),
2745     + ihid->pdata.supplies);
2746     + if (ret < 0)
2747     + return ret;
2748     +
2749     + if (ihid->pdata.post_power_delay_ms)
2750     + msleep(ihid->pdata.post_power_delay_ms);
2751     +
2752     + i2c_set_clientdata(client, ihid);
2753     +
2754     + ihid->client = client;
2755     +
2756     + hidRegister = ihid->pdata.hid_descriptor_address;
2757     + ihid->wHIDDescRegister = cpu_to_le16(hidRegister);
2758     +
2759     + init_waitqueue_head(&ihid->wait);
2760     + mutex_init(&ihid->reset_lock);
2761     +
2762     + /* we need to allocate the command buffer without knowing the maximum
2763     + * size of the reports. Let's use HID_MIN_BUFFER_SIZE, then we do the
2764     + * real computation later. */
2765     + ret = i2c_hid_alloc_buffers(ihid, HID_MIN_BUFFER_SIZE);
2766     + if (ret < 0)
2767     + goto err_regulator;
2768     +
2769     + i2c_hid_acpi_fix_up_power(&client->dev);
2770     +
2771     + pm_runtime_get_noresume(&client->dev);
2772     + pm_runtime_set_active(&client->dev);
2773     + pm_runtime_enable(&client->dev);
2774     + device_enable_async_suspend(&client->dev);
2775     +
2776     + /* Make sure there is something at this address */
2777     + ret = i2c_smbus_read_byte(client);
2778     + if (ret < 0) {
2779     + dev_dbg(&client->dev, "nothing at this address: %d\n", ret);
2780     + ret = -ENXIO;
2781     + goto err_pm;
2782     + }
2783     +
2784     + ret = i2c_hid_fetch_hid_descriptor(ihid);
2785     + if (ret < 0)
2786     + goto err_pm;
2787     +
2788     + ret = i2c_hid_init_irq(client);
2789     + if (ret < 0)
2790     + goto err_pm;
2791     +
2792     + hid = hid_allocate_device();
2793     + if (IS_ERR(hid)) {
2794     + ret = PTR_ERR(hid);
2795     + goto err_irq;
2796     + }
2797     +
2798     + ihid->hid = hid;
2799     +
2800     + hid->driver_data = client;
2801     + hid->ll_driver = &i2c_hid_ll_driver;
2802     + hid->dev.parent = &client->dev;
2803     + hid->bus = BUS_I2C;
2804     + hid->version = le16_to_cpu(ihid->hdesc.bcdVersion);
2805     + hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
2806     + hid->product = le16_to_cpu(ihid->hdesc.wProductID);
2807     +
2808     + snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
2809     + client->name, hid->vendor, hid->product);
2810     + strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
2811     +
2812     + ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
2813     +
2814     + ret = hid_add_device(hid);
2815     + if (ret) {
2816     + if (ret != -ENODEV)
2817     + hid_err(client, "can't add hid device: %d\n", ret);
2818     + goto err_mem_free;
2819     + }
2820     +
2821     + if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
2822     + pm_runtime_put(&client->dev);
2823     +
2824     + return 0;
2825     +
2826     +err_mem_free:
2827     + hid_destroy_device(hid);
2828     +
2829     +err_irq:
2830     + free_irq(client->irq, ihid);
2831     +
2832     +err_pm:
2833     + pm_runtime_put_noidle(&client->dev);
2834     + pm_runtime_disable(&client->dev);
2835     +
2836     +err_regulator:
2837     + regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
2838     + ihid->pdata.supplies);
2839     + i2c_hid_free_buffers(ihid);
2840     + return ret;
2841     +}
2842     +
2843     +static int i2c_hid_remove(struct i2c_client *client)
2844     +{
2845     + struct i2c_hid *ihid = i2c_get_clientdata(client);
2846     + struct hid_device *hid;
2847     +
2848     + if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
2849     + pm_runtime_get_sync(&client->dev);
2850     + pm_runtime_disable(&client->dev);
2851     + pm_runtime_set_suspended(&client->dev);
2852     + pm_runtime_put_noidle(&client->dev);
2853     +
2854     + hid = ihid->hid;
2855     + hid_destroy_device(hid);
2856     +
2857     + free_irq(client->irq, ihid);
2858     +
2859     + if (ihid->bufsize)
2860     + i2c_hid_free_buffers(ihid);
2861     +
2862     + regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
2863     + ihid->pdata.supplies);
2864     +
2865     + return 0;
2866     +}
2867     +
2868     +static void i2c_hid_shutdown(struct i2c_client *client)
2869     +{
2870     + struct i2c_hid *ihid = i2c_get_clientdata(client);
2871     +
2872     + i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
2873     + free_irq(client->irq, ihid);
2874     +}
2875     +
2876     +#ifdef CONFIG_PM_SLEEP
2877     +static int i2c_hid_suspend(struct device *dev)
2878     +{
2879     + struct i2c_client *client = to_i2c_client(dev);
2880     + struct i2c_hid *ihid = i2c_get_clientdata(client);
2881     + struct hid_device *hid = ihid->hid;
2882     + int ret;
2883     + int wake_status;
2884     +
2885     + if (hid->driver && hid->driver->suspend) {
2886     + /*
2887     + * Wake up the device so that IO issues in
2888     + * HID driver's suspend code can succeed.
2889     + */
2890     + ret = pm_runtime_resume(dev);
2891     + if (ret < 0)
2892     + return ret;
2893     +
2894     + ret = hid->driver->suspend(hid, PMSG_SUSPEND);
2895     + if (ret < 0)
2896     + return ret;
2897     + }
2898     +
2899     + if (!pm_runtime_suspended(dev)) {
2900     + /* Save some power */
2901     + i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
2902     +
2903     + disable_irq(client->irq);
2904     + }
2905     +
2906     + if (device_may_wakeup(&client->dev)) {
2907     + wake_status = enable_irq_wake(client->irq);
2908     + if (!wake_status)
2909     + ihid->irq_wake_enabled = true;
2910     + else
2911     + hid_warn(hid, "Failed to enable irq wake: %d\n",
2912     + wake_status);
2913     + } else {
2914     + regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
2915     + ihid->pdata.supplies);
2916     + }
2917     +
2918     + return 0;
2919     +}
2920     +
2921     +static int i2c_hid_resume(struct device *dev)
2922     +{
2923     + int ret;
2924     + struct i2c_client *client = to_i2c_client(dev);
2925     + struct i2c_hid *ihid = i2c_get_clientdata(client);
2926     + struct hid_device *hid = ihid->hid;
2927     + int wake_status;
2928     +
2929     + if (!device_may_wakeup(&client->dev)) {
2930     + ret = regulator_bulk_enable(ARRAY_SIZE(ihid->pdata.supplies),
2931     + ihid->pdata.supplies);
2932     + if (ret)
2933     + hid_warn(hid, "Failed to enable supplies: %d\n", ret);
2934     +
2935     + if (ihid->pdata.post_power_delay_ms)
2936     + msleep(ihid->pdata.post_power_delay_ms);
2937     + } else if (ihid->irq_wake_enabled) {
2938     + wake_status = disable_irq_wake(client->irq);
2939     + if (!wake_status)
2940     + ihid->irq_wake_enabled = false;
2941     + else
2942     + hid_warn(hid, "Failed to disable irq wake: %d\n",
2943     + wake_status);
2944     + }
2945     +
2946     + /* We'll resume to full power */
2947     + pm_runtime_disable(dev);
2948     + pm_runtime_set_active(dev);
2949     + pm_runtime_enable(dev);
2950     +
2951     + enable_irq(client->irq);
2952     +
2953     + /* Instead of resetting device, simply powers the device on. This
2954     + * solves "incomplete reports" on Raydium devices 2386:3118 and
2955     + * 2386:4B33 and fixes various SIS touchscreens no longer sending
2956     + * data after a suspend/resume.
2957     + */
2958     + ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
2959     + if (ret)
2960     + return ret;
2961     +
2962     + if (hid->driver && hid->driver->reset_resume) {
2963     + ret = hid->driver->reset_resume(hid);
2964     + return ret;
2965     + }
2966     +
2967     + return 0;
2968     +}
2969     +#endif
2970     +
2971     +#ifdef CONFIG_PM
2972     +static int i2c_hid_runtime_suspend(struct device *dev)
2973     +{
2974     + struct i2c_client *client = to_i2c_client(dev);
2975     +
2976     + i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
2977     + disable_irq(client->irq);
2978     + return 0;
2979     +}
2980     +
2981     +static int i2c_hid_runtime_resume(struct device *dev)
2982     +{
2983     + struct i2c_client *client = to_i2c_client(dev);
2984     +
2985     + enable_irq(client->irq);
2986     + i2c_hid_set_power(client, I2C_HID_PWR_ON);
2987     + return 0;
2988     +}
2989     +#endif
2990     +
2991     +static const struct dev_pm_ops i2c_hid_pm = {
2992     + SET_SYSTEM_SLEEP_PM_OPS(i2c_hid_suspend, i2c_hid_resume)
2993     + SET_RUNTIME_PM_OPS(i2c_hid_runtime_suspend, i2c_hid_runtime_resume,
2994     + NULL)
2995     +};
2996     +
2997     +static const struct i2c_device_id i2c_hid_id_table[] = {
2998     + { "hid", 0 },
2999     + { "hid-over-i2c", 0 },
3000     + { },
3001     +};
3002     +MODULE_DEVICE_TABLE(i2c, i2c_hid_id_table);
3003     +
3004     +
3005     +static struct i2c_driver i2c_hid_driver = {
3006     + .driver = {
3007     + .name = "i2c_hid",
3008     + .pm = &i2c_hid_pm,
3009     + .acpi_match_table = ACPI_PTR(i2c_hid_acpi_match),
3010     + .of_match_table = of_match_ptr(i2c_hid_of_match),
3011     + },
3012     +
3013     + .probe = i2c_hid_probe,
3014     + .remove = i2c_hid_remove,
3015     + .shutdown = i2c_hid_shutdown,
3016     + .id_table = i2c_hid_id_table,
3017     +};
3018     +
3019     +module_i2c_driver(i2c_hid_driver);
3020     +
3021     +MODULE_DESCRIPTION("HID over I2C core driver");
3022     +MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
3023     +MODULE_LICENSE("GPL");
3024     diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
3025     new file mode 100644
3026     index 000000000000..1d645c9ab417
3027     --- /dev/null
3028     +++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
3029     @@ -0,0 +1,376 @@
3030     +// SPDX-License-Identifier: GPL-2.0+
3031     +
3032     +/*
3033     + * Quirks for I2C-HID devices that do not supply proper descriptors
3034     + *
3035     + * Copyright (c) 2018 Julian Sax <jsbc@gmx.de>
3036     + *
3037     + */
3038     +
3039     +#include <linux/types.h>
3040     +#include <linux/dmi.h>
3041     +#include <linux/mod_devicetable.h>
3042     +
3043     +#include "i2c-hid.h"
3044     +
3045     +
3046     +struct i2c_hid_desc_override {
3047     + union {
3048     + struct i2c_hid_desc *i2c_hid_desc;
3049     + uint8_t *i2c_hid_desc_buffer;
3050     + };
3051     + uint8_t *hid_report_desc;
3052     + unsigned int hid_report_desc_size;
3053     + uint8_t *i2c_name;
3054     +};
3055     +
3056     +
3057     +/*
3058     + * descriptors for the SIPODEV SP1064 touchpad
3059     + *
3060     + * This device does not supply any descriptors and on windows a filter
3061     + * driver operates between the i2c-hid layer and the device and injects
3062     + * these descriptors when the device is prompted. The descriptors were
3063     + * extracted by listening to the i2c-hid traffic that occurs between the
3064     + * windows filter driver and the windows i2c-hid driver.
3065     + */
3066     +
3067     +static const struct i2c_hid_desc_override sipodev_desc = {
3068     + .i2c_hid_desc_buffer = (uint8_t [])
3069     + {0x1e, 0x00, /* Length of descriptor */
3070     + 0x00, 0x01, /* Version of descriptor */
3071     + 0xdb, 0x01, /* Length of report descriptor */
3072     + 0x21, 0x00, /* Location of report descriptor */
3073     + 0x24, 0x00, /* Location of input report */
3074     + 0x1b, 0x00, /* Max input report length */
3075     + 0x25, 0x00, /* Location of output report */
3076     + 0x11, 0x00, /* Max output report length */
3077     + 0x22, 0x00, /* Location of command register */
3078     + 0x23, 0x00, /* Location of data register */
3079     + 0x11, 0x09, /* Vendor ID */
3080     + 0x88, 0x52, /* Product ID */
3081     + 0x06, 0x00, /* Version ID */
3082     + 0x00, 0x00, 0x00, 0x00 /* Reserved */
3083     + },
3084     +
3085     + .hid_report_desc = (uint8_t [])
3086     + {0x05, 0x01, /* Usage Page (Desktop), */
3087     + 0x09, 0x02, /* Usage (Mouse), */
3088     + 0xA1, 0x01, /* Collection (Application), */
3089     + 0x85, 0x01, /* Report ID (1), */
3090     + 0x09, 0x01, /* Usage (Pointer), */
3091     + 0xA1, 0x00, /* Collection (Physical), */
3092     + 0x05, 0x09, /* Usage Page (Button), */
3093     + 0x19, 0x01, /* Usage Minimum (01h), */
3094     + 0x29, 0x02, /* Usage Maximum (02h), */
3095     + 0x25, 0x01, /* Logical Maximum (1), */
3096     + 0x75, 0x01, /* Report Size (1), */
3097     + 0x95, 0x02, /* Report Count (2), */
3098     + 0x81, 0x02, /* Input (Variable), */
3099     + 0x95, 0x06, /* Report Count (6), */
3100     + 0x81, 0x01, /* Input (Constant), */
3101     + 0x05, 0x01, /* Usage Page (Desktop), */
3102     + 0x09, 0x30, /* Usage (X), */
3103     + 0x09, 0x31, /* Usage (Y), */
3104     + 0x15, 0x81, /* Logical Minimum (-127), */
3105     + 0x25, 0x7F, /* Logical Maximum (127), */
3106     + 0x75, 0x08, /* Report Size (8), */
3107     + 0x95, 0x02, /* Report Count (2), */
3108     + 0x81, 0x06, /* Input (Variable, Relative), */
3109     + 0xC0, /* End Collection, */
3110     + 0xC0, /* End Collection, */
3111     + 0x05, 0x0D, /* Usage Page (Digitizer), */
3112     + 0x09, 0x05, /* Usage (Touchpad), */
3113     + 0xA1, 0x01, /* Collection (Application), */
3114     + 0x85, 0x04, /* Report ID (4), */
3115     + 0x05, 0x0D, /* Usage Page (Digitizer), */
3116     + 0x09, 0x22, /* Usage (Finger), */
3117     + 0xA1, 0x02, /* Collection (Logical), */
3118     + 0x15, 0x00, /* Logical Minimum (0), */
3119     + 0x25, 0x01, /* Logical Maximum (1), */
3120     + 0x09, 0x47, /* Usage (Touch Valid), */
3121     + 0x09, 0x42, /* Usage (Tip Switch), */
3122     + 0x95, 0x02, /* Report Count (2), */
3123     + 0x75, 0x01, /* Report Size (1), */
3124     + 0x81, 0x02, /* Input (Variable), */
3125     + 0x95, 0x01, /* Report Count (1), */
3126     + 0x75, 0x03, /* Report Size (3), */
3127     + 0x25, 0x05, /* Logical Maximum (5), */
3128     + 0x09, 0x51, /* Usage (Contact Identifier), */
3129     + 0x81, 0x02, /* Input (Variable), */
3130     + 0x75, 0x01, /* Report Size (1), */
3131     + 0x95, 0x03, /* Report Count (3), */
3132     + 0x81, 0x03, /* Input (Constant, Variable), */
3133     + 0x05, 0x01, /* Usage Page (Desktop), */
3134     + 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
3135     + 0x75, 0x10, /* Report Size (16), */
3136     + 0x55, 0x0E, /* Unit Exponent (14), */
3137     + 0x65, 0x11, /* Unit (Centimeter), */
3138     + 0x09, 0x30, /* Usage (X), */
3139     + 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
3140     + 0x95, 0x01, /* Report Count (1), */
3141     + 0x81, 0x02, /* Input (Variable), */
3142     + 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
3143     + 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
3144     + 0x09, 0x31, /* Usage (Y), */
3145     + 0x81, 0x02, /* Input (Variable), */
3146     + 0xC0, /* End Collection, */
3147     + 0x05, 0x0D, /* Usage Page (Digitizer), */
3148     + 0x09, 0x22, /* Usage (Finger), */
3149     + 0xA1, 0x02, /* Collection (Logical), */
3150     + 0x25, 0x01, /* Logical Maximum (1), */
3151     + 0x09, 0x47, /* Usage (Touch Valid), */
3152     + 0x09, 0x42, /* Usage (Tip Switch), */
3153     + 0x95, 0x02, /* Report Count (2), */
3154     + 0x75, 0x01, /* Report Size (1), */
3155     + 0x81, 0x02, /* Input (Variable), */
3156     + 0x95, 0x01, /* Report Count (1), */
3157     + 0x75, 0x03, /* Report Size (3), */
3158     + 0x25, 0x05, /* Logical Maximum (5), */
3159     + 0x09, 0x51, /* Usage (Contact Identifier), */
3160     + 0x81, 0x02, /* Input (Variable), */
3161     + 0x75, 0x01, /* Report Size (1), */
3162     + 0x95, 0x03, /* Report Count (3), */
3163     + 0x81, 0x03, /* Input (Constant, Variable), */
3164     + 0x05, 0x01, /* Usage Page (Desktop), */
3165     + 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
3166     + 0x75, 0x10, /* Report Size (16), */
3167     + 0x09, 0x30, /* Usage (X), */
3168     + 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
3169     + 0x95, 0x01, /* Report Count (1), */
3170     + 0x81, 0x02, /* Input (Variable), */
3171     + 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
3172     + 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
3173     + 0x09, 0x31, /* Usage (Y), */
3174     + 0x81, 0x02, /* Input (Variable), */
3175     + 0xC0, /* End Collection, */
3176     + 0x05, 0x0D, /* Usage Page (Digitizer), */
3177     + 0x09, 0x22, /* Usage (Finger), */
3178     + 0xA1, 0x02, /* Collection (Logical), */
3179     + 0x25, 0x01, /* Logical Maximum (1), */
3180     + 0x09, 0x47, /* Usage (Touch Valid), */
3181     + 0x09, 0x42, /* Usage (Tip Switch), */
3182     + 0x95, 0x02, /* Report Count (2), */
3183     + 0x75, 0x01, /* Report Size (1), */
3184     + 0x81, 0x02, /* Input (Variable), */
3185     + 0x95, 0x01, /* Report Count (1), */
3186     + 0x75, 0x03, /* Report Size (3), */
3187     + 0x25, 0x05, /* Logical Maximum (5), */
3188     + 0x09, 0x51, /* Usage (Contact Identifier), */
3189     + 0x81, 0x02, /* Input (Variable), */
3190     + 0x75, 0x01, /* Report Size (1), */
3191     + 0x95, 0x03, /* Report Count (3), */
3192     + 0x81, 0x03, /* Input (Constant, Variable), */
3193     + 0x05, 0x01, /* Usage Page (Desktop), */
3194     + 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
3195     + 0x75, 0x10, /* Report Size (16), */
3196     + 0x09, 0x30, /* Usage (X), */
3197     + 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
3198     + 0x95, 0x01, /* Report Count (1), */
3199     + 0x81, 0x02, /* Input (Variable), */
3200     + 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
3201     + 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
3202     + 0x09, 0x31, /* Usage (Y), */
3203     + 0x81, 0x02, /* Input (Variable), */
3204     + 0xC0, /* End Collection, */
3205     + 0x05, 0x0D, /* Usage Page (Digitizer), */
3206     + 0x09, 0x22, /* Usage (Finger), */
3207     + 0xA1, 0x02, /* Collection (Logical), */
3208     + 0x25, 0x01, /* Logical Maximum (1), */
3209     + 0x09, 0x47, /* Usage (Touch Valid), */
3210     + 0x09, 0x42, /* Usage (Tip Switch), */
3211     + 0x95, 0x02, /* Report Count (2), */
3212     + 0x75, 0x01, /* Report Size (1), */
3213     + 0x81, 0x02, /* Input (Variable), */
3214     + 0x95, 0x01, /* Report Count (1), */
3215     + 0x75, 0x03, /* Report Size (3), */
3216     + 0x25, 0x05, /* Logical Maximum (5), */
3217     + 0x09, 0x51, /* Usage (Contact Identifier), */
3218     + 0x81, 0x02, /* Input (Variable), */
3219     + 0x75, 0x01, /* Report Size (1), */
3220     + 0x95, 0x03, /* Report Count (3), */
3221     + 0x81, 0x03, /* Input (Constant, Variable), */
3222     + 0x05, 0x01, /* Usage Page (Desktop), */
3223     + 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
3224     + 0x75, 0x10, /* Report Size (16), */
3225     + 0x09, 0x30, /* Usage (X), */
3226     + 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
3227     + 0x95, 0x01, /* Report Count (1), */
3228     + 0x81, 0x02, /* Input (Variable), */
3229     + 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
3230     + 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
3231     + 0x09, 0x31, /* Usage (Y), */
3232     + 0x81, 0x02, /* Input (Variable), */
3233     + 0xC0, /* End Collection, */
3234     + 0x05, 0x0D, /* Usage Page (Digitizer), */
3235     + 0x55, 0x0C, /* Unit Exponent (12), */
3236     + 0x66, 0x01, 0x10, /* Unit (Seconds), */
3237     + 0x47, 0xFF, 0xFF, 0x00, 0x00,/* Physical Maximum (65535), */
3238     + 0x27, 0xFF, 0xFF, 0x00, 0x00,/* Logical Maximum (65535), */
3239     + 0x75, 0x10, /* Report Size (16), */
3240     + 0x95, 0x01, /* Report Count (1), */
3241     + 0x09, 0x56, /* Usage (Scan Time), */
3242     + 0x81, 0x02, /* Input (Variable), */
3243     + 0x09, 0x54, /* Usage (Contact Count), */
3244     + 0x25, 0x7F, /* Logical Maximum (127), */
3245     + 0x75, 0x08, /* Report Size (8), */
3246     + 0x81, 0x02, /* Input (Variable), */
3247     + 0x05, 0x09, /* Usage Page (Button), */
3248     + 0x09, 0x01, /* Usage (01h), */
3249     + 0x25, 0x01, /* Logical Maximum (1), */
3250     + 0x75, 0x01, /* Report Size (1), */
3251     + 0x95, 0x01, /* Report Count (1), */
3252     + 0x81, 0x02, /* Input (Variable), */
3253     + 0x95, 0x07, /* Report Count (7), */
3254     + 0x81, 0x03, /* Input (Constant, Variable), */
3255     + 0x05, 0x0D, /* Usage Page (Digitizer), */
3256     + 0x85, 0x02, /* Report ID (2), */
3257     + 0x09, 0x55, /* Usage (Contact Count Maximum), */
3258     + 0x09, 0x59, /* Usage (59h), */
3259     + 0x75, 0x04, /* Report Size (4), */
3260     + 0x95, 0x02, /* Report Count (2), */
3261     + 0x25, 0x0F, /* Logical Maximum (15), */
3262     + 0xB1, 0x02, /* Feature (Variable), */
3263     + 0x05, 0x0D, /* Usage Page (Digitizer), */
3264     + 0x85, 0x07, /* Report ID (7), */
3265     + 0x09, 0x60, /* Usage (60h), */
3266     + 0x75, 0x01, /* Report Size (1), */
3267     + 0x95, 0x01, /* Report Count (1), */
3268     + 0x25, 0x01, /* Logical Maximum (1), */
3269     + 0xB1, 0x02, /* Feature (Variable), */
3270     + 0x95, 0x07, /* Report Count (7), */
3271     + 0xB1, 0x03, /* Feature (Constant, Variable), */
3272     + 0x85, 0x06, /* Report ID (6), */
3273     + 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
3274     + 0x09, 0xC5, /* Usage (C5h), */
3275     + 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
3276     + 0x75, 0x08, /* Report Size (8), */
3277     + 0x96, 0x00, 0x01, /* Report Count (256), */
3278     + 0xB1, 0x02, /* Feature (Variable), */
3279     + 0xC0, /* End Collection, */
3280     + 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
3281     + 0x09, 0x01, /* Usage (01h), */
3282     + 0xA1, 0x01, /* Collection (Application), */
3283     + 0x85, 0x0D, /* Report ID (13), */
3284     + 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
3285     + 0x19, 0x01, /* Usage Minimum (01h), */
3286     + 0x29, 0x02, /* Usage Maximum (02h), */
3287     + 0x75, 0x08, /* Report Size (8), */
3288     + 0x95, 0x02, /* Report Count (2), */
3289     + 0xB1, 0x02, /* Feature (Variable), */
3290     + 0xC0, /* End Collection, */
3291     + 0x05, 0x0D, /* Usage Page (Digitizer), */
3292     + 0x09, 0x0E, /* Usage (Configuration), */
3293     + 0xA1, 0x01, /* Collection (Application), */
3294     + 0x85, 0x03, /* Report ID (3), */
3295     + 0x09, 0x22, /* Usage (Finger), */
3296     + 0xA1, 0x02, /* Collection (Logical), */
3297     + 0x09, 0x52, /* Usage (Device Mode), */
3298     + 0x25, 0x0A, /* Logical Maximum (10), */
3299     + 0x95, 0x01, /* Report Count (1), */
3300     + 0xB1, 0x02, /* Feature (Variable), */
3301     + 0xC0, /* End Collection, */
3302     + 0x09, 0x22, /* Usage (Finger), */
3303     + 0xA1, 0x00, /* Collection (Physical), */
3304     + 0x85, 0x05, /* Report ID (5), */
3305     + 0x09, 0x57, /* Usage (57h), */
3306     + 0x09, 0x58, /* Usage (58h), */
3307     + 0x75, 0x01, /* Report Size (1), */
3308     + 0x95, 0x02, /* Report Count (2), */
3309     + 0x25, 0x01, /* Logical Maximum (1), */
3310     + 0xB1, 0x02, /* Feature (Variable), */
3311     + 0x95, 0x06, /* Report Count (6), */
3312     + 0xB1, 0x03, /* Feature (Constant, Variable),*/
3313     + 0xC0, /* End Collection, */
3314     + 0xC0 /* End Collection */
3315     + },
3316     + .hid_report_desc_size = 475,
3317     + .i2c_name = "SYNA3602:00"
3318     +};
3319     +
3320     +
3321     +static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
3322     + {
3323     + .ident = "Teclast F6 Pro",
3324     + .matches = {
3325     + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TECLAST"),
3326     + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "F6 Pro"),
3327     + },
3328     + .driver_data = (void *)&sipodev_desc
3329     + },
3330     + {
3331     + .ident = "Teclast F7",
3332     + .matches = {
3333     + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TECLAST"),
3334     + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "F7"),
3335     + },
3336     + .driver_data = (void *)&sipodev_desc
3337     + },
3338     + {
3339     + .ident = "Trekstor Primebook C13",
3340     + .matches = {
3341     + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
3342     + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C13"),
3343     + },
3344     + .driver_data = (void *)&sipodev_desc
3345     + },
3346     + {
3347     + .ident = "Trekstor Primebook C11",
3348     + .matches = {
3349     + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
3350     + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11"),
3351     + },
3352     + .driver_data = (void *)&sipodev_desc
3353     + },
3354     + {
3355     + .ident = "Direkt-Tek DTLAPY116-2",
3356     + .matches = {
3357     + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
3358     + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY116-2"),
3359     + },
3360     + .driver_data = (void *)&sipodev_desc
3361     + },
3362     + {
3363     + .ident = "Mediacom Flexbook Edge 11",
3364     + .matches = {
3365     + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
3366     + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook edge11 - M-FBE11"),
3367     + },
3368     + .driver_data = (void *)&sipodev_desc
3369     + }
3370     +};
3371     +
3372     +
3373     +struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
3374     +{
3375     + struct i2c_hid_desc_override *override;
3376     + const struct dmi_system_id *system_id;
3377     +
3378     + system_id = dmi_first_match(i2c_hid_dmi_desc_override_table);
3379     + if (!system_id)
3380     + return NULL;
3381     +
3382     + override = system_id->driver_data;
3383     + if (strcmp(override->i2c_name, i2c_name))
3384     + return NULL;
3385     +
3386     + return override->i2c_hid_desc;
3387     +}
3388     +
3389     +char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
3390     + unsigned int *size)
3391     +{
3392     + struct i2c_hid_desc_override *override;
3393     + const struct dmi_system_id *system_id;
3394     +
3395     + system_id = dmi_first_match(i2c_hid_dmi_desc_override_table);
3396     + if (!system_id)
3397     + return NULL;
3398     +
3399     + override = system_id->driver_data;
3400     + if (strcmp(override->i2c_name, i2c_name))
3401     + return NULL;
3402     +
3403     + *size = override->hid_report_desc_size;
3404     + return override->hid_report_desc;
3405     +}
3406     diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
3407     deleted file mode 100644
3408     index 88daa388e1f6..000000000000
3409     --- a/drivers/hid/i2c-hid/i2c-hid.c
3410     +++ /dev/null
3411     @@ -1,1328 +0,0 @@
3412     -/*
3413     - * HID over I2C protocol implementation
3414     - *
3415     - * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
3416     - * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
3417     - * Copyright (c) 2012 Red Hat, Inc
3418     - *
3419     - * This code is partly based on "USB HID support for Linux":
3420     - *
3421     - * Copyright (c) 1999 Andreas Gal
3422     - * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
3423     - * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
3424     - * Copyright (c) 2007-2008 Oliver Neukum
3425     - * Copyright (c) 2006-2010 Jiri Kosina
3426     - *
3427     - * This file is subject to the terms and conditions of the GNU General Public
3428     - * License. See the file COPYING in the main directory of this archive for
3429     - * more details.
3430     - */
3431     -
3432     -#include <linux/module.h>
3433     -#include <linux/i2c.h>
3434     -#include <linux/interrupt.h>
3435     -#include <linux/input.h>
3436     -#include <linux/irq.h>
3437     -#include <linux/delay.h>
3438     -#include <linux/slab.h>
3439     -#include <linux/pm.h>
3440     -#include <linux/pm_runtime.h>
3441     -#include <linux/device.h>
3442     -#include <linux/wait.h>
3443     -#include <linux/err.h>
3444     -#include <linux/string.h>
3445     -#include <linux/list.h>
3446     -#include <linux/jiffies.h>
3447     -#include <linux/kernel.h>
3448     -#include <linux/hid.h>
3449     -#include <linux/mutex.h>
3450     -#include <linux/acpi.h>
3451     -#include <linux/of.h>
3452     -#include <linux/regulator/consumer.h>
3453     -
3454     -#include <linux/platform_data/i2c-hid.h>
3455     -
3456     -#include "../hid-ids.h"
3457     -
3458     -/* quirks to control the device */
3459     -#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
3460     -#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
3461     -#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2)
3462     -#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP BIT(3)
3463     -
3464     -/* flags */
3465     -#define I2C_HID_STARTED 0
3466     -#define I2C_HID_RESET_PENDING 1
3467     -#define I2C_HID_READ_PENDING 2
3468     -
3469     -#define I2C_HID_PWR_ON 0x00
3470     -#define I2C_HID_PWR_SLEEP 0x01
3471     -
3472     -/* debug option */
3473     -static bool debug;
3474     -module_param(debug, bool, 0444);
3475     -MODULE_PARM_DESC(debug, "print a lot of debug information");
3476     -
3477     -#define i2c_hid_dbg(ihid, fmt, arg...) \
3478     -do { \
3479     - if (debug) \
3480     - dev_printk(KERN_DEBUG, &(ihid)->client->dev, fmt, ##arg); \
3481     -} while (0)
3482     -
3483     -struct i2c_hid_desc {
3484     - __le16 wHIDDescLength;
3485     - __le16 bcdVersion;
3486     - __le16 wReportDescLength;
3487     - __le16 wReportDescRegister;
3488     - __le16 wInputRegister;
3489     - __le16 wMaxInputLength;
3490     - __le16 wOutputRegister;
3491     - __le16 wMaxOutputLength;
3492     - __le16 wCommandRegister;
3493     - __le16 wDataRegister;
3494     - __le16 wVendorID;
3495     - __le16 wProductID;
3496     - __le16 wVersionID;
3497     - __le32 reserved;
3498     -} __packed;
3499     -
3500     -struct i2c_hid_cmd {
3501     - unsigned int registerIndex;
3502     - __u8 opcode;
3503     - unsigned int length;
3504     - bool wait;
3505     -};
3506     -
3507     -union command {
3508     - u8 data[0];
3509     - struct cmd {
3510     - __le16 reg;
3511     - __u8 reportTypeID;
3512     - __u8 opcode;
3513     - } __packed c;
3514     -};
3515     -
3516     -#define I2C_HID_CMD(opcode_) \
3517     - .opcode = opcode_, .length = 4, \
3518     - .registerIndex = offsetof(struct i2c_hid_desc, wCommandRegister)
3519     -
3520     -/* fetch HID descriptor */
3521     -static const struct i2c_hid_cmd hid_descr_cmd = { .length = 2 };
3522     -/* fetch report descriptors */
3523     -static const struct i2c_hid_cmd hid_report_descr_cmd = {
3524     - .registerIndex = offsetof(struct i2c_hid_desc,
3525     - wReportDescRegister),
3526     - .opcode = 0x00,
3527     - .length = 2 };
3528     -/* commands */
3529     -static const struct i2c_hid_cmd hid_reset_cmd = { I2C_HID_CMD(0x01),
3530     - .wait = true };
3531     -static const struct i2c_hid_cmd hid_get_report_cmd = { I2C_HID_CMD(0x02) };
3532     -static const struct i2c_hid_cmd hid_set_report_cmd = { I2C_HID_CMD(0x03) };
3533     -static const struct i2c_hid_cmd hid_set_power_cmd = { I2C_HID_CMD(0x08) };
3534     -static const struct i2c_hid_cmd hid_no_cmd = { .length = 0 };
3535     -
3536     -/*
3537     - * These definitions are not used here, but are defined by the spec.
3538     - * Keeping them here for documentation purposes.
3539     - *
3540     - * static const struct i2c_hid_cmd hid_get_idle_cmd = { I2C_HID_CMD(0x04) };
3541     - * static const struct i2c_hid_cmd hid_set_idle_cmd = { I2C_HID_CMD(0x05) };
3542     - * static const struct i2c_hid_cmd hid_get_protocol_cmd = { I2C_HID_CMD(0x06) };
3543     - * static const struct i2c_hid_cmd hid_set_protocol_cmd = { I2C_HID_CMD(0x07) };
3544     - */
3545     -
3546     -/* The main device structure */
3547     -struct i2c_hid {
3548     - struct i2c_client *client; /* i2c client */
3549     - struct hid_device *hid; /* pointer to corresponding HID dev */
3550     - union {
3551     - __u8 hdesc_buffer[sizeof(struct i2c_hid_desc)];
3552     - struct i2c_hid_desc hdesc; /* the HID Descriptor */
3553     - };
3554     - __le16 wHIDDescRegister; /* location of the i2c
3555     - * register of the HID
3556     - * descriptor. */
3557     - unsigned int bufsize; /* i2c buffer size */
3558     - u8 *inbuf; /* Input buffer */
3559     - u8 *rawbuf; /* Raw Input buffer */
3560     - u8 *cmdbuf; /* Command buffer */
3561     - u8 *argsbuf; /* Command arguments buffer */
3562     -
3563     - unsigned long flags; /* device flags */
3564     - unsigned long quirks; /* Various quirks */
3565     -
3566     - wait_queue_head_t wait; /* For waiting the interrupt */
3567     -
3568     - struct i2c_hid_platform_data pdata;
3569     -
3570     - bool irq_wake_enabled;
3571     - struct mutex reset_lock;
3572     -
3573     - unsigned long sleep_delay;
3574     -};
3575     -
3576     -static const struct i2c_hid_quirks {
3577     - __u16 idVendor;
3578     - __u16 idProduct;
3579     - __u32 quirks;
3580     -} i2c_hid_quirks[] = {
3581     - { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8752,
3582     - I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
3583     - { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
3584     - I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
3585     - { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
3586     - I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
3587     - I2C_HID_QUIRK_NO_RUNTIME_PM },
3588     - { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33,
3589     - I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
3590     - { 0, 0 }
3591     -};
3592     -
3593     -/*
3594     - * i2c_hid_lookup_quirk: return any quirks associated with a I2C HID device
3595     - * @idVendor: the 16-bit vendor ID
3596     - * @idProduct: the 16-bit product ID
3597     - *
3598     - * Returns: a u32 quirks value.
3599     - */
3600     -static u32 i2c_hid_lookup_quirk(const u16 idVendor, const u16 idProduct)
3601     -{
3602     - u32 quirks = 0;
3603     - int n;
3604     -
3605     - for (n = 0; i2c_hid_quirks[n].idVendor; n++)
3606     - if (i2c_hid_quirks[n].idVendor == idVendor &&
3607     - (i2c_hid_quirks[n].idProduct == (__u16)HID_ANY_ID ||
3608     - i2c_hid_quirks[n].idProduct == idProduct))
3609     - quirks = i2c_hid_quirks[n].quirks;
3610     -
3611     - return quirks;
3612     -}
3613     -
3614     -static int __i2c_hid_command(struct i2c_client *client,
3615     - const struct i2c_hid_cmd *command, u8 reportID,
3616     - u8 reportType, u8 *args, int args_len,
3617     - unsigned char *buf_recv, int data_len)
3618     -{
3619     - struct i2c_hid *ihid = i2c_get_clientdata(client);
3620     - union command *cmd = (union command *)ihid->cmdbuf;
3621     - int ret;
3622     - struct i2c_msg msg[2];
3623     - int msg_num = 1;
3624     -
3625     - int length = command->length;
3626     - bool wait = command->wait;
3627     - unsigned int registerIndex = command->registerIndex;
3628     -
3629     - /* special case for hid_descr_cmd */
3630     - if (command == &hid_descr_cmd) {
3631     - cmd->c.reg = ihid->wHIDDescRegister;
3632     - } else {
3633     - cmd->data[0] = ihid->hdesc_buffer[registerIndex];
3634     - cmd->data[1] = ihid->hdesc_buffer[registerIndex + 1];
3635     - }
3636     -
3637     - if (length > 2) {
3638     - cmd->c.opcode = command->opcode;
3639     - cmd->c.reportTypeID = reportID | reportType << 4;
3640     - }
3641     -
3642     - memcpy(cmd->data + length, args, args_len);
3643     - length += args_len;
3644     -
3645     - i2c_hid_dbg(ihid, "%s: cmd=%*ph\n", __func__, length, cmd->data);
3646     -
3647     - msg[0].addr = client->addr;
3648     - msg[0].flags = client->flags & I2C_M_TEN;
3649     - msg[0].len = length;
3650     - msg[0].buf = cmd->data;
3651     - if (data_len > 0) {
3652     - msg[1].addr = client->addr;
3653     - msg[1].flags = client->flags & I2C_M_TEN;
3654     - msg[1].flags |= I2C_M_RD;
3655     - msg[1].len = data_len;
3656     - msg[1].buf = buf_recv;
3657     - msg_num = 2;
3658     - set_bit(I2C_HID_READ_PENDING, &ihid->flags);
3659     - }
3660     -
3661     - if (wait)
3662     - set_bit(I2C_HID_RESET_PENDING, &ihid->flags);
3663     -
3664     - ret = i2c_transfer(client->adapter, msg, msg_num);
3665     -
3666     - if (data_len > 0)
3667     - clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
3668     -
3669     - if (ret != msg_num)
3670     - return ret < 0 ? ret : -EIO;
3671     -
3672     - ret = 0;
3673     -
3674     - if (wait && (ihid->quirks & I2C_HID_QUIRK_NO_IRQ_AFTER_RESET)) {
3675     - msleep(100);
3676     - } else if (wait) {
3677     - i2c_hid_dbg(ihid, "%s: waiting...\n", __func__);
3678     - if (!wait_event_timeout(ihid->wait,
3679     - !test_bit(I2C_HID_RESET_PENDING, &ihid->flags),
3680     - msecs_to_jiffies(5000)))
3681     - ret = -ENODATA;
3682     - i2c_hid_dbg(ihid, "%s: finished.\n", __func__);
3683     - }
3684     -
3685     - return ret;
3686     -}
3687     -
3688     -static int i2c_hid_command(struct i2c_client *client,
3689     - const struct i2c_hid_cmd *command,
3690     - unsigned char *buf_recv, int data_len)
3691     -{
3692     - return __i2c_hid_command(client, command, 0, 0, NULL, 0,
3693     - buf_recv, data_len);
3694     -}
3695     -
3696     -static int i2c_hid_get_report(struct i2c_client *client, u8 reportType,
3697     - u8 reportID, unsigned char *buf_recv, int data_len)
3698     -{
3699     - struct i2c_hid *ihid = i2c_get_clientdata(client);
3700     - u8 args[3];
3701     - int ret;
3702     - int args_len = 0;
3703     - u16 readRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
3704     -
3705     - i2c_hid_dbg(ihid, "%s\n", __func__);
3706     -
3707     - if (reportID >= 0x0F) {
3708     - args[args_len++] = reportID;
3709     - reportID = 0x0F;
3710     - }
3711     -
3712     - args[args_len++] = readRegister & 0xFF;
3713     - args[args_len++] = readRegister >> 8;
3714     -
3715     - ret = __i2c_hid_command(client, &hid_get_report_cmd, reportID,
3716     - reportType, args, args_len, buf_recv, data_len);
3717     - if (ret) {
3718     - dev_err(&client->dev,
3719     - "failed to retrieve report from device.\n");
3720     - return ret;
3721     - }
3722     -
3723     - return 0;
3724     -}
3725     -
3726     -/**
3727     - * i2c_hid_set_or_send_report: forward an incoming report to the device
3728     - * @client: the i2c_client of the device
3729     - * @reportType: 0x03 for HID_FEATURE_REPORT ; 0x02 for HID_OUTPUT_REPORT
3730     - * @reportID: the report ID
3731     - * @buf: the actual data to transfer, without the report ID
3732     - * @len: size of buf
3733     - * @use_data: true: use SET_REPORT HID command, false: send plain OUTPUT report
3734     - */
3735     -static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
3736     - u8 reportID, unsigned char *buf, size_t data_len, bool use_data)
3737     -{
3738     - struct i2c_hid *ihid = i2c_get_clientdata(client);
3739     - u8 *args = ihid->argsbuf;
3740     - const struct i2c_hid_cmd *hidcmd;
3741     - int ret;
3742     - u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
3743     - u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
3744     - u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
3745     - u16 size;
3746     - int args_len;
3747     - int index = 0;
3748     -
3749     - i2c_hid_dbg(ihid, "%s\n", __func__);
3750     -
3751     - if (data_len > ihid->bufsize)
3752     - return -EINVAL;
3753     -
3754     - size = 2 /* size */ +
3755     - (reportID ? 1 : 0) /* reportID */ +
3756     - data_len /* buf */;
3757     - args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
3758     - 2 /* dataRegister */ +
3759     - size /* args */;
3760     -
3761     - if (!use_data && maxOutputLength == 0)
3762     - return -ENOSYS;
3763     -
3764     - if (reportID >= 0x0F) {
3765     - args[index++] = reportID;
3766     - reportID = 0x0F;
3767     - }
3768     -
3769     - /*
3770     - * use the data register for feature reports or if the device does not
3771     - * support the output register
3772     - */
3773     - if (use_data) {
3774     - args[index++] = dataRegister & 0xFF;
3775     - args[index++] = dataRegister >> 8;
3776     - hidcmd = &hid_set_report_cmd;
3777     - } else {
3778     - args[index++] = outputRegister & 0xFF;
3779     - args[index++] = outputRegister >> 8;
3780     - hidcmd = &hid_no_cmd;
3781     - }
3782     -
3783     - args[index++] = size & 0xFF;
3784     - args[index++] = size >> 8;
3785     -
3786     - if (reportID)
3787     - args[index++] = reportID;
3788     -
3789     - memcpy(&args[index], buf, data_len);
3790     -
3791     - ret = __i2c_hid_command(client, hidcmd, reportID,
3792     - reportType, args, args_len, NULL, 0);
3793     - if (ret) {
3794     - dev_err(&client->dev, "failed to set a report to device.\n");
3795     - return ret;
3796     - }
3797     -
3798     - return data_len;
3799     -}
3800     -
3801     -static int i2c_hid_set_power(struct i2c_client *client, int power_state)
3802     -{
3803     - struct i2c_hid *ihid = i2c_get_clientdata(client);
3804     - int ret;
3805     - unsigned long now, delay;
3806     -
3807     - i2c_hid_dbg(ihid, "%s\n", __func__);
3808     -
3809     - /*
3810     - * Some devices require to send a command to wakeup before power on.
3811     - * The call will get a return value (EREMOTEIO) but device will be
3812     - * triggered and activated. After that, it goes like a normal device.
3813     - */
3814     - if (power_state == I2C_HID_PWR_ON &&
3815     - ihid->quirks & I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV) {
3816     - ret = i2c_hid_command(client, &hid_set_power_cmd, NULL, 0);
3817     -
3818     - /* Device was already activated */
3819     - if (!ret)
3820     - goto set_pwr_exit;
3821     - }
3822     -
3823     - if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
3824     - power_state == I2C_HID_PWR_ON) {
3825     - now = jiffies;
3826     - if (time_after(ihid->sleep_delay, now)) {
3827     - delay = jiffies_to_usecs(ihid->sleep_delay - now);
3828     - usleep_range(delay, delay + 1);
3829     - }
3830     - }
3831     -
3832     - ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
3833     - 0, NULL, 0, NULL, 0);
3834     -
3835     - if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
3836     - power_state == I2C_HID_PWR_SLEEP)
3837     - ihid->sleep_delay = jiffies + msecs_to_jiffies(20);
3838     -
3839     - if (ret)
3840     - dev_err(&client->dev, "failed to change power setting.\n");
3841     -
3842     -set_pwr_exit:
3843     - return ret;
3844     -}
3845     -
3846     -static int i2c_hid_hwreset(struct i2c_client *client)
3847     -{
3848     - struct i2c_hid *ihid = i2c_get_clientdata(client);
3849     - int ret;
3850     -
3851     - i2c_hid_dbg(ihid, "%s\n", __func__);
3852     -
3853     - /*
3854     - * This prevents sending feature reports while the device is
3855     - * being reset. Otherwise we may lose the reset complete
3856     - * interrupt.
3857     - */
3858     - mutex_lock(&ihid->reset_lock);
3859     -
3860     - ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
3861     - if (ret)
3862     - goto out_unlock;
3863     -
3864     - /*
3865     - * The HID over I2C specification states that if a DEVICE needs time
3866     - * after the PWR_ON request, it should utilise CLOCK stretching.
3867     - * However, it has been observered that the Windows driver provides a
3868     - * 1ms sleep between the PWR_ON and RESET requests and that some devices
3869     - * rely on this.
3870     - */
3871     - usleep_range(1000, 5000);
3872     -
3873     - i2c_hid_dbg(ihid, "resetting...\n");
3874     -
3875     - ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
3876     - if (ret) {
3877     - dev_err(&client->dev, "failed to reset device.\n");
3878     - i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
3879     - }
3880     -
3881     -out_unlock:
3882     - mutex_unlock(&ihid->reset_lock);
3883     - return ret;
3884     -}
3885     -
3886     -static void i2c_hid_get_input(struct i2c_hid *ihid)
3887     -{
3888     - int ret;
3889     - u32 ret_size;
3890     - int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
3891     -
3892     - if (size > ihid->bufsize)
3893     - size = ihid->bufsize;
3894     -
3895     - ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
3896     - if (ret != size) {
3897     - if (ret < 0)
3898     - return;
3899     -
3900     - dev_err(&ihid->client->dev, "%s: got %d data instead of %d\n",
3901     - __func__, ret, size);
3902     - return;
3903     - }
3904     -
3905     - ret_size = ihid->inbuf[0] | ihid->inbuf[1] << 8;
3906     -
3907     - if (!ret_size) {
3908     - /* host or device initiated RESET completed */
3909     - if (test_and_clear_bit(I2C_HID_RESET_PENDING, &ihid->flags))
3910     - wake_up(&ihid->wait);
3911     - return;
3912     - }
3913     -
3914     - if ((ret_size > size) || (ret_size < 2)) {
3915     - dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
3916     - __func__, size, ret_size);
3917     - return;
3918     - }
3919     -
3920     - i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf);
3921     -
3922     - if (test_bit(I2C_HID_STARTED, &ihid->flags))
3923     - hid_input_report(ihid->hid, HID_INPUT_REPORT, ihid->inbuf + 2,
3924     - ret_size - 2, 1);
3925     -
3926     - return;
3927     -}
3928     -
3929     -static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
3930     -{
3931     - struct i2c_hid *ihid = dev_id;
3932     -
3933     - if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
3934     - return IRQ_HANDLED;
3935     -
3936     - i2c_hid_get_input(ihid);
3937     -
3938     - return IRQ_HANDLED;
3939     -}
3940     -
3941     -static int i2c_hid_get_report_length(struct hid_report *report)
3942     -{
3943     - return ((report->size - 1) >> 3) + 1 +
3944     - report->device->report_enum[report->type].numbered + 2;
3945     -}
3946     -
3947     -/*
3948     - * Traverse the supplied list of reports and find the longest
3949     - */
3950     -static void i2c_hid_find_max_report(struct hid_device *hid, unsigned int type,
3951     - unsigned int *max)
3952     -{
3953     - struct hid_report *report;
3954     - unsigned int size;
3955     -
3956     - /* We should not rely on wMaxInputLength, as some devices may set it to
3957     - * a wrong length. */
3958     - list_for_each_entry(report, &hid->report_enum[type].report_list, list) {
3959     - size = i2c_hid_get_report_length(report);
3960     - if (*max < size)
3961     - *max = size;
3962     - }
3963     -}
3964     -
3965     -static void i2c_hid_free_buffers(struct i2c_hid *ihid)
3966     -{
3967     - kfree(ihid->inbuf);
3968     - kfree(ihid->rawbuf);
3969     - kfree(ihid->argsbuf);
3970     - kfree(ihid->cmdbuf);
3971     - ihid->inbuf = NULL;
3972     - ihid->rawbuf = NULL;
3973     - ihid->cmdbuf = NULL;
3974     - ihid->argsbuf = NULL;
3975     - ihid->bufsize = 0;
3976     -}
3977     -
3978     -static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
3979     -{
3980     - /* the worst case is computed from the set_report command with a
3981     - * reportID > 15 and the maximum report length */
3982     - int args_len = sizeof(__u8) + /* ReportID */
3983     - sizeof(__u8) + /* optional ReportID byte */
3984     - sizeof(__u16) + /* data register */
3985     - sizeof(__u16) + /* size of the report */
3986     - report_size; /* report */
3987     -
3988     - ihid->inbuf = kzalloc(report_size, GFP_KERNEL);
3989     - ihid->rawbuf = kzalloc(report_size, GFP_KERNEL);
3990     - ihid->argsbuf = kzalloc(args_len, GFP_KERNEL);
3991     - ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL);
3992     -
3993     - if (!ihid->inbuf || !ihid->rawbuf || !ihid->argsbuf || !ihid->cmdbuf) {
3994     - i2c_hid_free_buffers(ihid);
3995     - return -ENOMEM;
3996     - }
3997     -
3998     - ihid->bufsize = report_size;
3999     -
4000     - return 0;
4001     -}
4002     -
4003     -static int i2c_hid_get_raw_report(struct hid_device *hid,
4004     - unsigned char report_number, __u8 *buf, size_t count,
4005     - unsigned char report_type)
4006     -{
4007     - struct i2c_client *client = hid->driver_data;
4008     - struct i2c_hid *ihid = i2c_get_clientdata(client);
4009     - size_t ret_count, ask_count;
4010     - int ret;
4011     -
4012     - if (report_type == HID_OUTPUT_REPORT)
4013     - return -EINVAL;
4014     -
4015     - /* +2 bytes to include the size of the reply in the query buffer */
4016     - ask_count = min(count + 2, (size_t)ihid->bufsize);
4017     -
4018     - ret = i2c_hid_get_report(client,
4019     - report_type == HID_FEATURE_REPORT ? 0x03 : 0x01,
4020     - report_number, ihid->rawbuf, ask_count);
4021     -
4022     - if (ret < 0)
4023     - return ret;
4024     -
4025     - ret_count = ihid->rawbuf[0] | (ihid->rawbuf[1] << 8);
4026     -
4027     - if (ret_count <= 2)
4028     - return 0;
4029     -
4030     - ret_count = min(ret_count, ask_count);
4031     -
4032     - /* The query buffer contains the size, dropping it in the reply */
4033     - count = min(count, ret_count - 2);
4034     - memcpy(buf, ihid->rawbuf + 2, count);
4035     -
4036     - return count;
4037     -}
4038     -
4039     -static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
4040     - size_t count, unsigned char report_type, bool use_data)
4041     -{
4042     - struct i2c_client *client = hid->driver_data;
4043     - struct i2c_hid *ihid = i2c_get_clientdata(client);
4044     - int report_id = buf[0];
4045     - int ret;
4046     -
4047     - if (report_type == HID_INPUT_REPORT)
4048     - return -EINVAL;
4049     -
4050     - mutex_lock(&ihid->reset_lock);
4051     -
4052     - if (report_id) {
4053     - buf++;
4054     - count--;
4055     - }
4056     -
4057     - ret = i2c_hid_set_or_send_report(client,
4058     - report_type == HID_FEATURE_REPORT ? 0x03 : 0x02,
4059     - report_id, buf, count, use_data);
4060     -
4061     - if (report_id && ret >= 0)
4062     - ret++; /* add report_id to the number of transfered bytes */
4063     -
4064     - mutex_unlock(&ihid->reset_lock);
4065     -
4066     - return ret;
4067     -}
4068     -
4069     -static int i2c_hid_output_report(struct hid_device *hid, __u8 *buf,
4070     - size_t count)
4071     -{
4072     - return i2c_hid_output_raw_report(hid, buf, count, HID_OUTPUT_REPORT,
4073     - false);
4074     -}
4075     -
4076     -static int i2c_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
4077     - __u8 *buf, size_t len, unsigned char rtype,
4078     - int reqtype)
4079     -{
4080     - switch (reqtype) {
4081     - case HID_REQ_GET_REPORT:
4082     - return i2c_hid_get_raw_report(hid, reportnum, buf, len, rtype);
4083     - case HID_REQ_SET_REPORT:
4084     - if (buf[0] != reportnum)
4085     - return -EINVAL;
4086     - return i2c_hid_output_raw_report(hid, buf, len, rtype, true);
4087     - default:
4088     - return -EIO;
4089     - }
4090     -}
4091     -
4092     -static int i2c_hid_parse(struct hid_device *hid)
4093     -{
4094     - struct i2c_client *client = hid->driver_data;
4095     - struct i2c_hid *ihid = i2c_get_clientdata(client);
4096     - struct i2c_hid_desc *hdesc = &ihid->hdesc;
4097     - unsigned int rsize;
4098     - char *rdesc;
4099     - int ret;
4100     - int tries = 3;
4101     -
4102     - i2c_hid_dbg(ihid, "entering %s\n", __func__);
4103     -
4104     - rsize = le16_to_cpu(hdesc->wReportDescLength);
4105     - if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
4106     - dbg_hid("weird size of report descriptor (%u)\n", rsize);
4107     - return -EINVAL;
4108     - }
4109     -
4110     - do {
4111     - ret = i2c_hid_hwreset(client);
4112     - if (ret)
4113     - msleep(1000);
4114     - } while (tries-- > 0 && ret);
4115     -
4116     - if (ret)
4117     - return ret;
4118     -
4119     - rdesc = kzalloc(rsize, GFP_KERNEL);
4120     -
4121     - if (!rdesc) {
4122     - dbg_hid("couldn't allocate rdesc memory\n");
4123     - return -ENOMEM;
4124     - }
4125     -
4126     - i2c_hid_dbg(ihid, "asking HID report descriptor\n");
4127     -
4128     - ret = i2c_hid_command(client, &hid_report_descr_cmd, rdesc, rsize);
4129     - if (ret) {
4130     - hid_err(hid, "reading report descriptor failed\n");
4131     - kfree(rdesc);
4132     - return -EIO;
4133     - }
4134     -
4135     - i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
4136     -
4137     - ret = hid_parse_report(hid, rdesc, rsize);
4138     - kfree(rdesc);
4139     - if (ret) {
4140     - dbg_hid("parsing report descriptor failed\n");
4141     - return ret;
4142     - }
4143     -
4144     - return 0;
4145     -}
4146     -
4147     -static int i2c_hid_start(struct hid_device *hid)
4148     -{
4149     - struct i2c_client *client = hid->driver_data;
4150     - struct i2c_hid *ihid = i2c_get_clientdata(client);
4151     - int ret;
4152     - unsigned int bufsize = HID_MIN_BUFFER_SIZE;
4153     -
4154     - i2c_hid_find_max_report(hid, HID_INPUT_REPORT, &bufsize);
4155     - i2c_hid_find_max_report(hid, HID_OUTPUT_REPORT, &bufsize);
4156     - i2c_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize);
4157     -
4158     - if (bufsize > ihid->bufsize) {
4159     - disable_irq(client->irq);
4160     - i2c_hid_free_buffers(ihid);
4161     -
4162     - ret = i2c_hid_alloc_buffers(ihid, bufsize);
4163     - enable_irq(client->irq);
4164     -
4165     - if (ret)
4166     - return ret;
4167     - }
4168     -
4169     - return 0;
4170     -}
4171     -
4172     -static void i2c_hid_stop(struct hid_device *hid)
4173     -{
4174     - hid->claimed = 0;
4175     -}
4176     -
4177     -static int i2c_hid_open(struct hid_device *hid)
4178     -{
4179     - struct i2c_client *client = hid->driver_data;
4180     - struct i2c_hid *ihid = i2c_get_clientdata(client);
4181     - int ret = 0;
4182     -
4183     - ret = pm_runtime_get_sync(&client->dev);
4184     - if (ret < 0)
4185     - return ret;
4186     -
4187     - set_bit(I2C_HID_STARTED, &ihid->flags);
4188     - return 0;
4189     -}
4190     -
4191     -static void i2c_hid_close(struct hid_device *hid)
4192     -{
4193     - struct i2c_client *client = hid->driver_data;
4194     - struct i2c_hid *ihid = i2c_get_clientdata(client);
4195     -
4196     - clear_bit(I2C_HID_STARTED, &ihid->flags);
4197     -
4198     - /* Save some power */
4199     - pm_runtime_put(&client->dev);
4200     -}
4201     -
4202     -static int i2c_hid_power(struct hid_device *hid, int lvl)
4203     -{
4204     - struct i2c_client *client = hid->driver_data;
4205     - struct i2c_hid *ihid = i2c_get_clientdata(client);
4206     -
4207     - i2c_hid_dbg(ihid, "%s lvl:%d\n", __func__, lvl);
4208     -
4209     - switch (lvl) {
4210     - case PM_HINT_FULLON:
4211     - pm_runtime_get_sync(&client->dev);
4212     - break;
4213     - case PM_HINT_NORMAL:
4214     - pm_runtime_put(&client->dev);
4215     - break;
4216     - }
4217     - return 0;
4218     -}
4219     -
4220     -struct hid_ll_driver i2c_hid_ll_driver = {
4221     - .parse = i2c_hid_parse,
4222     - .start = i2c_hid_start,
4223     - .stop = i2c_hid_stop,
4224     - .open = i2c_hid_open,
4225     - .close = i2c_hid_close,
4226     - .power = i2c_hid_power,
4227     - .output_report = i2c_hid_output_report,
4228     - .raw_request = i2c_hid_raw_request,
4229     -};
4230     -EXPORT_SYMBOL_GPL(i2c_hid_ll_driver);
4231     -
4232     -static int i2c_hid_init_irq(struct i2c_client *client)
4233     -{
4234     - struct i2c_hid *ihid = i2c_get_clientdata(client);
4235     - unsigned long irqflags = 0;
4236     - int ret;
4237     -
4238     - dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
4239     -
4240     - if (!irq_get_trigger_type(client->irq))
4241     - irqflags = IRQF_TRIGGER_LOW;
4242     -
4243     - ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
4244     - irqflags | IRQF_ONESHOT, client->name, ihid);
4245     - if (ret < 0) {
4246     - dev_warn(&client->dev,
4247     - "Could not register for %s interrupt, irq = %d,"
4248     - " ret = %d\n",
4249     - client->name, client->irq, ret);
4250     -
4251     - return ret;
4252     - }
4253     -
4254     - return 0;
4255     -}
4256     -
4257     -static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
4258     -{
4259     - struct i2c_client *client = ihid->client;
4260     - struct i2c_hid_desc *hdesc = &ihid->hdesc;
4261     - unsigned int dsize;
4262     - int ret;
4263     -
4264     - /* i2c hid fetch using a fixed descriptor size (30 bytes) */
4265     - i2c_hid_dbg(ihid, "Fetching the HID descriptor\n");
4266     - ret = i2c_hid_command(client, &hid_descr_cmd, ihid->hdesc_buffer,
4267     - sizeof(struct i2c_hid_desc));
4268     - if (ret) {
4269     - dev_err(&client->dev, "hid_descr_cmd failed\n");
4270     - return -ENODEV;
4271     - }
4272     -
4273     - /* Validate the length of HID descriptor, the 4 first bytes:
4274     - * bytes 0-1 -> length
4275     - * bytes 2-3 -> bcdVersion (has to be 1.00) */
4276     - /* check bcdVersion == 1.0 */
4277     - if (le16_to_cpu(hdesc->bcdVersion) != 0x0100) {
4278     - dev_err(&client->dev,
4279     - "unexpected HID descriptor bcdVersion (0x%04hx)\n",
4280     - le16_to_cpu(hdesc->bcdVersion));
4281     - return -ENODEV;
4282     - }
4283     -
4284     - /* Descriptor length should be 30 bytes as per the specification */
4285     - dsize = le16_to_cpu(hdesc->wHIDDescLength);
4286     - if (dsize != sizeof(struct i2c_hid_desc)) {
4287     - dev_err(&client->dev, "weird size of HID descriptor (%u)\n",
4288     - dsize);
4289     - return -ENODEV;
4290     - }
4291     - i2c_hid_dbg(ihid, "HID Descriptor: %*ph\n", dsize, ihid->hdesc_buffer);
4292     - return 0;
4293     -}
4294     -
4295     -#ifdef CONFIG_ACPI
4296     -static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
4297     - /*
4298     - * The CHPN0001 ACPI device, which is used to describe the Chipone
4299     - * ICN8505 controller, has a _CID of PNP0C50 but is not HID compatible.
4300     - */
4301     - {"CHPN0001", 0 },
4302     - { },
4303     -};
4304     -
4305     -static int i2c_hid_acpi_pdata(struct i2c_client *client,
4306     - struct i2c_hid_platform_data *pdata)
4307     -{
4308     - static guid_t i2c_hid_guid =
4309     - GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
4310     - 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
4311     - union acpi_object *obj;
4312     - struct acpi_device *adev;
4313     - acpi_handle handle;
4314     -
4315     - handle = ACPI_HANDLE(&client->dev);
4316     - if (!handle || acpi_bus_get_device(handle, &adev)) {
4317     - dev_err(&client->dev, "Error could not get ACPI device\n");
4318     - return -ENODEV;
4319     - }
4320     -
4321     - if (acpi_match_device_ids(adev, i2c_hid_acpi_blacklist) == 0)
4322     - return -ENODEV;
4323     -
4324     - obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL,
4325     - ACPI_TYPE_INTEGER);
4326     - if (!obj) {
4327     - dev_err(&client->dev, "Error _DSM call to get HID descriptor address failed\n");
4328     - return -ENODEV;
4329     - }
4330     -
4331     - pdata->hid_descriptor_address = obj->integer.value;
4332     - ACPI_FREE(obj);
4333     -
4334     - return 0;
4335     -}
4336     -
4337     -static void i2c_hid_acpi_fix_up_power(struct device *dev)
4338     -{
4339     - struct acpi_device *adev;
4340     -
4341     - adev = ACPI_COMPANION(dev);
4342     - if (adev)
4343     - acpi_device_fix_up_power(adev);
4344     -}
4345     -
4346     -static const struct acpi_device_id i2c_hid_acpi_match[] = {
4347     - {"ACPI0C50", 0 },
4348     - {"PNP0C50", 0 },
4349     - { },
4350     -};
4351     -MODULE_DEVICE_TABLE(acpi, i2c_hid_acpi_match);
4352     -#else
4353     -static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
4354     - struct i2c_hid_platform_data *pdata)
4355     -{
4356     - return -ENODEV;
4357     -}
4358     -
4359     -static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {}
4360     -#endif
4361     -
4362     -#ifdef CONFIG_OF
4363     -static int i2c_hid_of_probe(struct i2c_client *client,
4364     - struct i2c_hid_platform_data *pdata)
4365     -{
4366     - struct device *dev = &client->dev;
4367     - u32 val;
4368     - int ret;
4369     -
4370     - ret = of_property_read_u32(dev->of_node, "hid-descr-addr", &val);
4371     - if (ret) {
4372     - dev_err(&client->dev, "HID register address not provided\n");
4373     - return -ENODEV;
4374     - }
4375     - if (val >> 16) {
4376     - dev_err(&client->dev, "Bad HID register address: 0x%08x\n",
4377     - val);
4378     - return -EINVAL;
4379     - }
4380     - pdata->hid_descriptor_address = val;
4381     -
4382     - return 0;
4383     -}
4384     -
4385     -static const struct of_device_id i2c_hid_of_match[] = {
4386     - { .compatible = "hid-over-i2c" },
4387     - {},
4388     -};
4389     -MODULE_DEVICE_TABLE(of, i2c_hid_of_match);
4390     -#else
4391     -static inline int i2c_hid_of_probe(struct i2c_client *client,
4392     - struct i2c_hid_platform_data *pdata)
4393     -{
4394     - return -ENODEV;
4395     -}
4396     -#endif
4397     -
4398     -static void i2c_hid_fwnode_probe(struct i2c_client *client,
4399     - struct i2c_hid_platform_data *pdata)
4400     -{
4401     - u32 val;
4402     -
4403     - if (!device_property_read_u32(&client->dev, "post-power-on-delay-ms",
4404     - &val))
4405     - pdata->post_power_delay_ms = val;
4406     -}
4407     -
4408     -static int i2c_hid_probe(struct i2c_client *client,
4409     - const struct i2c_device_id *dev_id)
4410     -{
4411     - int ret;
4412     - struct i2c_hid *ihid;
4413     - struct hid_device *hid;
4414     - __u16 hidRegister;
4415     - struct i2c_hid_platform_data *platform_data = client->dev.platform_data;
4416     -
4417     - dbg_hid("HID probe called for i2c 0x%02x\n", client->addr);
4418     -
4419     - if (!client->irq) {
4420     - dev_err(&client->dev,
4421     - "HID over i2c has not been provided an Int IRQ\n");
4422     - return -EINVAL;
4423     - }
4424     -
4425     - if (client->irq < 0) {
4426     - if (client->irq != -EPROBE_DEFER)
4427     - dev_err(&client->dev,
4428     - "HID over i2c doesn't have a valid IRQ\n");
4429     - return client->irq;
4430     - }
4431     -
4432     - ihid = devm_kzalloc(&client->dev, sizeof(*ihid), GFP_KERNEL);
4433     - if (!ihid)
4434     - return -ENOMEM;
4435     -
4436     - if (client->dev.of_node) {
4437     - ret = i2c_hid_of_probe(client, &ihid->pdata);
4438     - if (ret)
4439     - return ret;
4440     - } else if (!platform_data) {
4441     - ret = i2c_hid_acpi_pdata(client, &ihid->pdata);
4442     - if (ret)
4443     - return ret;
4444     - } else {
4445     - ihid->pdata = *platform_data;
4446     - }
4447     -
4448     - /* Parse platform agnostic common properties from ACPI / device tree */
4449     - i2c_hid_fwnode_probe(client, &ihid->pdata);
4450     -
4451     - ihid->pdata.supplies[0].supply = "vdd";
4452     - ihid->pdata.supplies[1].supply = "vddl";
4453     -
4454     - ret = devm_regulator_bulk_get(&client->dev,
4455     - ARRAY_SIZE(ihid->pdata.supplies),
4456     - ihid->pdata.supplies);
4457     - if (ret)
4458     - return ret;
4459     -
4460     - ret = regulator_bulk_enable(ARRAY_SIZE(ihid->pdata.supplies),
4461     - ihid->pdata.supplies);
4462     - if (ret < 0)
4463     - return ret;
4464     -
4465     - if (ihid->pdata.post_power_delay_ms)
4466     - msleep(ihid->pdata.post_power_delay_ms);
4467     -
4468     - i2c_set_clientdata(client, ihid);
4469     -
4470     - ihid->client = client;
4471     -
4472     - hidRegister = ihid->pdata.hid_descriptor_address;
4473     - ihid->wHIDDescRegister = cpu_to_le16(hidRegister);
4474     -
4475     - init_waitqueue_head(&ihid->wait);
4476     - mutex_init(&ihid->reset_lock);
4477     -
4478     - /* we need to allocate the command buffer without knowing the maximum
4479     - * size of the reports. Let's use HID_MIN_BUFFER_SIZE, then we do the
4480     - * real computation later. */
4481     - ret = i2c_hid_alloc_buffers(ihid, HID_MIN_BUFFER_SIZE);
4482     - if (ret < 0)
4483     - goto err_regulator;
4484     -
4485     - i2c_hid_acpi_fix_up_power(&client->dev);
4486     -
4487     - pm_runtime_get_noresume(&client->dev);
4488     - pm_runtime_set_active(&client->dev);
4489     - pm_runtime_enable(&client->dev);
4490     - device_enable_async_suspend(&client->dev);
4491     -
4492     - /* Make sure there is something at this address */
4493     - ret = i2c_smbus_read_byte(client);
4494     - if (ret < 0) {
4495     - dev_dbg(&client->dev, "nothing at this address: %d\n", ret);
4496     - ret = -ENXIO;
4497     - goto err_pm;
4498     - }
4499     -
4500     - ret = i2c_hid_fetch_hid_descriptor(ihid);
4501     - if (ret < 0)
4502     - goto err_pm;
4503     -
4504     - ret = i2c_hid_init_irq(client);
4505     - if (ret < 0)
4506     - goto err_pm;
4507     -
4508     - hid = hid_allocate_device();
4509     - if (IS_ERR(hid)) {
4510     - ret = PTR_ERR(hid);
4511     - goto err_irq;
4512     - }
4513     -
4514     - ihid->hid = hid;
4515     -
4516     - hid->driver_data = client;
4517     - hid->ll_driver = &i2c_hid_ll_driver;
4518     - hid->dev.parent = &client->dev;
4519     - hid->bus = BUS_I2C;
4520     - hid->version = le16_to_cpu(ihid->hdesc.bcdVersion);
4521     - hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
4522     - hid->product = le16_to_cpu(ihid->hdesc.wProductID);
4523     -
4524     - snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
4525     - client->name, hid->vendor, hid->product);
4526     - strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
4527     -
4528     - ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
4529     -
4530     - ret = hid_add_device(hid);
4531     - if (ret) {
4532     - if (ret != -ENODEV)
4533     - hid_err(client, "can't add hid device: %d\n", ret);
4534     - goto err_mem_free;
4535     - }
4536     -
4537     - if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
4538     - pm_runtime_put(&client->dev);
4539     -
4540     - return 0;
4541     -
4542     -err_mem_free:
4543     - hid_destroy_device(hid);
4544     -
4545     -err_irq:
4546     - free_irq(client->irq, ihid);
4547     -
4548     -err_pm:
4549     - pm_runtime_put_noidle(&client->dev);
4550     - pm_runtime_disable(&client->dev);
4551     -
4552     -err_regulator:
4553     - regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
4554     - ihid->pdata.supplies);
4555     - i2c_hid_free_buffers(ihid);
4556     - return ret;
4557     -}
4558     -
4559     -static int i2c_hid_remove(struct i2c_client *client)
4560     -{
4561     - struct i2c_hid *ihid = i2c_get_clientdata(client);
4562     - struct hid_device *hid;
4563     -
4564     - if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
4565     - pm_runtime_get_sync(&client->dev);
4566     - pm_runtime_disable(&client->dev);
4567     - pm_runtime_set_suspended(&client->dev);
4568     - pm_runtime_put_noidle(&client->dev);
4569     -
4570     - hid = ihid->hid;
4571     - hid_destroy_device(hid);
4572     -
4573     - free_irq(client->irq, ihid);
4574     -
4575     - if (ihid->bufsize)
4576     - i2c_hid_free_buffers(ihid);
4577     -
4578     - regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
4579     - ihid->pdata.supplies);
4580     -
4581     - return 0;
4582     -}
4583     -
4584     -static void i2c_hid_shutdown(struct i2c_client *client)
4585     -{
4586     - struct i2c_hid *ihid = i2c_get_clientdata(client);
4587     -
4588     - i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
4589     - free_irq(client->irq, ihid);
4590     -}
4591     -
4592     -#ifdef CONFIG_PM_SLEEP
4593     -static int i2c_hid_suspend(struct device *dev)
4594     -{
4595     - struct i2c_client *client = to_i2c_client(dev);
4596     - struct i2c_hid *ihid = i2c_get_clientdata(client);
4597     - struct hid_device *hid = ihid->hid;
4598     - int ret;
4599     - int wake_status;
4600     -
4601     - if (hid->driver && hid->driver->suspend) {
4602     - /*
4603     - * Wake up the device so that IO issues in
4604     - * HID driver's suspend code can succeed.
4605     - */
4606     - ret = pm_runtime_resume(dev);
4607     - if (ret < 0)
4608     - return ret;
4609     -
4610     - ret = hid->driver->suspend(hid, PMSG_SUSPEND);
4611     - if (ret < 0)
4612     - return ret;
4613     - }
4614     -
4615     - if (!pm_runtime_suspended(dev)) {
4616     - /* Save some power */
4617     - i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
4618     -
4619     - disable_irq(client->irq);
4620     - }
4621     -
4622     - if (device_may_wakeup(&client->dev)) {
4623     - wake_status = enable_irq_wake(client->irq);
4624     - if (!wake_status)
4625     - ihid->irq_wake_enabled = true;
4626     - else
4627     - hid_warn(hid, "Failed to enable irq wake: %d\n",
4628     - wake_status);
4629     - } else {
4630     - regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
4631     - ihid->pdata.supplies);
4632     - }
4633     -
4634     - return 0;
4635     -}
4636     -
4637     -static int i2c_hid_resume(struct device *dev)
4638     -{
4639     - int ret;
4640     - struct i2c_client *client = to_i2c_client(dev);
4641     - struct i2c_hid *ihid = i2c_get_clientdata(client);
4642     - struct hid_device *hid = ihid->hid;
4643     - int wake_status;
4644     -
4645     - if (!device_may_wakeup(&client->dev)) {
4646     - ret = regulator_bulk_enable(ARRAY_SIZE(ihid->pdata.supplies),
4647     - ihid->pdata.supplies);
4648     - if (ret)
4649     - hid_warn(hid, "Failed to enable supplies: %d\n", ret);
4650     -
4651     - if (ihid->pdata.post_power_delay_ms)
4652     - msleep(ihid->pdata.post_power_delay_ms);
4653     - } else if (ihid->irq_wake_enabled) {
4654     - wake_status = disable_irq_wake(client->irq);
4655     - if (!wake_status)
4656     - ihid->irq_wake_enabled = false;
4657     - else
4658     - hid_warn(hid, "Failed to disable irq wake: %d\n",
4659     - wake_status);
4660     - }
4661     -
4662     - /* We'll resume to full power */
4663     - pm_runtime_disable(dev);
4664     - pm_runtime_set_active(dev);
4665     - pm_runtime_enable(dev);
4666     -
4667     - enable_irq(client->irq);
4668     -
4669     - /* Instead of resetting device, simply powers the device on. This
4670     - * solves "incomplete reports" on Raydium devices 2386:3118 and
4671     - * 2386:4B33 and fixes various SIS touchscreens no longer sending
4672     - * data after a suspend/resume.
4673     - */
4674     - ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
4675     - if (ret)
4676     - return ret;
4677     -
4678     - if (hid->driver && hid->driver->reset_resume) {
4679     - ret = hid->driver->reset_resume(hid);
4680     - return ret;
4681     - }
4682     -
4683     - return 0;
4684     -}
4685     -#endif
4686     -
4687     -#ifdef CONFIG_PM
4688     -static int i2c_hid_runtime_suspend(struct device *dev)
4689     -{
4690     - struct i2c_client *client = to_i2c_client(dev);
4691     -
4692     - i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
4693     - disable_irq(client->irq);
4694     - return 0;
4695     -}
4696     -
4697     -static int i2c_hid_runtime_resume(struct device *dev)
4698     -{
4699     - struct i2c_client *client = to_i2c_client(dev);
4700     -
4701     - enable_irq(client->irq);
4702     - i2c_hid_set_power(client, I2C_HID_PWR_ON);
4703     - return 0;
4704     -}
4705     -#endif
4706     -
4707     -static const struct dev_pm_ops i2c_hid_pm = {
4708     - SET_SYSTEM_SLEEP_PM_OPS(i2c_hid_suspend, i2c_hid_resume)
4709     - SET_RUNTIME_PM_OPS(i2c_hid_runtime_suspend, i2c_hid_runtime_resume,
4710     - NULL)
4711     -};
4712     -
4713     -static const struct i2c_device_id i2c_hid_id_table[] = {
4714     - { "hid", 0 },
4715     - { "hid-over-i2c", 0 },
4716     - { },
4717     -};
4718     -MODULE_DEVICE_TABLE(i2c, i2c_hid_id_table);
4719     -
4720     -
4721     -static struct i2c_driver i2c_hid_driver = {
4722     - .driver = {
4723     - .name = "i2c_hid",
4724     - .pm = &i2c_hid_pm,
4725     - .acpi_match_table = ACPI_PTR(i2c_hid_acpi_match),
4726     - .of_match_table = of_match_ptr(i2c_hid_of_match),
4727     - },
4728     -
4729     - .probe = i2c_hid_probe,
4730     - .remove = i2c_hid_remove,
4731     - .shutdown = i2c_hid_shutdown,
4732     - .id_table = i2c_hid_id_table,
4733     -};
4734     -
4735     -module_i2c_driver(i2c_hid_driver);
4736     -
4737     -MODULE_DESCRIPTION("HID over I2C core driver");
4738     -MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
4739     -MODULE_LICENSE("GPL");
4740     diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h
4741     new file mode 100644
4742     index 000000000000..a8c19aef5824
4743     --- /dev/null
4744     +++ b/drivers/hid/i2c-hid/i2c-hid.h
4745     @@ -0,0 +1,20 @@
4746     +/* SPDX-License-Identifier: GPL-2.0+ */
4747     +
4748     +#ifndef I2C_HID_H
4749     +#define I2C_HID_H
4750     +
4751     +
4752     +#ifdef CONFIG_DMI
4753     +struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name);
4754     +char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
4755     + unsigned int *size);
4756     +#else
4757     +static inline struct i2c_hid_desc
4758     + *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
4759     +{ return NULL; }
4760     +static inline char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
4761     + unsigned int *size)
4762     +{ return NULL; }
4763     +#endif
4764     +
4765     +#endif
4766     diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
4767     index 45b2460f3166..e8819d750938 100644
4768     --- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
4769     +++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
4770     @@ -668,6 +668,10 @@ static const struct amba_id debug_ids[] = {
4771     .id = 0x000bbd08,
4772     .mask = 0x000fffff,
4773     },
4774     + { /* Debug for Cortex-A73 */
4775     + .id = 0x000bbd09,
4776     + .mask = 0x000fffff,
4777     + },
4778     { 0, 0 },
4779     };
4780    
4781     diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
4782     index 9b1e84a6b1cc..63c5ba66b305 100644
4783     --- a/drivers/infiniband/hw/hfi1/qp.c
4784     +++ b/drivers/infiniband/hw/hfi1/qp.c
4785     @@ -784,7 +784,7 @@ void notify_error_qp(struct rvt_qp *qp)
4786     write_seqlock(lock);
4787     if (!list_empty(&priv->s_iowait.list) &&
4788     !(qp->s_flags & RVT_S_BUSY)) {
4789     - qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
4790     + qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
4791     list_del_init(&priv->s_iowait.list);
4792     priv->s_iowait.lock = NULL;
4793     rvt_put_qp(qp);
4794     diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
4795     index a9ea966877f2..dda8e79d4b27 100644
4796     --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
4797     +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
4798     @@ -173,7 +173,12 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
4799    
4800     rcu_read_lock();
4801     in = __in_dev_get_rcu(upper_dev);
4802     - local_ipaddr = ntohl(in->ifa_list->ifa_address);
4803     +
4804     + if (!in->ifa_list)
4805     + local_ipaddr = 0;
4806     + else
4807     + local_ipaddr = ntohl(in->ifa_list->ifa_address);
4808     +
4809     rcu_read_unlock();
4810     } else {
4811     local_ipaddr = ntohl(ifa->ifa_address);
4812     @@ -185,6 +190,11 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
4813     case NETDEV_UP:
4814     /* Fall through */
4815     case NETDEV_CHANGEADDR:
4816     +
4817     + /* Just skip if no need to handle ARP cache */
4818     + if (!local_ipaddr)
4819     + break;
4820     +
4821     i40iw_manage_arp_cache(iwdev,
4822     netdev->dev_addr,
4823     &local_ipaddr,
4824     diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
4825     index 155b4dfc0ae8..baab9afa9174 100644
4826     --- a/drivers/infiniband/hw/mlx4/alias_GUID.c
4827     +++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
4828     @@ -804,8 +804,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
4829     unsigned long flags;
4830    
4831     for (i = 0 ; i < dev->num_ports; i++) {
4832     - cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
4833     det = &sriov->alias_guid.ports_guid[i];
4834     + cancel_delayed_work_sync(&det->alias_guid_work);
4835     spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
4836     while (!list_empty(&det->cb_list)) {
4837     cb_ctx = list_entry(det->cb_list.next,
4838     diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
4839     index d9c748b6f9e4..7f9824b0609e 100644
4840     --- a/drivers/iommu/dmar.c
4841     +++ b/drivers/iommu/dmar.c
4842     @@ -144,7 +144,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
4843     for (tmp = dev; tmp; tmp = tmp->bus->self)
4844     level++;
4845    
4846     - size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
4847     + size = sizeof(*info) + level * sizeof(info->path[0]);
4848     if (size <= sizeof(dmar_pci_notify_info_buf)) {
4849     info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
4850     } else {
4851     diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
4852     index 2b8f5ebae821..603bf5233a99 100644
4853     --- a/drivers/iommu/intel-iommu.c
4854     +++ b/drivers/iommu/intel-iommu.c
4855     @@ -1624,6 +1624,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
4856     u32 pmen;
4857     unsigned long flags;
4858    
4859     + if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
4860     + return;
4861     +
4862     raw_spin_lock_irqsave(&iommu->register_lock, flags);
4863     pmen = readl(iommu->reg + DMAR_PMEN_REG);
4864     pmen &= ~DMA_PMEN_EPM;
4865     diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
4866     index 567b29c47608..98b6e1d4b1a6 100644
4867     --- a/drivers/irqchip/irq-mbigen.c
4868     +++ b/drivers/irqchip/irq-mbigen.c
4869     @@ -161,6 +161,9 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
4870     void __iomem *base = d->chip_data;
4871     u32 val;
4872    
4873     + if (!msg->address_lo && !msg->address_hi)
4874     + return;
4875     +
4876     base += get_mbigen_vec_reg(d->hwirq);
4877     val = readl_relaxed(base);
4878    
4879     diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
4880     index 0a2088e12d96..97b27f338c30 100644
4881     --- a/drivers/irqchip/irq-stm32-exti.c
4882     +++ b/drivers/irqchip/irq-stm32-exti.c
4883     @@ -650,11 +650,6 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
4884     */
4885     writel_relaxed(0, base + stm32_bank->imr_ofst);
4886     writel_relaxed(0, base + stm32_bank->emr_ofst);
4887     - writel_relaxed(0, base + stm32_bank->rtsr_ofst);
4888     - writel_relaxed(0, base + stm32_bank->ftsr_ofst);
4889     - writel_relaxed(~0UL, base + stm32_bank->rpr_ofst);
4890     - if (stm32_bank->fpr_ofst != UNDEF_REG)
4891     - writel_relaxed(~0UL, base + stm32_bank->fpr_ofst);
4892    
4893     pr_info("%s: bank%d, External IRQs available:%#x\n",
4894     node->full_name, bank_idx, irqs_mask);
4895     diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
4896     index cd363a2100d4..257ae0d8cfe2 100644
4897     --- a/drivers/media/usb/au0828/au0828-core.c
4898     +++ b/drivers/media/usb/au0828/au0828-core.c
4899     @@ -629,7 +629,6 @@ static int au0828_usb_probe(struct usb_interface *interface,
4900     pr_err("%s() au0282_dev_register failed to register on V4L2\n",
4901     __func__);
4902     mutex_unlock(&dev->lock);
4903     - kfree(dev);
4904     goto done;
4905     }
4906    
4907     diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
4908     index 2154d1bfd18b..07caaa2cfe1e 100644
4909     --- a/drivers/misc/lkdtm/core.c
4910     +++ b/drivers/misc/lkdtm/core.c
4911     @@ -152,7 +152,9 @@ static const struct crashtype crashtypes[] = {
4912     CRASHTYPE(EXEC_VMALLOC),
4913     CRASHTYPE(EXEC_RODATA),
4914     CRASHTYPE(EXEC_USERSPACE),
4915     + CRASHTYPE(EXEC_NULL),
4916     CRASHTYPE(ACCESS_USERSPACE),
4917     + CRASHTYPE(ACCESS_NULL),
4918     CRASHTYPE(WRITE_RO),
4919     CRASHTYPE(WRITE_RO_AFTER_INIT),
4920     CRASHTYPE(WRITE_KERN),
4921     diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
4922     index 9e513dcfd809..8c3f2e6af256 100644
4923     --- a/drivers/misc/lkdtm/lkdtm.h
4924     +++ b/drivers/misc/lkdtm/lkdtm.h
4925     @@ -45,7 +45,9 @@ void lkdtm_EXEC_KMALLOC(void);
4926     void lkdtm_EXEC_VMALLOC(void);
4927     void lkdtm_EXEC_RODATA(void);
4928     void lkdtm_EXEC_USERSPACE(void);
4929     +void lkdtm_EXEC_NULL(void);
4930     void lkdtm_ACCESS_USERSPACE(void);
4931     +void lkdtm_ACCESS_NULL(void);
4932    
4933     /* lkdtm_refcount.c */
4934     void lkdtm_REFCOUNT_INC_OVERFLOW(void);
4935     diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
4936     index 53b85c9d16b8..62f76d506f04 100644
4937     --- a/drivers/misc/lkdtm/perms.c
4938     +++ b/drivers/misc/lkdtm/perms.c
4939     @@ -47,7 +47,7 @@ static noinline void execute_location(void *dst, bool write)
4940     {
4941     void (*func)(void) = dst;
4942    
4943     - pr_info("attempting ok execution at %p\n", do_nothing);
4944     + pr_info("attempting ok execution at %px\n", do_nothing);
4945     do_nothing();
4946    
4947     if (write == CODE_WRITE) {
4948     @@ -55,7 +55,7 @@ static noinline void execute_location(void *dst, bool write)
4949     flush_icache_range((unsigned long)dst,
4950     (unsigned long)dst + EXEC_SIZE);
4951     }
4952     - pr_info("attempting bad execution at %p\n", func);
4953     + pr_info("attempting bad execution at %px\n", func);
4954     func();
4955     }
4956    
4957     @@ -66,14 +66,14 @@ static void execute_user_location(void *dst)
4958     /* Intentionally crossing kernel/user memory boundary. */
4959     void (*func)(void) = dst;
4960    
4961     - pr_info("attempting ok execution at %p\n", do_nothing);
4962     + pr_info("attempting ok execution at %px\n", do_nothing);
4963     do_nothing();
4964    
4965     copied = access_process_vm(current, (unsigned long)dst, do_nothing,
4966     EXEC_SIZE, FOLL_WRITE);
4967     if (copied < EXEC_SIZE)
4968     return;
4969     - pr_info("attempting bad execution at %p\n", func);
4970     + pr_info("attempting bad execution at %px\n", func);
4971     func();
4972     }
4973    
4974     @@ -82,7 +82,7 @@ void lkdtm_WRITE_RO(void)
4975     /* Explicitly cast away "const" for the test. */
4976     unsigned long *ptr = (unsigned long *)&rodata;
4977    
4978     - pr_info("attempting bad rodata write at %p\n", ptr);
4979     + pr_info("attempting bad rodata write at %px\n", ptr);
4980     *ptr ^= 0xabcd1234;
4981     }
4982    
4983     @@ -100,7 +100,7 @@ void lkdtm_WRITE_RO_AFTER_INIT(void)
4984     return;
4985     }
4986    
4987     - pr_info("attempting bad ro_after_init write at %p\n", ptr);
4988     + pr_info("attempting bad ro_after_init write at %px\n", ptr);
4989     *ptr ^= 0xabcd1234;
4990     }
4991    
4992     @@ -112,7 +112,7 @@ void lkdtm_WRITE_KERN(void)
4993     size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
4994     ptr = (unsigned char *)do_overwritten;
4995    
4996     - pr_info("attempting bad %zu byte write at %p\n", size, ptr);
4997     + pr_info("attempting bad %zu byte write at %px\n", size, ptr);
4998     memcpy(ptr, (unsigned char *)do_nothing, size);
4999     flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
5000    
5001     @@ -164,6 +164,11 @@ void lkdtm_EXEC_USERSPACE(void)
5002     vm_munmap(user_addr, PAGE_SIZE);
5003     }
5004    
5005     +void lkdtm_EXEC_NULL(void)
5006     +{
5007     + execute_location(NULL, CODE_AS_IS);
5008     +}
5009     +
5010     void lkdtm_ACCESS_USERSPACE(void)
5011     {
5012     unsigned long user_addr, tmp = 0;
5013     @@ -185,16 +190,29 @@ void lkdtm_ACCESS_USERSPACE(void)
5014    
5015     ptr = (unsigned long *)user_addr;
5016    
5017     - pr_info("attempting bad read at %p\n", ptr);
5018     + pr_info("attempting bad read at %px\n", ptr);
5019     tmp = *ptr;
5020     tmp += 0xc0dec0de;
5021    
5022     - pr_info("attempting bad write at %p\n", ptr);
5023     + pr_info("attempting bad write at %px\n", ptr);
5024     *ptr = tmp;
5025    
5026     vm_munmap(user_addr, PAGE_SIZE);
5027     }
5028    
5029     +void lkdtm_ACCESS_NULL(void)
5030     +{
5031     + unsigned long tmp;
5032     + unsigned long *ptr = (unsigned long *)NULL;
5033     +
5034     + pr_info("attempting bad read at %px\n", ptr);
5035     + tmp = *ptr;
5036     + tmp += 0xc0dec0de;
5037     +
5038     + pr_info("attempting bad write at %px\n", ptr);
5039     + *ptr = tmp;
5040     +}
5041     +
5042     void __init lkdtm_perms_init(void)
5043     {
5044     /* Make sure we can write to __ro_after_init values during __init */
5045     diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
5046     index 9e68c3645e22..e6f14257a7d0 100644
5047     --- a/drivers/mmc/host/davinci_mmc.c
5048     +++ b/drivers/mmc/host/davinci_mmc.c
5049     @@ -1117,7 +1117,7 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
5050     {
5051     }
5052     #endif
5053     -static void __init init_mmcsd_host(struct mmc_davinci_host *host)
5054     +static void init_mmcsd_host(struct mmc_davinci_host *host)
5055     {
5056    
5057     mmc_davinci_reset_ctrl(host, 1);
5058     diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
5059     index afed0f0f4027..c0c75c111abb 100644
5060     --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
5061     +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
5062     @@ -59,7 +59,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
5063    
5064     desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
5065     stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
5066     - STMMAC_RING_MODE, 0, false, skb->len);
5067     + STMMAC_RING_MODE, 1, false, skb->len);
5068     tx_q->tx_skbuff[entry] = NULL;
5069     entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
5070    
5071     @@ -91,7 +91,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
5072     tx_q->tx_skbuff_dma[entry].is_jumbo = true;
5073     desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
5074     stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
5075     - STMMAC_RING_MODE, 0, true, skb->len);
5076     + STMMAC_RING_MODE, 1, true, skb->len);
5077     }
5078    
5079     tx_q->cur_tx = entry;
5080     diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h
5081     index d9ff3b8be86e..60f1f286b030 100644
5082     --- a/drivers/net/wireless/rsi/rsi_common.h
5083     +++ b/drivers/net/wireless/rsi/rsi_common.h
5084     @@ -75,7 +75,6 @@ static inline int rsi_kill_thread(struct rsi_thread *handle)
5085     atomic_inc(&handle->thread_done);
5086     rsi_set_event(&handle->event);
5087    
5088     - wait_for_completion(&handle->completion);
5089     return kthread_stop(handle->task);
5090     }
5091    
5092     diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
5093     index 7eb1549cea81..30649addc625 100644
5094     --- a/drivers/pci/pci.c
5095     +++ b/drivers/pci/pci.c
5096     @@ -2489,6 +2489,25 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev)
5097     pm_runtime_put_sync(parent);
5098     }
5099    
5100     +static const struct dmi_system_id bridge_d3_blacklist[] = {
5101     +#ifdef CONFIG_X86
5102     + {
5103     + /*
5104     + * Gigabyte X299 root port is not marked as hotplug capable
5105     + * which allows Linux to power manage it. However, this
5106     + * confuses the BIOS SMI handler so don't power manage root
5107     + * ports on that system.
5108     + */
5109     + .ident = "X299 DESIGNARE EX-CF",
5110     + .matches = {
5111     + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
5112     + DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
5113     + },
5114     + },
5115     +#endif
5116     + { }
5117     +};
5118     +
5119     /**
5120     * pci_bridge_d3_possible - Is it possible to put the bridge into D3
5121     * @bridge: Bridge to check
5122     @@ -2530,6 +2549,9 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
5123     if (bridge->is_hotplug_bridge)
5124     return false;
5125    
5126     + if (dmi_check_system(bridge_d3_blacklist))
5127     + return false;
5128     +
5129     /*
5130     * It should be safe to put PCIe ports from 2015 or newer
5131     * to D3.
5132     diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
5133     index a3dd777e3ce8..c6ff4d5fa482 100644
5134     --- a/drivers/pinctrl/core.c
5135     +++ b/drivers/pinctrl/core.c
5136     @@ -627,7 +627,7 @@ static int pinctrl_generic_group_name_to_selector(struct pinctrl_dev *pctldev,
5137     while (selector < ngroups) {
5138     const char *gname = ops->get_group_name(pctldev, selector);
5139    
5140     - if (!strcmp(function, gname))
5141     + if (gname && !strcmp(function, gname))
5142     return selector;
5143    
5144     selector++;
5145     @@ -743,7 +743,7 @@ int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
5146     while (group_selector < ngroups) {
5147     const char *gname = pctlops->get_group_name(pctldev,
5148     group_selector);
5149     - if (!strcmp(gname, pin_group)) {
5150     + if (gname && !strcmp(gname, pin_group)) {
5151     dev_dbg(pctldev->dev,
5152     "found group selector %u for %s\n",
5153     group_selector,
5154     diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
5155     index 7563c07e14e4..1e2524de6a63 100644
5156     --- a/drivers/platform/x86/Kconfig
5157     +++ b/drivers/platform/x86/Kconfig
5158     @@ -1231,6 +1231,18 @@ config I2C_MULTI_INSTANTIATE
5159     To compile this driver as a module, choose M here: the module
5160     will be called i2c-multi-instantiate.
5161    
5162     +config INTEL_ATOMISP2_PM
5163     + tristate "Intel AtomISP2 dummy / power-management driver"
5164     + depends on PCI && IOSF_MBI && PM
5165     + help
5166     + Power-management driver for Intel's Image Signal Processor found on
5167     + Bay and Cherry Trail devices. This dummy driver's sole purpose is to
5168     + turn the ISP off (put it in D3) to save power and to allow entering
5169     + of S0ix modes.
5170     +
5171     + To compile this driver as a module, choose M here: the module
5172     + will be called intel_atomisp2_pm.
5173     +
5174     endif # X86_PLATFORM_DEVICES
5175    
5176     config PMC_ATOM
5177     diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
5178     index e6d1becf81ce..dc29af4d8e2f 100644
5179     --- a/drivers/platform/x86/Makefile
5180     +++ b/drivers/platform/x86/Makefile
5181     @@ -92,3 +92,4 @@ obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
5182     obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o
5183     obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o
5184     obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o
5185     +obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o
5186     diff --git a/drivers/platform/x86/intel_atomisp2_pm.c b/drivers/platform/x86/intel_atomisp2_pm.c
5187     new file mode 100644
5188     index 000000000000..9371603a0ac9
5189     --- /dev/null
5190     +++ b/drivers/platform/x86/intel_atomisp2_pm.c
5191     @@ -0,0 +1,119 @@
5192     +// SPDX-License-Identifier: GPL-2.0
5193     +/*
5194     + * Dummy driver for Intel's Image Signal Processor found on Bay and Cherry
5195     + * Trail devices. The sole purpose of this driver is to allow the ISP to
5196     + * be put in D3.
5197     + *
5198     + * Copyright (C) 2018 Hans de Goede <hdegoede@redhat.com>
5199     + *
5200     + * Based on various non upstream patches for ISP support:
5201     + * Copyright (C) 2010-2017 Intel Corporation. All rights reserved.
5202     + * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
5203     + */
5204     +
5205     +#include <linux/delay.h>
5206     +#include <linux/module.h>
5207     +#include <linux/mod_devicetable.h>
5208     +#include <linux/pci.h>
5209     +#include <linux/pm_runtime.h>
5210     +#include <asm/iosf_mbi.h>
5211     +
5212     +/* PCI configuration regs */
5213     +#define PCI_INTERRUPT_CTRL 0x9c
5214     +
5215     +#define PCI_CSI_CONTROL 0xe8
5216     +#define PCI_CSI_CONTROL_PORTS_OFF_MASK 0x7
5217     +
5218     +/* IOSF BT_MBI_UNIT_PMC regs */
5219     +#define ISPSSPM0 0x39
5220     +#define ISPSSPM0_ISPSSC_OFFSET 0
5221     +#define ISPSSPM0_ISPSSC_MASK 0x00000003
5222     +#define ISPSSPM0_ISPSSS_OFFSET 24
5223     +#define ISPSSPM0_ISPSSS_MASK 0x03000000
5224     +#define ISPSSPM0_IUNIT_POWER_ON 0x0
5225     +#define ISPSSPM0_IUNIT_POWER_OFF 0x3
5226     +
5227     +static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id)
5228     +{
5229     + unsigned long timeout;
5230     + u32 val;
5231     +
5232     + pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, 0);
5233     +
5234     + /*
5235     + * MRFLD IUNIT DPHY is located in an always-power-on island
5236     + * MRFLD HW design need all CSI ports are disabled before
5237     + * powering down the IUNIT.
5238     + */
5239     + pci_read_config_dword(dev, PCI_CSI_CONTROL, &val);
5240     + val |= PCI_CSI_CONTROL_PORTS_OFF_MASK;
5241     + pci_write_config_dword(dev, PCI_CSI_CONTROL, val);
5242     +
5243     + /* Write 0x3 to ISPSSPM0 bit[1:0] to power off the IUNIT */
5244     + iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0,
5245     + ISPSSPM0_IUNIT_POWER_OFF, ISPSSPM0_ISPSSC_MASK);
5246     +
5247     + /*
5248     + * There should be no IUNIT access while power-down is
5249     + * in progress HW sighting: 4567865
5250     + * Wait up to 50 ms for the IUNIT to shut down.
5251     + */
5252     + timeout = jiffies + msecs_to_jiffies(50);
5253     + while (1) {
5254     + /* Wait until ISPSSPM0 bit[25:24] shows 0x3 */
5255     + iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &val);
5256     + val = (val & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET;
5257     + if (val == ISPSSPM0_IUNIT_POWER_OFF)
5258     + break;
5259     +
5260     + if (time_after(jiffies, timeout)) {
5261     + dev_err(&dev->dev, "IUNIT power-off timeout.\n");
5262     + return -EBUSY;
5263     + }
5264     + usleep_range(1000, 2000);
5265     + }
5266     +
5267     + pm_runtime_allow(&dev->dev);
5268     + pm_runtime_put_sync_suspend(&dev->dev);
5269     +
5270     + return 0;
5271     +}
5272     +
5273     +static void isp_remove(struct pci_dev *dev)
5274     +{
5275     + pm_runtime_get_sync(&dev->dev);
5276     + pm_runtime_forbid(&dev->dev);
5277     +}
5278     +
5279     +static int isp_pci_suspend(struct device *dev)
5280     +{
5281     + return 0;
5282     +}
5283     +
5284     +static int isp_pci_resume(struct device *dev)
5285     +{
5286     + return 0;
5287     +}
5288     +
5289     +static UNIVERSAL_DEV_PM_OPS(isp_pm_ops, isp_pci_suspend,
5290     + isp_pci_resume, NULL);
5291     +
5292     +static const struct pci_device_id isp_id_table[] = {
5293     + { PCI_VDEVICE(INTEL, 0x22b8), },
5294     + { 0, }
5295     +};
5296     +MODULE_DEVICE_TABLE(pci, isp_id_table);
5297     +
5298     +static struct pci_driver isp_pci_driver = {
5299     + .name = "intel_atomisp2_pm",
5300     + .id_table = isp_id_table,
5301     + .probe = isp_probe,
5302     + .remove = isp_remove,
5303     + .driver.pm = &isp_pm_ops,
5304     +};
5305     +
5306     +module_pci_driver(isp_pci_driver);
5307     +
5308     +MODULE_DESCRIPTION("Intel AtomISP2 dummy / power-management drv (for suspend)");
5309     +MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
5310     +MODULE_LICENSE("GPL v2");
5311     diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
5312     index 18e4289baf99..655790f30434 100644
5313     --- a/drivers/scsi/scsi_lib.c
5314     +++ b/drivers/scsi/scsi_lib.c
5315     @@ -3095,7 +3095,6 @@ void scsi_device_resume(struct scsi_device *sdev)
5316     * device deleted during suspend)
5317     */
5318     mutex_lock(&sdev->state_mutex);
5319     - WARN_ON_ONCE(!sdev->quiesced_by);
5320     sdev->quiesced_by = NULL;
5321     blk_clear_preempt_only(sdev->request_queue);
5322     if (sdev->sdev_state == SDEV_QUIESCE)
5323     diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
5324     index 6fd2fe210fc3..4d0fc6b01fa0 100644
5325     --- a/drivers/scsi/scsi_transport_iscsi.c
5326     +++ b/drivers/scsi/scsi_transport_iscsi.c
5327     @@ -2185,6 +2185,8 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
5328     scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
5329     /* flush running scans then delete devices */
5330     flush_work(&session->scan_work);
5331     + /* flush running unbind operations */
5332     + flush_work(&session->unbind_work);
5333     __iscsi_unbind_session(&session->unbind_work);
5334    
5335     /* hw iscsi may not have removed all connections from session */
5336     diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
5337     index ed71a4c9c8b2..4b452f36f054 100644
5338     --- a/drivers/soc/tegra/pmc.c
5339     +++ b/drivers/soc/tegra/pmc.c
5340     @@ -524,16 +524,10 @@ EXPORT_SYMBOL(tegra_powergate_power_off);
5341     */
5342     int tegra_powergate_is_powered(unsigned int id)
5343     {
5344     - int status;
5345     -
5346     if (!tegra_powergate_is_valid(id))
5347     return -EINVAL;
5348    
5349     - mutex_lock(&pmc->powergates_lock);
5350     - status = tegra_powergate_state(id);
5351     - mutex_unlock(&pmc->powergates_lock);
5352     -
5353     - return status;
5354     + return tegra_powergate_state(id);
5355     }
5356    
5357     /**
5358     diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
5359     index 24b006a95142..8646fb7425f2 100644
5360     --- a/drivers/thermal/broadcom/bcm2835_thermal.c
5361     +++ b/drivers/thermal/broadcom/bcm2835_thermal.c
5362     @@ -128,8 +128,7 @@ static const struct debugfs_reg32 bcm2835_thermal_regs[] = {
5363    
5364     static void bcm2835_thermal_debugfs(struct platform_device *pdev)
5365     {
5366     - struct thermal_zone_device *tz = platform_get_drvdata(pdev);
5367     - struct bcm2835_thermal_data *data = tz->devdata;
5368     + struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
5369     struct debugfs_regset32 *regset;
5370    
5371     data->debugfsdir = debugfs_create_dir("bcm2835_thermal", NULL);
5372     @@ -275,7 +274,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
5373    
5374     data->tz = tz;
5375    
5376     - platform_set_drvdata(pdev, tz);
5377     + platform_set_drvdata(pdev, data);
5378    
5379     /*
5380     * Thermal_zone doesn't enable hwmon as default,
5381     @@ -299,8 +298,8 @@ err_clk:
5382    
5383     static int bcm2835_thermal_remove(struct platform_device *pdev)
5384     {
5385     - struct thermal_zone_device *tz = platform_get_drvdata(pdev);
5386     - struct bcm2835_thermal_data *data = tz->devdata;
5387     + struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
5388     + struct thermal_zone_device *tz = data->tz;
5389    
5390     debugfs_remove_recursive(data->debugfsdir);
5391     thermal_zone_of_sensor_unregister(&pdev->dev, tz);
5392     diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
5393     index e26b01c05e82..e9d58de8b5da 100644
5394     --- a/drivers/thermal/int340x_thermal/int3400_thermal.c
5395     +++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
5396     @@ -22,6 +22,13 @@ enum int3400_thermal_uuid {
5397     INT3400_THERMAL_PASSIVE_1,
5398     INT3400_THERMAL_ACTIVE,
5399     INT3400_THERMAL_CRITICAL,
5400     + INT3400_THERMAL_ADAPTIVE_PERFORMANCE,
5401     + INT3400_THERMAL_EMERGENCY_CALL_MODE,
5402     + INT3400_THERMAL_PASSIVE_2,
5403     + INT3400_THERMAL_POWER_BOSS,
5404     + INT3400_THERMAL_VIRTUAL_SENSOR,
5405     + INT3400_THERMAL_COOLING_MODE,
5406     + INT3400_THERMAL_HARDWARE_DUTY_CYCLING,
5407     INT3400_THERMAL_MAXIMUM_UUID,
5408     };
5409    
5410     @@ -29,6 +36,13 @@ static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
5411     "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
5412     "3A95C389-E4B8-4629-A526-C52C88626BAE",
5413     "97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
5414     + "63BE270F-1C11-48FD-A6F7-3AF253FF3E2D",
5415     + "5349962F-71E6-431D-9AE8-0A635B710AEE",
5416     + "9E04115A-AE87-4D1C-9500-0F3E340BFE75",
5417     + "F5A35014-C209-46A4-993A-EB56DE7530A1",
5418     + "6ED722A7-9240-48A5-B479-31EEF723D7CF",
5419     + "16CAF1B7-DD38-40ED-B1C1-1B8A1913D531",
5420     + "BE84BABF-C4D4-403D-B495-3128FD44dAC1",
5421     };
5422    
5423     struct int3400_thermal_priv {
5424     @@ -302,10 +316,9 @@ static int int3400_thermal_probe(struct platform_device *pdev)
5425    
5426     platform_set_drvdata(pdev, priv);
5427    
5428     - if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
5429     - int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
5430     - int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
5431     - }
5432     + int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
5433     + int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
5434     +
5435     priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
5436     priv, &int3400_thermal_ops,
5437     &int3400_thermal_params, 0, 0);
5438     diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
5439     index cde891c54cde..8e8328347c0e 100644
5440     --- a/drivers/thermal/intel_powerclamp.c
5441     +++ b/drivers/thermal/intel_powerclamp.c
5442     @@ -101,7 +101,7 @@ struct powerclamp_worker_data {
5443     bool clamping;
5444     };
5445    
5446     -static struct powerclamp_worker_data * __percpu worker_data;
5447     +static struct powerclamp_worker_data __percpu *worker_data;
5448     static struct thermal_cooling_device *cooling_dev;
5449     static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu
5450     * clamping kthread worker
5451     @@ -494,7 +494,7 @@ static void start_power_clamp_worker(unsigned long cpu)
5452     struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
5453     struct kthread_worker *worker;
5454    
5455     - worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inject/%ld", cpu);
5456     + worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu);
5457     if (IS_ERR(worker))
5458     return;
5459    
5460     diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
5461     index 48eef552cba4..fc9399d9c082 100644
5462     --- a/drivers/thermal/samsung/exynos_tmu.c
5463     +++ b/drivers/thermal/samsung/exynos_tmu.c
5464     @@ -666,7 +666,7 @@ static int exynos_get_temp(void *p, int *temp)
5465     struct exynos_tmu_data *data = p;
5466     int value, ret = 0;
5467    
5468     - if (!data || !data->tmu_read || !data->enabled)
5469     + if (!data || !data->tmu_read)
5470     return -EINVAL;
5471     else if (!data->enabled)
5472     /*
5473     diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
5474     index 0e3627289047..77efa0a43fe7 100644
5475     --- a/drivers/tty/serial/xilinx_uartps.c
5476     +++ b/drivers/tty/serial/xilinx_uartps.c
5477     @@ -1223,7 +1223,7 @@ static void cdns_uart_console_write(struct console *co, const char *s,
5478     *
5479     * Return: 0 on success, negative errno otherwise.
5480     */
5481     -static int __init cdns_uart_console_setup(struct console *co, char *options)
5482     +static int cdns_uart_console_setup(struct console *co, char *options)
5483     {
5484     struct uart_port *port = console_port;
5485    
5486     diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
5487     index 89bac3d2f05b..619128b55837 100644
5488     --- a/fs/9p/v9fs.c
5489     +++ b/fs/9p/v9fs.c
5490     @@ -61,6 +61,8 @@ enum {
5491     Opt_cache_loose, Opt_fscache, Opt_mmap,
5492     /* Access options */
5493     Opt_access, Opt_posixacl,
5494     + /* Lock timeout option */
5495     + Opt_locktimeout,
5496     /* Error token */
5497     Opt_err
5498     };
5499     @@ -80,6 +82,7 @@ static const match_table_t tokens = {
5500     {Opt_cachetag, "cachetag=%s"},
5501     {Opt_access, "access=%s"},
5502     {Opt_posixacl, "posixacl"},
5503     + {Opt_locktimeout, "locktimeout=%u"},
5504     {Opt_err, NULL}
5505     };
5506    
5507     @@ -187,6 +190,7 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
5508     #ifdef CONFIG_9P_FSCACHE
5509     v9ses->cachetag = NULL;
5510     #endif
5511     + v9ses->session_lock_timeout = P9_LOCK_TIMEOUT;
5512    
5513     if (!opts)
5514     return 0;
5515     @@ -359,6 +363,23 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
5516     #endif
5517     break;
5518    
5519     + case Opt_locktimeout:
5520     + r = match_int(&args[0], &option);
5521     + if (r < 0) {
5522     + p9_debug(P9_DEBUG_ERROR,
5523     + "integer field, but no integer?\n");
5524     + ret = r;
5525     + continue;
5526     + }
5527     + if (option < 1) {
5528     + p9_debug(P9_DEBUG_ERROR,
5529     + "locktimeout must be a greater than zero integer.\n");
5530     + ret = -EINVAL;
5531     + continue;
5532     + }
5533     + v9ses->session_lock_timeout = (long)option * HZ;
5534     + break;
5535     +
5536     default:
5537     continue;
5538     }
5539     diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
5540     index 982e017acadb..129e5243a6bf 100644
5541     --- a/fs/9p/v9fs.h
5542     +++ b/fs/9p/v9fs.h
5543     @@ -116,6 +116,7 @@ struct v9fs_session_info {
5544     struct p9_client *clnt; /* 9p client */
5545     struct list_head slist; /* list of sessions registered with v9fs */
5546     struct rw_semaphore rename_sem;
5547     + long session_lock_timeout; /* retry interval for blocking locks */
5548     };
5549    
5550     /* cache_validity flags */
5551     diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
5552     index 48db9a9f13f9..cb6c4031af55 100644
5553     --- a/fs/9p/vfs_dir.c
5554     +++ b/fs/9p/vfs_dir.c
5555     @@ -105,7 +105,6 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
5556     int err = 0;
5557     struct p9_fid *fid;
5558     int buflen;
5559     - int reclen = 0;
5560     struct p9_rdir *rdir;
5561     struct kvec kvec;
5562    
5563     @@ -138,11 +137,10 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
5564     while (rdir->head < rdir->tail) {
5565     err = p9stat_read(fid->clnt, rdir->buf + rdir->head,
5566     rdir->tail - rdir->head, &st);
5567     - if (err) {
5568     + if (err <= 0) {
5569     p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
5570     return -EIO;
5571     }
5572     - reclen = st.size+2;
5573    
5574     over = !dir_emit(ctx, st.name, strlen(st.name),
5575     v9fs_qid2ino(&st.qid), dt_type(&st));
5576     @@ -150,8 +148,8 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
5577     if (over)
5578     return 0;
5579    
5580     - rdir->head += reclen;
5581     - ctx->pos += reclen;
5582     + rdir->head += err;
5583     + ctx->pos += err;
5584     }
5585     }
5586     }
5587     diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
5588     index c87e6d6ec069..05454a7e22dc 100644
5589     --- a/fs/9p/vfs_file.c
5590     +++ b/fs/9p/vfs_file.c
5591     @@ -154,6 +154,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
5592     uint8_t status = P9_LOCK_ERROR;
5593     int res = 0;
5594     unsigned char fl_type;
5595     + struct v9fs_session_info *v9ses;
5596    
5597     fid = filp->private_data;
5598     BUG_ON(fid == NULL);
5599     @@ -189,6 +190,8 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
5600     if (IS_SETLKW(cmd))
5601     flock.flags = P9_LOCK_FLAGS_BLOCK;
5602    
5603     + v9ses = v9fs_inode2v9ses(file_inode(filp));
5604     +
5605     /*
5606     * if its a blocked request and we get P9_LOCK_BLOCKED as the status
5607     * for lock request, keep on trying
5608     @@ -202,7 +205,8 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
5609     break;
5610     if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
5611     break;
5612     - if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
5613     + if (schedule_timeout_interruptible(v9ses->session_lock_timeout)
5614     + != 0)
5615     break;
5616     /*
5617     * p9_client_lock_dotl overwrites flock.client_id with the
5618     diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
5619     index 020f49c15b30..b59ebed4f615 100644
5620     --- a/fs/cifs/inode.c
5621     +++ b/fs/cifs/inode.c
5622     @@ -780,43 +780,50 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
5623     } else if ((rc == -EACCES) && backup_cred(cifs_sb) &&
5624     (strcmp(server->vals->version_string, SMB1_VERSION_STRING)
5625     == 0)) {
5626     - /*
5627     - * For SMB2 and later the backup intent flag is already
5628     - * sent if needed on open and there is no path based
5629     - * FindFirst operation to use to retry with
5630     - */
5631     + /*
5632     + * For SMB2 and later the backup intent flag is already
5633     + * sent if needed on open and there is no path based
5634     + * FindFirst operation to use to retry with
5635     + */
5636    
5637     - srchinf = kzalloc(sizeof(struct cifs_search_info),
5638     - GFP_KERNEL);
5639     - if (srchinf == NULL) {
5640     - rc = -ENOMEM;
5641     - goto cgii_exit;
5642     - }
5643     + srchinf = kzalloc(sizeof(struct cifs_search_info),
5644     + GFP_KERNEL);
5645     + if (srchinf == NULL) {
5646     + rc = -ENOMEM;
5647     + goto cgii_exit;
5648     + }
5649    
5650     - srchinf->endOfSearch = false;
5651     + srchinf->endOfSearch = false;
5652     + if (tcon->unix_ext)
5653     + srchinf->info_level = SMB_FIND_FILE_UNIX;
5654     + else if ((tcon->ses->capabilities &
5655     + tcon->ses->server->vals->cap_nt_find) == 0)
5656     + srchinf->info_level = SMB_FIND_FILE_INFO_STANDARD;
5657     + else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
5658     srchinf->info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
5659     + else /* no srvino useful for fallback to some netapp */
5660     + srchinf->info_level = SMB_FIND_FILE_DIRECTORY_INFO;
5661    
5662     - srchflgs = CIFS_SEARCH_CLOSE_ALWAYS |
5663     - CIFS_SEARCH_CLOSE_AT_END |
5664     - CIFS_SEARCH_BACKUP_SEARCH;
5665     + srchflgs = CIFS_SEARCH_CLOSE_ALWAYS |
5666     + CIFS_SEARCH_CLOSE_AT_END |
5667     + CIFS_SEARCH_BACKUP_SEARCH;
5668    
5669     - rc = CIFSFindFirst(xid, tcon, full_path,
5670     - cifs_sb, NULL, srchflgs, srchinf, false);
5671     - if (!rc) {
5672     - data =
5673     - (FILE_ALL_INFO *)srchinf->srch_entries_start;
5674     + rc = CIFSFindFirst(xid, tcon, full_path,
5675     + cifs_sb, NULL, srchflgs, srchinf, false);
5676     + if (!rc) {
5677     + data = (FILE_ALL_INFO *)srchinf->srch_entries_start;
5678    
5679     - cifs_dir_info_to_fattr(&fattr,
5680     - (FILE_DIRECTORY_INFO *)data, cifs_sb);
5681     - fattr.cf_uniqueid = le64_to_cpu(
5682     - ((SEARCH_ID_FULL_DIR_INFO *)data)->UniqueId);
5683     - validinum = true;
5684     + cifs_dir_info_to_fattr(&fattr,
5685     + (FILE_DIRECTORY_INFO *)data, cifs_sb);
5686     + fattr.cf_uniqueid = le64_to_cpu(
5687     + ((SEARCH_ID_FULL_DIR_INFO *)data)->UniqueId);
5688     + validinum = true;
5689    
5690     - cifs_buf_release(srchinf->ntwrk_buf_start);
5691     - }
5692     - kfree(srchinf);
5693     - if (rc)
5694     - goto cgii_exit;
5695     + cifs_buf_release(srchinf->ntwrk_buf_start);
5696     + }
5697     + kfree(srchinf);
5698     + if (rc)
5699     + goto cgii_exit;
5700     } else
5701     goto cgii_exit;
5702    
5703     diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
5704     index c3ae8c1d6089..18814f1d67d9 100644
5705     --- a/fs/cifs/smb2maperror.c
5706     +++ b/fs/cifs/smb2maperror.c
5707     @@ -1036,7 +1036,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
5708     {STATUS_UNFINISHED_CONTEXT_DELETED, -EIO,
5709     "STATUS_UNFINISHED_CONTEXT_DELETED"},
5710     {STATUS_NO_TGT_REPLY, -EIO, "STATUS_NO_TGT_REPLY"},
5711     - {STATUS_OBJECTID_NOT_FOUND, -EIO, "STATUS_OBJECTID_NOT_FOUND"},
5712     + /* Note that ENOATTTR and ENODATA are the same errno */
5713     + {STATUS_OBJECTID_NOT_FOUND, -ENODATA, "STATUS_OBJECTID_NOT_FOUND"},
5714     {STATUS_NO_IP_ADDRESSES, -EIO, "STATUS_NO_IP_ADDRESSES"},
5715     {STATUS_WRONG_CREDENTIAL_HANDLE, -EIO,
5716     "STATUS_WRONG_CREDENTIAL_HANDLE"},
5717     diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
5718     index 2e76fb55d94a..5f24fdc140ad 100644
5719     --- a/fs/ext4/ioctl.c
5720     +++ b/fs/ext4/ioctl.c
5721     @@ -999,6 +999,13 @@ resizefs_out:
5722     if (!blk_queue_discard(q))
5723     return -EOPNOTSUPP;
5724    
5725     + /*
5726     + * We haven't replayed the journal, so we cannot use our
5727     + * block-bitmap-guided storage zapping commands.
5728     + */
5729     + if (test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb))
5730     + return -EROFS;
5731     +
5732     if (copy_from_user(&range, (struct fstrim_range __user *)arg,
5733     sizeof(range)))
5734     return -EFAULT;
5735     diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
5736     index 3d9b18505c0c..e7ae26e36c9c 100644
5737     --- a/fs/ext4/resize.c
5738     +++ b/fs/ext4/resize.c
5739     @@ -932,11 +932,18 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
5740     memcpy(n_group_desc, o_group_desc,
5741     EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
5742     n_group_desc[gdb_num] = gdb_bh;
5743     +
5744     + BUFFER_TRACE(gdb_bh, "get_write_access");
5745     + err = ext4_journal_get_write_access(handle, gdb_bh);
5746     + if (err) {
5747     + kvfree(n_group_desc);
5748     + brelse(gdb_bh);
5749     + return err;
5750     + }
5751     +
5752     EXT4_SB(sb)->s_group_desc = n_group_desc;
5753     EXT4_SB(sb)->s_gdb_count++;
5754     kvfree(o_group_desc);
5755     - BUFFER_TRACE(gdb_bh, "get_write_access");
5756     - err = ext4_journal_get_write_access(handle, gdb_bh);
5757     return err;
5758     }
5759    
5760     @@ -2073,6 +2080,10 @@ out:
5761     free_flex_gd(flex_gd);
5762     if (resize_inode != NULL)
5763     iput(resize_inode);
5764     - ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", n_blocks_count);
5765     + if (err)
5766     + ext4_warning(sb, "error (%d) occurred during "
5767     + "file system resize", err);
5768     + ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
5769     + ext4_blocks_count(es));
5770     return err;
5771     }
5772     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
5773     index a1cf7d68b4f0..abba7ece78e9 100644
5774     --- a/fs/ext4/super.c
5775     +++ b/fs/ext4/super.c
5776     @@ -430,6 +430,12 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
5777     spin_unlock(&sbi->s_md_lock);
5778     }
5779    
5780     +static bool system_going_down(void)
5781     +{
5782     + return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
5783     + || system_state == SYSTEM_RESTART;
5784     +}
5785     +
5786     /* Deal with the reporting of failure conditions on a filesystem such as
5787     * inconsistencies detected or read IO failures.
5788     *
5789     @@ -460,7 +466,12 @@ static void ext4_handle_error(struct super_block *sb)
5790     if (journal)
5791     jbd2_journal_abort(journal, -EIO);
5792     }
5793     - if (test_opt(sb, ERRORS_RO)) {
5794     + /*
5795     + * We force ERRORS_RO behavior when system is rebooting. Otherwise we
5796     + * could panic during 'reboot -f' as the underlying device got already
5797     + * disabled.
5798     + */
5799     + if (test_opt(sb, ERRORS_RO) || system_going_down()) {
5800     ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
5801     /*
5802     * Make sure updated value of ->s_mount_flags will be visible
5803     @@ -468,8 +479,7 @@ static void ext4_handle_error(struct super_block *sb)
5804     */
5805     smp_wmb();
5806     sb->s_flags |= SB_RDONLY;
5807     - }
5808     - if (test_opt(sb, ERRORS_PANIC)) {
5809     + } else if (test_opt(sb, ERRORS_PANIC)) {
5810     if (EXT4_SB(sb)->s_journal &&
5811     !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
5812     return;
5813     diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
5814     index 214a968962a1..ebe649d9793c 100644
5815     --- a/fs/f2fs/debug.c
5816     +++ b/fs/f2fs/debug.c
5817     @@ -190,8 +190,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
5818     si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
5819     si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
5820     si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
5821     - if (f2fs_discard_en(sbi))
5822     - si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
5823     + si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
5824     si->base_mem += SIT_VBLOCK_MAP_SIZE;
5825     if (sbi->segs_per_sec > 1)
5826     si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
5827     diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
5828     index a3ba20e5946f..1f5d5f62bb77 100644
5829     --- a/fs/f2fs/f2fs.h
5830     +++ b/fs/f2fs/f2fs.h
5831     @@ -3409,11 +3409,20 @@ static inline int get_blkz_type(struct f2fs_sb_info *sbi,
5832     }
5833     #endif
5834    
5835     -static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi)
5836     +static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
5837     {
5838     - struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
5839     + return f2fs_sb_has_blkzoned(sbi->sb);
5840     +}
5841    
5842     - return blk_queue_discard(q) || f2fs_sb_has_blkzoned(sbi->sb);
5843     +static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
5844     +{
5845     + return blk_queue_discard(bdev_get_queue(sbi->sb->s_bdev));
5846     +}
5847     +
5848     +static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
5849     +{
5850     + return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
5851     + f2fs_hw_should_discard(sbi);
5852     }
5853    
5854     static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
5855     diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
5856     index 81c1dd635a8d..b3f46e3bec17 100644
5857     --- a/fs/f2fs/file.c
5858     +++ b/fs/f2fs/file.c
5859     @@ -770,7 +770,6 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
5860     {
5861     struct inode *inode = d_inode(dentry);
5862     int err;
5863     - bool size_changed = false;
5864    
5865     if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
5866     return -EIO;
5867     @@ -830,8 +829,6 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
5868     down_write(&F2FS_I(inode)->i_sem);
5869     F2FS_I(inode)->last_disk_size = i_size_read(inode);
5870     up_write(&F2FS_I(inode)->i_sem);
5871     -
5872     - size_changed = true;
5873     }
5874    
5875     __setattr_copy(inode, attr);
5876     @@ -845,7 +842,7 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
5877     }
5878    
5879     /* file size may changed here */
5880     - f2fs_mark_inode_dirty_sync(inode, size_changed);
5881     + f2fs_mark_inode_dirty_sync(inode, true);
5882    
5883     /* inode change will produce dirty node pages flushed by checkpoint */
5884     f2fs_balance_fs(F2FS_I_SB(inode), true);
5885     @@ -1983,7 +1980,7 @@ static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
5886     if (!capable(CAP_SYS_ADMIN))
5887     return -EPERM;
5888    
5889     - if (!blk_queue_discard(q))
5890     + if (!f2fs_hw_support_discard(F2FS_SB(sb)))
5891     return -EOPNOTSUPP;
5892    
5893     if (copy_from_user(&range, (struct fstrim_range __user *)arg,
5894     diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
5895     index 9a8579fb3a30..ae0e5f2e67b4 100644
5896     --- a/fs/f2fs/recovery.c
5897     +++ b/fs/f2fs/recovery.c
5898     @@ -99,8 +99,12 @@ err_out:
5899     return ERR_PTR(err);
5900     }
5901    
5902     -static void del_fsync_inode(struct fsync_inode_entry *entry)
5903     +static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
5904     {
5905     + if (drop) {
5906     + /* inode should not be recovered, drop it */
5907     + f2fs_inode_synced(entry->inode);
5908     + }
5909     iput(entry->inode);
5910     list_del(&entry->list);
5911     kmem_cache_free(fsync_entry_slab, entry);
5912     @@ -321,12 +325,12 @@ next:
5913     return err;
5914     }
5915    
5916     -static void destroy_fsync_dnodes(struct list_head *head)
5917     +static void destroy_fsync_dnodes(struct list_head *head, int drop)
5918     {
5919     struct fsync_inode_entry *entry, *tmp;
5920    
5921     list_for_each_entry_safe(entry, tmp, head, list)
5922     - del_fsync_inode(entry);
5923     + del_fsync_inode(entry, drop);
5924     }
5925    
5926     static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
5927     @@ -561,7 +565,7 @@ out:
5928     }
5929    
5930     static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
5931     - struct list_head *dir_list)
5932     + struct list_head *tmp_inode_list, struct list_head *dir_list)
5933     {
5934     struct curseg_info *curseg;
5935     struct page *page = NULL;
5936     @@ -615,7 +619,7 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
5937     }
5938    
5939     if (entry->blkaddr == blkaddr)
5940     - del_fsync_inode(entry);
5941     + list_move_tail(&entry->list, tmp_inode_list);
5942     next:
5943     /* check next segment */
5944     blkaddr = next_blkaddr_of_node(page);
5945     @@ -628,7 +632,7 @@ next:
5946    
5947     int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
5948     {
5949     - struct list_head inode_list;
5950     + struct list_head inode_list, tmp_inode_list;
5951     struct list_head dir_list;
5952     int err;
5953     int ret = 0;
5954     @@ -659,6 +663,7 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
5955     }
5956    
5957     INIT_LIST_HEAD(&inode_list);
5958     + INIT_LIST_HEAD(&tmp_inode_list);
5959     INIT_LIST_HEAD(&dir_list);
5960    
5961     /* prevent checkpoint */
5962     @@ -677,11 +682,16 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
5963     need_writecp = true;
5964    
5965     /* step #2: recover data */
5966     - err = recover_data(sbi, &inode_list, &dir_list);
5967     + err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
5968     if (!err)
5969     f2fs_bug_on(sbi, !list_empty(&inode_list));
5970     + else {
5971     + /* restore s_flags to let iput() trash data */
5972     + sbi->sb->s_flags = s_flags;
5973     + }
5974     skip:
5975     - destroy_fsync_dnodes(&inode_list);
5976     + destroy_fsync_dnodes(&inode_list, err);
5977     + destroy_fsync_dnodes(&tmp_inode_list, err);
5978    
5979     /* truncate meta pages to be used by the recovery */
5980     truncate_inode_pages_range(META_MAPPING(sbi),
5981     @@ -690,13 +700,13 @@ skip:
5982     if (err) {
5983     truncate_inode_pages_final(NODE_MAPPING(sbi));
5984     truncate_inode_pages_final(META_MAPPING(sbi));
5985     + } else {
5986     + clear_sbi_flag(sbi, SBI_POR_DOING);
5987     }
5988     -
5989     - clear_sbi_flag(sbi, SBI_POR_DOING);
5990     mutex_unlock(&sbi->cp_mutex);
5991    
5992     /* let's drop all the directory inodes for clean checkpoint */
5993     - destroy_fsync_dnodes(&dir_list);
5994     + destroy_fsync_dnodes(&dir_list, err);
5995    
5996     if (need_writecp) {
5997     set_sbi_flag(sbi, SBI_IS_RECOVERED);
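The fs/f2fs/recovery.c hunks above rework fsync recovery so that entries recovered successfully are moved onto a temporary list (tmp_inode_list) with list_move_tail() instead of being freed in the middle of the walk, and both lists are drained once at the end. The new drop flag makes the teardown aware of failure: when recovery did not succeed, f2fs_inode_synced() is called first so the final iput() does not write the half-recovered state back. A minimal sketch of the same move-then-drain pattern, with names invented for the example:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct rec_entry {
            struct list_head list;
    };

    /* drain a list; when drop is set, forget in-memory state before freeing */
    static void drain(struct list_head *head, bool drop)
    {
            struct rec_entry *e, *tmp;

            list_for_each_entry_safe(e, tmp, head, list) {
                    if (drop)
                            discard_entry_state(e);   /* assumed helper, not from the patch */
                    list_del(&e->list);
                    kfree(e);
            }
    }

    /* during the walk, finished entries are moved with list_move_tail(&e->list, &done);
     * at the end both lists get drained with the same drop flag, so every entry is
     * released exactly once whether or not an error occurred. */
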
5998     diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
5999     index 1fa6f8185766..ac038563273d 100644
6000     --- a/fs/f2fs/segment.c
6001     +++ b/fs/f2fs/segment.c
6002     @@ -1744,11 +1744,11 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
6003     struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
6004     int i;
6005    
6006     - if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
6007     + if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi))
6008     return false;
6009    
6010     if (!force) {
6011     - if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
6012     + if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
6013     SM_I(sbi)->dcc_info->nr_discards >=
6014     SM_I(sbi)->dcc_info->max_discards)
6015     return false;
6016     @@ -1854,7 +1854,7 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
6017     dirty_i->nr_dirty[PRE]--;
6018     }
6019    
6020     - if (!test_opt(sbi, DISCARD))
6021     + if (!f2fs_realtime_discard_enable(sbi))
6022     continue;
6023    
6024     if (force && start >= cpc->trim_start &&
6025     @@ -2044,8 +2044,7 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
6026     del = 0;
6027     }
6028    
6029     - if (f2fs_discard_en(sbi) &&
6030     - !f2fs_test_and_set_bit(offset, se->discard_map))
6031     + if (!f2fs_test_and_set_bit(offset, se->discard_map))
6032     sbi->discard_blks--;
6033    
6034     /* don't overwrite by SSR to keep node chain */
6035     @@ -2073,8 +2072,7 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
6036     del = 0;
6037     }
6038    
6039     - if (f2fs_discard_en(sbi) &&
6040     - f2fs_test_and_clear_bit(offset, se->discard_map))
6041     + if (f2fs_test_and_clear_bit(offset, se->discard_map))
6042     sbi->discard_blks++;
6043     }
6044     if (!f2fs_test_bit(offset, se->ckpt_valid_map))
6045     @@ -2690,7 +2688,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
6046     * discard option. User configuration looks like using runtime discard
6047     * or periodic fstrim instead of it.
6048     */
6049     - if (test_opt(sbi, DISCARD))
6050     + if (f2fs_realtime_discard_enable(sbi))
6051     goto out;
6052    
6053     start_block = START_BLOCK(sbi, start_segno);
6054     @@ -3781,13 +3779,11 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
6055     return -ENOMEM;
6056     #endif
6057    
6058     - if (f2fs_discard_en(sbi)) {
6059     - sit_i->sentries[start].discard_map
6060     - = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE,
6061     - GFP_KERNEL);
6062     - if (!sit_i->sentries[start].discard_map)
6063     - return -ENOMEM;
6064     - }
6065     + sit_i->sentries[start].discard_map
6066     + = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE,
6067     + GFP_KERNEL);
6068     + if (!sit_i->sentries[start].discard_map)
6069     + return -ENOMEM;
6070     }
6071    
6072     sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
6073     @@ -3935,18 +3931,16 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
6074     total_node_blocks += se->valid_blocks;
6075    
6076     /* build discard map only one time */
6077     - if (f2fs_discard_en(sbi)) {
6078     - if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
6079     - memset(se->discard_map, 0xff,
6080     - SIT_VBLOCK_MAP_SIZE);
6081     - } else {
6082     - memcpy(se->discard_map,
6083     - se->cur_valid_map,
6084     - SIT_VBLOCK_MAP_SIZE);
6085     - sbi->discard_blks +=
6086     - sbi->blocks_per_seg -
6087     - se->valid_blocks;
6088     - }
6089     + if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
6090     + memset(se->discard_map, 0xff,
6091     + SIT_VBLOCK_MAP_SIZE);
6092     + } else {
6093     + memcpy(se->discard_map,
6094     + se->cur_valid_map,
6095     + SIT_VBLOCK_MAP_SIZE);
6096     + sbi->discard_blks +=
6097     + sbi->blocks_per_seg -
6098     + se->valid_blocks;
6099     }
6100    
6101     if (sbi->segs_per_sec > 1)
6102     @@ -3984,16 +3978,13 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
6103     if (IS_NODESEG(se->type))
6104     total_node_blocks += se->valid_blocks;
6105    
6106     - if (f2fs_discard_en(sbi)) {
6107     - if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
6108     - memset(se->discard_map, 0xff,
6109     - SIT_VBLOCK_MAP_SIZE);
6110     - } else {
6111     - memcpy(se->discard_map, se->cur_valid_map,
6112     - SIT_VBLOCK_MAP_SIZE);
6113     - sbi->discard_blks += old_valid_blocks;
6114     - sbi->discard_blks -= se->valid_blocks;
6115     - }
6116     + if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
6117     + memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
6118     + } else {
6119     + memcpy(se->discard_map, se->cur_valid_map,
6120     + SIT_VBLOCK_MAP_SIZE);
6121     + sbi->discard_blks += old_valid_blocks;
6122     + sbi->discard_blks -= se->valid_blocks;
6123     }
6124    
6125     if (sbi->segs_per_sec > 1) {
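Across fs/f2fs/segment.c the per-segment discard_map is now allocated unconditionally, so the f2fs_discard_en() guards go away and the remaining checks split into two questions answered by helpers this patch adds to fs/f2fs/f2fs.h (earlier in this file): f2fs_hw_support_discard() asks whether the device can accept discards at all, and f2fs_realtime_discard_enable() asks whether runtime discards should actually be issued. A paraphrased sketch of that split (not the exact f2fs.h hunk):

    /* capability: the underlying block device accepts discard requests */
    static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
    {
            return blk_queue_discard(bdev_get_queue(sbi->sb->s_bdev));
    }

    /* policy: the "discard" mount option is set and the device supports it,
     * or the device is one that has to be discarded (zoned block devices) */
    static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
    {
            return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
                    f2fs_hw_should_discard(sbi);
    }
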
6126     diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
6127     index 79370b7fa9d2..2264f27fd26d 100644
6128     --- a/fs/f2fs/super.c
6129     +++ b/fs/f2fs/super.c
6130     @@ -360,7 +360,6 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
6131     static int parse_options(struct super_block *sb, char *options)
6132     {
6133     struct f2fs_sb_info *sbi = F2FS_SB(sb);
6134     - struct request_queue *q;
6135     substring_t args[MAX_OPT_ARGS];
6136     char *p, *name;
6137     int arg = 0;
6138     @@ -415,14 +414,7 @@ static int parse_options(struct super_block *sb, char *options)
6139     return -EINVAL;
6140     break;
6141     case Opt_discard:
6142     - q = bdev_get_queue(sb->s_bdev);
6143     - if (blk_queue_discard(q)) {
6144     - set_opt(sbi, DISCARD);
6145     - } else if (!f2fs_sb_has_blkzoned(sb)) {
6146     - f2fs_msg(sb, KERN_WARNING,
6147     - "mounting with \"discard\" option, but "
6148     - "the device does not support discard");
6149     - }
6150     + set_opt(sbi, DISCARD);
6151     break;
6152     case Opt_nodiscard:
6153     if (f2fs_sb_has_blkzoned(sb)) {
6154     @@ -1033,7 +1025,8 @@ static void f2fs_put_super(struct super_block *sb)
6155     /* be sure to wait for any on-going discard commands */
6156     dropped = f2fs_wait_discard_bios(sbi);
6157    
6158     - if (f2fs_discard_en(sbi) && !sbi->discard_blks && !dropped) {
6159     + if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
6160     + !sbi->discard_blks && !dropped) {
6161     struct cp_control cpc = {
6162     .reason = CP_UMOUNT | CP_TRIMMED,
6163     };
6164     @@ -1403,8 +1396,7 @@ static void default_options(struct f2fs_sb_info *sbi)
6165     set_opt(sbi, NOHEAP);
6166     sbi->sb->s_flags |= SB_LAZYTIME;
6167     set_opt(sbi, FLUSH_MERGE);
6168     - if (blk_queue_discard(bdev_get_queue(sbi->sb->s_bdev)))
6169     - set_opt(sbi, DISCARD);
6170     + set_opt(sbi, DISCARD);
6171     if (f2fs_sb_has_blkzoned(sbi->sb))
6172     set_opt_mode(sbi, F2FS_MOUNT_LFS);
6173     else
6174     @@ -1893,6 +1885,19 @@ void f2fs_quota_off_umount(struct super_block *sb)
6175     }
6176     }
6177    
6178     +static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
6179     +{
6180     + struct quota_info *dqopt = sb_dqopt(sb);
6181     + int type;
6182     +
6183     + for (type = 0; type < MAXQUOTAS; type++) {
6184     + if (!dqopt->files[type])
6185     + continue;
6186     + f2fs_inode_synced(dqopt->files[type]);
6187     + }
6188     +}
6189     +
6190     +
6191     static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
6192     {
6193     *projid = F2FS_I(inode)->i_projid;
6194     @@ -2337,7 +2342,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
6195     unsigned int segment_count_main;
6196     unsigned int cp_pack_start_sum, cp_payload;
6197     block_t user_block_count;
6198     - int i;
6199     + int i, j;
6200    
6201     total = le32_to_cpu(raw_super->segment_count);
6202     fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
6203     @@ -2378,11 +2383,43 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
6204     if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
6205     le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
6206     return 1;
6207     + for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
6208     + if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
6209     + le32_to_cpu(ckpt->cur_node_segno[j])) {
6210     + f2fs_msg(sbi->sb, KERN_ERR,
6211     + "Node segment (%u, %u) has the same "
6212     + "segno: %u", i, j,
6213     + le32_to_cpu(ckpt->cur_node_segno[i]));
6214     + return 1;
6215     + }
6216     + }
6217     }
6218     for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
6219     if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
6220     le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
6221     return 1;
6222     + for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
6223     + if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
6224     + le32_to_cpu(ckpt->cur_data_segno[j])) {
6225     + f2fs_msg(sbi->sb, KERN_ERR,
6226     + "Data segment (%u, %u) has the same "
6227     + "segno: %u", i, j,
6228     + le32_to_cpu(ckpt->cur_data_segno[i]));
6229     + return 1;
6230     + }
6231     + }
6232     + }
6233     + for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
6234     + for (j = i; j < NR_CURSEG_DATA_TYPE; j++) {
6235     + if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
6236     + le32_to_cpu(ckpt->cur_data_segno[j])) {
6237     + f2fs_msg(sbi->sb, KERN_ERR,
6238     + "Data segment (%u) and Data segment (%u)"
6239     + " has the same segno: %u", i, j,
6240     + le32_to_cpu(ckpt->cur_node_segno[i]));
6241     + return 1;
6242     + }
6243     + }
6244     }
6245    
6246     sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
6247     @@ -3107,10 +3144,10 @@ skip_recovery:
6248    
6249     free_meta:
6250     #ifdef CONFIG_QUOTA
6251     + f2fs_truncate_quota_inode_pages(sb);
6252     if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb))
6253     f2fs_quota_off_umount(sbi->sb);
6254     #endif
6255     - f2fs_sync_inode_meta(sbi);
6256     /*
6257     * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
6258     * failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()
6259     diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
6260     index 780bba695453..97a51690338e 100644
6261     --- a/fs/notify/inotify/inotify_user.c
6262     +++ b/fs/notify/inotify/inotify_user.c
6263     @@ -519,8 +519,10 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
6264     fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
6265     if (!fsn_mark)
6266     return -ENOENT;
6267     - else if (create)
6268     - return -EEXIST;
6269     + else if (create) {
6270     + ret = -EEXIST;
6271     + goto out;
6272     + }
6273    
6274     i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
6275    
6276     @@ -548,6 +550,7 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
6277     /* return the wd */
6278     ret = i_mark->wd;
6279    
6280     +out:
6281     /* match the get from fsnotify_find_mark() */
6282     fsnotify_put_mark(fsn_mark);
6283    
6284     diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
6285     index d297fe4472a9..d0137e3e585e 100644
6286     --- a/fs/proc/kcore.c
6287     +++ b/fs/proc/kcore.c
6288     @@ -54,6 +54,28 @@ static LIST_HEAD(kclist_head);
6289     static DECLARE_RWSEM(kclist_lock);
6290     static int kcore_need_update = 1;
6291    
6292     +/*
6293     + * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
6294     + * Same as oldmem_pfn_is_ram in vmcore
6295     + */
6296     +static int (*mem_pfn_is_ram)(unsigned long pfn);
6297     +
6298     +int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
6299     +{
6300     + if (mem_pfn_is_ram)
6301     + return -EBUSY;
6302     + mem_pfn_is_ram = fn;
6303     + return 0;
6304     +}
6305     +
6306     +static int pfn_is_ram(unsigned long pfn)
6307     +{
6308     + if (mem_pfn_is_ram)
6309     + return mem_pfn_is_ram(pfn);
6310     + else
6311     + return 1;
6312     +}
6313     +
6314     /* This doesn't grab kclist_lock, so it should only be used at init time. */
6315     void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
6316     int type)
6317     @@ -465,6 +487,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
6318     goto out;
6319     }
6320     m = NULL; /* skip the list anchor */
6321     + } else if (!pfn_is_ram(__pa(start) >> PAGE_SHIFT)) {
6322     + if (clear_user(buffer, tsz)) {
6323     + ret = -EFAULT;
6324     + goto out;
6325     + }
6326     } else if (m->type == KCORE_VMALLOC) {
6327     vread(buf, (char *)start, tsz);
6328     /* we have to zero-fill user buffer even if no read */
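The fs/proc/kcore.c change adds an optional backend hook: one callback can be installed with register_mem_pfn_is_ram(), and read_kcore() then zero-fills any page the callback reports as non-RAM instead of reading it, which matters when parts of the direct map are not actually backed (for example under a memory balloon). A hypothetical registration, assuming a helper that knows which pages are backed:

    #include <linux/kcore.h>

    /* contract from the patch: > 0 means RAM, 0 means not RAM, < 0 on error */
    static int balloon_pfn_is_ram(unsigned long pfn)
    {
            return balloon_page_backed(pfn) ? 1 : 0;   /* assumed helper, not from the patch */
    }

    static int __init balloon_kcore_init(void)
    {
            /* only one callback may be installed; a second attempt gets -EBUSY */
            return register_mem_pfn_is_ram(balloon_pfn_is_ram);
    }
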
6329     diff --git a/include/linux/atalk.h b/include/linux/atalk.h
6330     index 23f805562f4e..d5cfc0b15b76 100644
6331     --- a/include/linux/atalk.h
6332     +++ b/include/linux/atalk.h
6333     @@ -158,19 +158,29 @@ extern int sysctl_aarp_retransmit_limit;
6334     extern int sysctl_aarp_resolve_time;
6335    
6336     #ifdef CONFIG_SYSCTL
6337     -extern void atalk_register_sysctl(void);
6338     +extern int atalk_register_sysctl(void);
6339     extern void atalk_unregister_sysctl(void);
6340     #else
6341     -#define atalk_register_sysctl() do { } while(0)
6342     -#define atalk_unregister_sysctl() do { } while(0)
6343     +static inline int atalk_register_sysctl(void)
6344     +{
6345     + return 0;
6346     +}
6347     +static inline void atalk_unregister_sysctl(void)
6348     +{
6349     +}
6350     #endif
6351    
6352     #ifdef CONFIG_PROC_FS
6353     extern int atalk_proc_init(void);
6354     extern void atalk_proc_exit(void);
6355     #else
6356     -#define atalk_proc_init() ({ 0; })
6357     -#define atalk_proc_exit() do { } while(0)
6358     +static inline int atalk_proc_init(void)
6359     +{
6360     + return 0;
6361     +}
6362     +static inline void atalk_proc_exit(void)
6363     +{
6364     +}
6365     #endif /* CONFIG_PROC_FS */
6366    
6367     #endif /* __LINUX_ATALK_H__ */
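The include/linux/atalk.h change replaces do-nothing macros with static inline stubs for the compiled-out cases. The stubs keep full type checking, avoid empty-statement surprises at the call sites, and give atalk_proc_init()/atalk_register_sysctl() a real return value that atalk_init() can propagate (see the ddp.c hunk further below). Generic shape of the pattern:

    #ifdef CONFIG_FOO
    int foo_init(void);
    void foo_exit(void);
    #else
    /* compiled-out stubs: callers still type-check and can test the result */
    static inline int foo_init(void)
    {
            return 0;
    }
    static inline void foo_exit(void)
    {
    }
    #endif
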
6368     diff --git a/include/linux/compiler.h b/include/linux/compiler.h
6369     index 269d376f5a11..81c2238b884c 100644
6370     --- a/include/linux/compiler.h
6371     +++ b/include/linux/compiler.h
6372     @@ -124,7 +124,10 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
6373     # define ASM_UNREACHABLE
6374     #endif
6375     #ifndef unreachable
6376     -# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
6377     +# define unreachable() do { \
6378     + annotate_unreachable(); \
6379     + __builtin_unreachable(); \
6380     +} while (0)
6381     #endif
6382    
6383     /*
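The include/linux/compiler.h hunk makes the fallback unreachable() actually tell the compiler that the location is dead: instead of an empty infinite loop it now expands to annotate_unreachable() (consumed by objtool) followed by __builtin_unreachable(), so dead paths can be optimized away and functions that end in unreachable() no longer trip "control reaches end of non-void function" warnings. A small illustrative use:

    enum prio { PRIO_LOW, PRIO_HIGH };

    static int prio_to_index(enum prio p)
    {
            switch (p) {
            case PRIO_LOW:
                    return 0;
            case PRIO_HIGH:
                    return 1;
            }
            unreachable();   /* the compiler now knows this point is never reached */
    }
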
6384     diff --git a/include/linux/kcore.h b/include/linux/kcore.h
6385     index 8c3f8c14eeaa..c843f4a9c512 100644
6386     --- a/include/linux/kcore.h
6387     +++ b/include/linux/kcore.h
6388     @@ -44,6 +44,8 @@ void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
6389     m->vaddr = (unsigned long)vaddr;
6390     kclist_add(m, addr, sz, KCORE_REMAP);
6391     }
6392     +
6393     +extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn));
6394     #else
6395     static inline
6396     void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
6397     diff --git a/include/linux/swap.h b/include/linux/swap.h
6398     index 77221c16733a..7bd0a6f2ac2b 100644
6399     --- a/include/linux/swap.h
6400     +++ b/include/linux/swap.h
6401     @@ -155,9 +155,9 @@ struct swap_extent {
6402     /*
6403     * Max bad pages in the new format..
6404     */
6405     -#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
6406     #define MAX_SWAP_BADPAGES \
6407     - ((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
6408     + ((offsetof(union swap_header, magic.magic) - \
6409     + offsetof(union swap_header, info.badpages)) / sizeof(int))
6410    
6411     enum {
6412     SWP_USED = (1 << 0), /* is slot in swap_info[] used? */
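In include/linux/swap.h, MAX_SWAP_BADPAGES is now derived with offsetof() rather than the removed __swapoffset() macro, which computed offsets by taking the address of a member through a cast null pointer, a formally undefined construct. The resulting value is the same; only the idiom changes. Generic before/after on a made-up structure:

    #include <stddef.h>

    struct hdr {
            char pad[16];
            int count;
    };

    /* old idiom: ((unsigned long)&((struct hdr *)0)->count)   relies on UB */
    /* new idiom: the standard, well-defined equivalent */
    enum { COUNT_OFFSET = offsetof(struct hdr, count) };
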
6413     diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
6414     index 0db1b9b428b7..1dfb75057580 100644
6415     --- a/include/net/bluetooth/hci_core.h
6416     +++ b/include/net/bluetooth/hci_core.h
6417     @@ -259,6 +259,8 @@ struct hci_dev {
6418     __u16 le_max_tx_time;
6419     __u16 le_max_rx_len;
6420     __u16 le_max_rx_time;
6421     + __u8 le_max_key_size;
6422     + __u8 le_min_key_size;
6423     __u16 discov_interleaved_timeout;
6424     __u16 conn_info_min_age;
6425     __u16 conn_info_max_age;
6426     diff --git a/include/net/xfrm.h b/include/net/xfrm.h
6427     index da588def3c61..5e3daf53b3d1 100644
6428     --- a/include/net/xfrm.h
6429     +++ b/include/net/xfrm.h
6430     @@ -850,7 +850,7 @@ static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
6431     xfrm_pol_put(pols[i]);
6432     }
6433    
6434     -void __xfrm_state_destroy(struct xfrm_state *);
6435     +void __xfrm_state_destroy(struct xfrm_state *, bool);
6436    
6437     static inline void __xfrm_state_put(struct xfrm_state *x)
6438     {
6439     @@ -860,7 +860,13 @@ static inline void __xfrm_state_put(struct xfrm_state *x)
6440     static inline void xfrm_state_put(struct xfrm_state *x)
6441     {
6442     if (refcount_dec_and_test(&x->refcnt))
6443     - __xfrm_state_destroy(x);
6444     + __xfrm_state_destroy(x, false);
6445     +}
6446     +
6447     +static inline void xfrm_state_put_sync(struct xfrm_state *x)
6448     +{
6449     + if (refcount_dec_and_test(&x->refcnt))
6450     + __xfrm_state_destroy(x, true);
6451     }
6452    
6453     static inline void xfrm_state_hold(struct xfrm_state *x)
6454     @@ -1616,7 +1622,7 @@ struct xfrmk_spdinfo {
6455    
6456     struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
6457     int xfrm_state_delete(struct xfrm_state *x);
6458     -int xfrm_state_flush(struct net *net, u8 proto, bool task_valid);
6459     +int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
6460     int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
6461     void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
6462     void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
6463     diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
6464     index 573d5b901fb1..6d182746afab 100644
6465     --- a/include/trace/events/rxrpc.h
6466     +++ b/include/trace/events/rxrpc.h
6467     @@ -76,6 +76,7 @@ enum rxrpc_client_trace {
6468     rxrpc_client_chan_disconnect,
6469     rxrpc_client_chan_pass,
6470     rxrpc_client_chan_unstarted,
6471     + rxrpc_client_chan_wait_failed,
6472     rxrpc_client_cleanup,
6473     rxrpc_client_count,
6474     rxrpc_client_discard,
6475     @@ -275,6 +276,7 @@ enum rxrpc_tx_point {
6476     EM(rxrpc_client_chan_disconnect, "ChDisc") \
6477     EM(rxrpc_client_chan_pass, "ChPass") \
6478     EM(rxrpc_client_chan_unstarted, "ChUnst") \
6479     + EM(rxrpc_client_chan_wait_failed, "ChWtFl") \
6480     EM(rxrpc_client_cleanup, "Clean ") \
6481     EM(rxrpc_client_count, "Count ") \
6482     EM(rxrpc_client_discard, "Discar") \
6483     diff --git a/include/uapi/linux/netfilter/xt_cgroup.h b/include/uapi/linux/netfilter/xt_cgroup.h
6484     index e96dfa1b34f7..b74e370d6133 100644
6485     --- a/include/uapi/linux/netfilter/xt_cgroup.h
6486     +++ b/include/uapi/linux/netfilter/xt_cgroup.h
6487     @@ -22,4 +22,20 @@ struct xt_cgroup_info_v1 {
6488     void *priv __attribute__((aligned(8)));
6489     };
6490    
6491     +#define XT_CGROUP_PATH_MAX 512
6492     +
6493     +struct xt_cgroup_info_v2 {
6494     + __u8 has_path;
6495     + __u8 has_classid;
6496     + __u8 invert_path;
6497     + __u8 invert_classid;
6498     + union {
6499     + char path[XT_CGROUP_PATH_MAX];
6500     + __u32 classid;
6501     + };
6502     +
6503     + /* kernel internal data */
6504     + void *priv __attribute__((aligned(8)));
6505     +};
6506     +
6507     #endif /* _UAPI_XT_CGROUP_H */
6508     diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
6509     index 2ada5e21dfa6..4a8f390a2b82 100644
6510     --- a/kernel/bpf/inode.c
6511     +++ b/kernel/bpf/inode.c
6512     @@ -554,19 +554,6 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ
6513     }
6514     EXPORT_SYMBOL(bpf_prog_get_type_path);
6515    
6516     -static void bpf_evict_inode(struct inode *inode)
6517     -{
6518     - enum bpf_type type;
6519     -
6520     - truncate_inode_pages_final(&inode->i_data);
6521     - clear_inode(inode);
6522     -
6523     - if (S_ISLNK(inode->i_mode))
6524     - kfree(inode->i_link);
6525     - if (!bpf_inode_type(inode, &type))
6526     - bpf_any_put(inode->i_private, type);
6527     -}
6528     -
6529     /*
6530     * Display the mount options in /proc/mounts.
6531     */
6532     @@ -579,11 +566,28 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
6533     return 0;
6534     }
6535    
6536     +static void bpf_destroy_inode_deferred(struct rcu_head *head)
6537     +{
6538     + struct inode *inode = container_of(head, struct inode, i_rcu);
6539     + enum bpf_type type;
6540     +
6541     + if (S_ISLNK(inode->i_mode))
6542     + kfree(inode->i_link);
6543     + if (!bpf_inode_type(inode, &type))
6544     + bpf_any_put(inode->i_private, type);
6545     + free_inode_nonrcu(inode);
6546     +}
6547     +
6548     +static void bpf_destroy_inode(struct inode *inode)
6549     +{
6550     + call_rcu(&inode->i_rcu, bpf_destroy_inode_deferred);
6551     +}
6552     +
6553     static const struct super_operations bpf_super_ops = {
6554     .statfs = simple_statfs,
6555     .drop_inode = generic_delete_inode,
6556     .show_options = bpf_show_options,
6557     - .evict_inode = bpf_evict_inode,
6558     + .destroy_inode = bpf_destroy_inode,
6559     };
6560    
6561     enum {
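kernel/bpf/inode.c stops releasing the inode payload from ->evict_inode and instead wires up ->destroy_inode with an RCU-deferred free: the symlink body and the reference to the pinned map or program are dropped only after a grace period, so lockless lookups that may still be inspecting the inode (for instance the symlink body during RCU path walk) cannot race with the free. The same call_rcu() idiom on a made-up structure:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct blob {
            struct rcu_head rcu;
            void *payload;
    };

    static void blob_free_rcu(struct rcu_head *head)
    {
            struct blob *b = container_of(head, struct blob, rcu);

            kfree(b->payload);
            kfree(b);
    }

    static void blob_release(struct blob *b)
    {
            /* readers under rcu_read_lock() stay safe until the grace period ends */
            call_rcu(&b->rcu, blob_free_rcu);
    }
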
6562     diff --git a/kernel/events/core.c b/kernel/events/core.c
6563     index aa996a0854b9..87bd96399d1c 100644
6564     --- a/kernel/events/core.c
6565     +++ b/kernel/events/core.c
6566     @@ -7178,6 +7178,7 @@ static void perf_event_mmap_output(struct perf_event *event,
6567     struct perf_output_handle handle;
6568     struct perf_sample_data sample;
6569     int size = mmap_event->event_id.header.size;
6570     + u32 type = mmap_event->event_id.header.type;
6571     int ret;
6572    
6573     if (!perf_event_mmap_match(event, data))
6574     @@ -7221,6 +7222,7 @@ static void perf_event_mmap_output(struct perf_event *event,
6575     perf_output_end(&handle);
6576     out:
6577     mmap_event->event_id.header.size = size;
6578     + mmap_event->event_id.header.type = type;
6579     }
6580    
6581     static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
6582     diff --git a/kernel/hung_task.c b/kernel/hung_task.c
6583     index 9eca2371f189..4a9191617076 100644
6584     --- a/kernel/hung_task.c
6585     +++ b/kernel/hung_task.c
6586     @@ -15,6 +15,7 @@
6587     #include <linux/lockdep.h>
6588     #include <linux/export.h>
6589     #include <linux/sysctl.h>
6590     +#include <linux/suspend.h>
6591     #include <linux/utsname.h>
6592     #include <linux/sched/signal.h>
6593     #include <linux/sched/debug.h>
6594     @@ -240,6 +241,28 @@ void reset_hung_task_detector(void)
6595     }
6596     EXPORT_SYMBOL_GPL(reset_hung_task_detector);
6597    
6598     +static bool hung_detector_suspended;
6599     +
6600     +static int hungtask_pm_notify(struct notifier_block *self,
6601     + unsigned long action, void *hcpu)
6602     +{
6603     + switch (action) {
6604     + case PM_SUSPEND_PREPARE:
6605     + case PM_HIBERNATION_PREPARE:
6606     + case PM_RESTORE_PREPARE:
6607     + hung_detector_suspended = true;
6608     + break;
6609     + case PM_POST_SUSPEND:
6610     + case PM_POST_HIBERNATION:
6611     + case PM_POST_RESTORE:
6612     + hung_detector_suspended = false;
6613     + break;
6614     + default:
6615     + break;
6616     + }
6617     + return NOTIFY_OK;
6618     +}
6619     +
6620     /*
6621     * kthread which checks for tasks stuck in D state
6622     */
6623     @@ -259,7 +282,8 @@ static int watchdog(void *dummy)
6624     interval = min_t(unsigned long, interval, timeout);
6625     t = hung_timeout_jiffies(hung_last_checked, interval);
6626     if (t <= 0) {
6627     - if (!atomic_xchg(&reset_hung_task, 0))
6628     + if (!atomic_xchg(&reset_hung_task, 0) &&
6629     + !hung_detector_suspended)
6630     check_hung_uninterruptible_tasks(timeout);
6631     hung_last_checked = jiffies;
6632     continue;
6633     @@ -273,6 +297,10 @@ static int watchdog(void *dummy)
6634     static int __init hung_task_init(void)
6635     {
6636     atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
6637     +
6638     + /* Disable hung task detector on suspend */
6639     + pm_notifier(hungtask_pm_notify, 0);
6640     +
6641     watchdog_task = kthread_run(watchdog, NULL, "khungtaskd");
6642    
6643     return 0;
6644     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
6645     index 9a4f57d7e931..d7f409866cdf 100644
6646     --- a/kernel/sched/core.c
6647     +++ b/kernel/sched/core.c
6648     @@ -6930,7 +6930,7 @@ static int __maybe_unused cpu_period_quota_parse(char *buf,
6649     {
6650     char tok[21]; /* U64_MAX */
6651    
6652     - if (!sscanf(buf, "%s %llu", tok, periodp))
6653     + if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
6654     return -EINVAL;
6655    
6656     *periodp *= NSEC_PER_USEC;
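The kernel/sched/core.c change hardens cpu_period_quota_parse(): an unbounded "%s" could write past the 21-byte tok buffer on long input, so the conversion is limited to 20 characters, and the result test becomes "< 1" so that input where not even the first token converts (sscanf() may return 0 or EOF there) is rejected. A userspace-style sketch of the same point:

    #include <stdio.h>

    /* tok holds at most 20 characters plus the terminating NUL */
    static int parse_period(const char *buf, unsigned long long *periodp)
    {
            char tok[21];

            /* "%20s" stops after 20 bytes; a bare "%s" could overflow tok.
             * sscanf() returns the number of fields converted (or EOF),
             * so "< 1" rejects input where the token was not parsed at all. */
            if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
                    return -1;
            return 0;
    }
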
6657     diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
6658     index 3fffad3bc8a8..217f81ecae17 100644
6659     --- a/kernel/sched/cpufreq_schedutil.c
6660     +++ b/kernel/sched/cpufreq_schedutil.c
6661     @@ -50,10 +50,10 @@ struct sugov_cpu {
6662    
6663     bool iowait_boost_pending;
6664     unsigned int iowait_boost;
6665     - unsigned int iowait_boost_max;
6666     u64 last_update;
6667    
6668     unsigned long bw_dl;
6669     + unsigned long min;
6670     unsigned long max;
6671    
6672     /* The field below is for single-CPU policies only: */
6673     @@ -283,8 +283,7 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
6674     if (delta_ns <= TICK_NSEC)
6675     return false;
6676    
6677     - sg_cpu->iowait_boost = set_iowait_boost
6678     - ? sg_cpu->sg_policy->policy->min : 0;
6679     + sg_cpu->iowait_boost = set_iowait_boost ? sg_cpu->min : 0;
6680     sg_cpu->iowait_boost_pending = set_iowait_boost;
6681    
6682     return true;
6683     @@ -324,14 +323,13 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
6684    
6685     /* Double the boost at each request */
6686     if (sg_cpu->iowait_boost) {
6687     - sg_cpu->iowait_boost <<= 1;
6688     - if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
6689     - sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
6690     + sg_cpu->iowait_boost =
6691     + min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
6692     return;
6693     }
6694    
6695     /* First wakeup after IO: start with minimum boost */
6696     - sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
6697     + sg_cpu->iowait_boost = sg_cpu->min;
6698     }
6699    
6700     /**
6701     @@ -353,47 +351,38 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
6702     * This mechanism is designed to boost high frequently IO waiting tasks, while
6703     * being more conservative on tasks which does sporadic IO operations.
6704     */
6705     -static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
6706     - unsigned long *util, unsigned long *max)
6707     +static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
6708     + unsigned long util, unsigned long max)
6709     {
6710     - unsigned int boost_util, boost_max;
6711     + unsigned long boost;
6712    
6713     /* No boost currently required */
6714     if (!sg_cpu->iowait_boost)
6715     - return;
6716     + return util;
6717    
6718     /* Reset boost if the CPU appears to have been idle enough */
6719     if (sugov_iowait_reset(sg_cpu, time, false))
6720     - return;
6721     + return util;
6722    
6723     - /*
6724     - * An IO waiting task has just woken up:
6725     - * allow to further double the boost value
6726     - */
6727     - if (sg_cpu->iowait_boost_pending) {
6728     - sg_cpu->iowait_boost_pending = false;
6729     - } else {
6730     + if (!sg_cpu->iowait_boost_pending) {
6731     /*
6732     - * Otherwise: reduce the boost value and disable it when we
6733     - * reach the minimum.
6734     + * No boost pending; reduce the boost value.
6735     */
6736     sg_cpu->iowait_boost >>= 1;
6737     - if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
6738     + if (sg_cpu->iowait_boost < sg_cpu->min) {
6739     sg_cpu->iowait_boost = 0;
6740     - return;
6741     + return util;
6742     }
6743     }
6744    
6745     + sg_cpu->iowait_boost_pending = false;
6746     +
6747     /*
6748     - * Apply the current boost value: a CPU is boosted only if its current
6749     - * utilization is smaller then the current IO boost level.
6750     + * @util is already in capacity scale; convert iowait_boost
6751     + * into the same scale so we can compare.
6752     */
6753     - boost_util = sg_cpu->iowait_boost;
6754     - boost_max = sg_cpu->iowait_boost_max;
6755     - if (*util * boost_max < *max * boost_util) {
6756     - *util = boost_util;
6757     - *max = boost_max;
6758     - }
6759     + boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
6760     + return max(boost, util);
6761     }
6762    
6763     #ifdef CONFIG_NO_HZ_COMMON
6764     @@ -440,7 +429,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
6765    
6766     util = sugov_get_util(sg_cpu);
6767     max = sg_cpu->max;
6768     - sugov_iowait_apply(sg_cpu, time, &util, &max);
6769     + util = sugov_iowait_apply(sg_cpu, time, util, max);
6770     next_f = get_next_freq(sg_policy, util, max);
6771     /*
6772     * Do not reduce the frequency if the CPU has not been idle
6773     @@ -480,7 +469,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
6774    
6775     j_util = sugov_get_util(j_sg_cpu);
6776     j_max = j_sg_cpu->max;
6777     - sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max);
6778     + j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);
6779    
6780     if (j_util * max > j_max * util) {
6781     util = j_util;
6782     @@ -817,7 +806,9 @@ static int sugov_start(struct cpufreq_policy *policy)
6783     memset(sg_cpu, 0, sizeof(*sg_cpu));
6784     sg_cpu->cpu = cpu;
6785     sg_cpu->sg_policy = sg_policy;
6786     - sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
6787     + sg_cpu->min =
6788     + (SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) /
6789     + policy->cpuinfo.max_freq;
6790     }
6791    
6792     for_each_cpu(cpu, policy->cpus) {
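The schedutil rework above moves the io-wait boost from frequency units into capacity units: the boost now ranges between a per-CPU minimum derived from the ratio cpuinfo.min_freq / cpuinfo.max_freq (computed once in sugov_start()) and SCHED_CAPACITY_SCALE, and sugov_iowait_apply() returns a boosted utilization value instead of rewriting util and max through pointers. The applied value is simply the larger of the current utilization and the boost scaled to the CPU's maximum capacity. A standalone sketch of that arithmetic (the CAPACITY constants are redefined here only for the example):

    #define DEMO_CAPACITY_SHIFT 10
    #define DEMO_CAPACITY_SCALE (1UL << DEMO_CAPACITY_SHIFT)   /* 1024 = full capacity */

    static unsigned long apply_iowait_boost(unsigned long util, unsigned long max,
                                            unsigned long iowait_boost)
    {
            /* bring the boost into the same scale as util before comparing */
            unsigned long boost = (iowait_boost * max) >> DEMO_CAPACITY_SHIFT;

            return boost > util ? boost : util;
    }

    /* e.g. max = 1024 and iowait_boost = 512: a CPU running at util = 300 is lifted
     * to 512, while a CPU already at util = 800 keeps its own utilization. */
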
6793     diff --git a/lib/div64.c b/lib/div64.c
6794     index 01c8602bb6ff..ee146bb4c558 100644
6795     --- a/lib/div64.c
6796     +++ b/lib/div64.c
6797     @@ -109,7 +109,7 @@ u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
6798     quot = div_u64_rem(dividend, divisor, &rem32);
6799     *remainder = rem32;
6800     } else {
6801     - int n = 1 + fls(high);
6802     + int n = fls(high);
6803     quot = div_u64(dividend >> n, divisor >> n);
6804    
6805     if (quot != 0)
6806     @@ -147,7 +147,7 @@ u64 div64_u64(u64 dividend, u64 divisor)
6807     if (high == 0) {
6808     quot = div_u64(dividend, divisor);
6809     } else {
6810     - int n = 1 + fls(high);
6811     + int n = fls(high);
6812     quot = div_u64(dividend >> n, divisor >> n);
6813    
6814     if (quot != 0)
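Both lib/div64.c hunks fix the same off-by-one in the normalization shift: fls(high) already gives the number of significant bits in the divisor's upper 32 bits, so shifting dividend and divisor right by fls(high) is enough to make the scaled divisor fit in 32 bits. The extra "+ 1" threw away one more bit than necessary, which can leave the initial quotient estimate farther from the true quotient than the single correction step that follows accounts for. Worked values for illustration:

    /* divisor = 0x100000001  ->  high = divisor >> 32 = 1,  fls(high) = 1
     *
     *   old: n = 2, divisor >> n = 0x40000000   (33 significant bits cut down to 31)
     *   new: n = 1, divisor >> n = 0x80000000   (still fits in 32 bits, one bit more kept)
     *
     * div_u64(dividend >> n, divisor >> n) then estimates the quotient; the smaller
     * shift keeps that estimate closer to the exact result. */
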
6815     diff --git a/mm/vmstat.c b/mm/vmstat.c
6816     index 7878da76abf2..2878dc4e9af6 100644
6817     --- a/mm/vmstat.c
6818     +++ b/mm/vmstat.c
6819     @@ -1547,6 +1547,10 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
6820     if (is_zone_first_populated(pgdat, zone)) {
6821     seq_printf(m, "\n per-node stats");
6822     for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
6823     + /* Skip hidden vmstat items. */
6824     + if (*vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
6825     + NR_VM_NUMA_STAT_ITEMS] == '\0')
6826     + continue;
6827     seq_printf(m, "\n %-12s %lu",
6828     vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
6829     NR_VM_NUMA_STAT_ITEMS],
6830     diff --git a/net/9p/protocol.c b/net/9p/protocol.c
6831     index ee32bbf12675..b4d80c533f89 100644
6832     --- a/net/9p/protocol.c
6833     +++ b/net/9p/protocol.c
6834     @@ -571,9 +571,10 @@ int p9stat_read(struct p9_client *clnt, char *buf, int len, struct p9_wstat *st)
6835     if (ret) {
6836     p9_debug(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
6837     trace_9p_protocol_dump(clnt, &fake_pdu);
6838     + return ret;
6839     }
6840    
6841     - return ret;
6842     + return fake_pdu.offset;
6843     }
6844     EXPORT_SYMBOL(p9stat_read);
6845    
6846     diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
6847     index 8006295f8bd7..dda73991bb54 100644
6848     --- a/net/appletalk/atalk_proc.c
6849     +++ b/net/appletalk/atalk_proc.c
6850     @@ -255,7 +255,7 @@ out_interface:
6851     goto out;
6852     }
6853    
6854     -void __exit atalk_proc_exit(void)
6855     +void atalk_proc_exit(void)
6856     {
6857     remove_proc_entry("interface", atalk_proc_dir);
6858     remove_proc_entry("route", atalk_proc_dir);
6859     diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
6860     index 9b6bc5abe946..795fbc6c06aa 100644
6861     --- a/net/appletalk/ddp.c
6862     +++ b/net/appletalk/ddp.c
6863     @@ -1910,12 +1910,16 @@ static const char atalk_err_snap[] __initconst =
6864     /* Called by proto.c on kernel start up */
6865     static int __init atalk_init(void)
6866     {
6867     - int rc = proto_register(&ddp_proto, 0);
6868     + int rc;
6869    
6870     - if (rc != 0)
6871     + rc = proto_register(&ddp_proto, 0);
6872     + if (rc)
6873     goto out;
6874    
6875     - (void)sock_register(&atalk_family_ops);
6876     + rc = sock_register(&atalk_family_ops);
6877     + if (rc)
6878     + goto out_proto;
6879     +
6880     ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
6881     if (!ddp_dl)
6882     printk(atalk_err_snap);
6883     @@ -1923,12 +1927,33 @@ static int __init atalk_init(void)
6884     dev_add_pack(&ltalk_packet_type);
6885     dev_add_pack(&ppptalk_packet_type);
6886    
6887     - register_netdevice_notifier(&ddp_notifier);
6888     + rc = register_netdevice_notifier(&ddp_notifier);
6889     + if (rc)
6890     + goto out_sock;
6891     +
6892     aarp_proto_init();
6893     - atalk_proc_init();
6894     - atalk_register_sysctl();
6895     + rc = atalk_proc_init();
6896     + if (rc)
6897     + goto out_aarp;
6898     +
6899     + rc = atalk_register_sysctl();
6900     + if (rc)
6901     + goto out_proc;
6902     out:
6903     return rc;
6904     +out_proc:
6905     + atalk_proc_exit();
6906     +out_aarp:
6907     + aarp_cleanup_module();
6908     + unregister_netdevice_notifier(&ddp_notifier);
6909     +out_sock:
6910     + dev_remove_pack(&ppptalk_packet_type);
6911     + dev_remove_pack(&ltalk_packet_type);
6912     + unregister_snap_client(ddp_dl);
6913     + sock_unregister(PF_APPLETALK);
6914     +out_proto:
6915     + proto_unregister(&ddp_proto);
6916     + goto out;
6917     }
6918     module_init(atalk_init);
6919    
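atalk_init() used to ignore the return values of sock_register(), register_netdevice_notifier(), atalk_proc_init() and atalk_register_sysctl(); it now checks each step and, on failure, unwinds everything already set up in reverse order. The unwind ladder keeps a single exit point by placing the error labels after out: and jumping back to it once cleanup is done. A stripped-down version of that shape, with hypothetical helpers:

    static int __init demo_init(void)
    {
            int rc;

            rc = step_a();                  /* hypothetical setup helpers */
            if (rc)
                    goto out;
            rc = step_b();
            if (rc)
                    goto undo_a;
            rc = step_c();
            if (rc)
                    goto undo_b;
    out:
            return rc;
    undo_b:
            undo_step_b();
    undo_a:
            undo_step_a();
            goto out;
    }
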
6920     diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c
6921     index c744a853fa5f..d945b7c0176d 100644
6922     --- a/net/appletalk/sysctl_net_atalk.c
6923     +++ b/net/appletalk/sysctl_net_atalk.c
6924     @@ -45,9 +45,12 @@ static struct ctl_table atalk_table[] = {
6925    
6926     static struct ctl_table_header *atalk_table_header;
6927    
6928     -void atalk_register_sysctl(void)
6929     +int __init atalk_register_sysctl(void)
6930     {
6931     atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table);
6932     + if (!atalk_table_header)
6933     + return -ENOMEM;
6934     + return 0;
6935     }
6936    
6937     void atalk_unregister_sysctl(void)
6938     diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
6939     index 74b29c7d841c..a06f03047717 100644
6940     --- a/net/bluetooth/hci_core.c
6941     +++ b/net/bluetooth/hci_core.c
6942     @@ -3084,6 +3084,8 @@ struct hci_dev *hci_alloc_dev(void)
6943     hdev->le_max_tx_time = 0x0148;
6944     hdev->le_max_rx_len = 0x001b;
6945     hdev->le_max_rx_time = 0x0148;
6946     + hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
6947     + hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
6948     hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
6949     hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
6950    
6951     diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
6952     index 73f7211d0431..a1c1b7e8a45c 100644
6953     --- a/net/bluetooth/smp.c
6954     +++ b/net/bluetooth/smp.c
6955     @@ -88,9 +88,6 @@ struct smp_dev {
6956     u8 local_rand[16];
6957     bool debug_key;
6958    
6959     - u8 min_key_size;
6960     - u8 max_key_size;
6961     -
6962     struct crypto_cipher *tfm_aes;
6963     struct crypto_shash *tfm_cmac;
6964     struct crypto_kpp *tfm_ecdh;
6965     @@ -720,7 +717,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
6966     if (rsp == NULL) {
6967     req->io_capability = conn->hcon->io_capability;
6968     req->oob_flag = oob_flag;
6969     - req->max_key_size = SMP_DEV(hdev)->max_key_size;
6970     + req->max_key_size = hdev->le_max_key_size;
6971     req->init_key_dist = local_dist;
6972     req->resp_key_dist = remote_dist;
6973     req->auth_req = (authreq & AUTH_REQ_MASK(hdev));
6974     @@ -731,7 +728,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
6975    
6976     rsp->io_capability = conn->hcon->io_capability;
6977     rsp->oob_flag = oob_flag;
6978     - rsp->max_key_size = SMP_DEV(hdev)->max_key_size;
6979     + rsp->max_key_size = hdev->le_max_key_size;
6980     rsp->init_key_dist = req->init_key_dist & remote_dist;
6981     rsp->resp_key_dist = req->resp_key_dist & local_dist;
6982     rsp->auth_req = (authreq & AUTH_REQ_MASK(hdev));
6983     @@ -745,7 +742,7 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
6984     struct hci_dev *hdev = conn->hcon->hdev;
6985     struct smp_chan *smp = chan->data;
6986    
6987     - if (max_key_size > SMP_DEV(hdev)->max_key_size ||
6988     + if (max_key_size > hdev->le_max_key_size ||
6989     max_key_size < SMP_MIN_ENC_KEY_SIZE)
6990     return SMP_ENC_KEY_SIZE;
6991    
6992     @@ -3264,8 +3261,6 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
6993     smp->tfm_aes = tfm_aes;
6994     smp->tfm_cmac = tfm_cmac;
6995     smp->tfm_ecdh = tfm_ecdh;
6996     - smp->min_key_size = SMP_MIN_ENC_KEY_SIZE;
6997     - smp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
6998    
6999     create_chan:
7000     chan = l2cap_chan_create();
7001     @@ -3391,7 +3386,7 @@ static ssize_t le_min_key_size_read(struct file *file,
7002     struct hci_dev *hdev = file->private_data;
7003     char buf[4];
7004    
7005     - snprintf(buf, sizeof(buf), "%2u\n", SMP_DEV(hdev)->min_key_size);
7006     + snprintf(buf, sizeof(buf), "%2u\n", hdev->le_min_key_size);
7007    
7008     return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
7009     }
7010     @@ -3412,11 +3407,11 @@ static ssize_t le_min_key_size_write(struct file *file,
7011    
7012     sscanf(buf, "%hhu", &key_size);
7013    
7014     - if (key_size > SMP_DEV(hdev)->max_key_size ||
7015     + if (key_size > hdev->le_max_key_size ||
7016     key_size < SMP_MIN_ENC_KEY_SIZE)
7017     return -EINVAL;
7018    
7019     - SMP_DEV(hdev)->min_key_size = key_size;
7020     + hdev->le_min_key_size = key_size;
7021    
7022     return count;
7023     }
7024     @@ -3435,7 +3430,7 @@ static ssize_t le_max_key_size_read(struct file *file,
7025     struct hci_dev *hdev = file->private_data;
7026     char buf[4];
7027    
7028     - snprintf(buf, sizeof(buf), "%2u\n", SMP_DEV(hdev)->max_key_size);
7029     + snprintf(buf, sizeof(buf), "%2u\n", hdev->le_max_key_size);
7030    
7031     return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
7032     }
7033     @@ -3457,10 +3452,10 @@ static ssize_t le_max_key_size_write(struct file *file,
7034     sscanf(buf, "%hhu", &key_size);
7035    
7036     if (key_size > SMP_MAX_ENC_KEY_SIZE ||
7037     - key_size < SMP_DEV(hdev)->min_key_size)
7038     + key_size < hdev->le_min_key_size)
7039     return -EINVAL;
7040    
7041     - SMP_DEV(hdev)->max_key_size = key_size;
7042     + hdev->le_max_key_size = key_size;
7043    
7044     return count;
7045     }
7046     diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
7047     index c4a7db62658e..01ecd510014f 100644
7048     --- a/net/ipv6/ip6_gre.c
7049     +++ b/net/ipv6/ip6_gre.c
7050     @@ -1743,6 +1743,9 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
7051     static void ip6erspan_set_version(struct nlattr *data[],
7052     struct __ip6_tnl_parm *parms)
7053     {
7054     + if (!data)
7055     + return;
7056     +
7057     parms->erspan_ver = 1;
7058     if (data[IFLA_GRE_ERSPAN_VER])
7059     parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
7060     diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
7061     index f5b4febeaa25..bc65db782bfb 100644
7062     --- a/net/ipv6/xfrm6_tunnel.c
7063     +++ b/net/ipv6/xfrm6_tunnel.c
7064     @@ -344,8 +344,8 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
7065     struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
7066     unsigned int i;
7067    
7068     - xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
7069     xfrm_flush_gc();
7070     + xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
7071    
7072     for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
7073     WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
7074     diff --git a/net/key/af_key.c b/net/key/af_key.c
7075     index 7da629d59717..7d4bed955060 100644
7076     --- a/net/key/af_key.c
7077     +++ b/net/key/af_key.c
7078     @@ -1773,7 +1773,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m
7079     if (proto == 0)
7080     return -EINVAL;
7081    
7082     - err = xfrm_state_flush(net, proto, true);
7083     + err = xfrm_state_flush(net, proto, true, false);
7084     err2 = unicast_flush_resp(sk, hdr);
7085     if (err || err2) {
7086     if (err == -ESRCH) /* empty table - go quietly */
7087     diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
7088     index 7d424fd27025..c06393fc716d 100644
7089     --- a/net/netfilter/nf_tables_api.c
7090     +++ b/net/netfilter/nf_tables_api.c
7091     @@ -7203,9 +7203,6 @@ static void __nft_release_tables(struct net *net)
7092    
7093     list_for_each_entry(chain, &table->chains, list)
7094     nf_tables_unregister_hook(net, table, chain);
7095     - list_for_each_entry(flowtable, &table->flowtables, list)
7096     - nf_unregister_net_hooks(net, flowtable->ops,
7097     - flowtable->ops_len);
7098     /* No packets are walking on these chains anymore. */
7099     ctx.table = table;
7100     list_for_each_entry(chain, &table->chains, list) {
7101     diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c
7102     index 5d92e1781980..5cb1ecb29ea4 100644
7103     --- a/net/netfilter/xt_cgroup.c
7104     +++ b/net/netfilter/xt_cgroup.c
7105     @@ -68,6 +68,38 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par)
7106     return 0;
7107     }
7108    
7109     +static int cgroup_mt_check_v2(const struct xt_mtchk_param *par)
7110     +{
7111     + struct xt_cgroup_info_v2 *info = par->matchinfo;
7112     + struct cgroup *cgrp;
7113     +
7114     + if ((info->invert_path & ~1) || (info->invert_classid & ~1))
7115     + return -EINVAL;
7116     +
7117     + if (!info->has_path && !info->has_classid) {
7118     + pr_info("xt_cgroup: no path or classid specified\n");
7119     + return -EINVAL;
7120     + }
7121     +
7122     + if (info->has_path && info->has_classid) {
7123     + pr_info_ratelimited("path and classid specified\n");
7124     + return -EINVAL;
7125     + }
7126     +
7127     + info->priv = NULL;
7128     + if (info->has_path) {
7129     + cgrp = cgroup_get_from_path(info->path);
7130     + if (IS_ERR(cgrp)) {
7131     + pr_info_ratelimited("invalid path, errno=%ld\n",
7132     + PTR_ERR(cgrp));
7133     + return -EINVAL;
7134     + }
7135     + info->priv = cgrp;
7136     + }
7137     +
7138     + return 0;
7139     +}
7140     +
7141     static bool
7142     cgroup_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
7143     {
7144     @@ -99,6 +131,24 @@ static bool cgroup_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
7145     info->invert_classid;
7146     }
7147    
7148     +static bool cgroup_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
7149     +{
7150     + const struct xt_cgroup_info_v2 *info = par->matchinfo;
7151     + struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data;
7152     + struct cgroup *ancestor = info->priv;
7153     + struct sock *sk = skb->sk;
7154     +
7155     + if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))
7156     + return false;
7157     +
7158     + if (ancestor)
7159     + return cgroup_is_descendant(sock_cgroup_ptr(skcd), ancestor) ^
7160     + info->invert_path;
7161     + else
7162     + return (info->classid == sock_cgroup_classid(skcd)) ^
7163     + info->invert_classid;
7164     +}
7165     +
7166     static void cgroup_mt_destroy_v1(const struct xt_mtdtor_param *par)
7167     {
7168     struct xt_cgroup_info_v1 *info = par->matchinfo;
7169     @@ -107,6 +157,14 @@ static void cgroup_mt_destroy_v1(const struct xt_mtdtor_param *par)
7170     cgroup_put(info->priv);
7171     }
7172    
7173     +static void cgroup_mt_destroy_v2(const struct xt_mtdtor_param *par)
7174     +{
7175     + struct xt_cgroup_info_v2 *info = par->matchinfo;
7176     +
7177     + if (info->priv)
7178     + cgroup_put(info->priv);
7179     +}
7180     +
7181     static struct xt_match cgroup_mt_reg[] __read_mostly = {
7182     {
7183     .name = "cgroup",
7184     @@ -134,6 +192,20 @@ static struct xt_match cgroup_mt_reg[] __read_mostly = {
7185     (1 << NF_INET_POST_ROUTING) |
7186     (1 << NF_INET_LOCAL_IN),
7187     },
7188     + {
7189     + .name = "cgroup",
7190     + .revision = 2,
7191     + .family = NFPROTO_UNSPEC,
7192     + .checkentry = cgroup_mt_check_v2,
7193     + .match = cgroup_mt_v2,
7194     + .matchsize = sizeof(struct xt_cgroup_info_v2),
7195     + .usersize = offsetof(struct xt_cgroup_info_v2, priv),
7196     + .destroy = cgroup_mt_destroy_v2,
7197     + .me = THIS_MODULE,
7198     + .hooks = (1 << NF_INET_LOCAL_OUT) |
7199     + (1 << NF_INET_POST_ROUTING) |
7200     + (1 << NF_INET_LOCAL_IN),
7201     + },
7202     };
7203    
7204     static int __init cgroup_mt_init(void)
7205     diff --git a/net/rds/rdma.c b/net/rds/rdma.c
7206     index 98237feb607a..e1965d9cbcf8 100644
7207     --- a/net/rds/rdma.c
7208     +++ b/net/rds/rdma.c
7209     @@ -517,9 +517,10 @@ static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
7210     return tot_pages;
7211     }
7212    
7213     -int rds_rdma_extra_size(struct rds_rdma_args *args)
7214     +int rds_rdma_extra_size(struct rds_rdma_args *args,
7215     + struct rds_iov_vector *iov)
7216     {
7217     - struct rds_iovec vec;
7218     + struct rds_iovec *vec;
7219     struct rds_iovec __user *local_vec;
7220     int tot_pages = 0;
7221     unsigned int nr_pages;
7222     @@ -530,13 +531,23 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
7223     if (args->nr_local == 0)
7224     return -EINVAL;
7225    
7226     + iov->iov = kcalloc(args->nr_local,
7227     + sizeof(struct rds_iovec),
7228     + GFP_KERNEL);
7229     + if (!iov->iov)
7230     + return -ENOMEM;
7231     +
7232     + vec = &iov->iov[0];
7233     +
7234     + if (copy_from_user(vec, local_vec, args->nr_local *
7235     + sizeof(struct rds_iovec)))
7236     + return -EFAULT;
7237     + iov->len = args->nr_local;
7238     +
7239     /* figure out the number of pages in the vector */
7240     - for (i = 0; i < args->nr_local; i++) {
7241     - if (copy_from_user(&vec, &local_vec[i],
7242     - sizeof(struct rds_iovec)))
7243     - return -EFAULT;
7244     + for (i = 0; i < args->nr_local; i++, vec++) {
7245    
7246     - nr_pages = rds_pages_in_vec(&vec);
7247     + nr_pages = rds_pages_in_vec(vec);
7248     if (nr_pages == 0)
7249     return -EINVAL;
7250    
7251     @@ -558,15 +569,15 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
7252     * Extract all arguments and set up the rdma_op
7253     */
7254     int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
7255     - struct cmsghdr *cmsg)
7256     + struct cmsghdr *cmsg,
7257     + struct rds_iov_vector *vec)
7258     {
7259     struct rds_rdma_args *args;
7260     struct rm_rdma_op *op = &rm->rdma;
7261     int nr_pages;
7262     unsigned int nr_bytes;
7263     struct page **pages = NULL;
7264     - struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
7265     - int iov_size;
7266     + struct rds_iovec *iovs;
7267     unsigned int i, j;
7268     int ret = 0;
7269    
7270     @@ -586,31 +597,23 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
7271     goto out_ret;
7272     }
7273    
7274     - /* Check whether to allocate the iovec area */
7275     - iov_size = args->nr_local * sizeof(struct rds_iovec);
7276     - if (args->nr_local > UIO_FASTIOV) {
7277     - iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
7278     - if (!iovs) {
7279     - ret = -ENOMEM;
7280     - goto out_ret;
7281     - }
7282     + if (vec->len != args->nr_local) {
7283     + ret = -EINVAL;
7284     + goto out_ret;
7285     }
7286    
7287     - if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
7288     - ret = -EFAULT;
7289     - goto out;
7290     - }
7291     + iovs = vec->iov;
7292    
7293     nr_pages = rds_rdma_pages(iovs, args->nr_local);
7294     if (nr_pages < 0) {
7295     ret = -EINVAL;
7296     - goto out;
7297     + goto out_ret;
7298     }
7299    
7300     pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
7301     if (!pages) {
7302     ret = -ENOMEM;
7303     - goto out;
7304     + goto out_ret;
7305     }
7306    
7307     op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
7308     @@ -623,7 +626,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
7309     op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
7310     if (!op->op_sg) {
7311     ret = -ENOMEM;
7312     - goto out;
7313     + goto out_pages;
7314     }
7315    
7316     if (op->op_notify || op->op_recverr) {
7317     @@ -635,7 +638,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
7318     op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
7319     if (!op->op_notifier) {
7320     ret = -ENOMEM;
7321     - goto out;
7322     + goto out_pages;
7323     }
7324     op->op_notifier->n_user_token = args->user_token;
7325     op->op_notifier->n_status = RDS_RDMA_SUCCESS;
7326     @@ -681,7 +684,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
7327     */
7328     ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
7329     if (ret < 0)
7330     - goto out;
7331     + goto out_pages;
7332     else
7333     ret = 0;
7334    
7335     @@ -714,13 +717,11 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
7336     nr_bytes,
7337     (unsigned int) args->remote_vec.bytes);
7338     ret = -EINVAL;
7339     - goto out;
7340     + goto out_pages;
7341     }
7342     op->op_bytes = nr_bytes;
7343    
7344     -out:
7345     - if (iovs != iovstack)
7346     - sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
7347     +out_pages:
7348     kfree(pages);
7349     out_ret:
7350     if (ret)
7351     diff --git a/net/rds/rds.h b/net/rds/rds.h
7352     index c4dcf654d8fe..4234ab81b5af 100644
7353     --- a/net/rds/rds.h
7354     +++ b/net/rds/rds.h
7355     @@ -386,6 +386,18 @@ static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
7356     INIT_LIST_HEAD(&q->zcookie_head);
7357     }
7358    
7359     +struct rds_iov_vector {
7360     + struct rds_iovec *iov;
7361     + int len;
7362     +};
7363     +
7364     +struct rds_iov_vector_arr {
7365     + struct rds_iov_vector *vec;
7366     + int len;
7367     + int indx;
7368     + int incr;
7369     +};
7370     +
7371     struct rds_message {
7372     refcount_t m_refcount;
7373     struct list_head m_sock_item;
7374     @@ -904,13 +916,13 @@ int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
7375     int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
7376     int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
7377     void rds_rdma_drop_keys(struct rds_sock *rs);
7378     -int rds_rdma_extra_size(struct rds_rdma_args *args);
7379     -int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
7380     - struct cmsghdr *cmsg);
7381     +int rds_rdma_extra_size(struct rds_rdma_args *args,
7382     + struct rds_iov_vector *iov);
7383     int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
7384     struct cmsghdr *cmsg);
7385     int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
7386     - struct cmsghdr *cmsg);
7387     + struct cmsghdr *cmsg,
7388     + struct rds_iov_vector *vec);
7389     int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
7390     struct cmsghdr *cmsg);
7391     void rds_rdma_free_op(struct rm_rdma_op *ro);
7392     diff --git a/net/rds/send.c b/net/rds/send.c
7393     index fe785ee819dd..ec2267cbf85f 100644
7394     --- a/net/rds/send.c
7395     +++ b/net/rds/send.c
7396     @@ -876,13 +876,15 @@ out:
7397     * rds_message is getting to be quite complicated, and we'd like to allocate
7398     * it all in one go. This figures out how big it needs to be up front.
7399     */
7400     -static int rds_rm_size(struct msghdr *msg, int num_sgs)
7401     +static int rds_rm_size(struct msghdr *msg, int num_sgs,
7402     + struct rds_iov_vector_arr *vct)
7403     {
7404     struct cmsghdr *cmsg;
7405     int size = 0;
7406     int cmsg_groups = 0;
7407     int retval;
7408     bool zcopy_cookie = false;
7409     + struct rds_iov_vector *iov, *tmp_iov;
7410    
7411     for_each_cmsghdr(cmsg, msg) {
7412     if (!CMSG_OK(msg, cmsg))
7413     @@ -893,8 +895,24 @@ static int rds_rm_size(struct msghdr *msg, int num_sgs)
7414    
7415     switch (cmsg->cmsg_type) {
7416     case RDS_CMSG_RDMA_ARGS:
7417     + if (vct->indx >= vct->len) {
7418     + vct->len += vct->incr;
7419     + tmp_iov =
7420     + krealloc(vct->vec,
7421     + vct->len *
7422     + sizeof(struct rds_iov_vector),
7423     + GFP_KERNEL);
7424     + if (!tmp_iov) {
7425     + vct->len -= vct->incr;
7426     + return -ENOMEM;
7427     + }
7428     + vct->vec = tmp_iov;
7429     + }
7430     + iov = &vct->vec[vct->indx];
7431     + memset(iov, 0, sizeof(struct rds_iov_vector));
7432     + vct->indx++;
7433     cmsg_groups |= 1;
7434     - retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
7435     + retval = rds_rdma_extra_size(CMSG_DATA(cmsg), iov);
7436     if (retval < 0)
7437     return retval;
7438     size += retval;
7439     @@ -951,10 +969,11 @@ static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm,
7440     }
7441    
7442     static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
7443     - struct msghdr *msg, int *allocated_mr)
7444     + struct msghdr *msg, int *allocated_mr,
7445     + struct rds_iov_vector_arr *vct)
7446     {
7447     struct cmsghdr *cmsg;
7448     - int ret = 0;
7449     + int ret = 0, ind = 0;
7450    
7451     for_each_cmsghdr(cmsg, msg) {
7452     if (!CMSG_OK(msg, cmsg))
7453     @@ -968,7 +987,10 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
7454     */
7455     switch (cmsg->cmsg_type) {
7456     case RDS_CMSG_RDMA_ARGS:
7457     - ret = rds_cmsg_rdma_args(rs, rm, cmsg);
7458     + if (ind >= vct->indx)
7459     + return -ENOMEM;
7460     + ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
7461     + ind++;
7462     break;
7463    
7464     case RDS_CMSG_RDMA_DEST:
7465     @@ -1084,6 +1106,11 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
7466     sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
7467     int num_sgs = ceil(payload_len, PAGE_SIZE);
7468     int namelen;
7469     + struct rds_iov_vector_arr vct = {0};
7470     + int ind;
7471     +
7472     + /* expect 1 RDMA CMSG per rds_sendmsg. can still grow if more needed. */
7473     + vct.incr = 1;
7474    
7475     /* Mirror Linux UDP mirror of BSD error message compatibility */
7476     /* XXX: Perhaps MSG_MORE someday */
7477     @@ -1220,7 +1247,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
7478     num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX);
7479     }
7480     /* size of rm including all sgs */
7481     - ret = rds_rm_size(msg, num_sgs);
7482     + ret = rds_rm_size(msg, num_sgs, &vct);
7483     if (ret < 0)
7484     goto out;
7485    
7486     @@ -1270,7 +1297,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
7487     rm->m_conn_path = cpath;
7488    
7489     /* Parse any control messages the user may have included. */
7490     - ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
7491     + ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
7492     if (ret) {
7493     /* Trigger connection so that its ready for the next retry */
7494     if (ret == -EAGAIN)
7495     @@ -1348,9 +1375,18 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
7496     if (ret)
7497     goto out;
7498     rds_message_put(rm);
7499     +
7500     + for (ind = 0; ind < vct.indx; ind++)
7501     + kfree(vct.vec[ind].iov);
7502     + kfree(vct.vec);
7503     +
7504     return payload_len;
7505    
7506     out:
7507     + for (ind = 0; ind < vct.indx; ind++)
7508     + kfree(vct.vec[ind].iov);
7509     + kfree(vct.vec);
7510     +
7511     /* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
7512     * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
7513     * or in any other way, we need to destroy the MR again */
7514     diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
7515     index 6e419b15a9f8..c979a56faaef 100644
7516     --- a/net/rxrpc/conn_client.c
7517     +++ b/net/rxrpc/conn_client.c
7518     @@ -707,6 +707,7 @@ int rxrpc_connect_call(struct rxrpc_sock *rx,
7519    
7520     ret = rxrpc_wait_for_channel(call, gfp);
7521     if (ret < 0) {
7522     + trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
7523     rxrpc_disconnect_client_call(call);
7524     goto out;
7525     }
7526     @@ -777,16 +778,22 @@ static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
7527     */
7528     void rxrpc_disconnect_client_call(struct rxrpc_call *call)
7529     {
7530     - unsigned int channel = call->cid & RXRPC_CHANNELMASK;
7531     struct rxrpc_connection *conn = call->conn;
7532     - struct rxrpc_channel *chan = &conn->channels[channel];
7533     + struct rxrpc_channel *chan = NULL;
7534     struct rxrpc_net *rxnet = conn->params.local->rxnet;
7535     + unsigned int channel = -1;
7536     + u32 cid;
7537    
7538     + spin_lock(&conn->channel_lock);
7539     +
7540     + cid = call->cid;
7541     + if (cid) {
7542     + channel = cid & RXRPC_CHANNELMASK;
7543     + chan = &conn->channels[channel];
7544     + }
7545     trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
7546     call->conn = NULL;
7547    
7548     - spin_lock(&conn->channel_lock);
7549     -
7550     /* Calls that have never actually been assigned a channel can simply be
7551     * discarded. If the conn didn't get used either, it will follow
7552     * immediately unless someone else grabs it in the meantime.
7553     @@ -810,7 +817,10 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
7554     goto out;
7555     }
7556    
7557     - ASSERTCMP(rcu_access_pointer(chan->call), ==, call);
7558     + if (rcu_access_pointer(chan->call) != call) {
7559     + spin_unlock(&conn->channel_lock);
7560     + BUG();
7561     + }
7562    
7563     /* If a client call was exposed to the world, we save the result for
7564     * retransmission.
7565     diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
7566     index cc0203efb584..3f729cd512af 100644
7567     --- a/net/xfrm/xfrm_state.c
7568     +++ b/net/xfrm/xfrm_state.c
7569     @@ -432,7 +432,7 @@ void xfrm_state_free(struct xfrm_state *x)
7570     }
7571     EXPORT_SYMBOL(xfrm_state_free);
7572    
7573     -static void xfrm_state_gc_destroy(struct xfrm_state *x)
7574     +static void ___xfrm_state_destroy(struct xfrm_state *x)
7575     {
7576     tasklet_hrtimer_cancel(&x->mtimer);
7577     del_timer_sync(&x->rtimer);
7578     @@ -474,7 +474,7 @@ static void xfrm_state_gc_task(struct work_struct *work)
7579     synchronize_rcu();
7580    
7581     hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
7582     - xfrm_state_gc_destroy(x);
7583     + ___xfrm_state_destroy(x);
7584     }
7585    
7586     static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
7587     @@ -598,14 +598,19 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
7588     }
7589     EXPORT_SYMBOL(xfrm_state_alloc);
7590    
7591     -void __xfrm_state_destroy(struct xfrm_state *x)
7592     +void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
7593     {
7594     WARN_ON(x->km.state != XFRM_STATE_DEAD);
7595    
7596     - spin_lock_bh(&xfrm_state_gc_lock);
7597     - hlist_add_head(&x->gclist, &xfrm_state_gc_list);
7598     - spin_unlock_bh(&xfrm_state_gc_lock);
7599     - schedule_work(&xfrm_state_gc_work);
7600     + if (sync) {
7601     + synchronize_rcu();
7602     + ___xfrm_state_destroy(x);
7603     + } else {
7604     + spin_lock_bh(&xfrm_state_gc_lock);
7605     + hlist_add_head(&x->gclist, &xfrm_state_gc_list);
7606     + spin_unlock_bh(&xfrm_state_gc_lock);
7607     + schedule_work(&xfrm_state_gc_work);
7608     + }
7609     }
7610     EXPORT_SYMBOL(__xfrm_state_destroy);
7611    
7612     @@ -708,7 +713,7 @@ xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool
7613     }
7614     #endif
7615    
7616     -int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
7617     +int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
7618     {
7619     int i, err = 0, cnt = 0;
7620    
7621     @@ -730,7 +735,10 @@ restart:
7622     err = xfrm_state_delete(x);
7623     xfrm_audit_state_delete(x, err ? 0 : 1,
7624     task_valid);
7625     - xfrm_state_put(x);
7626     + if (sync)
7627     + xfrm_state_put_sync(x);
7628     + else
7629     + xfrm_state_put(x);
7630     if (!err)
7631     cnt++;
7632    
7633     @@ -2217,7 +2225,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
7634     if (atomic_read(&t->tunnel_users) == 2)
7635     xfrm_state_delete(t);
7636     atomic_dec(&t->tunnel_users);
7637     - xfrm_state_put(t);
7638     + xfrm_state_put_sync(t);
7639     x->tunnel = NULL;
7640     }
7641     }
7642     @@ -2377,8 +2385,8 @@ void xfrm_state_fini(struct net *net)
7643     unsigned int sz;
7644    
7645     flush_work(&net->xfrm.state_hash_work);
7646     - xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
7647     flush_work(&xfrm_state_gc_work);
7648     + xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
7649    
7650     WARN_ON(!list_empty(&net->xfrm.state_all));
7651    
7652     diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
7653     index ab557827aac0..7e4904b93004 100644
7654     --- a/net/xfrm/xfrm_user.c
7655     +++ b/net/xfrm/xfrm_user.c
7656     @@ -1932,7 +1932,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
7657     struct xfrm_usersa_flush *p = nlmsg_data(nlh);
7658     int err;
7659    
7660     - err = xfrm_state_flush(net, p->proto, true);
7661     + err = xfrm_state_flush(net, p->proto, true, false);
7662     if (err) {
7663     if (err == -ESRCH) /* empty table */
7664     return 0;
7665     diff --git a/sound/drivers/opl3/opl3_voice.h b/sound/drivers/opl3/opl3_voice.h
7666     index 5b02bd49fde4..4e4ecc21760b 100644
7667     --- a/sound/drivers/opl3/opl3_voice.h
7668     +++ b/sound/drivers/opl3/opl3_voice.h
7669     @@ -41,7 +41,7 @@ void snd_opl3_timer_func(struct timer_list *t);
7670    
7671     /* Prototypes for opl3_drums.c */
7672     void snd_opl3_load_drums(struct snd_opl3 *opl3);
7673     -void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int on_off, int vel, struct snd_midi_channel *chan);
7674     +void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int vel, int on_off, struct snd_midi_channel *chan);
7675    
7676     /* Prototypes for opl3_oss.c */
7677     #if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
7678     diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
7679     index d77dcba276b5..1eb8b61a185b 100644
7680     --- a/sound/isa/sb/sb8.c
7681     +++ b/sound/isa/sb/sb8.c
7682     @@ -111,6 +111,10 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
7683    
7684     /* block the 0x388 port to avoid PnP conflicts */
7685     acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
7686     + if (!acard->fm_res) {
7687     + err = -EBUSY;
7688     + goto _err;
7689     + }
7690    
7691     if (port[dev] != SNDRV_AUTO_PORT) {
7692     if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
7693     diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
7694     index 907cf1a46712..3ef2b27ebbe8 100644
7695     --- a/sound/pci/echoaudio/echoaudio.c
7696     +++ b/sound/pci/echoaudio/echoaudio.c
7697     @@ -1954,6 +1954,11 @@ static int snd_echo_create(struct snd_card *card,
7698     }
7699     chip->dsp_registers = (volatile u32 __iomem *)
7700     ioremap_nocache(chip->dsp_registers_phys, sz);
7701     + if (!chip->dsp_registers) {
7702     + dev_err(chip->card->dev, "ioremap failed\n");
7703     + snd_echo_free(chip);
7704     + return -ENOMEM;
7705     + }
7706    
7707     if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED,
7708     KBUILD_MODNAME, chip)) {
7709     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7710     index b9d832bde23e..bd60eb7168fa 100644
7711     --- a/sound/pci/hda/patch_realtek.c
7712     +++ b/sound/pci/hda/patch_realtek.c
7713     @@ -5604,6 +5604,7 @@ enum {
7714     ALC298_FIXUP_TPT470_DOCK,
7715     ALC255_FIXUP_DUMMY_LINEOUT_VERB,
7716     ALC255_FIXUP_DELL_HEADSET_MIC,
7717     + ALC256_FIXUP_HUAWEI_MBXP_PINS,
7718     ALC295_FIXUP_HP_X360,
7719     ALC221_FIXUP_HP_HEADSET_MIC,
7720     ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
7721     @@ -5892,6 +5893,22 @@ static const struct hda_fixup alc269_fixups[] = {
7722     .chained = true,
7723     .chain_id = ALC269_FIXUP_HEADSET_MIC
7724     },
7725     + [ALC256_FIXUP_HUAWEI_MBXP_PINS] = {
7726     + .type = HDA_FIXUP_PINS,
7727     + .v.pins = (const struct hda_pintbl[]) {
7728     + {0x12, 0x90a60130},
7729     + {0x13, 0x40000000},
7730     + {0x14, 0x90170110},
7731     + {0x18, 0x411111f0},
7732     + {0x19, 0x04a11040},
7733     + {0x1a, 0x411111f0},
7734     + {0x1b, 0x90170112},
7735     + {0x1d, 0x40759a05},
7736     + {0x1e, 0x411111f0},
7737     + {0x21, 0x04211020},
7738     + { }
7739     + },
7740     + },
7741     [ALC269_FIXUP_ASUS_X101_FUNC] = {
7742     .type = HDA_FIXUP_FUNC,
7743     .v.func = alc269_fixup_x101_headset_mic,
7744     @@ -6885,6 +6902,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7745     SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
7746     SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
7747     SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
7748     + SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MBXP", ALC256_FIXUP_HUAWEI_MBXP_PINS),
7749     SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
7750    
7751     #if 0
7752     diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
7753     index 592efb370c44..f4dc3d445aae 100644
7754     --- a/sound/soc/soc-ops.c
7755     +++ b/sound/soc/soc-ops.c
7756     @@ -373,7 +373,7 @@ int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol,
7757     unsigned int rshift = mc->rshift;
7758     int max = mc->max;
7759     int min = mc->min;
7760     - unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
7761     + unsigned int mask = (1U << (fls(min + max) - 1)) - 1;
7762     unsigned int val;
7763     int ret;
7764    
7765     @@ -418,7 +418,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
7766     unsigned int rshift = mc->rshift;
7767     int max = mc->max;
7768     int min = mc->min;
7769     - unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
7770     + unsigned int mask = (1U << (fls(min + max) - 1)) - 1;
7771     int err = 0;
7772     unsigned int val, val_mask, val2 = 0;
7773    
7774     diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
7775     index 32f4a898e3f2..05c10eb56a0c 100644
7776     --- a/tools/perf/Documentation/perf-config.txt
7777     +++ b/tools/perf/Documentation/perf-config.txt
7778     @@ -114,7 +114,7 @@ Given a $HOME/.perfconfig like this:
7779    
7780     [report]
7781     # Defaults
7782     - sort-order = comm,dso,symbol
7783     + sort_order = comm,dso,symbol
7784     percent-limit = 0
7785     queue-size = 0
7786     children = true
7787     diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
7788     index d21d8751e749..33eefc33e0ea 100644
7789     --- a/tools/perf/builtin-top.c
7790     +++ b/tools/perf/builtin-top.c
7791     @@ -1491,8 +1491,9 @@ int cmd_top(int argc, const char **argv)
7792     annotation_config__init();
7793    
7794     symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
7795     - if (symbol__init(NULL) < 0)
7796     - return -1;
7797     + status = symbol__init(NULL);
7798     + if (status < 0)
7799     + goto out_delete_evlist;
7800    
7801     sort__setup_elide(stdout);
7802    
7803     diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
7804     index d0406116c905..926a8e1b5e94 100644
7805     --- a/tools/perf/tests/evsel-tp-sched.c
7806     +++ b/tools/perf/tests/evsel-tp-sched.c
7807     @@ -85,5 +85,6 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
7808     if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
7809     ret = -1;
7810    
7811     + perf_evsel__delete(evsel);
7812     return ret;
7813     }
7814     diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
7815     index 01f0706995a9..9acc1e80b936 100644
7816     --- a/tools/perf/tests/expr.c
7817     +++ b/tools/perf/tests/expr.c
7818     @@ -19,7 +19,7 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
7819     const char *p;
7820     const char **other;
7821     double val;
7822     - int ret;
7823     + int i, ret;
7824     struct parse_ctx ctx;
7825     int num_other;
7826    
7827     @@ -56,6 +56,9 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
7828     TEST_ASSERT_VAL("find other", !strcmp(other[1], "BAZ"));
7829     TEST_ASSERT_VAL("find other", !strcmp(other[2], "BOZO"));
7830     TEST_ASSERT_VAL("find other", other[3] == NULL);
7831     +
7832     + for (i = 0; i < num_other; i++)
7833     + free((void *)other[i]);
7834     free((void *)other);
7835    
7836     return 0;
7837     diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
7838     index c531e6deb104..493ecb611540 100644
7839     --- a/tools/perf/tests/openat-syscall-all-cpus.c
7840     +++ b/tools/perf/tests/openat-syscall-all-cpus.c
7841     @@ -45,7 +45,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
7842     if (IS_ERR(evsel)) {
7843     tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
7844     pr_debug("%s\n", errbuf);
7845     - goto out_thread_map_delete;
7846     + goto out_cpu_map_delete;
7847     }
7848    
7849     if (perf_evsel__open(evsel, cpus, threads) < 0) {
7850     @@ -119,6 +119,8 @@ out_close_fd:
7851     perf_evsel__close_fd(evsel);
7852     out_evsel_delete:
7853     perf_evsel__delete(evsel);
7854     +out_cpu_map_delete:
7855     + cpu_map__put(cpus);
7856     out_thread_map_delete:
7857     thread_map__put(threads);
7858     return err;
7859     diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
7860     index 04b1d53e4bf9..1d352621bd48 100644
7861     --- a/tools/perf/util/build-id.c
7862     +++ b/tools/perf/util/build-id.c
7863     @@ -183,6 +183,7 @@ char *build_id_cache__linkname(const char *sbuild_id, char *bf, size_t size)
7864     return bf;
7865     }
7866    
7867     +/* The caller is responsible to free the returned buffer. */
7868     char *build_id_cache__origname(const char *sbuild_id)
7869     {
7870     char *linkname;
7871     diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
7872     index 5ac157056cdf..9bb742accfa5 100644
7873     --- a/tools/perf/util/config.c
7874     +++ b/tools/perf/util/config.c
7875     @@ -628,11 +628,10 @@ static int collect_config(const char *var, const char *value,
7876     }
7877    
7878     ret = set_value(item, value);
7879     - return ret;
7880    
7881     out_free:
7882     free(key);
7883     - return -1;
7884     + return ret;
7885     }
7886    
7887     int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
7888     diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
7889     index e7dbdcc8d465..b65ad5a273eb 100644
7890     --- a/tools/perf/util/evsel.c
7891     +++ b/tools/perf/util/evsel.c
7892     @@ -1274,6 +1274,7 @@ void perf_evsel__exit(struct perf_evsel *evsel)
7893     {
7894     assert(list_empty(&evsel->node));
7895     assert(evsel->evlist == NULL);
7896     + perf_evsel__free_counts(evsel);
7897     perf_evsel__free_fd(evsel);
7898     perf_evsel__free_id(evsel);
7899     perf_evsel__free_config_terms(evsel);
7900     diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
7901     index 828cb9794c76..e1e94b44d588 100644
7902     --- a/tools/perf/util/hist.c
7903     +++ b/tools/perf/util/hist.c
7904     @@ -1048,8 +1048,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
7905    
7906     err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
7907     iter->evsel, al, max_stack_depth);
7908     - if (err)
7909     + if (err) {
7910     + map__put(alm);
7911     return err;
7912     + }
7913    
7914     err = iter->ops->prepare_entry(iter, al);
7915     if (err)
7916     diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
7917     index ebb18a9bc460..1a7c76d2baa8 100644
7918     --- a/tools/perf/util/parse-events.c
7919     +++ b/tools/perf/util/parse-events.c
7920     @@ -2263,6 +2263,7 @@ static bool is_event_supported(u8 type, unsigned config)
7921     perf_evsel__delete(evsel);
7922     }
7923    
7924     + thread_map__put(tmap);
7925     return ret;
7926     }
7927    
7928     @@ -2333,6 +2334,7 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
7929     printf(" %-50s [%s]\n", buf, "SDT event");
7930     free(buf);
7931     }
7932     + free(path);
7933     } else
7934     printf(" %-50s [%s]\n", nd->s, "SDT event");
7935     if (nd2) {
7936     diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
7937     index 980bd9d20646..83964f796edb 100644
7938     --- a/tools/power/x86/turbostat/turbostat.c
7939     +++ b/tools/power/x86/turbostat/turbostat.c
7940     @@ -5054,6 +5054,9 @@ int fork_it(char **argv)
7941     signal(SIGQUIT, SIG_IGN);
7942     if (waitpid(child_pid, &status, 0) == -1)
7943     err(status, "waitpid");
7944     +
7945     + if (WIFEXITED(status))
7946     + status = WEXITSTATUS(status);
7947     }
7948     /*
7949     * n.b. fork_it() does not check for errors from for_all_cpus()
7950     diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c
7951     index 4204359c9fee..8159fd98680b 100644
7952     --- a/tools/usb/usbip/libsrc/vhci_driver.c
7953     +++ b/tools/usb/usbip/libsrc/vhci_driver.c
7954     @@ -150,7 +150,7 @@ static int get_nports(struct udev_device *hc_device)
7955    
7956     static int vhci_hcd_filter(const struct dirent *dirent)
7957     {
7958     - return strcmp(dirent->d_name, "vhci_hcd") >= 0;
7959     + return !strncmp(dirent->d_name, "vhci_hcd.", 9);
7960     }
7961    
7962     static int get_ncontrollers(void)