Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0157-4.9.58-all-fixes.patch

Revision 3042
Wed Dec 20 11:49:07 2017 UTC by niro
File size: 51106 bytes
-linux-4.9.58
1 niro 3042 diff --git a/Makefile b/Makefile
2     index d5a2ab9b3291..32686667bb7e 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 57
9     +SUBLEVEL = 58
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
14     index ddd1c918103b..c5d351786416 100644
15     --- a/arch/mips/include/asm/irq.h
16     +++ b/arch/mips/include/asm/irq.h
17     @@ -18,7 +18,7 @@
18     #include <irq.h>
19    
20     #define IRQ_STACK_SIZE THREAD_SIZE
21     -#define IRQ_STACK_START (IRQ_STACK_SIZE - sizeof(unsigned long))
22     +#define IRQ_STACK_START (IRQ_STACK_SIZE - 16)
23    
24     extern void *irq_stack[NR_CPUS];
25    
26     diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
27     index 4d0a4e5017c2..8e6dd17fe603 100644
28     --- a/arch/powerpc/perf/isa207-common.h
29     +++ b/arch/powerpc/perf/isa207-common.h
30     @@ -201,6 +201,10 @@
31     CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
32     CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL
33    
34     +/*
35     + * Lets restrict use of PMC5 for instruction counting.
36     + */
37     +#define P9_DD1_TEST_ADDER (ISA207_TEST_ADDER | CNST_PMC_VAL(5))
38    
39     /* Bits in MMCR1 for PowerISA v2.07 */
40     #define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
41     diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
42     index 8e9a81967ff8..9abcd8f65504 100644
43     --- a/arch/powerpc/perf/power9-pmu.c
44     +++ b/arch/powerpc/perf/power9-pmu.c
45     @@ -295,7 +295,7 @@ static struct power_pmu power9_pmu = {
46     .name = "POWER9",
47     .n_counter = MAX_PMU_COUNTERS,
48     .add_fields = ISA207_ADD_FIELDS,
49     - .test_adder = ISA207_TEST_ADDER,
50     + .test_adder = P9_DD1_TEST_ADDER,
51     .compute_mmcr = isa207_compute_mmcr,
52     .config_bhrb = power9_config_bhrb,
53     .bhrb_filter_map = power9_bhrb_filter_map,
54     diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
55     index be0cc1beed41..3fae200dd251 100644
56     --- a/arch/sparc/include/asm/setup.h
57     +++ b/arch/sparc/include/asm/setup.h
58     @@ -59,8 +59,11 @@ extern atomic_t dcpage_flushes;
59     extern atomic_t dcpage_flushes_xcall;
60    
61     extern int sysctl_tsb_ratio;
62     -#endif
63    
64     +#ifdef CONFIG_SERIAL_SUNHV
65     +void sunhv_migrate_hvcons_irq(int cpu);
66     +#endif
67     +#endif
68     void sun_do_break(void);
69     extern int stop_a_enabled;
70     extern int scons_pwroff;
71     diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
72     index 2deb89ef1d5f..ca7cb8e57ab0 100644
73     --- a/arch/sparc/kernel/smp_64.c
74     +++ b/arch/sparc/kernel/smp_64.c
75     @@ -1465,8 +1465,12 @@ void smp_send_stop(void)
76     int cpu;
77    
78     if (tlb_type == hypervisor) {
79     + int this_cpu = smp_processor_id();
80     +#ifdef CONFIG_SERIAL_SUNHV
81     + sunhv_migrate_hvcons_irq(this_cpu);
82     +#endif
83     for_each_online_cpu(cpu) {
84     - if (cpu == smp_processor_id())
85     + if (cpu == this_cpu)
86     continue;
87     #ifdef CONFIG_SUN_LDOMS
88     if (ldom_domaining_enabled) {
89     diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
90     index 9a324fc8bed8..3e27ded6ac65 100644
91     --- a/arch/x86/mm/init_64.c
92     +++ b/arch/x86/mm/init_64.c
93     @@ -689,7 +689,7 @@ static void __meminit free_pagetable(struct page *page, int order)
94     if (PageReserved(page)) {
95     __ClearPageReserved(page);
96    
97     - magic = (unsigned long)page->lru.next;
98     + magic = (unsigned long)page->freelist;
99     if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
100     while (nr_pages--)
101     put_page_bootmem(page++);
102     diff --git a/block/bsg-lib.c b/block/bsg-lib.c
103     index 341b8d858e67..650f427d915b 100644
104     --- a/block/bsg-lib.c
105     +++ b/block/bsg-lib.c
106     @@ -147,6 +147,7 @@ static int bsg_create_job(struct device *dev, struct request *req)
107     failjob_rls_rqst_payload:
108     kfree(job->request_payload.sg_list);
109     failjob_rls_job:
110     + kfree(job);
111     return -ENOMEM;
112     }
113    
114     diff --git a/crypto/Kconfig b/crypto/Kconfig
115     index 84d71482bf08..fa98ad7edb60 100644
116     --- a/crypto/Kconfig
117     +++ b/crypto/Kconfig
118     @@ -360,6 +360,7 @@ config CRYPTO_XTS
119     select CRYPTO_BLKCIPHER
120     select CRYPTO_MANAGER
121     select CRYPTO_GF128MUL
122     + select CRYPTO_ECB
123     help
124     XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain,
125     key size 256, 384 or 512 bits. This implementation currently
126     diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
127     index d02f2c14df32..c738baeb6d45 100644
128     --- a/drivers/bluetooth/btmrvl_sdio.c
129     +++ b/drivers/bluetooth/btmrvl_sdio.c
130     @@ -1682,8 +1682,12 @@ static int btmrvl_sdio_resume(struct device *dev)
131     /* Disable platform specific wakeup interrupt */
132     if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) {
133     disable_irq_wake(card->plt_wake_cfg->irq_bt);
134     - if (!card->plt_wake_cfg->wake_by_bt)
135     - disable_irq(card->plt_wake_cfg->irq_bt);
136     + disable_irq(card->plt_wake_cfg->irq_bt);
137     + if (card->plt_wake_cfg->wake_by_bt)
138     + /* Undo our disable, since interrupt handler already
139     + * did this.
140     + */
141     + enable_irq(card->plt_wake_cfg->irq_bt);
142     }
143    
144     return 0;
145     diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
146     index d89b8afe23b6..bc3917d6015a 100644
147     --- a/drivers/cpufreq/Kconfig.arm
148     +++ b/drivers/cpufreq/Kconfig.arm
149     @@ -244,7 +244,7 @@ config ARM_PXA2xx_CPUFREQ
150    
151     config ACPI_CPPC_CPUFREQ
152     tristate "CPUFreq driver based on the ACPI CPPC spec"
153     - depends on ACPI
154     + depends on ACPI_PROCESSOR
155     select ACPI_CPPC_LIB
156     default n
157     help
158     diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
159     index daaac2c79ca7..7db692ed3dea 100644
160     --- a/drivers/edac/mce_amd.c
161     +++ b/drivers/edac/mce_amd.c
162     @@ -981,20 +981,19 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
163     pr_cont("]: 0x%016llx\n", m->status);
164    
165     if (m->status & MCI_STATUS_ADDRV)
166     - pr_emerg(HW_ERR "Error Addr: 0x%016llx", m->addr);
167     + pr_emerg(HW_ERR "Error Addr: 0x%016llx\n", m->addr);
168    
169     if (boot_cpu_has(X86_FEATURE_SMCA)) {
170     + pr_emerg(HW_ERR "IPID: 0x%016llx", m->ipid);
171     +
172     if (m->status & MCI_STATUS_SYNDV)
173     pr_cont(", Syndrome: 0x%016llx", m->synd);
174    
175     - pr_cont(", IPID: 0x%016llx", m->ipid);
176     -
177     pr_cont("\n");
178    
179     decode_smca_errors(m);
180     goto err_code;
181     - } else
182     - pr_cont("\n");
183     + }
184    
185     if (!fam_ops)
186     goto err_code;
187     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
188     index 264899df9bfc..05ff98b43c50 100644
189     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
190     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
191     @@ -491,6 +491,9 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
192     case TTM_PL_TT:
193     break;
194     case TTM_PL_VRAM:
195     + if (mem->start == AMDGPU_BO_INVALID_OFFSET)
196     + return -EINVAL;
197     +
198     mem->bus.offset = mem->start << PAGE_SHIFT;
199     /* check if it's visible */
200     if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
201     diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
202     index 6584d505460c..133f89600279 100644
203     --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
204     +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
205     @@ -1129,7 +1129,7 @@ gf100_gr_trap_intr(struct gf100_gr *gr)
206     if (trap & 0x00000008) {
207     u32 stat = nvkm_rd32(device, 0x408030);
208    
209     - nvkm_snprintbf(error, sizeof(error), gf100_m2mf_error,
210     + nvkm_snprintbf(error, sizeof(error), gf100_ccache_error,
211     stat & 0x3fffffff);
212     nvkm_error(subdev, "CCACHE %08x [%s]\n", stat, error);
213     nvkm_wr32(device, 0x408030, 0xc0000000);
214     diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
215     index 0b86c6173e07..c925a690cb32 100644
216     --- a/drivers/i2c/busses/i2c-at91.c
217     +++ b/drivers/i2c/busses/i2c-at91.c
218     @@ -1180,6 +1180,7 @@ static int at91_twi_suspend_noirq(struct device *dev)
219    
220     static int at91_twi_resume_noirq(struct device *dev)
221     {
222     + struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
223     int ret;
224    
225     if (!pm_runtime_status_suspended(dev)) {
226     @@ -1191,6 +1192,8 @@ static int at91_twi_resume_noirq(struct device *dev)
227     pm_runtime_mark_last_busy(dev);
228     pm_request_autosuspend(dev);
229    
230     + at91_init_twi_bus(twi_dev);
231     +
232     return 0;
233     }
234    
235     diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
236     index 0a6beb3d99cb..56cf5907a5f0 100644
237     --- a/drivers/iio/adc/xilinx-xadc-core.c
238     +++ b/drivers/iio/adc/xilinx-xadc-core.c
239     @@ -1208,7 +1208,7 @@ static int xadc_probe(struct platform_device *pdev)
240    
241     ret = xadc->ops->setup(pdev, indio_dev, irq);
242     if (ret)
243     - goto err_free_samplerate_trigger;
244     + goto err_clk_disable_unprepare;
245    
246     ret = request_irq(irq, xadc->ops->interrupt_handler, 0,
247     dev_name(&pdev->dev), indio_dev);
248     @@ -1268,6 +1268,8 @@ static int xadc_probe(struct platform_device *pdev)
249    
250     err_free_irq:
251     free_irq(irq, indio_dev);
252     +err_clk_disable_unprepare:
253     + clk_disable_unprepare(xadc->clk);
254     err_free_samplerate_trigger:
255     if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
256     iio_trigger_free(xadc->samplerate_trigger);
257     @@ -1277,8 +1279,6 @@ static int xadc_probe(struct platform_device *pdev)
258     err_triggered_buffer_cleanup:
259     if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
260     iio_triggered_buffer_cleanup(indio_dev);
261     -err_clk_disable_unprepare:
262     - clk_disable_unprepare(xadc->clk);
263     err_device_free:
264     kfree(indio_dev->channels);
265    
266     diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
267     index 34cfd341b6d6..a3dd27b1305d 100644
268     --- a/drivers/infiniband/hw/hfi1/init.c
269     +++ b/drivers/infiniband/hw/hfi1/init.c
270     @@ -297,14 +297,15 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
271     * The resulting value will be rounded down to the closest
272     * multiple of dd->rcv_entries.group_size.
273     */
274     - rcd->egrbufs.buffers = kcalloc(rcd->egrbufs.count,
275     - sizeof(*rcd->egrbufs.buffers),
276     - GFP_KERNEL);
277     + rcd->egrbufs.buffers = kzalloc_node(
278     + rcd->egrbufs.count * sizeof(*rcd->egrbufs.buffers),
279     + GFP_KERNEL, numa);
280     if (!rcd->egrbufs.buffers)
281     goto bail;
282     - rcd->egrbufs.rcvtids = kcalloc(rcd->egrbufs.count,
283     - sizeof(*rcd->egrbufs.rcvtids),
284     - GFP_KERNEL);
285     + rcd->egrbufs.rcvtids = kzalloc_node(
286     + rcd->egrbufs.count *
287     + sizeof(*rcd->egrbufs.rcvtids),
288     + GFP_KERNEL, numa);
289     if (!rcd->egrbufs.rcvtids)
290     goto bail;
291     rcd->egrbufs.size = eager_buffer_size;
292     @@ -322,8 +323,8 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
293     rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
294    
295     if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
296     - rcd->opstats = kzalloc(sizeof(*rcd->opstats),
297     - GFP_KERNEL);
298     + rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
299     + GFP_KERNEL, numa);
300     if (!rcd->opstats)
301     goto bail;
302     }
303     diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
304     index 4ac8f330c5cb..335613a1a46a 100644
305     --- a/drivers/infiniband/hw/hfi1/pcie.c
306     +++ b/drivers/infiniband/hw/hfi1/pcie.c
307     @@ -673,12 +673,12 @@ MODULE_PARM_DESC(pcie_retry, "Driver will try this many times to reach requested
308    
309     #define UNSET_PSET 255
310     #define DEFAULT_DISCRETE_PSET 2 /* discrete HFI */
311     -#define DEFAULT_MCP_PSET 4 /* MCP HFI */
312     +#define DEFAULT_MCP_PSET 6 /* MCP HFI */
313     static uint pcie_pset = UNSET_PSET;
314     module_param(pcie_pset, uint, S_IRUGO);
315     MODULE_PARM_DESC(pcie_pset, "PCIe Eq Pset value to use, range is 0-10");
316    
317     -static uint pcie_ctle = 1; /* discrete on, integrated off */
318     +static uint pcie_ctle = 3; /* discrete on, integrated on */
319     module_param(pcie_ctle, uint, S_IRUGO);
320     MODULE_PARM_DESC(pcie_ctle, "PCIe static CTLE mode, bit 0 - discrete on/off, bit 1 - integrated on/off");
321    
322     diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
323     index 1eef56a89b1f..05bbf171df37 100644
324     --- a/drivers/irqchip/irq-crossbar.c
325     +++ b/drivers/irqchip/irq-crossbar.c
326     @@ -198,7 +198,8 @@ static const struct irq_domain_ops crossbar_domain_ops = {
327    
328     static int __init crossbar_of_init(struct device_node *node)
329     {
330     - int i, size, max = 0, reserved = 0, entry;
331     + int i, size, reserved = 0;
332     + u32 max = 0, entry;
333     const __be32 *irqsr;
334     int ret = -ENOMEM;
335    
336     diff --git a/drivers/md/linear.c b/drivers/md/linear.c
337     index b0c0aef92a37..12abf69d568a 100644
338     --- a/drivers/md/linear.c
339     +++ b/drivers/md/linear.c
340     @@ -223,7 +223,8 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
341     * oldconf until no one uses it anymore.
342     */
343     mddev_suspend(mddev);
344     - oldconf = rcu_dereference(mddev->private);
345     + oldconf = rcu_dereference_protected(mddev->private,
346     + lockdep_is_held(&mddev->reconfig_mutex));
347     mddev->raid_disks++;
348     WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
349     "copied raid_disks doesn't match mddev->raid_disks");
350     diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
351     index 0a4e81a253fb..ed6fae964ec5 100644
352     --- a/drivers/net/ethernet/marvell/mvpp2.c
353     +++ b/drivers/net/ethernet/marvell/mvpp2.c
354     @@ -4413,13 +4413,12 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
355     struct mvpp2_txq_pcpu_buf *tx_buf =
356     txq_pcpu->buffs + txq_pcpu->txq_get_index;
357    
358     - mvpp2_txq_inc_get(txq_pcpu);
359     -
360     dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
361     tx_buf->size, DMA_TO_DEVICE);
362     - if (!tx_buf->skb)
363     - continue;
364     - dev_kfree_skb_any(tx_buf->skb);
365     + if (tx_buf->skb)
366     + dev_kfree_skb_any(tx_buf->skb);
367     +
368     + mvpp2_txq_inc_get(txq_pcpu);
369     }
370     }
371    
372     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
373     index a5fc46bbcbe2..d4d97ca12e83 100644
374     --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
375     +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
376     @@ -88,10 +88,17 @@ void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
377     }
378     }
379    
380     +#define MLX4_EN_WRAP_AROUND_SEC 10UL
381     +/* By scheduling the overflow check every 5 seconds, we have a reasonably
382     + * good chance we wont miss a wrap around.
383     + * TOTO: Use a timer instead of a work queue to increase the guarantee.
384     + */
385     +#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)
386     +
387     void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
388     {
389     bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
390     - mdev->overflow_period);
391     + MLX4_EN_OVERFLOW_PERIOD);
392     unsigned long flags;
393    
394     if (timeout) {
395     @@ -236,7 +243,6 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
396     .enable = mlx4_en_phc_enable,
397     };
398    
399     -#define MLX4_EN_WRAP_AROUND_SEC 10ULL
400    
401     /* This function calculates the max shift that enables the user range
402     * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
403     @@ -261,7 +267,6 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
404     {
405     struct mlx4_dev *dev = mdev->dev;
406     unsigned long flags;
407     - u64 ns, zero = 0;
408    
409     /* mlx4_en_init_timestamp is called for each netdev.
410     * mdev->ptp_clock is common for all ports, skip initialization if
411     @@ -285,13 +290,6 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
412     ktime_to_ns(ktime_get_real()));
413     write_unlock_irqrestore(&mdev->clock_lock, flags);
414    
415     - /* Calculate period in seconds to call the overflow watchdog - to make
416     - * sure counter is checked at least once every wrap around.
417     - */
418     - ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero);
419     - do_div(ns, NSEC_PER_SEC / 2 / HZ);
420     - mdev->overflow_period = ns;
421     -
422     /* Configure the PHC */
423     mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
424     snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");
425     diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
426     index ba652d8a2b93..727122de7df0 100644
427     --- a/drivers/net/ethernet/mellanox/mlx4/main.c
428     +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
429     @@ -841,8 +841,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
430     return -ENOSYS;
431     }
432    
433     - mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
434     -
435     dev->caps.hca_core_clock = hca_param.hca_core_clock;
436    
437     memset(&dev_cap, 0, sizeof(dev_cap));
438     diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
439     index a3528dd1e72e..df0f39611c5e 100644
440     --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
441     +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
442     @@ -419,7 +419,6 @@ struct mlx4_en_dev {
443     struct cyclecounter cycles;
444     struct timecounter clock;
445     unsigned long last_overflow_check;
446     - unsigned long overflow_period;
447     struct ptp_clock *ptp_clock;
448     struct ptp_clock_info ptp_clock_info;
449     struct notifier_block nb;
450     diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
451     index 653bb5735f0c..433f8be57847 100644
452     --- a/drivers/net/ethernet/qlogic/qed/qed.h
453     +++ b/drivers/net/ethernet/qlogic/qed/qed.h
454     @@ -642,7 +642,9 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
455     #define OOO_LB_TC 9
456    
457     int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
458     -void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
459     +void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
460     + struct qed_ptt *p_ptt,
461     + u32 min_pf_rate);
462    
463     void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
464     #define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
465     diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
466     index edae5fc5fccd..afe5e57d9acb 100644
467     --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
468     +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
469     @@ -877,7 +877,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
470     /* Either EDPM is mandatory, or we are attempting to allocate a
471     * WID per CPU.
472     */
473     - n_cpus = num_active_cpus();
474     + n_cpus = num_present_cpus();
475     rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
476     }
477    
478     @@ -2732,7 +2732,8 @@ int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
479     }
480    
481     /* API to configure WFQ from mcp link change */
482     -void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
483     +void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
484     + struct qed_ptt *p_ptt, u32 min_pf_rate)
485     {
486     int i;
487    
488     @@ -2746,8 +2747,7 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
489     for_each_hwfn(cdev, i) {
490     struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
491    
492     - __qed_configure_vp_wfq_on_link_change(p_hwfn,
493     - p_hwfn->p_dpc_ptt,
494     + __qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
495     min_pf_rate);
496     }
497     }
498     diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
499     index bdc9ba92f6d4..8b7d2f963ee1 100644
500     --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
501     +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
502     @@ -628,7 +628,8 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
503    
504     /* Min bandwidth configuration */
505     __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
506     - qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);
507     + qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
508     + p_link->min_pf_rate);
509    
510     p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
511     p_link->an_complete = !!(status &
512     diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
513     index f3a825a8f8d5..d9dcb0d1714c 100644
514     --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
515     +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
516     @@ -1766,13 +1766,13 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
517     if (rc)
518     goto err_resp;
519    
520     - dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
521     - p_resp_ramrod_res, resp_ramrod_res_phys);
522     -
523     out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
524     rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
525     ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
526    
527     + dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
528     + p_resp_ramrod_res, resp_ramrod_res_phys);
529     +
530     if (!(qp->req_offloaded)) {
531     /* Don't send query qp for the requester */
532     out_params->sq_psn = qp->sq_psn;
533     @@ -1813,9 +1813,6 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
534     if (rc)
535     goto err_req;
536    
537     - dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
538     - p_req_ramrod_res, req_ramrod_res_phys);
539     -
540     out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
541     sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
542     ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
543     @@ -1823,6 +1820,9 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
544     GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
545     ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
546    
547     + dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
548     + p_req_ramrod_res, req_ramrod_res_phys);
549     +
550     out_params->draining = false;
551    
552     if (rq_err_state)
553     diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
554     index 7567cc464b88..634e4149af22 100644
555     --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
556     +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
557     @@ -1221,7 +1221,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
558     struct qede_rx_queue *rxq = NULL;
559     struct sw_rx_data *sw_rx_data;
560     union eth_rx_cqe *cqe;
561     - int i, rc = 0;
562     + int i, iter, rc = 0;
563     u8 *data_ptr;
564    
565     for_each_queue(i) {
566     @@ -1240,7 +1240,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
567     * enabled. This is because the queue 0 is configured as the default
568     * queue and that the loopback traffic is not IP.
569     */
570     - for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
571     + for (iter = 0; iter < QEDE_SELFTEST_POLL_COUNT; iter++) {
572     if (!qede_has_rx_work(rxq)) {
573     usleep_range(100, 200);
574     continue;
575     @@ -1287,7 +1287,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
576     qed_chain_recycle_consumed(&rxq->rx_comp_ring);
577     }
578    
579     - if (i == QEDE_SELFTEST_POLL_COUNT) {
580     + if (iter == QEDE_SELFTEST_POLL_COUNT) {
581     DP_NOTICE(edev, "Failed to receive the traffic\n");
582     return -1;
583     }
584     diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
585     index c06932c5ecdb..d2a28a9d3209 100644
586     --- a/drivers/net/wireless/mac80211_hwsim.c
587     +++ b/drivers/net/wireless/mac80211_hwsim.c
588     @@ -3046,6 +3046,7 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
589     static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
590     {
591     struct hwsim_new_radio_params param = { 0 };
592     + const char *hwname = NULL;
593    
594     param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
595     param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
596     @@ -3059,8 +3060,14 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
597     if (info->attrs[HWSIM_ATTR_NO_VIF])
598     param.no_vif = true;
599    
600     - if (info->attrs[HWSIM_ATTR_RADIO_NAME])
601     - param.hwname = nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
602     + if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
603     + hwname = kasprintf(GFP_KERNEL, "%.*s",
604     + nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
605     + (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
606     + if (!hwname)
607     + return -ENOMEM;
608     + param.hwname = hwname;
609     + }
610    
611     if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
612     param.use_chanctx = true;
613     @@ -3088,11 +3095,15 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
614     s64 idx = -1;
615     const char *hwname = NULL;
616    
617     - if (info->attrs[HWSIM_ATTR_RADIO_ID])
618     + if (info->attrs[HWSIM_ATTR_RADIO_ID]) {
619     idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
620     - else if (info->attrs[HWSIM_ATTR_RADIO_NAME])
621     - hwname = (void *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
622     - else
623     + } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
624     + hwname = kasprintf(GFP_KERNEL, "%.*s",
625     + nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
626     + (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
627     + if (!hwname)
628     + return -ENOMEM;
629     + } else
630     return -EINVAL;
631    
632     spin_lock_bh(&hwsim_radio_lock);
633     @@ -3101,7 +3112,8 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
634     if (data->idx != idx)
635     continue;
636     } else {
637     - if (strcmp(hwname, wiphy_name(data->hw->wiphy)))
638     + if (!hwname ||
639     + strcmp(hwname, wiphy_name(data->hw->wiphy)))
640     continue;
641     }
642    
643     @@ -3112,10 +3124,12 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
644     spin_unlock_bh(&hwsim_radio_lock);
645     mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
646     info);
647     + kfree(hwname);
648     return 0;
649     }
650     spin_unlock_bh(&hwsim_radio_lock);
651    
652     + kfree(hwname);
653     return -ENODEV;
654     }
655    
656     diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
657     index e8c5dddc54ba..3c4c58b9fe76 100644
658     --- a/drivers/net/xen-netback/hash.c
659     +++ b/drivers/net/xen-netback/hash.c
660     @@ -39,7 +39,7 @@ static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
661     unsigned long flags;
662     bool found;
663    
664     - new = kmalloc(sizeof(*entry), GFP_KERNEL);
665     + new = kmalloc(sizeof(*entry), GFP_ATOMIC);
666     if (!new)
667     return;
668    
669     diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
670     index 9faccfceb53c..9403245503de 100644
671     --- a/drivers/regulator/core.c
672     +++ b/drivers/regulator/core.c
673     @@ -4507,6 +4507,16 @@ static int __init regulator_init_complete(void)
674     if (of_have_populated_dt())
675     has_full_constraints = true;
676    
677     + /*
678     + * Regulators may had failed to resolve their input supplies
679     + * when were registered, either because the input supply was
680     + * not registered yet or because its parent device was not
681     + * bound yet. So attempt to resolve the input supplies for
682     + * pending regulators before trying to disable unused ones.
683     + */
684     + class_for_each_device(&regulator_class, NULL, NULL,
685     + regulator_register_resolve_supply);
686     +
687     /* If we have a full configuration then disable any regulators
688     * we have permission to change the status for and which are
689     * not in use or always_on. This is effectively the default
690     diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
691     index 375d81850f15..d5f6fbfa17bf 100644
692     --- a/drivers/scsi/device_handler/scsi_dh_emc.c
693     +++ b/drivers/scsi/device_handler/scsi_dh_emc.c
694     @@ -461,7 +461,7 @@ static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
695     static int clariion_std_inquiry(struct scsi_device *sdev,
696     struct clariion_dh_data *csdev)
697     {
698     - int err;
699     + int err = SCSI_DH_OK;
700     char *sp_model;
701    
702     err = send_inquiry_cmd(sdev, 0, csdev);
703     diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
704     index 6d459ef8c121..f72eebc71dd8 100644
705     --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
706     +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
707     @@ -106,8 +106,14 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
708    
709     g_virt_to_bus_offset = virt_to_dma(dev, (void *)0);
710    
711     - (void)of_property_read_u32(dev->of_node, "cache-line-size",
712     + err = of_property_read_u32(dev->of_node, "cache-line-size",
713     &g_cache_line_size);
714     +
715     + if (err) {
716     + dev_err(dev, "Missing cache-line-size property\n");
717     + return -ENODEV;
718     + }
719     +
720     g_fragments_size = 2 * g_cache_line_size;
721    
722     /* Allocate space for the channels in coherent memory */
723     diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
724     index efc453ef6831..ab92a1bc9666 100644
725     --- a/drivers/target/iscsi/iscsi_target_erl0.c
726     +++ b/drivers/target/iscsi/iscsi_target_erl0.c
727     @@ -44,10 +44,8 @@ void iscsit_set_dataout_sequence_values(
728     */
729     if (cmd->unsolicited_data) {
730     cmd->seq_start_offset = cmd->write_data_done;
731     - cmd->seq_end_offset = (cmd->write_data_done +
732     - ((cmd->se_cmd.data_length >
733     - conn->sess->sess_ops->FirstBurstLength) ?
734     - conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length));
735     + cmd->seq_end_offset = min(cmd->se_cmd.data_length,
736     + conn->sess->sess_ops->FirstBurstLength);
737     return;
738     }
739    
740     diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
741     index 4e603d060e80..59828d819145 100644
742     --- a/drivers/tty/serial/sunhv.c
743     +++ b/drivers/tty/serial/sunhv.c
744     @@ -398,6 +398,12 @@ static struct uart_driver sunhv_reg = {
745    
746     static struct uart_port *sunhv_port;
747    
748     +void sunhv_migrate_hvcons_irq(int cpu)
749     +{
750     + /* Migrate hvcons irq to param cpu */
751     + irq_force_affinity(sunhv_port->irq, cpumask_of(cpu));
752     +}
753     +
754     /* Copy 's' into the con_write_page, decoding "\n" into
755     * "\r\n" along the way. We have to return two lengths
756     * because the caller needs to know how much to advance
757     diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
758     index f92c680e3937..c61ddbf94bc7 100644
759     --- a/drivers/usb/dwc3/gadget.c
760     +++ b/drivers/usb/dwc3/gadget.c
761     @@ -817,9 +817,42 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
762     if (!node) {
763     trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
764    
765     + /*
766     + * USB Specification 2.0 Section 5.9.2 states that: "If
767     + * there is only a single transaction in the microframe,
768     + * only a DATA0 data packet PID is used. If there are
769     + * two transactions per microframe, DATA1 is used for
770     + * the first transaction data packet and DATA0 is used
771     + * for the second transaction data packet. If there are
772     + * three transactions per microframe, DATA2 is used for
773     + * the first transaction data packet, DATA1 is used for
774     + * the second, and DATA0 is used for the third."
775     + *
776     + * IOW, we should satisfy the following cases:
777     + *
778     + * 1) length <= maxpacket
779     + * - DATA0
780     + *
781     + * 2) maxpacket < length <= (2 * maxpacket)
782     + * - DATA1, DATA0
783     + *
784     + * 3) (2 * maxpacket) < length <= (3 * maxpacket)
785     + * - DATA2, DATA1, DATA0
786     + */
787     if (speed == USB_SPEED_HIGH) {
788     struct usb_ep *ep = &dep->endpoint;
789     - trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1);
790     + unsigned int mult = ep->mult - 1;
791     + unsigned int maxp;
792     +
793     + maxp = usb_endpoint_maxp(ep->desc) & 0x07ff;
794     +
795     + if (length <= (2 * maxp))
796     + mult--;
797     +
798     + if (length <= maxp)
799     + mult--;
800     +
801     + trb->size |= DWC3_TRB_SIZE_PCM1(mult);
802     }
803     } else {
804     trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
805     diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
806     index 8e302d0e346c..3efa295ac627 100644
807     --- a/drivers/watchdog/kempld_wdt.c
808     +++ b/drivers/watchdog/kempld_wdt.c
809     @@ -140,12 +140,19 @@ static int kempld_wdt_set_stage_timeout(struct kempld_wdt_data *wdt_data,
810     unsigned int timeout)
811     {
812     struct kempld_device_data *pld = wdt_data->pld;
813     - u32 prescaler = kempld_prescaler[PRESCALER_21];
814     + u32 prescaler;
815     u64 stage_timeout64;
816     u32 stage_timeout;
817     u32 remainder;
818     u8 stage_cfg;
819    
820     +#if GCC_VERSION < 40400
821     + /* work around a bug compiling do_div() */
822     + prescaler = READ_ONCE(kempld_prescaler[PRESCALER_21]);
823     +#else
824     + prescaler = kempld_prescaler[PRESCALER_21];
825     +#endif
826     +
827     if (!stage)
828     return -EINVAL;
829    
830     diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
831     index 71261b459863..77f9efc1f7aa 100644
832     --- a/fs/btrfs/send.c
833     +++ b/fs/btrfs/send.c
834     @@ -1680,6 +1680,9 @@ static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
835     {
836     int ret;
837    
838     + if (ino == BTRFS_FIRST_FREE_OBJECTID)
839     + return 1;
840     +
841     ret = get_cur_inode_state(sctx, ino, gen);
842     if (ret < 0)
843     goto out;
844     @@ -1865,7 +1868,7 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
845     * not deleted and then re-created, if it was then we have no overwrite
846     * and we can just unlink this entry.
847     */
848     - if (sctx->parent_root) {
849     + if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
850     ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
851     NULL, NULL, NULL);
852     if (ret < 0 && ret != -ENOENT)
853     diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
854     index 953275b651bc..4a6df2ce0f76 100644
855     --- a/fs/ceph/inode.c
856     +++ b/fs/ceph/inode.c
857     @@ -1323,8 +1323,8 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
858     ceph_dir_clear_ordered(dir);
859     dout("d_delete %p\n", dn);
860     d_delete(dn);
861     - } else {
862     - if (have_lease && d_unhashed(dn))
863     + } else if (have_lease) {
864     + if (d_unhashed(dn))
865     d_add(dn, NULL);
866     update_dentry_lease(dn, rinfo->dlease,
867     session,
868     diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
869     index 7d752d53353a..4c9c72f26eb9 100644
870     --- a/fs/ceph/ioctl.c
871     +++ b/fs/ceph/ioctl.c
872     @@ -25,7 +25,7 @@ static long ceph_ioctl_get_layout(struct file *file, void __user *arg)
873     l.stripe_count = ci->i_layout.stripe_count;
874     l.object_size = ci->i_layout.object_size;
875     l.data_pool = ci->i_layout.pool_id;
876     - l.preferred_osd = (s32)-1;
877     + l.preferred_osd = -1;
878     if (copy_to_user(arg, &l, sizeof(l)))
879     return -EFAULT;
880     }
881     @@ -97,7 +97,7 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
882     nl.data_pool = ci->i_layout.pool_id;
883    
884     /* this is obsolete, and always -1 */
885     - nl.preferred_osd = le64_to_cpu(-1);
886     + nl.preferred_osd = -1;
887    
888     err = __validate_layout(mdsc, &nl);
889     if (err)
890     diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
891     index e3e1a80b351e..c0f52c443c34 100644
892     --- a/fs/ceph/mds_client.c
893     +++ b/fs/ceph/mds_client.c
894     @@ -1782,13 +1782,18 @@ static int build_dentry_path(struct dentry *dentry,
895     int *pfreepath)
896     {
897     char *path;
898     + struct inode *dir;
899    
900     - if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) {
901     - *pino = ceph_ino(d_inode(dentry->d_parent));
902     + rcu_read_lock();
903     + dir = d_inode_rcu(dentry->d_parent);
904     + if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
905     + *pino = ceph_ino(dir);
906     + rcu_read_unlock();
907     *ppath = dentry->d_name.name;
908     *ppathlen = dentry->d_name.len;
909     return 0;
910     }
911     + rcu_read_unlock();
912     path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
913     if (IS_ERR(path))
914     return PTR_ERR(path);
915     diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
916     index 14db4b712021..99432b59c5cb 100644
917     --- a/fs/f2fs/data.c
918     +++ b/fs/f2fs/data.c
919     @@ -1619,7 +1619,12 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
920     goto fail;
921     }
922     repeat:
923     - page = grab_cache_page_write_begin(mapping, index, flags);
924     + /*
925     + * Do not use grab_cache_page_write_begin() to avoid deadlock due to
926     + * wait_for_stable_page. Will wait that below with our IO control.
927     + */
928     + page = pagecache_get_page(mapping, index,
929     + FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
930     if (!page) {
931     err = -ENOMEM;
932     goto fail;
933     diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
934     index 74a2b444406d..e10f61684ea4 100644
935     --- a/fs/f2fs/segment.c
936     +++ b/fs/f2fs/segment.c
937     @@ -1263,7 +1263,7 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
938     struct curseg_info *curseg = CURSEG_I(sbi, type);
939     const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
940    
941     - if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0, 0))
942     + if (IS_NODESEG(type))
943     return v_ops->get_victim(sbi,
944     &(curseg)->next_segno, BG_GC, type, SSR);
945    
946     diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
947     index 211dc2aed8e1..3069cd46ea66 100644
948     --- a/fs/nfsd/nfs4callback.c
949     +++ b/fs/nfsd/nfs4callback.c
950     @@ -753,6 +753,14 @@ int set_callback_cred(void)
951     return 0;
952     }
953    
954     +void cleanup_callback_cred(void)
955     +{
956     + if (callback_cred) {
957     + put_rpccred(callback_cred);
958     + callback_cred = NULL;
959     + }
960     +}
961     +
962     static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
963     {
964     if (clp->cl_minorversion == 0) {
965     diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
966     index a0dee8ae9f97..d35eb077330f 100644
967     --- a/fs/nfsd/nfs4state.c
968     +++ b/fs/nfsd/nfs4state.c
969     @@ -7012,23 +7012,24 @@ nfs4_state_start(void)
970    
971     ret = set_callback_cred();
972     if (ret)
973     - return -ENOMEM;
974     + return ret;
975     +
976     laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
977     if (laundry_wq == NULL) {
978     ret = -ENOMEM;
979     - goto out_recovery;
980     + goto out_cleanup_cred;
981     }
982     ret = nfsd4_create_callback_queue();
983     if (ret)
984     goto out_free_laundry;
985    
986     set_max_delegations();
987     -
988     return 0;
989    
990     out_free_laundry:
991     destroy_workqueue(laundry_wq);
992     -out_recovery:
993     +out_cleanup_cred:
994     + cleanup_callback_cred();
995     return ret;
996     }
997    
998     @@ -7086,6 +7087,7 @@ nfs4_state_shutdown(void)
999     {
1000     destroy_workqueue(laundry_wq);
1001     nfsd4_destroy_callback_queue();
1002     + cleanup_callback_cred();
1003     }
1004    
1005     static void
1006     diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
1007     index 4516e8b7d776..005c911b34ac 100644
1008     --- a/fs/nfsd/state.h
1009     +++ b/fs/nfsd/state.h
1010     @@ -615,6 +615,7 @@ extern struct nfs4_client_reclaim *nfsd4_find_reclaim_client(const char *recdir,
1011     extern __be32 nfs4_check_open_reclaim(clientid_t *clid,
1012     struct nfsd4_compound_state *cstate, struct nfsd_net *nn);
1013     extern int set_callback_cred(void);
1014     +extern void cleanup_callback_cred(void);
1015     extern void nfsd4_probe_callback(struct nfs4_client *clp);
1016     extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
1017     extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
1018     diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
1019     index 77d1632e905d..8dce4099a6ca 100644
1020     --- a/fs/ocfs2/dlmglue.c
1021     +++ b/fs/ocfs2/dlmglue.c
1022     @@ -532,6 +532,7 @@ void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
1023     init_waitqueue_head(&res->l_event);
1024     INIT_LIST_HEAD(&res->l_blocked_list);
1025     INIT_LIST_HEAD(&res->l_mask_waiters);
1026     + INIT_LIST_HEAD(&res->l_holders);
1027     }
1028    
1029     void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
1030     @@ -749,6 +750,50 @@ void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
1031     res->l_flags = 0UL;
1032     }
1033    
1034     +/*
1035     + * Keep a list of processes who have interest in a lockres.
1036     + * Note: this is now only uesed for check recursive cluster locking.
1037     + */
1038     +static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
1039     + struct ocfs2_lock_holder *oh)
1040     +{
1041     + INIT_LIST_HEAD(&oh->oh_list);
1042     + oh->oh_owner_pid = get_pid(task_pid(current));
1043     +
1044     + spin_lock(&lockres->l_lock);
1045     + list_add_tail(&oh->oh_list, &lockres->l_holders);
1046     + spin_unlock(&lockres->l_lock);
1047     +}
1048     +
1049     +static inline void ocfs2_remove_holder(struct ocfs2_lock_res *lockres,
1050     + struct ocfs2_lock_holder *oh)
1051     +{
1052     + spin_lock(&lockres->l_lock);
1053     + list_del(&oh->oh_list);
1054     + spin_unlock(&lockres->l_lock);
1055     +
1056     + put_pid(oh->oh_owner_pid);
1057     +}
1058     +
1059     +static inline int ocfs2_is_locked_by_me(struct ocfs2_lock_res *lockres)
1060     +{
1061     + struct ocfs2_lock_holder *oh;
1062     + struct pid *pid;
1063     +
1064     + /* look in the list of holders for one with the current task as owner */
1065     + spin_lock(&lockres->l_lock);
1066     + pid = task_pid(current);
1067     + list_for_each_entry(oh, &lockres->l_holders, oh_list) {
1068     + if (oh->oh_owner_pid == pid) {
1069     + spin_unlock(&lockres->l_lock);
1070     + return 1;
1071     + }
1072     + }
1073     + spin_unlock(&lockres->l_lock);
1074     +
1075     + return 0;
1076     +}
1077     +
1078     static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
1079     int level)
1080     {
1081     @@ -2333,8 +2378,9 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
1082     goto getbh;
1083     }
1084    
1085     - if (ocfs2_mount_local(osb))
1086     - goto local;
1087     + if ((arg_flags & OCFS2_META_LOCK_GETBH) ||
1088     + ocfs2_mount_local(osb))
1089     + goto update;
1090    
1091     if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
1092     ocfs2_wait_for_recovery(osb);
1093     @@ -2363,7 +2409,7 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
1094     if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
1095     ocfs2_wait_for_recovery(osb);
1096    
1097     -local:
1098     +update:
1099     /*
1100     * We only see this flag if we're being called from
1101     * ocfs2_read_locked_inode(). It means we're locking an inode
1102     @@ -2497,6 +2543,59 @@ void ocfs2_inode_unlock(struct inode *inode,
1103     ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1104     }
1105    
1106     +/*
1107     + * This _tracker variantes are introduced to deal with the recursive cluster
1108     + * locking issue. The idea is to keep track of a lock holder on the stack of
1109     + * the current process. If there's a lock holder on the stack, we know the
1110     + * task context is already protected by cluster locking. Currently, they're
1111     + * used in some VFS entry routines.
1112     + *
1113     + * return < 0 on error, return == 0 if there's no lock holder on the stack
1114     + * before this call, return == 1 if this call would be a recursive locking.
1115     + */
1116     +int ocfs2_inode_lock_tracker(struct inode *inode,
1117     + struct buffer_head **ret_bh,
1118     + int ex,
1119     + struct ocfs2_lock_holder *oh)
1120     +{
1121     + int status;
1122     + int arg_flags = 0, has_locked;
1123     + struct ocfs2_lock_res *lockres;
1124     +
1125     + lockres = &OCFS2_I(inode)->ip_inode_lockres;
1126     + has_locked = ocfs2_is_locked_by_me(lockres);
1127     + /* Just get buffer head if the cluster lock has been taken */
1128     + if (has_locked)
1129     + arg_flags = OCFS2_META_LOCK_GETBH;
1130     +
1131     + if (likely(!has_locked || ret_bh)) {
1132     + status = ocfs2_inode_lock_full(inode, ret_bh, ex, arg_flags);
1133     + if (status < 0) {
1134     + if (status != -ENOENT)
1135     + mlog_errno(status);
1136     + return status;
1137     + }
1138     + }
1139     + if (!has_locked)
1140     + ocfs2_add_holder(lockres, oh);
1141     +
1142     + return has_locked;
1143     +}
1144     +
1145     +void ocfs2_inode_unlock_tracker(struct inode *inode,
1146     + int ex,
1147     + struct ocfs2_lock_holder *oh,
1148     + int had_lock)
1149     +{
1150     + struct ocfs2_lock_res *lockres;
1151     +
1152     + lockres = &OCFS2_I(inode)->ip_inode_lockres;
1153     + if (!had_lock) {
1154     + ocfs2_remove_holder(lockres, oh);
1155     + ocfs2_inode_unlock(inode, ex);
1156     + }
1157     +}
1158     +
1159     int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
1160     {
1161     struct ocfs2_lock_res *lockres;
1162     diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
1163     index d293a22c32c5..a7fc18ba0dc1 100644
1164     --- a/fs/ocfs2/dlmglue.h
1165     +++ b/fs/ocfs2/dlmglue.h
1166     @@ -70,6 +70,11 @@ struct ocfs2_orphan_scan_lvb {
1167     __be32 lvb_os_seqno;
1168     };
1169    
1170     +struct ocfs2_lock_holder {
1171     + struct list_head oh_list;
1172     + struct pid *oh_owner_pid;
1173     +};
1174     +
1175     /* ocfs2_inode_lock_full() 'arg_flags' flags */
1176     /* don't wait on recovery. */
1177     #define OCFS2_META_LOCK_RECOVERY (0x01)
1178     @@ -77,6 +82,8 @@ struct ocfs2_orphan_scan_lvb {
1179     #define OCFS2_META_LOCK_NOQUEUE (0x02)
1180     /* don't block waiting for the downconvert thread, instead return -EAGAIN */
1181     #define OCFS2_LOCK_NONBLOCK (0x04)
1182     +/* just get back disk inode bh if we've got cluster lock. */
1183     +#define OCFS2_META_LOCK_GETBH (0x08)
1184    
1185     /* Locking subclasses of inode cluster lock */
1186     enum {
1187     @@ -170,4 +177,15 @@ void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug);
1188    
1189     /* To set the locking protocol on module initialization */
1190     void ocfs2_set_locking_protocol(void);
1191     +
1192     +/* The _tracker pair is used to avoid cluster recursive locking */
1193     +int ocfs2_inode_lock_tracker(struct inode *inode,
1194     + struct buffer_head **ret_bh,
1195     + int ex,
1196     + struct ocfs2_lock_holder *oh);
1197     +void ocfs2_inode_unlock_tracker(struct inode *inode,
1198     + int ex,
1199     + struct ocfs2_lock_holder *oh,
1200     + int had_lock);
1201     +
1202     #endif /* DLMGLUE_H */
1203     diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
1204     index e63af7ddfe68..594575e380e8 100644
1205     --- a/fs/ocfs2/ocfs2.h
1206     +++ b/fs/ocfs2/ocfs2.h
1207     @@ -172,6 +172,7 @@ struct ocfs2_lock_res {
1208    
1209     struct list_head l_blocked_list;
1210     struct list_head l_mask_waiters;
1211     + struct list_head l_holders;
1212    
1213     unsigned long l_flags;
1214     char l_name[OCFS2_LOCK_ID_MAX_LEN];
1215     diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h
1216     index 5062fb5751e1..ed5721148768 100644
1217     --- a/include/uapi/linux/mroute6.h
1218     +++ b/include/uapi/linux/mroute6.h
1219     @@ -4,6 +4,7 @@
1220     #include <linux/kernel.h>
1221     #include <linux/types.h>
1222     #include <linux/sockios.h>
1223     +#include <linux/in6.h> /* For struct sockaddr_in6. */
1224    
1225     /*
1226     * Based on the MROUTING 3.5 defines primarily to keep
1227     diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
1228     index 0f9265cb2a96..7af20a136429 100644
1229     --- a/include/uapi/linux/rds.h
1230     +++ b/include/uapi/linux/rds.h
1231     @@ -35,6 +35,7 @@
1232     #define _LINUX_RDS_H
1233    
1234     #include <linux/types.h>
1235     +#include <linux/socket.h> /* For __kernel_sockaddr_storage. */
1236    
1237     #define RDS_IB_ABI_VERSION 0x301
1238    
1239     @@ -223,7 +224,7 @@ struct rds_get_mr_args {
1240     };
1241    
1242     struct rds_get_mr_for_dest_args {
1243     - struct sockaddr_storage dest_addr;
1244     + struct __kernel_sockaddr_storage dest_addr;
1245     struct rds_iovec vec;
1246     uint64_t cookie_addr;
1247     uint64_t flags;
1248     diff --git a/init/initramfs.c b/init/initramfs.c
1249     index b32ad7d97ac9..981f286c1d16 100644
1250     --- a/init/initramfs.c
1251     +++ b/init/initramfs.c
1252     @@ -18,6 +18,7 @@
1253     #include <linux/dirent.h>
1254     #include <linux/syscalls.h>
1255     #include <linux/utime.h>
1256     +#include <linux/file.h>
1257    
1258     static ssize_t __init xwrite(int fd, const char *p, size_t count)
1259     {
1260     @@ -647,6 +648,7 @@ static int __init populate_rootfs(void)
1261     printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
1262     free_initrd();
1263     #endif
1264     + flush_delayed_fput();
1265     /*
1266     * Try loading default modules from initramfs. This gives
1267     * us a chance to load before device_initcalls.
1268     diff --git a/init/main.c b/init/main.c
1269     index ae3996ae9bac..25bac88bc66e 100644
1270     --- a/init/main.c
1271     +++ b/init/main.c
1272     @@ -70,7 +70,6 @@
1273     #include <linux/shmem_fs.h>
1274     #include <linux/slab.h>
1275     #include <linux/perf_event.h>
1276     -#include <linux/file.h>
1277     #include <linux/ptrace.h>
1278     #include <linux/blkdev.h>
1279     #include <linux/elevator.h>
1280     @@ -947,8 +946,6 @@ static int __ref kernel_init(void *unused)
1281     system_state = SYSTEM_RUNNING;
1282     numa_default_policy();
1283    
1284     - flush_delayed_fput();
1285     -
1286     rcu_end_inkernel_boot();
1287    
1288     if (ramdisk_execute_command) {
1289     diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
1290     index 4d7ffc0a0d00..6599c7f3071d 100644
1291     --- a/kernel/locking/lockdep.c
1292     +++ b/kernel/locking/lockdep.c
1293     @@ -3260,10 +3260,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
1294     if (depth) {
1295     hlock = curr->held_locks + depth - 1;
1296     if (hlock->class_idx == class_idx && nest_lock) {
1297     - if (hlock->references)
1298     + if (hlock->references) {
1299     + /*
1300     + * Check: unsigned int references:12, overflow.
1301     + */
1302     + if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
1303     + return 0;
1304     +
1305     hlock->references++;
1306     - else
1307     + } else {
1308     hlock->references = 2;
1309     + }
1310    
1311     return 1;
1312     }
1313     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
1314     index d7dda36fbc7b..02e7ad860b52 100644
1315     --- a/kernel/sched/core.c
1316     +++ b/kernel/sched/core.c
1317     @@ -1141,6 +1141,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
1318     int ret = 0;
1319    
1320     rq = task_rq_lock(p, &rf);
1321     + update_rq_clock(rq);
1322    
1323     if (p->flags & PF_KTHREAD) {
1324     /*
1325     diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
1326     index bb5ec425dfe0..eeb7f2f5698d 100644
1327     --- a/kernel/time/hrtimer.c
1328     +++ b/kernel/time/hrtimer.c
1329     @@ -94,17 +94,15 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
1330     };
1331    
1332     static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
1333     + /* Make sure we catch unsupported clockids */
1334     + [0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,
1335     +
1336     [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
1337     [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
1338     [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
1339     [CLOCK_TAI] = HRTIMER_BASE_TAI,
1340     };
1341    
1342     -static inline int hrtimer_clockid_to_base(clockid_t clock_id)
1343     -{
1344     - return hrtimer_clock_to_base_table[clock_id];
1345     -}
1346     -
1347     /*
1348     * Functions and macros which are different for UP/SMP systems are kept in a
1349     * single place
1350     @@ -1112,6 +1110,18 @@ u64 hrtimer_get_next_event(void)
1351     }
1352     #endif
1353    
1354     +static inline int hrtimer_clockid_to_base(clockid_t clock_id)
1355     +{
1356     + if (likely(clock_id < MAX_CLOCKS)) {
1357     + int base = hrtimer_clock_to_base_table[clock_id];
1358     +
1359     + if (likely(base != HRTIMER_MAX_CLOCK_BASES))
1360     + return base;
1361     + }
1362     + WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
1363     + return HRTIMER_BASE_MONOTONIC;
1364     +}
1365     +
1366     static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1367     enum hrtimer_mode mode)
1368     {
1369     diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
1370     index ede137345a99..c9f715b2917f 100644
1371     --- a/mm/memory_hotplug.c
1372     +++ b/mm/memory_hotplug.c
1373     @@ -179,7 +179,7 @@ static void release_memory_resource(struct resource *res)
1374     void get_page_bootmem(unsigned long info, struct page *page,
1375     unsigned long type)
1376     {
1377     - page->lru.next = (struct list_head *) type;
1378     + page->freelist = (void *)type;
1379     SetPagePrivate(page);
1380     set_page_private(page, info);
1381     page_ref_inc(page);
1382     @@ -189,11 +189,12 @@ void put_page_bootmem(struct page *page)
1383     {
1384     unsigned long type;
1385    
1386     - type = (unsigned long) page->lru.next;
1387     + type = (unsigned long) page->freelist;
1388     BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
1389     type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
1390    
1391     if (page_ref_dec_return(page) == 1) {
1392     + page->freelist = NULL;
1393     ClearPagePrivate(page);
1394     set_page_private(page, 0);
1395     INIT_LIST_HEAD(&page->lru);
1396     diff --git a/mm/slab_common.c b/mm/slab_common.c
1397     index 5d2f24fbafc5..622f6b6ae844 100644
1398     --- a/mm/slab_common.c
1399     +++ b/mm/slab_common.c
1400     @@ -255,7 +255,7 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
1401     {
1402     struct kmem_cache *s;
1403    
1404     - if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
1405     + if (slab_nomerge)
1406     return NULL;
1407    
1408     if (ctor)
1409     @@ -266,6 +266,9 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
1410     size = ALIGN(size, align);
1411     flags = kmem_cache_flags(size, flags, name, NULL);
1412    
1413     + if (flags & SLAB_NEVER_MERGE)
1414     + return NULL;
1415     +
1416     list_for_each_entry_reverse(s, &slab_caches, list) {
1417     if (slab_unmergeable(s))
1418     continue;
1419     diff --git a/mm/sparse.c b/mm/sparse.c
1420     index 1e168bf2779a..8c4c82e358e6 100644
1421     --- a/mm/sparse.c
1422     +++ b/mm/sparse.c
1423     @@ -662,7 +662,7 @@ static void free_map_bootmem(struct page *memmap)
1424     >> PAGE_SHIFT;
1425    
1426     for (i = 0; i < nr_pages; i++, page++) {
1427     - magic = (unsigned long) page->lru.next;
1428     + magic = (unsigned long) page->freelist;
1429    
1430     BUG_ON(magic == NODE_INFO);
1431    
1432     diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
1433     index b2c823ffad74..348700b424ea 100644
1434     --- a/net/mac80211/sta_info.c
1435     +++ b/net/mac80211/sta_info.c
1436     @@ -688,7 +688,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
1437     }
1438    
1439     /* No need to do anything if the driver does all */
1440     - if (ieee80211_hw_check(&local->hw, AP_LINK_PS))
1441     + if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim)
1442     return;
1443    
1444     if (sta->dead)
1445     diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
1446     index f8dbacf66795..0d6c72d6b9ba 100644
1447     --- a/net/netfilter/nf_conntrack_expect.c
1448     +++ b/net/netfilter/nf_conntrack_expect.c
1449     @@ -411,7 +411,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
1450     struct net *net = nf_ct_exp_net(expect);
1451     struct hlist_node *next;
1452     unsigned int h;
1453     - int ret = 1;
1454     + int ret = 0;
1455    
1456     if (!master_help) {
1457     ret = -ESHUTDOWN;
1458     @@ -461,7 +461,7 @@ int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
1459    
1460     spin_lock_bh(&nf_conntrack_expect_lock);
1461     ret = __nf_ct_expect_check(expect);
1462     - if (ret <= 0)
1463     + if (ret < 0)
1464     goto out;
1465    
1466     ret = nf_ct_expect_insert(expect);
1467     diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
1468     index 775c67818bf1..bd650222e711 100644
1469     --- a/sound/pci/hda/patch_hdmi.c
1470     +++ b/sound/pci/hda/patch_hdmi.c
1471     @@ -3685,6 +3685,7 @@ HDA_CODEC_ENTRY(0x80862808, "Broadwell HDMI", patch_i915_hsw_hdmi),
1472     HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI", patch_i915_hsw_hdmi),
1473     HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI", patch_i915_hsw_hdmi),
1474     HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI", patch_i915_hsw_hdmi),
1475     +HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_hsw_hdmi),
1476     HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
1477     HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi),
1478     HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi),
1479     diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
1480     index 05cf809cf9e1..d7013bde6f45 100644
1481     --- a/sound/soc/mediatek/Kconfig
1482     +++ b/sound/soc/mediatek/Kconfig
1483     @@ -13,7 +13,7 @@ config SND_SOC_MT2701
1484    
1485     config SND_SOC_MT2701_CS42448
1486     tristate "ASoc Audio driver for MT2701 with CS42448 codec"
1487     - depends on SND_SOC_MT2701
1488     + depends on SND_SOC_MT2701 && I2C
1489     select SND_SOC_CS42XX8_I2C
1490     select SND_SOC_BT_SCO
1491     help