Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.13/0110-4.13.11-all-fixes.patch



Revision 3019
Mon Nov 6 09:39:50 2017 UTC by niro
File size: 53411 bytes
-linux-4.13.11
1 niro 3019 diff --git a/Makefile b/Makefile
2     index 0e30a0d282e8..8280953c8a45 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 13
8     -SUBLEVEL = 10
9     +SUBLEVEL = 11
10     EXTRAVERSION =
11     NAME = Fearless Coyote
12    
13     diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
14     index 53766e2bc029..58f6fbc7df39 100644
15     --- a/arch/powerpc/kvm/book3s_64_vio.c
16     +++ b/arch/powerpc/kvm/book3s_64_vio.c
17     @@ -479,28 +479,30 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
18     return ret;
19    
20     dir = iommu_tce_direction(tce);
21     +
22     + idx = srcu_read_lock(&vcpu->kvm->srcu);
23     +
24     if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
25     - tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
26     - return H_PARAMETER;
27     + tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
28     + ret = H_PARAMETER;
29     + goto unlock_exit;
30     + }
31    
32     entry = ioba >> stt->page_shift;
33    
34     list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
35     - if (dir == DMA_NONE) {
36     + if (dir == DMA_NONE)
37     ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
38     stit->tbl, entry);
39     - } else {
40     - idx = srcu_read_lock(&vcpu->kvm->srcu);
41     + else
42     ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
43     entry, ua, dir);
44     - srcu_read_unlock(&vcpu->kvm->srcu, idx);
45     - }
46    
47     if (ret == H_SUCCESS)
48     continue;
49    
50     if (ret == H_TOO_HARD)
51     - return ret;
52     + goto unlock_exit;
53    
54     WARN_ON_ONCE(1);
55     kvmppc_clear_tce(stit->tbl, entry);
56     @@ -508,7 +510,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
57    
58     kvmppc_tce_put(stt, entry, tce);
59    
60     - return H_SUCCESS;
61     +unlock_exit:
62     + srcu_read_unlock(&vcpu->kvm->srcu, idx);
63     +
64     + return ret;
65     }
66     EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
67    
68     diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
69     index dc58c2a560f9..e92cb5fd28f2 100644
70     --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
71     +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
72     @@ -1296,6 +1296,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
73     bne 3f
74     BEGIN_FTR_SECTION
75     PPC_MSGSYNC
76     + lwsync
77     END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
78     lbz r0, HSTATE_HOST_IPI(r13)
79     cmpwi r0, 0
80     @@ -2767,6 +2768,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
81     PPC_MSGCLR(6)
82     /* see if it's a host IPI */
83     li r3, 1
84     +BEGIN_FTR_SECTION
85     + PPC_MSGSYNC
86     + lwsync
87     +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
88     lbz r0, HSTATE_HOST_IPI(r13)
89     cmpwi r0, 0
90     bnelr
91     diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
92     index 1a75c0b5f4ca..86468190d4b2 100644
93     --- a/arch/powerpc/kvm/powerpc.c
94     +++ b/arch/powerpc/kvm/powerpc.c
95     @@ -639,8 +639,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
96     break;
97     #endif
98     case KVM_CAP_PPC_HTM:
99     - r = cpu_has_feature(CPU_FTR_TM_COMP) &&
100     - is_kvmppc_hv_enabled(kvm);
101     + r = cpu_has_feature(CPU_FTR_TM_COMP) && hv_enabled;
102     break;
103     default:
104     r = 0;
105     diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
106     index 6595462b1fc8..6e0c9dee724f 100644
107     --- a/arch/powerpc/sysdev/xive/common.c
108     +++ b/arch/powerpc/sysdev/xive/common.c
109     @@ -447,7 +447,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
110     int cpu, first, num, i;
111    
112     /* Pick up a starting point CPU in the mask based on fuzz */
113     - num = cpumask_weight(mask);
114     + num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
115     first = fuzz % num;
116    
117     /* Locate it */
118     diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
119     index 21900e1cee9c..d185aa3965bf 100644
120     --- a/arch/s390/kernel/entry.S
121     +++ b/arch/s390/kernel/entry.S
122     @@ -521,12 +521,15 @@ ENTRY(pgm_check_handler)
123     tmhh %r8,0x0001 # test problem state bit
124     jnz 2f # -> fault in user space
125     #if IS_ENABLED(CONFIG_KVM)
126     - # cleanup critical section for sie64a
127     + # cleanup critical section for program checks in sie64a
128     lgr %r14,%r9
129     slg %r14,BASED(.Lsie_critical_start)
130     clg %r14,BASED(.Lsie_critical_length)
131     jhe 0f
132     - brasl %r14,.Lcleanup_sie
133     + lg %r14,__SF_EMPTY(%r15) # get control block pointer
134     + ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
135     + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
136     + larl %r9,sie_exit # skip forward to sie_exit
137     #endif
138     0: tmhh %r8,0x4000 # PER bit set in old PSW ?
139     jnz 1f # -> enabled, can't be a double fault
140     diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
141     index 458da8509b75..6db28f17ff28 100644
142     --- a/arch/x86/kernel/amd_nb.c
143     +++ b/arch/x86/kernel/amd_nb.c
144     @@ -27,6 +27,8 @@ static const struct pci_device_id amd_root_ids[] = {
145     {}
146     };
147    
148     +#define PCI_DEVICE_ID_AMD_CNB17H_F4 0x1704
149     +
150     const struct pci_device_id amd_nb_misc_ids[] = {
151     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
152     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
153     @@ -37,6 +39,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
154     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
155     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
156     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
157     + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
158     {}
159     };
160     EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
161     @@ -48,6 +51,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
162     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
163     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
164     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
165     + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
166     {}
167     };
168    
169     @@ -402,11 +406,48 @@ void amd_flush_garts(void)
170     }
171     EXPORT_SYMBOL_GPL(amd_flush_garts);
172    
173     +static void __fix_erratum_688(void *info)
174     +{
175     +#define MSR_AMD64_IC_CFG 0xC0011021
176     +
177     + msr_set_bit(MSR_AMD64_IC_CFG, 3);
178     + msr_set_bit(MSR_AMD64_IC_CFG, 14);
179     +}
180     +
181     +/* Apply erratum 688 fix so machines without a BIOS fix work. */
182     +static __init void fix_erratum_688(void)
183     +{
184     + struct pci_dev *F4;
185     + u32 val;
186     +
187     + if (boot_cpu_data.x86 != 0x14)
188     + return;
189     +
190     + if (!amd_northbridges.num)
191     + return;
192     +
193     + F4 = node_to_amd_nb(0)->link;
194     + if (!F4)
195     + return;
196     +
197     + if (pci_read_config_dword(F4, 0x164, &val))
198     + return;
199     +
200     + if (val & BIT(2))
201     + return;
202     +
203     + on_each_cpu(__fix_erratum_688, NULL, 0);
204     +
205     + pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
206     +}
207     +
208     static __init int init_amd_nbs(void)
209     {
210     amd_cache_northbridges();
211     amd_cache_gart();
212    
213     + fix_erratum_688();
214     +
215     return 0;
216     }
217    
218     diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
219     index da7043893249..ebb60db0e499 100644
220     --- a/drivers/block/nbd.c
221     +++ b/drivers/block/nbd.c
222     @@ -386,6 +386,15 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
223     return result;
224     }
225    
226     +/*
227     + * Different settings for sk->sk_sndtimeo can result in different return values
228     + * if there is a signal pending when we enter sendmsg, because reasons?
229     + */
230     +static inline int was_interrupted(int result)
231     +{
232     + return result == -ERESTARTSYS || result == -EINTR;
233     +}
234     +
235     /* always call with the tx_lock held */
236     static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
237     {
238     @@ -458,7 +467,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
239     result = sock_xmit(nbd, index, 1, &from,
240     (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
241     if (result <= 0) {
242     - if (result == -ERESTARTSYS) {
243     + if (was_interrupted(result)) {
244     /* If we havne't sent anything we can just return BUSY,
245     * however if we have sent something we need to make
246     * sure we only allow this req to be sent until we are
247     @@ -502,7 +511,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
248     }
249     result = sock_xmit(nbd, index, 1, &from, flags, &sent);
250     if (result <= 0) {
251     - if (result == -ERESTARTSYS) {
252     + if (was_interrupted(result)) {
253     /* We've already sent the header, we
254     * have no choice but to set pending and
255     * return BUSY.
256     diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
257     index 1f01020ce3a9..6cdf43a8bf6a 100644
258     --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
259     +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
260     @@ -830,7 +830,7 @@ uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
261     {
262     uint32_t reference_clock, tmp;
263     struct cgs_display_info info = {0};
264     - struct cgs_mode_info mode_info;
265     + struct cgs_mode_info mode_info = {0};
266    
267     info.mode_info = &mode_info;
268    
269     @@ -3951,10 +3951,9 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
270     uint32_t ref_clock;
271     uint32_t refresh_rate = 0;
272     struct cgs_display_info info = {0};
273     - struct cgs_mode_info mode_info;
274     + struct cgs_mode_info mode_info = {0};
275    
276     info.mode_info = &mode_info;
277     -
278     cgs_get_active_displays_info(hwmgr->device, &info);
279     num_active_displays = info.display_count;
280    
281     @@ -3970,6 +3969,7 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
282     frame_time_in_us = 1000000 / refresh_rate;
283    
284     pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
285     +
286     data->frame_time_x2 = frame_time_in_us * 2 / 100;
287    
288     display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
289     diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
290     index f33d90226704..1c3f92533778 100644
291     --- a/drivers/gpu/drm/i915/i915_perf.c
292     +++ b/drivers/gpu/drm/i915/i915_perf.c
293     @@ -2480,6 +2480,10 @@ static const struct file_operations fops = {
294     .poll = i915_perf_poll,
295     .read = i915_perf_read,
296     .unlocked_ioctl = i915_perf_ioctl,
297     + /* Our ioctl have no arguments, so it's safe to use the same function
298     + * to handle 32bits compatibility.
299     + */
300     + .compat_ioctl = i915_perf_ioctl,
301     };
302    
303    
304     diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
305     index cfbc8ba4c96c..a6b762271a40 100644
306     --- a/drivers/input/mouse/elan_i2c_core.c
307     +++ b/drivers/input/mouse/elan_i2c_core.c
308     @@ -1253,6 +1253,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
309     { "ELAN0605", 0 },
310     { "ELAN0609", 0 },
311     { "ELAN060B", 0 },
312     + { "ELAN0611", 0 },
313     { "ELAN1000", 0 },
314     { }
315     };
316     diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
317     index b796e891e2ee..4b8b9d7aa75e 100644
318     --- a/drivers/input/tablet/gtco.c
319     +++ b/drivers/input/tablet/gtco.c
320     @@ -230,13 +230,17 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
321    
322     /* Walk this report and pull out the info we need */
323     while (i < length) {
324     - prefix = report[i];
325     -
326     - /* Skip over prefix */
327     - i++;
328     + prefix = report[i++];
329    
330     /* Determine data size and save the data in the proper variable */
331     - size = PREF_SIZE(prefix);
332     + size = (1U << PREF_SIZE(prefix)) >> 1;
333     + if (i + size > length) {
334     + dev_err(ddev,
335     + "Not enough data (need %d, have %d)\n",
336     + i + size, length);
337     + break;
338     + }
339     +
340     switch (size) {
341     case 1:
342     data = report[i];
343     @@ -244,8 +248,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
344     case 2:
345     data16 = get_unaligned_le16(&report[i]);
346     break;
347     - case 3:
348     - size = 4;
349     + case 4:
350     data32 = get_unaligned_le32(&report[i]);
351     break;
352     }
353     diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
354     index 68ef0a4cd821..b0c80859f746 100644
355     --- a/drivers/net/can/sun4i_can.c
356     +++ b/drivers/net/can/sun4i_can.c
357     @@ -342,7 +342,7 @@ static int sun4i_can_start(struct net_device *dev)
358    
359     /* enter the selected mode */
360     mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR);
361     - if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK)
362     + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
363     mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE;
364     else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
365     mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE;
366     @@ -811,7 +811,6 @@ static int sun4ican_probe(struct platform_device *pdev)
367     priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
368     CAN_CTRLMODE_LISTENONLY |
369     CAN_CTRLMODE_LOOPBACK |
370     - CAN_CTRLMODE_PRESUME_ACK |
371     CAN_CTRLMODE_3_SAMPLES;
372     priv->base = addr;
373     priv->clk = clk;
374     diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
375     index 18cc529fb807..9b18d96ef526 100644
376     --- a/drivers/net/can/usb/kvaser_usb.c
377     +++ b/drivers/net/can/usb/kvaser_usb.c
378     @@ -137,6 +137,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
379     #define CMD_RESET_ERROR_COUNTER 49
380     #define CMD_TX_ACKNOWLEDGE 50
381     #define CMD_CAN_ERROR_EVENT 51
382     +#define CMD_FLUSH_QUEUE_REPLY 68
383    
384     #define CMD_LEAF_USB_THROTTLE 77
385     #define CMD_LEAF_LOG_MESSAGE 106
386     @@ -1301,6 +1302,11 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
387     goto warn;
388     break;
389    
390     + case CMD_FLUSH_QUEUE_REPLY:
391     + if (dev->family != KVASER_LEAF)
392     + goto warn;
393     + break;
394     +
395     default:
396     warn: dev_warn(dev->udev->dev.parent,
397     "Unhandled message (%d)\n", msg->id);
398     @@ -1609,7 +1615,8 @@ static int kvaser_usb_close(struct net_device *netdev)
399     if (err)
400     netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
401    
402     - if (kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel))
403     + err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
404     + if (err)
405     netdev_warn(netdev, "Cannot reset card, error %d\n", err);
406    
407     err = kvaser_usb_stop_chip(priv);
408     diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
409     index 5c2a08ef08ba..0cc96e9ae6ff 100644
410     --- a/drivers/nvme/host/fc.c
411     +++ b/drivers/nvme/host/fc.c
412     @@ -2477,10 +2477,10 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
413     nvme_fc_abort_aen_ops(ctrl);
414    
415     /* wait for all io that had to be aborted */
416     - spin_lock_irqsave(&ctrl->lock, flags);
417     + spin_lock_irq(&ctrl->lock);
418     wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
419     ctrl->flags &= ~FCCTRL_TERMIO;
420     - spin_unlock_irqrestore(&ctrl->lock, flags);
421     + spin_unlock_irq(&ctrl->lock);
422    
423     nvme_fc_term_aen_ops(ctrl);
424    
425     @@ -2693,6 +2693,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
426     ctrl->rport = rport;
427     ctrl->dev = lport->dev;
428     ctrl->cnum = idx;
429     + init_waitqueue_head(&ctrl->ioabort_wait);
430    
431     get_device(ctrl->dev);
432     kref_init(&ctrl->ref);
433     diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
434     index 60f431831582..ca29d49fdadd 100644
435     --- a/drivers/regulator/fan53555.c
436     +++ b/drivers/regulator/fan53555.c
437     @@ -476,7 +476,10 @@ static const struct i2c_device_id fan53555_id[] = {
438     .name = "fan53555",
439     .driver_data = FAN53555_VENDOR_FAIRCHILD
440     }, {
441     - .name = "syr82x",
442     + .name = "syr827",
443     + .driver_data = FAN53555_VENDOR_SILERGY
444     + }, {
445     + .name = "syr828",
446     .driver_data = FAN53555_VENDOR_SILERGY
447     },
448     { },
449     diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
450     index bcc8f3dfd4c4..b3f9243cfed5 100644
451     --- a/drivers/s390/scsi/zfcp_aux.c
452     +++ b/drivers/s390/scsi/zfcp_aux.c
453     @@ -358,6 +358,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
454    
455     adapter->next_port_scan = jiffies;
456    
457     + adapter->erp_action.adapter = adapter;
458     +
459     if (zfcp_qdio_setup(adapter))
460     goto failed;
461    
462     @@ -514,6 +516,9 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
463     port->dev.groups = zfcp_port_attr_groups;
464     port->dev.release = zfcp_port_release;
465    
466     + port->erp_action.adapter = adapter;
467     + port->erp_action.port = port;
468     +
469     if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
470     kfree(port);
471     goto err_out;
472     diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
473     index 7ccfce559034..3b23d6754598 100644
474     --- a/drivers/s390/scsi/zfcp_erp.c
475     +++ b/drivers/s390/scsi/zfcp_erp.c
476     @@ -193,9 +193,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
477     atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
478     &zfcp_sdev->status);
479     erp_action = &zfcp_sdev->erp_action;
480     - memset(erp_action, 0, sizeof(struct zfcp_erp_action));
481     - erp_action->port = port;
482     - erp_action->sdev = sdev;
483     + WARN_ON_ONCE(erp_action->port != port);
484     + WARN_ON_ONCE(erp_action->sdev != sdev);
485     if (!(atomic_read(&zfcp_sdev->status) &
486     ZFCP_STATUS_COMMON_RUNNING))
487     act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
488     @@ -208,8 +207,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
489     zfcp_erp_action_dismiss_port(port);
490     atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
491     erp_action = &port->erp_action;
492     - memset(erp_action, 0, sizeof(struct zfcp_erp_action));
493     - erp_action->port = port;
494     + WARN_ON_ONCE(erp_action->port != port);
495     + WARN_ON_ONCE(erp_action->sdev != NULL);
496     if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
497     act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
498     break;
499     @@ -219,7 +218,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
500     zfcp_erp_action_dismiss_adapter(adapter);
501     atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
502     erp_action = &adapter->erp_action;
503     - memset(erp_action, 0, sizeof(struct zfcp_erp_action));
504     + WARN_ON_ONCE(erp_action->port != NULL);
505     + WARN_ON_ONCE(erp_action->sdev != NULL);
506     if (!(atomic_read(&adapter->status) &
507     ZFCP_STATUS_COMMON_RUNNING))
508     act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
509     @@ -229,7 +229,11 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
510     return NULL;
511     }
512    
513     - erp_action->adapter = adapter;
514     + WARN_ON_ONCE(erp_action->adapter != adapter);
515     + memset(&erp_action->list, 0, sizeof(erp_action->list));
516     + memset(&erp_action->timer, 0, sizeof(erp_action->timer));
517     + erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
518     + erp_action->fsf_req_id = 0;
519     erp_action->action = need;
520     erp_action->status = act_status;
521    
522     diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
523     index a1eeeaaa0fca..1c11b8402b41 100644
524     --- a/drivers/s390/scsi/zfcp_scsi.c
525     +++ b/drivers/s390/scsi/zfcp_scsi.c
526     @@ -115,10 +115,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
527     struct zfcp_unit *unit;
528     int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
529    
530     + zfcp_sdev->erp_action.adapter = adapter;
531     + zfcp_sdev->erp_action.sdev = sdev;
532     +
533     port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
534     if (!port)
535     return -ENXIO;
536    
537     + zfcp_sdev->erp_action.port = port;
538     +
539     unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
540     if (unit)
541     put_device(&unit->dev);
542     diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
543     index 9ee025b1d0e0..289b6fe306fd 100644
544     --- a/drivers/scsi/aacraid/comminit.c
545     +++ b/drivers/scsi/aacraid/comminit.c
546     @@ -302,9 +302,11 @@ int aac_send_shutdown(struct aac_dev * dev)
547     return -ENOMEM;
548     aac_fib_init(fibctx);
549    
550     - mutex_lock(&dev->ioctl_mutex);
551     - dev->adapter_shutdown = 1;
552     - mutex_unlock(&dev->ioctl_mutex);
553     + if (!dev->adapter_shutdown) {
554     + mutex_lock(&dev->ioctl_mutex);
555     + dev->adapter_shutdown = 1;
556     + mutex_unlock(&dev->ioctl_mutex);
557     + }
558    
559     cmd = (struct aac_close *) fib_data(fibctx);
560     cmd->command = cpu_to_le32(VM_CloseAll);
561     diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
562     index 0f277df73af0..231bd3345f44 100644
563     --- a/drivers/scsi/aacraid/linit.c
564     +++ b/drivers/scsi/aacraid/linit.c
565     @@ -1401,8 +1401,9 @@ static void __aac_shutdown(struct aac_dev * aac)
566     {
567     int i;
568    
569     + mutex_lock(&aac->ioctl_mutex);
570     aac->adapter_shutdown = 1;
571     - aac_send_shutdown(aac);
572     + mutex_unlock(&aac->ioctl_mutex);
573    
574     if (aac->aif_thread) {
575     int i;
576     @@ -1415,7 +1416,11 @@ static void __aac_shutdown(struct aac_dev * aac)
577     }
578     kthread_stop(aac->thread);
579     }
580     +
581     + aac_send_shutdown(aac);
582     +
583     aac_adapter_disable_int(aac);
584     +
585     if (aac_is_src(aac)) {
586     if (aac->max_msix > 1) {
587     for (i = 0; i < aac->max_msix; i++) {
588     diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
589     index 5da006d81900..2bf6d4022af0 100644
590     --- a/drivers/scsi/qla2xxx/qla_os.c
591     +++ b/drivers/scsi/qla2xxx/qla_os.c
592     @@ -3051,6 +3051,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
593     host->max_cmd_len, host->max_channel, host->max_lun,
594     host->transportt, sht->vendor_id);
595    
596     + INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
597     +
598     /* Set up the irqs */
599     ret = qla2x00_request_irqs(ha, rsp);
600     if (ret)
601     @@ -3165,8 +3167,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
602     host->can_queue, base_vha->req,
603     base_vha->mgmt_svr_loop_id, host->sg_tablesize);
604    
605     - INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
606     -
607     if (ha->mqenable) {
608     bool mq = false;
609     bool startit = false;
610     diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
611     index 1a9de8419997..d99b10c73c55 100644
612     --- a/drivers/scsi/sg.c
613     +++ b/drivers/scsi/sg.c
614     @@ -837,7 +837,7 @@ sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
615    
616     val = 0;
617     list_for_each_entry(srp, &sfp->rq_list, entry) {
618     - if (val > SG_MAX_QUEUE)
619     + if (val >= SG_MAX_QUEUE)
620     break;
621     rinfo[val].req_state = srp->done + 1;
622     rinfo[val].problem =
623     diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
624     index 6c7d7a460689..53a04267eb38 100644
625     --- a/drivers/spi/spi-armada-3700.c
626     +++ b/drivers/spi/spi-armada-3700.c
627     @@ -161,7 +161,7 @@ static void a3700_spi_deactivate_cs(struct a3700_spi *a3700_spi,
628     }
629    
630     static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
631     - unsigned int pin_mode)
632     + unsigned int pin_mode, bool receiving)
633     {
634     u32 val;
635    
636     @@ -177,6 +177,9 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
637     break;
638     case SPI_NBITS_QUAD:
639     val |= A3700_SPI_DATA_PIN1;
640     + /* RX during address reception uses 4-pin */
641     + if (receiving)
642     + val |= A3700_SPI_ADDR_PIN;
643     break;
644     default:
645     dev_err(&a3700_spi->master->dev, "wrong pin mode %u", pin_mode);
646     @@ -392,7 +395,8 @@ static bool a3700_spi_wait_completion(struct spi_device *spi)
647    
648     spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
649    
650     - return true;
651     + /* Timeout was reached */
652     + return false;
653     }
654    
655     static bool a3700_spi_transfer_wait(struct spi_device *spi,
656     @@ -653,7 +657,7 @@ static int a3700_spi_transfer_one(struct spi_master *master,
657     else if (xfer->rx_buf)
658     nbits = xfer->rx_nbits;
659    
660     - a3700_spi_pin_mode_set(a3700_spi, nbits);
661     + a3700_spi_pin_mode_set(a3700_spi, nbits, xfer->rx_buf ? true : false);
662    
663     if (xfer->rx_buf) {
664     /* Set read data length */
665     diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
666     index b19722ba908c..31eb882ed62d 100644
667     --- a/drivers/spi/spi-bcm-qspi.c
668     +++ b/drivers/spi/spi-bcm-qspi.c
669     @@ -1278,7 +1278,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
670     goto qspi_probe_err;
671     }
672     } else {
673     - goto qspi_probe_err;
674     + goto qspi_resource_err;
675     }
676    
677     res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
678     @@ -1300,7 +1300,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
679     qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
680     if (IS_ERR(qspi->base[CHIP_SELECT])) {
681     ret = PTR_ERR(qspi->base[CHIP_SELECT]);
682     - goto qspi_probe_err;
683     + goto qspi_resource_err;
684     }
685     }
686    
687     @@ -1308,7 +1308,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
688     GFP_KERNEL);
689     if (!qspi->dev_ids) {
690     ret = -ENOMEM;
691     - goto qspi_probe_err;
692     + goto qspi_resource_err;
693     }
694    
695     for (val = 0; val < num_irqs; val++) {
696     @@ -1397,8 +1397,9 @@ int bcm_qspi_probe(struct platform_device *pdev,
697     bcm_qspi_hw_uninit(qspi);
698     clk_disable_unprepare(qspi->clk);
699     qspi_probe_err:
700     - spi_master_put(master);
701     kfree(qspi->dev_ids);
702     +qspi_resource_err:
703     + spi_master_put(master);
704     return ret;
705     }
706     /* probe function to be called by SoC specific platform driver probe */
707     diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
708     index 82360594fa8e..57efbd3b053b 100644
709     --- a/drivers/xen/gntdev.c
710     +++ b/drivers/xen/gntdev.c
711     @@ -1024,6 +1024,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
712     mutex_unlock(&priv->lock);
713    
714     if (use_ptemod) {
715     + map->pages_vm_start = vma->vm_start;
716     err = apply_to_page_range(vma->vm_mm, vma->vm_start,
717     vma->vm_end - vma->vm_start,
718     find_grant_ptes, map);
719     @@ -1061,7 +1062,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
720     set_grant_ptes_as_special, NULL);
721     }
722     #endif
723     - map->pages_vm_start = vma->vm_start;
724     }
725    
726     return 0;
727     diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
728     index e89136ab851e..b437fccd4e62 100644
729     --- a/drivers/xen/xen-balloon.c
730     +++ b/drivers/xen/xen-balloon.c
731     @@ -57,7 +57,7 @@ static int register_balloon(struct device *dev);
732     static void watch_target(struct xenbus_watch *watch,
733     const char *path, const char *token)
734     {
735     - unsigned long long new_target;
736     + unsigned long long new_target, static_max;
737     int err;
738     static bool watch_fired;
739     static long target_diff;
740     @@ -72,13 +72,20 @@ static void watch_target(struct xenbus_watch *watch,
741     * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
742     */
743     new_target >>= PAGE_SHIFT - 10;
744     - if (watch_fired) {
745     - balloon_set_new_target(new_target - target_diff);
746     - return;
747     +
748     + if (!watch_fired) {
749     + watch_fired = true;
750     + err = xenbus_scanf(XBT_NIL, "memory", "static-max", "%llu",
751     + &static_max);
752     + if (err != 1)
753     + static_max = new_target;
754     + else
755     + static_max >>= PAGE_SHIFT - 10;
756     + target_diff = xen_pv_domain() ? 0
757     + : static_max - balloon_stats.target_pages;
758     }
759    
760     - watch_fired = true;
761     - target_diff = new_target - balloon_stats.target_pages;
762     + balloon_set_new_target(new_target - target_diff);
763     }
764     static struct xenbus_watch target_watch = {
765     .node = "memory/target",
766     diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
767     index 7007ae2a5ad2..388f0267cec5 100644
768     --- a/fs/ceph/caps.c
769     +++ b/fs/ceph/caps.c
770     @@ -1985,6 +1985,7 @@ static int try_flush_caps(struct inode *inode, u64 *ptid)
771     retry:
772     spin_lock(&ci->i_ceph_lock);
773     if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
774     + spin_unlock(&ci->i_ceph_lock);
775     dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
776     goto out;
777     }
778     @@ -2002,8 +2003,10 @@ static int try_flush_caps(struct inode *inode, u64 *ptid)
779     mutex_lock(&session->s_mutex);
780     goto retry;
781     }
782     - if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
783     + if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) {
784     + spin_unlock(&ci->i_ceph_lock);
785     goto out;
786     + }
787    
788     flushing = __mark_caps_flushing(inode, session, true,
789     &flush_tid, &oldest_flush_tid);
790     diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
791     index f7243617316c..d5b2e12b5d02 100644
792     --- a/fs/cifs/Kconfig
793     +++ b/fs/cifs/Kconfig
794     @@ -5,9 +5,14 @@ config CIFS
795     select CRYPTO
796     select CRYPTO_MD4
797     select CRYPTO_MD5
798     + select CRYPTO_SHA256
799     + select CRYPTO_CMAC
800     select CRYPTO_HMAC
801     select CRYPTO_ARC4
802     + select CRYPTO_AEAD2
803     + select CRYPTO_CCM
804     select CRYPTO_ECB
805     + select CRYPTO_AES
806     select CRYPTO_DES
807     help
808     This is the client VFS module for the SMB3 family of NAS protocols,
809     diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
810     index 03b6eae0ae28..ab69d895d1e9 100644
811     --- a/fs/cifs/cifsglob.h
812     +++ b/fs/cifs/cifsglob.h
813     @@ -661,7 +661,9 @@ struct TCP_Server_Info {
814     #endif
815     unsigned int max_read;
816     unsigned int max_write;
817     - __u8 preauth_hash[512];
818     +#ifdef CONFIG_CIFS_SMB311
819     + __u8 preauth_sha_hash[64]; /* save initital negprot hash */
820     +#endif /* 3.1.1 */
821     struct delayed_work reconnect; /* reconnect workqueue job */
822     struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
823     unsigned long echo_interval;
824     @@ -849,7 +851,9 @@ struct cifs_ses {
825     __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
826     __u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
827     __u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
828     - __u8 preauth_hash[512];
829     +#ifdef CONFIG_CIFS_SMB311
830     + __u8 preauth_sha_hash[64];
831     +#endif /* 3.1.1 */
832     };
833    
834     static inline bool
835     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
836     index ddc633ef6064..834d18dbfb58 100644
837     --- a/fs/cifs/smb2pdu.c
838     +++ b/fs/cifs/smb2pdu.c
839     @@ -1243,7 +1243,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
840     struct smb2_tree_connect_req *req;
841     struct smb2_tree_connect_rsp *rsp = NULL;
842     struct kvec iov[2];
843     - struct kvec rsp_iov;
844     + struct kvec rsp_iov = { NULL, 0 };
845     int rc = 0;
846     int resp_buftype;
847     int unc_path_len;
848     @@ -1360,7 +1360,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
849     return rc;
850    
851     tcon_error_exit:
852     - if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
853     + if (rsp && rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
854     cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
855     }
856     goto tcon_exit;
857     @@ -1963,6 +1963,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
858     } else
859     iov[0].iov_len = get_rfc1002_length(req) + 4;
860    
861     + /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
862     + if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
863     + req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
864    
865     rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov);
866     cifs_small_buf_release(req);
867     diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
868     index 67367cf1f8cd..99493946e2f9 100644
869     --- a/fs/cifs/smb2transport.c
870     +++ b/fs/cifs/smb2transport.c
871     @@ -390,6 +390,7 @@ generate_smb30signingkey(struct cifs_ses *ses)
872     return generate_smb3signingkey(ses, &triplet);
873     }
874    
875     +#ifdef CONFIG_CIFS_SMB311
876     int
877     generate_smb311signingkey(struct cifs_ses *ses)
878    
879     @@ -398,25 +399,26 @@ generate_smb311signingkey(struct cifs_ses *ses)
880     struct derivation *d;
881    
882     d = &triplet.signing;
883     - d->label.iov_base = "SMB2AESCMAC";
884     - d->label.iov_len = 12;
885     - d->context.iov_base = "SmbSign";
886     - d->context.iov_len = 8;
887     + d->label.iov_base = "SMBSigningKey";
888     + d->label.iov_len = 14;
889     + d->context.iov_base = ses->preauth_sha_hash;
890     + d->context.iov_len = 64;
891    
892     d = &triplet.encryption;
893     - d->label.iov_base = "SMB2AESCCM";
894     - d->label.iov_len = 11;
895     - d->context.iov_base = "ServerIn ";
896     - d->context.iov_len = 10;
897     + d->label.iov_base = "SMBC2SCipherKey";
898     + d->label.iov_len = 16;
899     + d->context.iov_base = ses->preauth_sha_hash;
900     + d->context.iov_len = 64;
901    
902     d = &triplet.decryption;
903     - d->label.iov_base = "SMB2AESCCM";
904     - d->label.iov_len = 11;
905     - d->context.iov_base = "ServerOut";
906     - d->context.iov_len = 10;
907     + d->label.iov_base = "SMBS2CCipherKey";
908     + d->label.iov_len = 16;
909     + d->context.iov_base = ses->preauth_sha_hash;
910     + d->context.iov_len = 64;
911    
912     return generate_smb3signingkey(ses, &triplet);
913     }
914     +#endif /* 311 */
915    
916     int
917     smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
918     diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
919     index 00800c07ba1c..4fbcb8721b2f 100644
920     --- a/fs/fuse/dir.c
921     +++ b/fs/fuse/dir.c
922     @@ -1312,7 +1312,8 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
923     */
924     over = !dir_emit(ctx, dirent->name, dirent->namelen,
925     dirent->ino, dirent->type);
926     - ctx->pos = dirent->off;
927     + if (!over)
928     + ctx->pos = dirent->off;
929     }
930    
931     buf += reclen;
932     diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
933     index ef55c926463c..7b43b89defad 100644
934     --- a/fs/overlayfs/inode.c
935     +++ b/fs/overlayfs/inode.c
936     @@ -595,18 +595,30 @@ static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry,
937     return true;
938     }
939    
940     -struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry)
941     +struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
942     + struct dentry *index)
943     {
944     struct dentry *lowerdentry = ovl_dentry_lower(dentry);
945     struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
946     struct inode *inode;
947     + /* Already indexed or could be indexed on copy up? */
948     + bool indexed = (index || (ovl_indexdir(dentry->d_sb) && !upperdentry));
949     +
950     + if (WARN_ON(upperdentry && indexed && !lowerdentry))
951     + return ERR_PTR(-EIO);
952    
953     if (!realinode)
954     realinode = d_inode(lowerdentry);
955    
956     - if (!S_ISDIR(realinode->i_mode) &&
957     - (upperdentry || (lowerdentry && ovl_indexdir(dentry->d_sb)))) {
958     - struct inode *key = d_inode(lowerdentry ?: upperdentry);
959     + /*
960     + * Copy up origin (lower) may exist for non-indexed upper, but we must
961     + * not use lower as hash key in that case.
962     + * Hash inodes that are or could be indexed by origin inode and
963     + * non-indexed upper inodes that could be hard linked by upper inode.
964     + */
965     + if (!S_ISDIR(realinode->i_mode) && (upperdentry || indexed)) {
966     + struct inode *key = d_inode(indexed ? lowerdentry :
967     + upperdentry);
968     unsigned int nlink;
969    
970     inode = iget5_locked(dentry->d_sb, (unsigned long) key,
971     diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
972     index 9deec68075dc..0223ef4acbe4 100644
973     --- a/fs/overlayfs/namei.c
974     +++ b/fs/overlayfs/namei.c
975     @@ -405,14 +405,13 @@ int ovl_verify_index(struct dentry *index, struct path *lowerstack,
976     * be treated as stale (i.e. after unlink of the overlay inode).
977     * We don't know the verification rules for directory and whiteout
978     * index entries, because they have not been implemented yet, so return
979     - * EROFS if those entries are found to avoid corrupting an index that
980     - * was created by a newer kernel.
981     + * EINVAL if those entries are found to abort the mount to avoid
982     + * corrupting an index that was created by a newer kernel.
983     */
984     - err = -EROFS;
985     + err = -EINVAL;
986     if (d_is_dir(index) || ovl_is_whiteout(index))
987     goto fail;
988    
989     - err = -EINVAL;
990     if (index->d_name.len < sizeof(struct ovl_fh)*2)
991     goto fail;
992    
993     @@ -507,6 +506,10 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
994     index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len);
995     if (IS_ERR(index)) {
996     err = PTR_ERR(index);
997     + if (err == -ENOENT) {
998     + index = NULL;
999     + goto out;
1000     + }
1001     pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n"
1002     "overlayfs: mount with '-o index=off' to disable inodes index.\n",
1003     d_inode(origin)->i_ino, name.len, name.name,
1004     @@ -516,18 +519,9 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
1005    
1006     inode = d_inode(index);
1007     if (d_is_negative(index)) {
1008     - if (upper && d_inode(origin)->i_nlink > 1) {
1009     - pr_warn_ratelimited("overlayfs: hard link with origin but no index (ino=%lu).\n",
1010     - d_inode(origin)->i_ino);
1011     - goto fail;
1012     - }
1013     -
1014     - dput(index);
1015     - index = NULL;
1016     + goto out_dput;
1017     } else if (upper && d_inode(upper) != inode) {
1018     - pr_warn_ratelimited("overlayfs: wrong index found (index=%pd2, ino=%lu, upper ino=%lu).\n",
1019     - index, inode->i_ino, d_inode(upper)->i_ino);
1020     - goto fail;
1021     + goto out_dput;
1022     } else if (ovl_dentry_weird(index) || ovl_is_whiteout(index) ||
1023     ((inode->i_mode ^ d_inode(origin)->i_mode) & S_IFMT)) {
1024     /*
1025     @@ -547,6 +541,11 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
1026     kfree(name.name);
1027     return index;
1028    
1029     +out_dput:
1030     + dput(index);
1031     + index = NULL;
1032     + goto out;
1033     +
1034     fail:
1035     dput(index);
1036     index = ERR_PTR(-EIO);
1037     @@ -709,7 +708,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
1038     upperdentry = dget(index);
1039    
1040     if (upperdentry || ctr) {
1041     - inode = ovl_get_inode(dentry, upperdentry);
1042     + inode = ovl_get_inode(dentry, upperdentry, index);
1043     err = PTR_ERR(inode);
1044     if (IS_ERR(inode))
1045     goto out_free_oe;
1046     diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
1047     index f57f47742f5f..bccb1e683387 100644
1048     --- a/fs/overlayfs/overlayfs.h
1049     +++ b/fs/overlayfs/overlayfs.h
1050     @@ -284,7 +284,8 @@ int ovl_update_time(struct inode *inode, struct timespec *ts, int flags);
1051     bool ovl_is_private_xattr(const char *name);
1052    
1053     struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev);
1054     -struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry);
1055     +struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
1056     + struct dentry *index);
1057     static inline void ovl_copyattr(struct inode *from, struct inode *to)
1058     {
1059     to->i_uid = from->i_uid;
1060     diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
1061     index 74f7ead442f0..3ff960372cb9 100644
1062     --- a/fs/overlayfs/readdir.c
1063     +++ b/fs/overlayfs/readdir.c
1064     @@ -704,13 +704,12 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
1065     break;
1066     }
1067     err = ovl_verify_index(index, lowerstack, numlower);
1068     - if (err) {
1069     - if (err == -EROFS)
1070     - break;
1071     + /* Cleanup stale and orphan index entries */
1072     + if (err && (err == -ESTALE || err == -ENOENT))
1073     err = ovl_cleanup(dir, index);
1074     - if (err)
1075     - break;
1076     - }
1077     + if (err)
1078     + break;
1079     +
1080     dput(index);
1081     index = NULL;
1082     }
1083     diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
1084     index a1464905c1ea..e2192e1eb564 100644
1085     --- a/fs/overlayfs/super.c
1086     +++ b/fs/overlayfs/super.c
1087     @@ -174,6 +174,9 @@ static struct inode *ovl_alloc_inode(struct super_block *sb)
1088     {
1089     struct ovl_inode *oi = kmem_cache_alloc(ovl_inode_cachep, GFP_KERNEL);
1090    
1091     + if (!oi)
1092     + return NULL;
1093     +
1094     oi->cache = NULL;
1095     oi->redirect = NULL;
1096     oi->version = 0;
1097     diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h
1098     index dd5f21e75805..856de39d0b89 100644
1099     --- a/include/uapi/linux/spi/spidev.h
1100     +++ b/include/uapi/linux/spi/spidev.h
1101     @@ -23,6 +23,7 @@
1102     #define SPIDEV_H
1103    
1104     #include <linux/types.h>
1105     +#include <linux/ioctl.h>
1106    
1107     /* User space versions of kernel symbols for SPI clocking modes,
1108     * matching <linux/spi/spi.h>
1109     diff --git a/kernel/workqueue.c b/kernel/workqueue.c
1110     index ca937b0c3a96..d5bf849e0f48 100644
1111     --- a/kernel/workqueue.c
1112     +++ b/kernel/workqueue.c
1113     @@ -68,6 +68,7 @@ enum {
1114     * attach_mutex to avoid changing binding state while
1115     * worker_attach_to_pool() is in progress.
1116     */
1117     + POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */
1118     POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
1119    
1120     /* worker flags */
1121     @@ -165,7 +166,6 @@ struct worker_pool {
1122     /* L: hash of busy workers */
1123    
1124     /* see manage_workers() for details on the two manager mutexes */
1125     - struct mutex manager_arb; /* manager arbitration */
1126     struct worker *manager; /* L: purely informational */
1127     struct mutex attach_mutex; /* attach/detach exclusion */
1128     struct list_head workers; /* A: attached workers */
1129     @@ -299,6 +299,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
1130    
1131     static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
1132     static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
1133     +static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
1134    
1135     static LIST_HEAD(workqueues); /* PR: list of all workqueues */
1136     static bool workqueue_freezing; /* PL: have wqs started freezing? */
1137     @@ -801,7 +802,7 @@ static bool need_to_create_worker(struct worker_pool *pool)
1138     /* Do we have too many workers and should some go away? */
1139     static bool too_many_workers(struct worker_pool *pool)
1140     {
1141     - bool managing = mutex_is_locked(&pool->manager_arb);
1142     + bool managing = pool->flags & POOL_MANAGER_ACTIVE;
1143     int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
1144     int nr_busy = pool->nr_workers - nr_idle;
1145    
1146     @@ -1980,24 +1981,17 @@ static bool manage_workers(struct worker *worker)
1147     {
1148     struct worker_pool *pool = worker->pool;
1149    
1150     - /*
1151     - * Anyone who successfully grabs manager_arb wins the arbitration
1152     - * and becomes the manager. mutex_trylock() on pool->manager_arb
1153     - * failure while holding pool->lock reliably indicates that someone
1154     - * else is managing the pool and the worker which failed trylock
1155     - * can proceed to executing work items. This means that anyone
1156     - * grabbing manager_arb is responsible for actually performing
1157     - * manager duties. If manager_arb is grabbed and released without
1158     - * actual management, the pool may stall indefinitely.
1159     - */
1160     - if (!mutex_trylock(&pool->manager_arb))
1161     + if (pool->flags & POOL_MANAGER_ACTIVE)
1162     return false;
1163     +
1164     + pool->flags |= POOL_MANAGER_ACTIVE;
1165     pool->manager = worker;
1166    
1167     maybe_create_worker(pool);
1168    
1169     pool->manager = NULL;
1170     - mutex_unlock(&pool->manager_arb);
1171     + pool->flags &= ~POOL_MANAGER_ACTIVE;
1172     + wake_up(&wq_manager_wait);
1173     return true;
1174     }
1175    
1176     @@ -3215,7 +3209,6 @@ static int init_worker_pool(struct worker_pool *pool)
1177     setup_timer(&pool->mayday_timer, pool_mayday_timeout,
1178     (unsigned long)pool);
1179    
1180     - mutex_init(&pool->manager_arb);
1181     mutex_init(&pool->attach_mutex);
1182     INIT_LIST_HEAD(&pool->workers);
1183    
1184     @@ -3285,13 +3278,15 @@ static void put_unbound_pool(struct worker_pool *pool)
1185     hash_del(&pool->hash_node);
1186    
1187     /*
1188     - * Become the manager and destroy all workers. Grabbing
1189     - * manager_arb prevents @pool's workers from blocking on
1190     - * attach_mutex.
1191     + * Become the manager and destroy all workers. This prevents
1192     + * @pool's workers from blocking on attach_mutex. We're the last
1193     + * manager and @pool gets freed with the flag set.
1194     */
1195     - mutex_lock(&pool->manager_arb);
1196     -
1197     spin_lock_irq(&pool->lock);
1198     + wait_event_lock_irq(wq_manager_wait,
1199     + !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
1200     + pool->flags |= POOL_MANAGER_ACTIVE;
1201     +
1202     while ((worker = first_idle_worker(pool)))
1203     destroy_worker(worker);
1204     WARN_ON(pool->nr_workers || pool->nr_idle);
1205     @@ -3305,8 +3300,6 @@ static void put_unbound_pool(struct worker_pool *pool)
1206     if (pool->detach_completion)
1207     wait_for_completion(pool->detach_completion);
1208    
1209     - mutex_unlock(&pool->manager_arb);
1210     -
1211     /* shut down the timers */
1212     del_timer_sync(&pool->idle_timer);
1213     del_timer_sync(&pool->mayday_timer);
1214     diff --git a/lib/assoc_array.c b/lib/assoc_array.c
1215     index 59fd7c0b119c..5cd093589c5a 100644
1216     --- a/lib/assoc_array.c
1217     +++ b/lib/assoc_array.c
1218     @@ -598,21 +598,31 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
1219     if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
1220     goto all_leaves_cluster_together;
1221    
1222     - /* Otherwise we can just insert a new node ahead of the old
1223     - * one.
1224     + /* Otherwise all the old leaves cluster in the same slot, but
1225     + * the new leaf wants to go into a different slot - so we
1226     + * create a new node (n0) to hold the new leaf and a pointer to
1227     + * a new node (n1) holding all the old leaves.
1228     + *
1229     + * This can be done by falling through to the node splitting
1230     + * path.
1231     */
1232     - goto present_leaves_cluster_but_not_new_leaf;
1233     + pr_devel("present leaves cluster but not new leaf\n");
1234     }
1235    
1236     split_node:
1237     pr_devel("split node\n");
1238    
1239     - /* We need to split the current node; we know that the node doesn't
1240     - * simply contain a full set of leaves that cluster together (it
1241     - * contains meta pointers and/or non-clustering leaves).
1242     + /* We need to split the current node. The node must contain anything
1243     + * from a single leaf (in the one leaf case, this leaf will cluster
1244     + * with the new leaf) and the rest meta-pointers, to all leaves, some
1245     + * of which may cluster.
1246     + *
1247     + * It won't contain the case in which all the current leaves plus the
1248     + * new leaves want to cluster in the same slot.
1249     *
1250     * We need to expel at least two leaves out of a set consisting of the
1251     - * leaves in the node and the new leaf.
1252     + * leaves in the node and the new leaf. The current meta pointers can
1253     + * just be copied as they shouldn't cluster with any of the leaves.
1254     *
1255     * We need a new node (n0) to replace the current one and a new node to
1256     * take the expelled nodes (n1).
1257     @@ -717,33 +727,6 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
1258     pr_devel("<--%s() = ok [split node]\n", __func__);
1259     return true;
1260    
1261     -present_leaves_cluster_but_not_new_leaf:
1262     - /* All the old leaves cluster in the same slot, but the new leaf wants
1263     - * to go into a different slot, so we create a new node to hold the new
1264     - * leaf and a pointer to a new node holding all the old leaves.
1265     - */
1266     - pr_devel("present leaves cluster but not new leaf\n");
1267     -
1268     - new_n0->back_pointer = node->back_pointer;
1269     - new_n0->parent_slot = node->parent_slot;
1270     - new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
1271     - new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
1272     - new_n1->parent_slot = edit->segment_cache[0];
1273     - new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
1274     - edit->adjust_count_on = new_n0;
1275     -
1276     - for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
1277     - new_n1->slots[i] = node->slots[i];
1278     -
1279     - new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
1280     - edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
1281     -
1282     - edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
1283     - edit->set[0].to = assoc_array_node_to_ptr(new_n0);
1284     - edit->excised_meta[0] = assoc_array_node_to_ptr(node);
1285     - pr_devel("<--%s() = ok [insert node before]\n", __func__);
1286     - return true;
1287     -
1288     all_leaves_cluster_together:
1289     /* All the leaves, new and old, want to cluster together in this node
1290     * in the same slot, so we have to replace this node with a shortcut to
1291     diff --git a/net/wireless/sme.c b/net/wireless/sme.c
1292     index 0a49b88070d0..b6533ecbf5b1 100644
1293     --- a/net/wireless/sme.c
1294     +++ b/net/wireless/sme.c
1295     @@ -522,11 +522,6 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
1296     return -EOPNOTSUPP;
1297    
1298     if (wdev->current_bss) {
1299     - if (!prev_bssid)
1300     - return -EALREADY;
1301     - if (prev_bssid &&
1302     - !ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
1303     - return -ENOTCONN;
1304     cfg80211_unhold_bss(wdev->current_bss);
1305     cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
1306     wdev->current_bss = NULL;
1307     @@ -1063,11 +1058,35 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
1308    
1309     ASSERT_WDEV_LOCK(wdev);
1310    
1311     - if (WARN_ON(wdev->connect_keys)) {
1312     - kzfree(wdev->connect_keys);
1313     - wdev->connect_keys = NULL;
1314     + /*
1315     + * If we have an ssid_len, we're trying to connect or are
1316     + * already connected, so reject a new SSID unless it's the
1317     + * same (which is the case for re-association.)
1318     + */
1319     + if (wdev->ssid_len &&
1320     + (wdev->ssid_len != connect->ssid_len ||
1321     + memcmp(wdev->ssid, connect->ssid, wdev->ssid_len)))
1322     + return -EALREADY;
1323     +
1324     + /*
1325     + * If connected, reject (re-)association unless prev_bssid
1326     + * matches the current BSSID.
1327     + */
1328     + if (wdev->current_bss) {
1329     + if (!prev_bssid)
1330     + return -EALREADY;
1331     + if (!ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
1332     + return -ENOTCONN;
1333     }
1334    
1335     + /*
1336     + * Reject if we're in the process of connecting with WEP,
1337     + * this case isn't very interesting and trying to handle
1338     + * it would make the code much more complex.
1339     + */
1340     + if (wdev->connect_keys)
1341     + return -EINPROGRESS;
1342     +
1343     cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
1344     rdev->wiphy.ht_capa_mod_mask);
1345    
1346     @@ -1118,7 +1137,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
1347    
1348     if (err) {
1349     wdev->connect_keys = NULL;
1350     - wdev->ssid_len = 0;
1351     + /*
1352     + * This could be reassoc getting refused, don't clear
1353     + * ssid_len in that case.
1354     + */
1355     + if (!wdev->current_bss)
1356     + wdev->ssid_len = 0;
1357     return err;
1358     }
1359    
1360     @@ -1145,6 +1169,14 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
1361     else if (wdev->ssid_len)
1362     err = rdev_disconnect(rdev, dev, reason);
1363    
1364     + /*
1365     + * Clear ssid_len unless we actually were fully connected,
1366     + * in which case cfg80211_disconnected() will take care of
1367     + * this later.
1368     + */
1369     + if (!wdev->current_bss)
1370     + wdev->ssid_len = 0;
1371     +
1372     return err;
1373     }
1374    
1375     diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
1376     index 9391ced05259..c0a6cdd42ff2 100644
1377     --- a/net/xfrm/xfrm_user.c
1378     +++ b/net/xfrm/xfrm_user.c
1379     @@ -1684,32 +1684,34 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
1380    
1381     static int xfrm_dump_policy_done(struct netlink_callback *cb)
1382     {
1383     - struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1384     + struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
1385     struct net *net = sock_net(cb->skb->sk);
1386    
1387     xfrm_policy_walk_done(walk, net);
1388     return 0;
1389     }
1390    
1391     +static int xfrm_dump_policy_start(struct netlink_callback *cb)
1392     +{
1393     + struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
1394     +
1395     + BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
1396     +
1397     + xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
1398     + return 0;
1399     +}
1400     +
1401     static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1402     {
1403     struct net *net = sock_net(skb->sk);
1404     - struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1405     + struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
1406     struct xfrm_dump_info info;
1407    
1408     - BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
1409     - sizeof(cb->args) - sizeof(cb->args[0]));
1410     -
1411     info.in_skb = cb->skb;
1412     info.out_skb = skb;
1413     info.nlmsg_seq = cb->nlh->nlmsg_seq;
1414     info.nlmsg_flags = NLM_F_MULTI;
1415    
1416     - if (!cb->args[0]) {
1417     - cb->args[0] = 1;
1418     - xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
1419     - }
1420     -
1421     (void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
1422    
1423     return skb->len;
1424     @@ -2467,6 +2469,7 @@ static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
1425    
1426     static const struct xfrm_link {
1427     int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
1428     + int (*start)(struct netlink_callback *);
1429     int (*dump)(struct sk_buff *, struct netlink_callback *);
1430     int (*done)(struct netlink_callback *);
1431     const struct nla_policy *nla_pol;
1432     @@ -2480,6 +2483,7 @@ static const struct xfrm_link {
1433     [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
1434     [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
1435     [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
1436     + .start = xfrm_dump_policy_start,
1437     .dump = xfrm_dump_policy,
1438     .done = xfrm_dump_policy_done },
1439     [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
1440     @@ -2532,6 +2536,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
1441    
1442     {
1443     struct netlink_dump_control c = {
1444     + .start = link->start,
1445     .dump = link->dump,
1446     .done = link->done,
1447     };
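The xfrm_user.c hunks above replace the "initialize on the first ->dump call" idiom with the netlink ->start callback and let the policy walker occupy all of cb->args. A rough userspace model of that pattern; the types below are stand-ins, not the kernel's netlink API:

/* Stand-in model of one-time dump setup in a ->start hook; not the
 * kernel's struct netlink_callback / netlink_dump_control. */
#include <stdio.h>
#include <string.h>

struct callback {
	long args[6];            /* per-dump scratch space, like cb->args */
};

struct walker {
	int pos;                 /* walk cursor preserved across dump calls */
};

/* The whole walker must fit in the scratch space. */
_Static_assert(sizeof(struct walker) <= sizeof(((struct callback *)0)->args),
	       "walker too large for cb->args");

/* Runs exactly once, before the first dump pass. */
static int dump_start(struct callback *cb)
{
	struct walker *walk = (struct walker *)cb->args;

	memset(walk, 0, sizeof(*walk));
	return 0;
}

/* May run many times; no first-call flag is needed any more. */
static int dump_next(struct callback *cb)
{
	struct walker *walk = (struct walker *)cb->args;

	printf("dumping from position %d\n", walk->pos);
	return ++walk->pos < 3;  /* non-zero: more data to emit */
}

int main(void)
{
	struct callback cb = { .args = { 0 } };

	dump_start(&cb);
	while (dump_next(&cb))
		;
	return 0;
}

One practical effect of guaranteeing that ->start has run is that ->done can no longer see an uninitialized walker when a dump is torn down before its first ->dump pass.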
1448     diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
1449     index 446beb7ac48d..5522692100ba 100644
1450     --- a/samples/trace_events/trace-events-sample.c
1451     +++ b/samples/trace_events/trace-events-sample.c
1452     @@ -78,7 +78,7 @@ static int simple_thread_fn(void *arg)
1453     }
1454    
1455     static DEFINE_MUTEX(thread_mutex);
1456     -static bool simple_thread_cnt;
1457     +static int simple_thread_cnt;
1458    
1459     int foo_bar_reg(void)
1460     {
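A side note on the one-line samples/ change above: simple_thread_cnt gates starting and stopping the sample's worker threads across foo_bar_reg()/foo_bar_unreg() calls, and a bool cannot count nested registrations. A tiny illustration, not the sample module's code:

/* Why a bool fails as a nesting counter: it saturates at true. */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool b = 0;
	int  n = 0;

	b++; b++;   /* two registrations: the bool is still just "true" */
	n++; n++;   /* the int records both                             */
	b--;        /* one unregistration drops the bool back to 0      */
	n--;        /* the int correctly stays at 1                     */

	printf("bool counter: %d, int counter: %d\n", b, n);
	return 0;
}

With the bool, the first unregister would therefore tear the thread down even though another user is still registered; with the int, teardown waits until the count genuinely reaches zero.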
1461     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1462     index 217bb582aff1..fe4d06398fc3 100644
1463     --- a/sound/pci/hda/patch_realtek.c
1464     +++ b/sound/pci/hda/patch_realtek.c
1465     @@ -327,6 +327,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
1466     case 0x10ec0215:
1467     case 0x10ec0225:
1468     case 0x10ec0233:
1469     + case 0x10ec0236:
1470     case 0x10ec0255:
1471     case 0x10ec0256:
1472     case 0x10ec0282:
1473     @@ -911,6 +912,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
1474     { 0x10ec0275, 0x1028, 0, "ALC3260" },
1475     { 0x10ec0899, 0x1028, 0, "ALC3861" },
1476     { 0x10ec0298, 0x1028, 0, "ALC3266" },
1477     + { 0x10ec0236, 0x1028, 0, "ALC3204" },
1478     { 0x10ec0256, 0x1028, 0, "ALC3246" },
1479     { 0x10ec0225, 0x1028, 0, "ALC3253" },
1480     { 0x10ec0295, 0x1028, 0, "ALC3254" },
1481     @@ -3930,6 +3932,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
1482     alc_process_coef_fw(codec, coef0255_1);
1483     alc_process_coef_fw(codec, coef0255);
1484     break;
1485     + case 0x10ec0236:
1486     case 0x10ec0256:
1487     alc_process_coef_fw(codec, coef0256);
1488     alc_process_coef_fw(codec, coef0255);
1489     @@ -4028,6 +4031,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
1490     };
1491    
1492     switch (codec->core.vendor_id) {
1493     + case 0x10ec0236:
1494     case 0x10ec0255:
1495     case 0x10ec0256:
1496     alc_write_coef_idx(codec, 0x45, 0xc489);
1497     @@ -4160,6 +4164,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
1498     alc_process_coef_fw(codec, alc225_pre_hsmode);
1499     alc_process_coef_fw(codec, coef0225);
1500     break;
1501     + case 0x10ec0236:
1502     case 0x10ec0255:
1503     case 0x10ec0256:
1504     alc_process_coef_fw(codec, coef0255);
1505     @@ -4256,6 +4261,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
1506     case 0x10ec0255:
1507     alc_process_coef_fw(codec, coef0255);
1508     break;
1509     + case 0x10ec0236:
1510     case 0x10ec0256:
1511     alc_process_coef_fw(codec, coef0256);
1512     break;
1513     @@ -4366,6 +4372,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
1514     case 0x10ec0255:
1515     alc_process_coef_fw(codec, coef0255);
1516     break;
1517     + case 0x10ec0236:
1518     case 0x10ec0256:
1519     alc_process_coef_fw(codec, coef0256);
1520     break;
1521     @@ -4451,6 +4458,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
1522     };
1523    
1524     switch (codec->core.vendor_id) {
1525     + case 0x10ec0236:
1526     case 0x10ec0255:
1527     case 0x10ec0256:
1528     alc_process_coef_fw(codec, coef0255);
1529     @@ -4705,6 +4713,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
1530     case 0x10ec0255:
1531     alc_process_coef_fw(codec, alc255fw);
1532     break;
1533     + case 0x10ec0236:
1534     case 0x10ec0256:
1535     alc_process_coef_fw(codec, alc256fw);
1536     break;
1537     @@ -6402,6 +6411,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
1538     ALC225_STANDARD_PINS,
1539     {0x12, 0xb7a60130},
1540     {0x1b, 0x90170110}),
1541     + SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
1542     + {0x12, 0x90a60140},
1543     + {0x14, 0x90170110},
1544     + {0x21, 0x02211020}),
1545     + SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
1546     + {0x12, 0x90a60140},
1547     + {0x14, 0x90170150},
1548     + {0x21, 0x02211020}),
1549     SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
1550     {0x14, 0x90170110},
1551     {0x21, 0x02211020}),
1552     @@ -6789,6 +6806,7 @@ static int patch_alc269(struct hda_codec *codec)
1553     case 0x10ec0255:
1554     spec->codec_variant = ALC269_TYPE_ALC255;
1555     break;
1556     + case 0x10ec0236:
1557     case 0x10ec0256:
1558     spec->codec_variant = ALC269_TYPE_ALC256;
1559     spec->shutup = alc256_shutup;
1560     @@ -7840,6 +7858,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
1561     HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
1562     HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
1563     HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
1564     + HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
1565     HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
1566     HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
1567     HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260),
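The patch_realtek.c hunks wire the new vendor ID 0x10ec0236 (ALC236, sold by Dell as ALC3204) into the existing ALC255/ALC256 coefficient and quirk paths. A rough sketch of that dispatch pattern only; the types below are simplified stand-ins, not the HDA driver's tables:

/* Stand-in model of routing a codec vendor ID onto a shared code path,
 * as the ALC236 additions above do; not the real patch_realtek.c API. */
#include <stdio.h>

enum variant { VARIANT_ALC255, VARIANT_ALC256, VARIANT_UNKNOWN };

static enum variant pick_variant(unsigned int vendor_id)
{
	switch (vendor_id) {
	case 0x10ec0255:
		return VARIANT_ALC255;
	case 0x10ec0236:        /* new ID shares the ALC256 handling */
	case 0x10ec0256:
		return VARIANT_ALC256;
	default:
		return VARIANT_UNKNOWN;
	}
}

int main(void)
{
	printf("0x10ec0236 -> variant %d\n", pick_variant(0x10ec0236));
	return 0;
}

Each per-codec switch in the driver gains the same one-line case, plus a HDA_CODEC_ENTRY and the Dell pin quirks, which is all the hunks above consist of.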