Contents of /trunk/kernel-alx/patches-4.9/0159-4.9.60-all-fixes.patch
Revision 3044
Wed Dec 20 11:49:22 2017 UTC (6 years, 10 months ago) by niro
File size: 33553 byte(s)
-linux-4.9.60
diff --git a/Makefile b/Makefile
index 900cd7c3a9ee..2f7a386b1751 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 59
+SUBLEVEL = 60
EXTRAVERSION =
NAME = Roaring Lionus

diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 70963c845e96..fc0df0f6fe88 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -601,8 +601,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
#endif
case KVM_CAP_PPC_HTM:
- r = cpu_has_feature(CPU_FTR_TM_COMP) &&
- is_kvmppc_hv_enabled(kvm);
+ r = cpu_has_feature(CPU_FTR_TM_COMP) && hv_enabled;
break;
default:
r = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 08cd0bd3ebe5..3907439417e7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -825,7 +825,7 @@ uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
{
uint32_t reference_clock, tmp;
struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info;
+ struct cgs_mode_info mode_info = {0};

info.mode_info = &mode_info;

@@ -3718,10 +3718,9 @@ int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
uint32_t ref_clock;
uint32_t refresh_rate = 0;
struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info;
+ struct cgs_mode_info mode_info = {0};

info.mode_info = &mode_info;
-
cgs_get_active_displays_info(hwmgr->device, &info);
num_active_displays = info.display_count;

@@ -3737,6 +3736,7 @@ int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
frame_time_in_us = 1000000 / refresh_rate;

pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+
data->frame_time_x2 = frame_time_in_us * 2 / 100;

display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 681dce15fbc8..b8c50d883b2c 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1240,6 +1240,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0605", 0 },
{ "ELAN0609", 0 },
{ "ELAN060B", 0 },
+ { "ELAN0611", 0 },
{ "ELAN1000", 0 },
{ }
};
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index abf09ac42ce4..339a0e2d2f86 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -230,13 +230,17 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,

/* Walk this report and pull out the info we need */
while (i < length) {
- prefix = report[i];
-
- /* Skip over prefix */
- i++;
+ prefix = report[i++];

/* Determine data size and save the data in the proper variable */
- size = PREF_SIZE(prefix);
+ size = (1U << PREF_SIZE(prefix)) >> 1;
+ if (i + size > length) {
+ dev_err(ddev,
+ "Not enough data (need %d, have %d)\n",
+ i + size, length);
+ break;
+ }
+
switch (size) {
case 1:
data = report[i];
@@ -244,8 +248,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
case 2:
data16 = get_unaligned_le16(&report[i]);
break;
- case 3:
- size = 4;
+ case 4:
data32 = get_unaligned_le32(&report[i]);
break;
}
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 68ef0a4cd821..b0c80859f746 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -342,7 +342,7 @@ static int sun4i_can_start(struct net_device *dev)

/* enter the selected mode */
mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR);
- if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK)
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE;
else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE;
@@ -811,7 +811,6 @@ static int sun4ican_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_LOOPBACK |
- CAN_CTRLMODE_PRESUME_ACK |
CAN_CTRLMODE_3_SAMPLES;
priv->base = addr;
priv->clk = clk;
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index d51e0c401b48..4224e066cb16 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -137,6 +137,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
#define CMD_RESET_ERROR_COUNTER 49
#define CMD_TX_ACKNOWLEDGE 50
#define CMD_CAN_ERROR_EVENT 51
+#define CMD_FLUSH_QUEUE_REPLY 68

#define CMD_LEAF_USB_THROTTLE 77
#define CMD_LEAF_LOG_MESSAGE 106
@@ -1301,6 +1302,11 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
goto warn;
break;

+ case CMD_FLUSH_QUEUE_REPLY:
+ if (dev->family != KVASER_LEAF)
+ goto warn;
+ break;
+
default:
warn: dev_warn(dev->udev->dev.parent,
"Unhandled message (%d)\n", msg->id);
@@ -1609,7 +1615,8 @@ static int kvaser_usb_close(struct net_device *netdev)
if (err)
netdev_warn(netdev, "Cannot flush queue, error %d\n", err);

- if (kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel))
+ err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
+ if (err)
netdev_warn(netdev, "Cannot reset card, error %d\n", err);

err = kvaser_usb_stop_chip(priv);
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index d7da81a875cf..c9af065feb28 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -476,7 +476,10 @@ static const struct i2c_device_id fan53555_id[] = {
.name = "fan53555",
.driver_data = FAN53555_VENDOR_FAIRCHILD
}, {
- .name = "syr82x",
+ .name = "syr827",
+ .driver_data = FAN53555_VENDOR_SILERGY
+ }, {
+ .name = "syr828",
.driver_data = FAN53555_VENDOR_SILERGY
},
{ },
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index bcc8f3dfd4c4..b3f9243cfed5 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -358,6 +358,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)

adapter->next_port_scan = jiffies;

+ adapter->erp_action.adapter = adapter;
+
if (zfcp_qdio_setup(adapter))
goto failed;

@@ -514,6 +516,9 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
port->dev.groups = zfcp_port_attr_groups;
port->dev.release = zfcp_port_release;

+ port->erp_action.adapter = adapter;
+ port->erp_action.port = port;
+
if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
kfree(port);
goto err_out;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 7ccfce559034..3b23d6754598 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -193,9 +193,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
&zfcp_sdev->status);
erp_action = &zfcp_sdev->erp_action;
- memset(erp_action, 0, sizeof(struct zfcp_erp_action));
- erp_action->port = port;
- erp_action->sdev = sdev;
+ WARN_ON_ONCE(erp_action->port != port);
+ WARN_ON_ONCE(erp_action->sdev != sdev);
if (!(atomic_read(&zfcp_sdev->status) &
ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -208,8 +207,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
zfcp_erp_action_dismiss_port(port);
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
erp_action = &port->erp_action;
- memset(erp_action, 0, sizeof(struct zfcp_erp_action));
- erp_action->port = port;
+ WARN_ON_ONCE(erp_action->port != port);
+ WARN_ON_ONCE(erp_action->sdev != NULL);
if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
break;
@@ -219,7 +218,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
zfcp_erp_action_dismiss_adapter(adapter);
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
erp_action = &adapter->erp_action;
- memset(erp_action, 0, sizeof(struct zfcp_erp_action));
+ WARN_ON_ONCE(erp_action->port != NULL);
+ WARN_ON_ONCE(erp_action->sdev != NULL);
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -229,7 +229,11 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
return NULL;
}

- erp_action->adapter = adapter;
+ WARN_ON_ONCE(erp_action->adapter != adapter);
+ memset(&erp_action->list, 0, sizeof(erp_action->list));
+ memset(&erp_action->timer, 0, sizeof(erp_action->timer));
+ erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
+ erp_action->fsf_req_id = 0;
erp_action->action = need;
erp_action->status = act_status;

diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 9bd9b9a29dfc..a9b8104b982e 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -115,10 +115,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
struct zfcp_unit *unit;
int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;

+ zfcp_sdev->erp_action.adapter = adapter;
+ zfcp_sdev->erp_action.sdev = sdev;
+
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (!port)
return -ENXIO;

+ zfcp_sdev->erp_action.port = port;
+
unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
if (unit)
put_device(&unit->dev);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 02dfbc1373e3..184c7db1e0ca 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -837,7 +837,7 @@ sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)

val = 0;
list_for_each_entry(srp, &sfp->rq_list, entry) {
- if (val > SG_MAX_QUEUE)
+ if (val >= SG_MAX_QUEUE)
break;
rinfo[val].req_state = srp->done + 1;
rinfo[val].problem =
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 14f9dea3173f..7d629b4e1ecc 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1215,7 +1215,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
goto qspi_probe_err;
}
} else {
- goto qspi_probe_err;
+ goto qspi_resource_err;
}

res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
@@ -1237,7 +1237,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->base[CHIP_SELECT])) {
ret = PTR_ERR(qspi->base[CHIP_SELECT]);
- goto qspi_probe_err;
+ goto qspi_resource_err;
}
}

@@ -1245,7 +1245,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
GFP_KERNEL);
if (!qspi->dev_ids) {
ret = -ENOMEM;
- goto qspi_probe_err;
+ goto qspi_resource_err;
}

for (val = 0; val < num_irqs; val++) {
@@ -1334,8 +1334,9 @@ int bcm_qspi_probe(struct platform_device *pdev,
bcm_qspi_hw_uninit(qspi);
clk_disable_unprepare(qspi->clk);
qspi_probe_err:
- spi_master_put(master);
kfree(qspi->dev_ids);
+qspi_resource_err:
+ spi_master_put(master);
return ret;
}
/* probe function to be called by SoC specific platform driver probe */
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 4a02c5c7df0d..0722f75f1d6a 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -412,15 +412,25 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
GFP_NOWAIT);
if (!command) {
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_free_command(xhci, cmd);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto cmd_cleanup;
+ }

+ ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
+ i, suspend);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_free_command(xhci, command);
+ goto cmd_cleanup;
}
- xhci_queue_stop_endpoint(xhci, command, slot_id, i,
- suspend);
}
}
- xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
+ ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ goto cmd_cleanup;
+ }
+
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);

@@ -431,6 +441,8 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
ret = -ETIME;
}
+
+cmd_cleanup:
xhci_free_command(xhci, cmd);
return ret;
}
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 2ef2b61b69df..79b8ab4c6663 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1030,6 +1030,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
mutex_unlock(&priv->lock);

if (use_ptemod) {
+ map->pages_vm_start = vma->vm_start;
err = apply_to_page_range(vma->vm_mm, vma->vm_start,
vma->vm_end - vma->vm_start,
find_grant_ptes, map);
@@ -1067,7 +1068,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
set_grant_ptes_as_special, NULL);
}
#endif
- map->pages_vm_start = vma->vm_start;
}

return 0;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 03951f90ecf7..3e1c136aadb7 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1900,6 +1900,7 @@ static int try_flush_caps(struct inode *inode, u64 *ptid)
retry:
spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
+ spin_unlock(&ci->i_ceph_lock);
dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
goto out;
}
@@ -1917,8 +1918,10 @@ static int try_flush_caps(struct inode *inode, u64 *ptid)
mutex_lock(&session->s_mutex);
goto retry;
}
- if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
+ if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) {
+ spin_unlock(&ci->i_ceph_lock);
goto out;
+ }

flushing = __mark_caps_flushing(inode, session, true,
&flush_tid, &oldest_flush_tid);
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 599a29237cfe..a896e46671ea 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -84,11 +84,16 @@ struct ecryptfs_page_crypt_context {
static inline struct ecryptfs_auth_tok *
ecryptfs_get_encrypted_key_payload_data(struct key *key)
{
- if (key->type == &key_type_encrypted)
- return (struct ecryptfs_auth_tok *)
- (&((struct encrypted_key_payload *)key->payload.data[0])->payload_data);
- else
+ struct encrypted_key_payload *payload;
+
+ if (key->type != &key_type_encrypted)
return NULL;
+
+ payload = key->payload.data[0];
+ if (!payload)
+ return ERR_PTR(-EKEYREVOKED);
+
+ return (struct ecryptfs_auth_tok *)payload->payload_data;
}

static inline struct key *ecryptfs_get_encrypted_key(char *sig)
@@ -114,12 +119,17 @@ static inline struct ecryptfs_auth_tok *
ecryptfs_get_key_payload_data(struct key *key)
{
struct ecryptfs_auth_tok *auth_tok;
+ const struct user_key_payload *ukp;

auth_tok = ecryptfs_get_encrypted_key_payload_data(key);
- if (!auth_tok)
- return (struct ecryptfs_auth_tok *)user_key_payload(key)->data;
- else
+ if (auth_tok)
return auth_tok;
+
+ ukp = user_key_payload(key);
+ if (!ukp)
+ return ERR_PTR(-EKEYREVOKED);
+
+ return (struct ecryptfs_auth_tok *)ukp->data;
}

#define ECRYPTFS_MAX_KEYSET_SIZE 1024
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 3cf1546dca82..fa218cd64f74 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -459,7 +459,8 @@ static int ecryptfs_verify_version(u16 version)
* @auth_tok_key: key containing the authentication token
* @auth_tok: authentication token
*
- * Returns zero on valid auth tok; -EINVAL otherwise
+ * Returns zero on valid auth tok; -EINVAL if the payload is invalid; or
+ * -EKEYREVOKED if the key was revoked before we acquired its semaphore.
*/
static int
ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
@@ -468,6 +469,12 @@ ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
int rc = 0;

(*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key);
+ if (IS_ERR(*auth_tok)) {
+ rc = PTR_ERR(*auth_tok);
+ *auth_tok = NULL;
+ goto out;
+ }
+
if (ecryptfs_verify_version((*auth_tok)->version)) {
printk(KERN_ERR "Data structure version mismatch. Userspace "
"tools must match eCryptfs kernel module with major "
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 642c57b8de7b..4bbad745415a 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1312,7 +1312,8 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
*/
over = !dir_emit(ctx, dirent->name, dirent->namelen,
dirent->ino, dirent->type);
- ctx->pos = dirent->off;
+ if (!over)
+ ctx->pos = dirent->off;
}

buf += reclen;
diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h
index dd5f21e75805..856de39d0b89 100644
--- a/include/uapi/linux/spi/spidev.h
+++ b/include/uapi/linux/spi/spidev.h
@@ -23,6 +23,7 @@
#define SPIDEV_H

#include <linux/types.h>
+#include <linux/ioctl.h>

/* User space versions of kernel symbols for SPI clocking modes,
* matching <linux/spi/spi.h>
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 776dda02e751..296dcca77f33 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -68,6 +68,7 @@ enum {
* attach_mutex to avoid changing binding state while
* worker_attach_to_pool() is in progress.
*/
+ POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */
POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */

/* worker flags */
@@ -165,7 +166,6 @@ struct worker_pool {
/* L: hash of busy workers */

/* see manage_workers() for details on the two manager mutexes */
- struct mutex manager_arb; /* manager arbitration */
struct worker *manager; /* L: purely informational */
struct mutex attach_mutex; /* attach/detach exclusion */
struct list_head workers; /* A: attached workers */
@@ -297,6 +297,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
+static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */

static LIST_HEAD(workqueues); /* PR: list of all workqueues */
static bool workqueue_freezing; /* PL: have wqs started freezing? */
@@ -799,7 +800,7 @@ static bool need_to_create_worker(struct worker_pool *pool)
/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
- bool managing = mutex_is_locked(&pool->manager_arb);
+ bool managing = pool->flags & POOL_MANAGER_ACTIVE;
int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
int nr_busy = pool->nr_workers - nr_idle;

@@ -1979,24 +1980,17 @@ static bool manage_workers(struct worker *worker)
{
struct worker_pool *pool = worker->pool;

- /*
- * Anyone who successfully grabs manager_arb wins the arbitration
- * and becomes the manager. mutex_trylock() on pool->manager_arb
- * failure while holding pool->lock reliably indicates that someone
- * else is managing the pool and the worker which failed trylock
- * can proceed to executing work items. This means that anyone
- * grabbing manager_arb is responsible for actually performing
- * manager duties. If manager_arb is grabbed and released without
- * actual management, the pool may stall indefinitely.
- */
- if (!mutex_trylock(&pool->manager_arb))
+ if (pool->flags & POOL_MANAGER_ACTIVE)
return false;
+
+ pool->flags |= POOL_MANAGER_ACTIVE;
pool->manager = worker;

maybe_create_worker(pool);

pool->manager = NULL;
- mutex_unlock(&pool->manager_arb);
+ pool->flags &= ~POOL_MANAGER_ACTIVE;
+ wake_up(&wq_manager_wait);
return true;
}

@@ -3203,7 +3197,6 @@ static int init_worker_pool(struct worker_pool *pool)
setup_timer(&pool->mayday_timer, pool_mayday_timeout,
(unsigned long)pool);

- mutex_init(&pool->manager_arb);
mutex_init(&pool->attach_mutex);
INIT_LIST_HEAD(&pool->workers);

@@ -3273,13 +3266,15 @@ static void put_unbound_pool(struct worker_pool *pool)
hash_del(&pool->hash_node);

/*
- * Become the manager and destroy all workers. Grabbing
- * manager_arb prevents @pool's workers from blocking on
- * attach_mutex.
+ * Become the manager and destroy all workers. This prevents
+ * @pool's workers from blocking on attach_mutex. We're the last
+ * manager and @pool gets freed with the flag set.
*/
- mutex_lock(&pool->manager_arb);
-
spin_lock_irq(&pool->lock);
+ wait_event_lock_irq(wq_manager_wait,
+ !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+ pool->flags |= POOL_MANAGER_ACTIVE;
+
while ((worker = first_idle_worker(pool)))
destroy_worker(worker);
WARN_ON(pool->nr_workers || pool->nr_idle);
@@ -3293,8 +3288,6 @@ static void put_unbound_pool(struct worker_pool *pool)
if (pool->detach_completion)
wait_for_completion(pool->detach_completion);

- mutex_unlock(&pool->manager_arb);

/* shut down the timers */
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 59fd7c0b119c..5cd093589c5a 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -598,21 +598,31 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
goto all_leaves_cluster_together;

- /* Otherwise we can just insert a new node ahead of the old
- * one.
+ /* Otherwise all the old leaves cluster in the same slot, but
+ * the new leaf wants to go into a different slot - so we
+ * create a new node (n0) to hold the new leaf and a pointer to
+ * a new node (n1) holding all the old leaves.
+ *
+ * This can be done by falling through to the node splitting
+ * path.
*/
- goto present_leaves_cluster_but_not_new_leaf;
+ pr_devel("present leaves cluster but not new leaf\n");
}

split_node:
pr_devel("split node\n");

- /* We need to split the current node; we know that the node doesn't
- * simply contain a full set of leaves that cluster together (it
- * contains meta pointers and/or non-clustering leaves).
+ /* We need to split the current node. The node must contain anything
+ * from a single leaf (in the one leaf case, this leaf will cluster
+ * with the new leaf) and the rest meta-pointers, to all leaves, some
+ * of which may cluster.
+ *
+ * It won't contain the case in which all the current leaves plus the
+ * new leaves want to cluster in the same slot.
*
* We need to expel at least two leaves out of a set consisting of the
- * leaves in the node and the new leaf.
+ * leaves in the node and the new leaf. The current meta pointers can
+ * just be copied as they shouldn't cluster with any of the leaves.
*
* We need a new node (n0) to replace the current one and a new node to
* take the expelled nodes (n1).
@@ -717,33 +727,6 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
pr_devel("<--%s() = ok [split node]\n", __func__);
return true;

-present_leaves_cluster_but_not_new_leaf:
- /* All the old leaves cluster in the same slot, but the new leaf wants
- * to go into a different slot, so we create a new node to hold the new
- * leaf and a pointer to a new node holding all the old leaves.
- */
- pr_devel("present leaves cluster but not new leaf\n");
-
- new_n0->back_pointer = node->back_pointer;
- new_n0->parent_slot = node->parent_slot;
- new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
- new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
- new_n1->parent_slot = edit->segment_cache[0];
- new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
- edit->adjust_count_on = new_n0;
-
- for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
- new_n1->slots[i] = node->slots[i];
-
- new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
- edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
-
- edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
- edit->set[0].to = assoc_array_node_to_ptr(new_n0);
- edit->excised_meta[0] = assoc_array_node_to_ptr(node);
- pr_devel("<--%s() = ok [insert node before]\n", __func__);
- return true;
-
all_leaves_cluster_together:
/* All the leaves, new and old, want to cluster together in this node
* in the same slot, so we have to replace this node with a shortcut to
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 35cc1de85dcc..6fd24f6435c3 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -505,11 +505,6 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
return -EOPNOTSUPP;

if (wdev->current_bss) {
- if (!prev_bssid)
- return -EALREADY;
- if (prev_bssid &&
- !ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
- return -ENOTCONN;
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
wdev->current_bss = NULL;
@@ -1025,11 +1020,35 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,

ASSERT_WDEV_LOCK(wdev);

- if (WARN_ON(wdev->connect_keys)) {
- kzfree(wdev->connect_keys);
- wdev->connect_keys = NULL;
+ /*
+ * If we have an ssid_len, we're trying to connect or are
+ * already connected, so reject a new SSID unless it's the
+ * same (which is the case for re-association.)
+ */
+ if (wdev->ssid_len &&
+ (wdev->ssid_len != connect->ssid_len ||
+ memcmp(wdev->ssid, connect->ssid, wdev->ssid_len)))
+ return -EALREADY;
+
+ /*
+ * If connected, reject (re-)association unless prev_bssid
+ * matches the current BSSID.
+ */
+ if (wdev->current_bss) {
+ if (!prev_bssid)
+ return -EALREADY;
+ if (!ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
+ return -ENOTCONN;
}

+ /*
+ * Reject if we're in the process of connecting with WEP,
+ * this case isn't very interesting and trying to handle
+ * it would make the code much more complex.
+ */
+ if (wdev->connect_keys)
+ return -EINPROGRESS;
+
cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
rdev->wiphy.ht_capa_mod_mask);

@@ -1080,7 +1099,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,

if (err) {
wdev->connect_keys = NULL;
- wdev->ssid_len = 0;
+ /*
+ * This could be reassoc getting refused, don't clear
+ * ssid_len in that case.
+ */
+ if (!wdev->current_bss)
+ wdev->ssid_len = 0;
return err;
}

@@ -1105,5 +1129,13 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
else if (wdev->current_bss)
err = rdev_disconnect(rdev, dev, reason);

+ /*
+ * Clear ssid_len unless we actually were fully connected,
+ * in which case cfg80211_disconnected() will take care of
+ * this later.
+ */
+ if (!wdev->current_bss)
+ wdev->ssid_len = 0;
+
return err;
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index a7e27e1140dd..22934885bd3f 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1656,32 +1656,34 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr

static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
- struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
+ struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
struct net *net = sock_net(cb->skb->sk);

xfrm_policy_walk_done(walk, net);
return 0;
}

+static int xfrm_dump_policy_start(struct netlink_callback *cb)
+{
+ struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
+
+ BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
+
+ xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
+ return 0;
+}
+
static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
+ struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
struct xfrm_dump_info info;

- BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
- sizeof(cb->args) - sizeof(cb->args[0]));
-
info.in_skb = cb->skb;
info.out_skb = skb;
info.nlmsg_seq = cb->nlh->nlmsg_seq;
info.nlmsg_flags = NLM_F_MULTI;

- if (!cb->args[0]) {
- cb->args[0] = 1;
- xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
- }
-
(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);

return skb->len;
@@ -2415,6 +2417,7 @@ static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {

static const struct xfrm_link {
int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
+ int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff *, struct netlink_callback *);
int (*done)(struct netlink_callback *);
const struct nla_policy *nla_pol;
@@ -2428,6 +2431,7 @@ static const struct xfrm_link {
[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
+ .start = xfrm_dump_policy_start,
.dump = xfrm_dump_policy,
.done = xfrm_dump_policy_done },
[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
@@ -2479,6 +2483,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)

{
struct netlink_dump_control c = {
+ .start = link->start,
.dump = link->dump,
.done = link->done,
};
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 6f337f00ba58..fe1d06d50392 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -329,6 +329,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
break;
case 0x10ec0225:
case 0x10ec0233:
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
case 0x10ec0282:
@@ -909,6 +910,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
{ 0x10ec0275, 0x1028, 0, "ALC3260" },
{ 0x10ec0899, 0x1028, 0, "ALC3861" },
{ 0x10ec0298, 0x1028, 0, "ALC3266" },
+ { 0x10ec0236, 0x1028, 0, "ALC3204" },
{ 0x10ec0256, 0x1028, 0, "ALC3246" },
{ 0x10ec0225, 0x1028, 0, "ALC3253" },
{ 0x10ec0295, 0x1028, 0, "ALC3254" },
@@ -3694,6 +3696,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
alc_process_coef_fw(codec, coef0255_1);
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
alc_process_coef_fw(codec, coef0255);
@@ -3777,6 +3780,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,


switch (codec->core.vendor_id) {
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
alc_write_coef_idx(codec, 0x45, 0xc489);
@@ -3885,6 +3889,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
case 0x10ec0295:
alc_process_coef_fw(codec, coef0225);
break;
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
@@ -3971,6 +3976,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
break;
@@ -4064,6 +4070,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
break;
@@ -4131,6 +4138,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
};

switch (codec->core.vendor_id) {
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
@@ -4335,6 +4343,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, alc255fw);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, alc256fw);
break;
@@ -5852,6 +5861,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
ALC225_STANDARD_PINS,
{0x12, 0xb7a60130},
{0x1b, 0x90170110}),
+ SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60140},
+ {0x14, 0x90170110},
+ {0x21, 0x02211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60140},
+ {0x14, 0x90170150},
+ {0x21, 0x02211020}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
{0x14, 0x90170110},
{0x21, 0x02211020}),
@@ -6226,6 +6243,7 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0255:
spec->codec_variant = ALC269_TYPE_ALC255;
break;
+ case 0x10ec0236:
case 0x10ec0256:
spec->codec_variant = ALC269_TYPE_ALC256;
spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
@@ -7205,6 +7223,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260),