Magellan Linux

Annotation of /trunk/kernel-magellan/patches-5.0/0102-5.0.3-all-fixes.patch

Revision 3329
Fri Apr 26 12:20:12 2019 UTC by niro
File size: 52675 bytes
-linux-5.0.3
1 niro 3329 diff --git a/Makefile b/Makefile
2     index bb2f7664594a..fb888787e7d1 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 5
8     PATCHLEVEL = 0
9     -SUBLEVEL = 2
10     +SUBLEVEL = 3
11     EXTRAVERSION =
12     NAME = Shy Crocodile
13    
14     diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
15     index dadb8f7e5a0d..2480feb07df3 100644
16     --- a/arch/x86/events/intel/core.c
17     +++ b/arch/x86/events/intel/core.c
18     @@ -3398,7 +3398,7 @@ tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
19     /*
20     * Without TFA we must not use PMC3.
21     */
22     - if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
23     + if (!allow_tsx_force_abort && test_bit(3, c->idxmsk) && idx >= 0) {
24     c = dyn_constraint(cpuc, c, idx);
25     c->idxmsk64 &= ~(1ULL << 3);
26     c->weight--;
27     @@ -4142,7 +4142,7 @@ static struct attribute *intel_pmu_caps_attrs[] = {
28     NULL
29     };
30    
31     -DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
32     +static DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
33    
34     static struct attribute *intel_pmu_attrs[] = {
35     &dev_attr_freeze_on_smi.attr,
36     diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
37     index a345d079f876..acd72e669c04 100644
38     --- a/arch/x86/events/perf_event.h
39     +++ b/arch/x86/events/perf_event.h
40     @@ -1032,12 +1032,12 @@ static inline int intel_pmu_init(void)
41     return 0;
42     }
43    
44     -static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu)
45     +static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
46     {
47     return 0;
48     }
49    
50     -static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc)
51     +static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
52     {
53     }
54    
55     diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
56     index ed5e42461094..ad48fd52cb53 100644
57     --- a/drivers/connector/cn_proc.c
58     +++ b/drivers/connector/cn_proc.c
59     @@ -250,6 +250,7 @@ void proc_coredump_connector(struct task_struct *task)
60     {
61     struct cn_msg *msg;
62     struct proc_event *ev;
63     + struct task_struct *parent;
64     __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
65    
66     if (atomic_read(&proc_event_num_listeners) < 1)
67     @@ -262,8 +263,14 @@ void proc_coredump_connector(struct task_struct *task)
68     ev->what = PROC_EVENT_COREDUMP;
69     ev->event_data.coredump.process_pid = task->pid;
70     ev->event_data.coredump.process_tgid = task->tgid;
71     - ev->event_data.coredump.parent_pid = task->real_parent->pid;
72     - ev->event_data.coredump.parent_tgid = task->real_parent->tgid;
73     +
74     + rcu_read_lock();
75     + if (pid_alive(task)) {
76     + parent = rcu_dereference(task->real_parent);
77     + ev->event_data.coredump.parent_pid = parent->pid;
78     + ev->event_data.coredump.parent_tgid = parent->tgid;
79     + }
80     + rcu_read_unlock();
81    
82     memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
83     msg->ack = 0; /* not used */
84     @@ -276,6 +283,7 @@ void proc_exit_connector(struct task_struct *task)
85     {
86     struct cn_msg *msg;
87     struct proc_event *ev;
88     + struct task_struct *parent;
89     __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
90    
91     if (atomic_read(&proc_event_num_listeners) < 1)
92     @@ -290,8 +298,14 @@ void proc_exit_connector(struct task_struct *task)
93     ev->event_data.exit.process_tgid = task->tgid;
94     ev->event_data.exit.exit_code = task->exit_code;
95     ev->event_data.exit.exit_signal = task->exit_signal;
96     - ev->event_data.exit.parent_pid = task->real_parent->pid;
97     - ev->event_data.exit.parent_tgid = task->real_parent->tgid;
98     +
99     + rcu_read_lock();
100     + if (pid_alive(task)) {
101     + parent = rcu_dereference(task->real_parent);
102     + ev->event_data.exit.parent_pid = parent->pid;
103     + ev->event_data.exit.parent_tgid = parent->tgid;
104     + }
105     + rcu_read_unlock();
106    
107     memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
108     msg->ack = 0; /* not used */
109     diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
110     index f4290f6b0c38..2323ba9310d9 100644
111     --- a/drivers/gpu/drm/drm_atomic_helper.c
112     +++ b/drivers/gpu/drm/drm_atomic_helper.c
113     @@ -1611,6 +1611,15 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
114     if (old_plane_state->fb != new_plane_state->fb)
115     return -EINVAL;
116    
117     + /*
118     + * FIXME: Since prepare_fb and cleanup_fb are always called on
119     + * the new_plane_state for async updates we need to block framebuffer
120     + * changes. This prevents use of a fb that's been cleaned up and
121     + * double cleanups from occuring.
122     + */
123     + if (old_plane_state->fb != new_plane_state->fb)
124     + return -EINVAL;
125     +
126     funcs = plane->helper_private;
127     if (!funcs->atomic_async_update)
128     return -EINVAL;
129     diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
130     index abb5d382f64d..ecef42bfe19d 100644
131     --- a/drivers/md/raid10.c
132     +++ b/drivers/md/raid10.c
133     @@ -4670,7 +4670,6 @@ read_more:
134     atomic_inc(&r10_bio->remaining);
135     read_bio->bi_next = NULL;
136     generic_make_request(read_bio);
137     - sector_nr += nr_sectors;
138     sectors_done += nr_sectors;
139     if (sector_nr <= last)
140     goto read_more;
141     diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
142     index 76cc163b3cf1..4a0ec8e87c7a 100644
143     --- a/drivers/net/dsa/mv88e6xxx/chip.c
144     +++ b/drivers/net/dsa/mv88e6xxx/chip.c
145     @@ -559,6 +559,9 @@ static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
146     goto restore_link;
147     }
148    
149     + if (speed == SPEED_MAX && chip->info->ops->port_max_speed_mode)
150     + mode = chip->info->ops->port_max_speed_mode(port);
151     +
152     if (chip->info->ops->port_set_pause) {
153     err = chip->info->ops->port_set_pause(chip, port, pause);
154     if (err)
155     @@ -3042,6 +3045,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
156     .port_set_duplex = mv88e6xxx_port_set_duplex,
157     .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
158     .port_set_speed = mv88e6341_port_set_speed,
159     + .port_max_speed_mode = mv88e6341_port_max_speed_mode,
160     .port_tag_remap = mv88e6095_port_tag_remap,
161     .port_set_frame_mode = mv88e6351_port_set_frame_mode,
162     .port_set_egress_floods = mv88e6352_port_set_egress_floods,
163     @@ -3360,6 +3364,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
164     .port_set_duplex = mv88e6xxx_port_set_duplex,
165     .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
166     .port_set_speed = mv88e6390_port_set_speed,
167     + .port_max_speed_mode = mv88e6390_port_max_speed_mode,
168     .port_tag_remap = mv88e6390_port_tag_remap,
169     .port_set_frame_mode = mv88e6351_port_set_frame_mode,
170     .port_set_egress_floods = mv88e6352_port_set_egress_floods,
171     @@ -3404,6 +3409,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
172     .port_set_duplex = mv88e6xxx_port_set_duplex,
173     .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
174     .port_set_speed = mv88e6390x_port_set_speed,
175     + .port_max_speed_mode = mv88e6390x_port_max_speed_mode,
176     .port_tag_remap = mv88e6390_port_tag_remap,
177     .port_set_frame_mode = mv88e6351_port_set_frame_mode,
178     .port_set_egress_floods = mv88e6352_port_set_egress_floods,
179     @@ -3448,6 +3454,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
180     .port_set_duplex = mv88e6xxx_port_set_duplex,
181     .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
182     .port_set_speed = mv88e6390_port_set_speed,
183     + .port_max_speed_mode = mv88e6390_port_max_speed_mode,
184     .port_tag_remap = mv88e6390_port_tag_remap,
185     .port_set_frame_mode = mv88e6351_port_set_frame_mode,
186     .port_set_egress_floods = mv88e6352_port_set_egress_floods,
187     @@ -3541,6 +3548,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
188     .port_set_duplex = mv88e6xxx_port_set_duplex,
189     .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
190     .port_set_speed = mv88e6390_port_set_speed,
191     + .port_max_speed_mode = mv88e6390_port_max_speed_mode,
192     .port_tag_remap = mv88e6390_port_tag_remap,
193     .port_set_frame_mode = mv88e6351_port_set_frame_mode,
194     .port_set_egress_floods = mv88e6352_port_set_egress_floods,
195     @@ -3672,6 +3680,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
196     .port_set_duplex = mv88e6xxx_port_set_duplex,
197     .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
198     .port_set_speed = mv88e6341_port_set_speed,
199     + .port_max_speed_mode = mv88e6341_port_max_speed_mode,
200     .port_tag_remap = mv88e6095_port_tag_remap,
201     .port_set_frame_mode = mv88e6351_port_set_frame_mode,
202     .port_set_egress_floods = mv88e6352_port_set_egress_floods,
203     @@ -3847,6 +3856,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
204     .port_set_duplex = mv88e6xxx_port_set_duplex,
205     .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
206     .port_set_speed = mv88e6390_port_set_speed,
207     + .port_max_speed_mode = mv88e6390_port_max_speed_mode,
208     .port_tag_remap = mv88e6390_port_tag_remap,
209     .port_set_frame_mode = mv88e6351_port_set_frame_mode,
210     .port_set_egress_floods = mv88e6352_port_set_egress_floods,
211     @@ -3895,6 +3905,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
212     .port_set_duplex = mv88e6xxx_port_set_duplex,
213     .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
214     .port_set_speed = mv88e6390x_port_set_speed,
215     + .port_max_speed_mode = mv88e6390x_port_max_speed_mode,
216     .port_tag_remap = mv88e6390_port_tag_remap,
217     .port_set_frame_mode = mv88e6351_port_set_frame_mode,
218     .port_set_egress_floods = mv88e6352_port_set_egress_floods,
219     diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
220     index 546651d8c3e1..dfb1af65c205 100644
221     --- a/drivers/net/dsa/mv88e6xxx/chip.h
222     +++ b/drivers/net/dsa/mv88e6xxx/chip.h
223     @@ -377,6 +377,9 @@ struct mv88e6xxx_ops {
224     */
225     int (*port_set_speed)(struct mv88e6xxx_chip *chip, int port, int speed);
226    
227     + /* What interface mode should be used for maximum speed? */
228     + phy_interface_t (*port_max_speed_mode)(int port);
229     +
230     int (*port_tag_remap)(struct mv88e6xxx_chip *chip, int port);
231    
232     int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port,
233     diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
234     index 184c2b1b3115..5e921bb6c214 100644
235     --- a/drivers/net/dsa/mv88e6xxx/port.c
236     +++ b/drivers/net/dsa/mv88e6xxx/port.c
237     @@ -312,6 +312,14 @@ int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
238     return mv88e6xxx_port_set_speed(chip, port, speed, !port, true);
239     }
240    
241     +phy_interface_t mv88e6341_port_max_speed_mode(int port)
242     +{
243     + if (port == 5)
244     + return PHY_INTERFACE_MODE_2500BASEX;
245     +
246     + return PHY_INTERFACE_MODE_NA;
247     +}
248     +
249     /* Support 10, 100, 200, 1000 Mbps (e.g. 88E6352 family) */
250     int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
251     {
252     @@ -345,6 +353,14 @@ int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
253     return mv88e6xxx_port_set_speed(chip, port, speed, true, true);
254     }
255    
256     +phy_interface_t mv88e6390_port_max_speed_mode(int port)
257     +{
258     + if (port == 9 || port == 10)
259     + return PHY_INTERFACE_MODE_2500BASEX;
260     +
261     + return PHY_INTERFACE_MODE_NA;
262     +}
263     +
264     /* Support 10, 100, 200, 1000, 2500, 10000 Mbps (e.g. 88E6190X) */
265     int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
266     {
267     @@ -360,6 +376,14 @@ int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
268     return mv88e6xxx_port_set_speed(chip, port, speed, true, true);
269     }
270    
271     +phy_interface_t mv88e6390x_port_max_speed_mode(int port)
272     +{
273     + if (port == 9 || port == 10)
274     + return PHY_INTERFACE_MODE_XAUI;
275     +
276     + return PHY_INTERFACE_MODE_NA;
277     +}
278     +
279     int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
280     phy_interface_t mode)
281     {
282     diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
283     index 4aadf321edb7..c7bed263a0f4 100644
284     --- a/drivers/net/dsa/mv88e6xxx/port.h
285     +++ b/drivers/net/dsa/mv88e6xxx/port.h
286     @@ -285,6 +285,10 @@ int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
287     int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
288     int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
289    
290     +phy_interface_t mv88e6341_port_max_speed_mode(int port);
291     +phy_interface_t mv88e6390_port_max_speed_mode(int port);
292     +phy_interface_t mv88e6390x_port_max_speed_mode(int port);
293     +
294     int mv88e6xxx_port_set_state(struct mv88e6xxx_chip *chip, int port, u8 state);
295    
296     int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map);
297     diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
298     index 36eab37d8a40..09c774fe8853 100644
299     --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
300     +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
301     @@ -192,6 +192,7 @@ struct hnae3_ae_dev {
302     const struct hnae3_ae_ops *ops;
303     struct list_head node;
304     u32 flag;
305     + u8 override_pci_need_reset; /* fix to stop multiple reset happening */
306     enum hnae3_dev_type dev_type;
307     enum hnae3_reset_type reset_type;
308     void *priv;
309     diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
310     index 1bf7a5f116a0..d84c50068f66 100644
311     --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
312     +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
313     @@ -1852,7 +1852,9 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
314    
315     /* request the reset */
316     if (ae_dev->ops->reset_event) {
317     - ae_dev->ops->reset_event(pdev, NULL);
318     + if (!ae_dev->override_pci_need_reset)
319     + ae_dev->ops->reset_event(pdev, NULL);
320     +
321     return PCI_ERS_RESULT_RECOVERED;
322     }
323    
324     @@ -2476,6 +2478,8 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
325     desc = &ring->desc[ring->next_to_clean];
326     desc_cb = &ring->desc_cb[ring->next_to_clean];
327     bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
328     + /* make sure HW write desc complete */
329     + dma_rmb();
330     if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))
331     return -ENXIO;
332    
333     diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
334     index d0f654123b9b..efb6c1a25171 100644
335     --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
336     +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
337     @@ -1259,8 +1259,10 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
338     hclge_handle_all_ras_errors(hdev);
339     } else {
340     if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
341     - hdev->pdev->revision < 0x21)
342     + hdev->pdev->revision < 0x21) {
343     + ae_dev->override_pci_need_reset = 1;
344     return PCI_ERS_RESULT_RECOVERED;
345     + }
346     }
347    
348     if (status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
349     @@ -1269,8 +1271,11 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
350     }
351    
352     if (status & HCLGE_RAS_REG_NFE_MASK ||
353     - status & HCLGE_RAS_REG_ROCEE_ERR_MASK)
354     + status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
355     + ae_dev->override_pci_need_reset = 0;
356     return PCI_ERS_RESULT_NEED_RESET;
357     + }
358     + ae_dev->override_pci_need_reset = 1;
359    
360     return PCI_ERS_RESULT_RECOVERED;
361     }
362     diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
363     index e65bc3c95630..857588e2488d 100644
364     --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
365     +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
366     @@ -2645,6 +2645,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
367     if (!priv->cmd.context)
368     return -ENOMEM;
369    
370     + if (mlx4_is_mfunc(dev))
371     + mutex_lock(&priv->cmd.slave_cmd_mutex);
372     down_write(&priv->cmd.switch_sem);
373     for (i = 0; i < priv->cmd.max_cmds; ++i) {
374     priv->cmd.context[i].token = i;
375     @@ -2670,6 +2672,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
376     down(&priv->cmd.poll_sem);
377     priv->cmd.use_events = 1;
378     up_write(&priv->cmd.switch_sem);
379     + if (mlx4_is_mfunc(dev))
380     + mutex_unlock(&priv->cmd.slave_cmd_mutex);
381    
382     return err;
383     }
384     @@ -2682,6 +2686,8 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
385     struct mlx4_priv *priv = mlx4_priv(dev);
386     int i;
387    
388     + if (mlx4_is_mfunc(dev))
389     + mutex_lock(&priv->cmd.slave_cmd_mutex);
390     down_write(&priv->cmd.switch_sem);
391     priv->cmd.use_events = 0;
392    
393     @@ -2689,9 +2695,12 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
394     down(&priv->cmd.event_sem);
395    
396     kfree(priv->cmd.context);
397     + priv->cmd.context = NULL;
398    
399     up(&priv->cmd.poll_sem);
400     up_write(&priv->cmd.switch_sem);
401     + if (mlx4_is_mfunc(dev))
402     + mutex_unlock(&priv->cmd.slave_cmd_mutex);
403     }
404    
405     struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
406     diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
407     index eb13d3618162..4356f3a58002 100644
408     --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
409     +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
410     @@ -2719,13 +2719,13 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
411     int total_pages;
412     int total_mem;
413     int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
414     + int tot;
415    
416     sq_size = 1 << (log_sq_size + log_sq_sride + 4);
417     rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
418     total_mem = sq_size + rq_size;
419     - total_pages =
420     - roundup_pow_of_two((total_mem + (page_offset << 6)) >>
421     - page_shift);
422     + tot = (total_mem + (page_offset << 6)) >> page_shift;
423     + total_pages = !tot ? 1 : roundup_pow_of_two(tot);
424    
425     return total_pages;
426     }
427     diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
428     index 4d1b4a24907f..13e6bf13ac4d 100644
429     --- a/drivers/net/ethernet/microchip/lan743x_main.c
430     +++ b/drivers/net/ethernet/microchip/lan743x_main.c
431     @@ -585,8 +585,7 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
432    
433     if (adapter->csr.flags &
434     LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
435     - flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
436     - LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
437     + flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
438     LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
439     LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
440     LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
441     @@ -599,12 +598,6 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
442     /* map TX interrupt to vector */
443     int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
444     lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
445     - if (flags &
446     - LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
447     - int_vec_en_auto_clr |= INT_VEC_EN_(vector);
448     - lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
449     - int_vec_en_auto_clr);
450     - }
451    
452     /* Remove TX interrupt from shared mask */
453     intr->vector_list[0].int_mask &= ~int_bit;
454     @@ -1902,7 +1895,17 @@ static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
455     return ((++index) % rx->ring_size);
456     }
457    
458     -static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
459     +static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx)
460     +{
461     + int length = 0;
462     +
463     + length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
464     + return __netdev_alloc_skb(rx->adapter->netdev,
465     + length, GFP_ATOMIC | GFP_DMA);
466     +}
467     +
468     +static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
469     + struct sk_buff *skb)
470     {
471     struct lan743x_rx_buffer_info *buffer_info;
472     struct lan743x_rx_descriptor *descriptor;
473     @@ -1911,9 +1914,7 @@ static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
474     length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
475     descriptor = &rx->ring_cpu_ptr[index];
476     buffer_info = &rx->buffer_info[index];
477     - buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev,
478     - length,
479     - GFP_ATOMIC | GFP_DMA);
480     + buffer_info->skb = skb;
481     if (!(buffer_info->skb))
482     return -ENOMEM;
483     buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,
484     @@ -2060,8 +2061,19 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
485     /* packet is available */
486     if (first_index == last_index) {
487     /* single buffer packet */
488     + struct sk_buff *new_skb = NULL;
489     int packet_length;
490    
491     + new_skb = lan743x_rx_allocate_skb(rx);
492     + if (!new_skb) {
493     + /* failed to allocate next skb.
494     + * Memory is very low.
495     + * Drop this packet and reuse buffer.
496     + */
497     + lan743x_rx_reuse_ring_element(rx, first_index);
498     + goto process_extension;
499     + }
500     +
501     buffer_info = &rx->buffer_info[first_index];
502     skb = buffer_info->skb;
503     descriptor = &rx->ring_cpu_ptr[first_index];
504     @@ -2081,7 +2093,7 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
505     skb_put(skb, packet_length - 4);
506     skb->protocol = eth_type_trans(skb,
507     rx->adapter->netdev);
508     - lan743x_rx_allocate_ring_element(rx, first_index);
509     + lan743x_rx_init_ring_element(rx, first_index, new_skb);
510     } else {
511     int index = first_index;
512    
513     @@ -2094,26 +2106,23 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
514     if (first_index <= last_index) {
515     while ((index >= first_index) &&
516     (index <= last_index)) {
517     - lan743x_rx_release_ring_element(rx,
518     - index);
519     - lan743x_rx_allocate_ring_element(rx,
520     - index);
521     + lan743x_rx_reuse_ring_element(rx,
522     + index);
523     index = lan743x_rx_next_index(rx,
524     index);
525     }
526     } else {
527     while ((index >= first_index) ||
528     (index <= last_index)) {
529     - lan743x_rx_release_ring_element(rx,
530     - index);
531     - lan743x_rx_allocate_ring_element(rx,
532     - index);
533     + lan743x_rx_reuse_ring_element(rx,
534     + index);
535     index = lan743x_rx_next_index(rx,
536     index);
537     }
538     }
539     }
540    
541     +process_extension:
542     if (extension_index >= 0) {
543     descriptor = &rx->ring_cpu_ptr[extension_index];
544     buffer_info = &rx->buffer_info[extension_index];
545     @@ -2290,7 +2299,9 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
546    
547     rx->last_head = 0;
548     for (index = 0; index < rx->ring_size; index++) {
549     - ret = lan743x_rx_allocate_ring_element(rx, index);
550     + struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx);
551     +
552     + ret = lan743x_rx_init_ring_element(rx, index, new_skb);
553     if (ret)
554     goto cleanup;
555     }
556     diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
557     index d28c8f9ca55b..8154b38c08f7 100644
558     --- a/drivers/net/ethernet/renesas/ravb_main.c
559     +++ b/drivers/net/ethernet/renesas/ravb_main.c
560     @@ -458,7 +458,7 @@ static int ravb_dmac_init(struct net_device *ndev)
561     RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
562    
563     /* Set FIFO size */
564     - ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
565     + ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
566    
567     /* Timestamp enable */
568     ravb_write(ndev, TCCR_TFEN, TCCR);
569     diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
570     index 8f09edd811e9..50c60550f295 100644
571     --- a/drivers/net/ppp/pptp.c
572     +++ b/drivers/net/ppp/pptp.c
573     @@ -532,6 +532,7 @@ static void pptp_sock_destruct(struct sock *sk)
574     pppox_unbind_sock(sk);
575     }
576     skb_queue_purge(&sk->sk_receive_queue);
577     + dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
578     }
579    
580     static int pptp_create(struct net *net, struct socket *sock, int kern)
581     diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
582     index 2aae11feff0c..d6fb6a89f9b3 100644
583     --- a/drivers/net/vxlan.c
584     +++ b/drivers/net/vxlan.c
585     @@ -1657,6 +1657,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
586     goto drop;
587     }
588    
589     + rcu_read_lock();
590     +
591     + if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
592     + rcu_read_unlock();
593     + atomic_long_inc(&vxlan->dev->rx_dropped);
594     + goto drop;
595     + }
596     +
597     stats = this_cpu_ptr(vxlan->dev->tstats);
598     u64_stats_update_begin(&stats->syncp);
599     stats->rx_packets++;
600     @@ -1664,6 +1672,9 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
601     u64_stats_update_end(&stats->syncp);
602    
603     gro_cells_receive(&vxlan->gro_cells, skb);
604     +
605     + rcu_read_unlock();
606     +
607     return 0;
608    
609     drop:
610     @@ -2693,6 +2704,8 @@ static void vxlan_uninit(struct net_device *dev)
611     {
612     struct vxlan_dev *vxlan = netdev_priv(dev);
613    
614     + gro_cells_destroy(&vxlan->gro_cells);
615     +
616     vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
617    
618     free_percpu(dev->tstats);
619     @@ -3794,7 +3807,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
620    
621     vxlan_flush(vxlan, true);
622    
623     - gro_cells_destroy(&vxlan->gro_cells);
624     list_del(&vxlan->next);
625     unregister_netdevice_queue(dev, head);
626     }
627     diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
628     index bba56b39dcc5..ae2b45e75847 100644
629     --- a/fs/f2fs/file.c
630     +++ b/fs/f2fs/file.c
631     @@ -1750,10 +1750,12 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
632    
633     down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
634    
635     - if (!get_dirty_pages(inode))
636     - goto skip_flush;
637     -
638     - f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
639     + /*
640     + * Should wait end_io to count F2FS_WB_CP_DATA correctly by
641     + * f2fs_is_atomic_file.
642     + */
643     + if (get_dirty_pages(inode))
644     + f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
645     "Unexpected flush for atomic writes: ino=%lu, npages=%u",
646     inode->i_ino, get_dirty_pages(inode));
647     ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
648     @@ -1761,7 +1763,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
649     up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
650     goto out;
651     }
652     -skip_flush:
653     +
654     set_inode_flag(inode, FI_ATOMIC_FILE);
655     clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
656     up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
657     diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
658     index acf45ddbe924..e095fb871d91 100644
659     --- a/net/core/gro_cells.c
660     +++ b/net/core/gro_cells.c
661     @@ -13,22 +13,36 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
662     {
663     struct net_device *dev = skb->dev;
664     struct gro_cell *cell;
665     + int res;
666    
667     - if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev))
668     - return netif_rx(skb);
669     + rcu_read_lock();
670     + if (unlikely(!(dev->flags & IFF_UP)))
671     + goto drop;
672     +
673     + if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
674     + res = netif_rx(skb);
675     + goto unlock;
676     + }
677    
678     cell = this_cpu_ptr(gcells->cells);
679    
680     if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
681     +drop:
682     atomic_long_inc(&dev->rx_dropped);
683     kfree_skb(skb);
684     - return NET_RX_DROP;
685     + res = NET_RX_DROP;
686     + goto unlock;
687     }
688    
689     __skb_queue_tail(&cell->napi_skbs, skb);
690     if (skb_queue_len(&cell->napi_skbs) == 1)
691     napi_schedule(&cell->napi);
692     - return NET_RX_SUCCESS;
693     +
694     + res = NET_RX_SUCCESS;
695     +
696     +unlock:
697     + rcu_read_unlock();
698     + return res;
699     }
700     EXPORT_SYMBOL(gro_cells_receive);
701    
702     diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
703     index b8cd43c9ed5b..a97bf326b231 100644
704     --- a/net/hsr/hsr_device.c
705     +++ b/net/hsr/hsr_device.c
706     @@ -94,9 +94,8 @@ static void hsr_check_announce(struct net_device *hsr_dev,
707     && (old_operstate != IF_OPER_UP)) {
708     /* Went up */
709     hsr->announce_count = 0;
710     - hsr->announce_timer.expires = jiffies +
711     - msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
712     - add_timer(&hsr->announce_timer);
713     + mod_timer(&hsr->announce_timer,
714     + jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
715     }
716    
717     if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP))
718     @@ -332,6 +331,7 @@ static void hsr_announce(struct timer_list *t)
719     {
720     struct hsr_priv *hsr;
721     struct hsr_port *master;
722     + unsigned long interval;
723    
724     hsr = from_timer(hsr, t, announce_timer);
725    
726     @@ -343,18 +343,16 @@ static void hsr_announce(struct timer_list *t)
727     hsr->protVersion);
728     hsr->announce_count++;
729    
730     - hsr->announce_timer.expires = jiffies +
731     - msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
732     + interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
733     } else {
734     send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK,
735     hsr->protVersion);
736    
737     - hsr->announce_timer.expires = jiffies +
738     - msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
739     + interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
740     }
741    
742     if (is_admin_up(master->dev))
743     - add_timer(&hsr->announce_timer);
744     + mod_timer(&hsr->announce_timer, jiffies + interval);
745    
746     rcu_read_unlock();
747     }
748     @@ -486,7 +484,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
749    
750     res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
751     if (res)
752     - return res;
753     + goto err_add_port;
754    
755     res = register_netdevice(hsr_dev);
756     if (res)
757     @@ -506,6 +504,8 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
758     fail:
759     hsr_for_each_port(hsr, port)
760     hsr_del_port(port);
761     +err_add_port:
762     + hsr_del_node(&hsr->self_node_db);
763    
764     return res;
765     }
766     diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
767     index 286ceb41ac0c..9af16cb68f76 100644
768     --- a/net/hsr/hsr_framereg.c
769     +++ b/net/hsr/hsr_framereg.c
770     @@ -124,6 +124,18 @@ int hsr_create_self_node(struct list_head *self_node_db,
771     return 0;
772     }
773    
774     +void hsr_del_node(struct list_head *self_node_db)
775     +{
776     + struct hsr_node *node;
777     +
778     + rcu_read_lock();
779     + node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
780     + rcu_read_unlock();
781     + if (node) {
782     + list_del_rcu(&node->mac_list);
783     + kfree(node);
784     + }
785     +}
786    
787     /* Allocate an hsr_node and add it to node_db. 'addr' is the node's AddressA;
788     * seq_out is used to initialize filtering of outgoing duplicate frames
789     diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
790     index 370b45998121..531fd3dfcac1 100644
791     --- a/net/hsr/hsr_framereg.h
792     +++ b/net/hsr/hsr_framereg.h
793     @@ -16,6 +16,7 @@
794    
795     struct hsr_node;
796    
797     +void hsr_del_node(struct list_head *self_node_db);
798     struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
799     u16 seq_out);
800     struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
801     diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
802     index 437070d1ffb1..79e98e21cdd7 100644
803     --- a/net/ipv4/fou.c
804     +++ b/net/ipv4/fou.c
805     @@ -1024,7 +1024,7 @@ static int gue_err(struct sk_buff *skb, u32 info)
806     int ret;
807    
808     len = sizeof(struct udphdr) + sizeof(struct guehdr);
809     - if (!pskb_may_pull(skb, len))
810     + if (!pskb_may_pull(skb, transport_offset + len))
811     return -EINVAL;
812    
813     guehdr = (struct guehdr *)&udp_hdr(skb)[1];
814     @@ -1059,7 +1059,7 @@ static int gue_err(struct sk_buff *skb, u32 info)
815    
816     optlen = guehdr->hlen << 2;
817    
818     - if (!pskb_may_pull(skb, len + optlen))
819     + if (!pskb_may_pull(skb, transport_offset + len + optlen))
820     return -EINVAL;
821    
822     guehdr = (struct guehdr *)&udp_hdr(skb)[1];
823     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
824     index 7bb9128c8363..e04cdb58a602 100644
825     --- a/net/ipv4/route.c
826     +++ b/net/ipv4/route.c
827     @@ -1303,6 +1303,10 @@ static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
828     if (fnhe->fnhe_daddr == daddr) {
829     rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
830     fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
831     + /* set fnhe_daddr to 0 to ensure it won't bind with
832     + * new dsts in rt_bind_exception().
833     + */
834     + fnhe->fnhe_daddr = 0;
835     fnhe_flush_routes(fnhe);
836     kfree_rcu(fnhe, rcu);
837     break;
838     @@ -2144,12 +2148,13 @@ int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
839     int our = 0;
840     int err = -EINVAL;
841    
842     - if (in_dev)
843     - our = ip_check_mc_rcu(in_dev, daddr, saddr,
844     - ip_hdr(skb)->protocol);
845     + if (!in_dev)
846     + return err;
847     + our = ip_check_mc_rcu(in_dev, daddr, saddr,
848     + ip_hdr(skb)->protocol);
849    
850     /* check l3 master if no match yet */
851     - if ((!in_dev || !our) && netif_is_l3_slave(dev)) {
852     + if (!our && netif_is_l3_slave(dev)) {
853     struct in_device *l3_in_dev;
854    
855     l3_in_dev = __in_dev_get_rcu(skb->dev);
856     diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
857     index 606f868d9f3f..e531344611a0 100644
858     --- a/net/ipv4/syncookies.c
859     +++ b/net/ipv4/syncookies.c
860     @@ -216,7 +216,12 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
861     refcount_set(&req->rsk_refcnt, 1);
862     tcp_sk(child)->tsoffset = tsoff;
863     sock_rps_save_rxhash(child, skb);
864     - inet_csk_reqsk_queue_add(sk, req, child);
865     + if (!inet_csk_reqsk_queue_add(sk, req, child)) {
866     + bh_unlock_sock(child);
867     + sock_put(child);
868     + child = NULL;
869     + reqsk_put(req);
870     + }
871     } else {
872     reqsk_free(req);
873     }
874     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
875     index cf3c5095c10e..ce365cbba1d1 100644
876     --- a/net/ipv4/tcp.c
877     +++ b/net/ipv4/tcp.c
878     @@ -1914,6 +1914,11 @@ static int tcp_inq_hint(struct sock *sk)
879     inq = tp->rcv_nxt - tp->copied_seq;
880     release_sock(sk);
881     }
882     + /* After receiving a FIN, tell the user-space to continue reading
883     + * by returning a non-zero inq.
884     + */
885     + if (inq == 0 && sock_flag(sk, SOCK_DONE))
886     + inq = 1;
887     return inq;
888     }
889    
890     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
891     index 76858b14ebe9..7b1ef897b398 100644
892     --- a/net/ipv4/tcp_input.c
893     +++ b/net/ipv4/tcp_input.c
894     @@ -6519,7 +6519,13 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
895     af_ops->send_synack(fastopen_sk, dst, &fl, req,
896     &foc, TCP_SYNACK_FASTOPEN);
897     /* Add the child socket directly into the accept queue */
898     - inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
899     + if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
900     + reqsk_fastopen_remove(fastopen_sk, req, false);
901     + bh_unlock_sock(fastopen_sk);
902     + sock_put(fastopen_sk);
903     + reqsk_put(req);
904     + goto drop;
905     + }
906     sk->sk_data_ready(sk);
907     bh_unlock_sock(fastopen_sk);
908     sock_put(fastopen_sk);
909     diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
910     index ec3cea9d6828..1aae9ab57fe9 100644
911     --- a/net/ipv4/tcp_ipv4.c
912     +++ b/net/ipv4/tcp_ipv4.c
913     @@ -1734,15 +1734,8 @@ EXPORT_SYMBOL(tcp_add_backlog);
914     int tcp_filter(struct sock *sk, struct sk_buff *skb)
915     {
916     struct tcphdr *th = (struct tcphdr *)skb->data;
917     - unsigned int eaten = skb->len;
918     - int err;
919    
920     - err = sk_filter_trim_cap(sk, skb, th->doff * 4);
921     - if (!err) {
922     - eaten -= skb->len;
923     - TCP_SKB_CB(skb)->end_seq -= eaten;
924     - }
925     - return err;
926     + return sk_filter_trim_cap(sk, skb, th->doff * 4);
927     }
928     EXPORT_SYMBOL(tcp_filter);
929    
930     diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
931     index 867474abe269..ec4e2ed95f36 100644
932     --- a/net/ipv6/fou6.c
933     +++ b/net/ipv6/fou6.c
934     @@ -94,7 +94,7 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
935     int ret;
936    
937     len = sizeof(struct udphdr) + sizeof(struct guehdr);
938     - if (!pskb_may_pull(skb, len))
939     + if (!pskb_may_pull(skb, transport_offset + len))
940     return -EINVAL;
941    
942     guehdr = (struct guehdr *)&udp_hdr(skb)[1];
943     @@ -129,7 +129,7 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
944    
945     optlen = guehdr->hlen << 2;
946    
947     - if (!pskb_may_pull(skb, len + optlen))
948     + if (!pskb_may_pull(skb, transport_offset + len + optlen))
949     return -EINVAL;
950    
951     guehdr = (struct guehdr *)&udp_hdr(skb)[1];
952     diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
953     index 09e440e8dfae..07e21a82ce4c 100644
954     --- a/net/ipv6/sit.c
955     +++ b/net/ipv6/sit.c
956     @@ -778,8 +778,9 @@ static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
957     pbw0 = tunnel->ip6rd.prefixlen >> 5;
958     pbi0 = tunnel->ip6rd.prefixlen & 0x1f;
959    
960     - d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
961     - tunnel->ip6rd.relay_prefixlen;
962     + d = tunnel->ip6rd.relay_prefixlen < 32 ?
963     + (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
964     + tunnel->ip6rd.relay_prefixlen : 0;
965    
966     pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
967     if (pbi1 > 0)
968     diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
969     index 0ae6899edac0..37a69df17cab 100644
970     --- a/net/l2tp/l2tp_ip6.c
971     +++ b/net/l2tp/l2tp_ip6.c
972     @@ -674,9 +674,6 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
973     if (flags & MSG_OOB)
974     goto out;
975    
976     - if (addr_len)
977     - *addr_len = sizeof(*lsa);
978     -
979     if (flags & MSG_ERRQUEUE)
980     return ipv6_recv_error(sk, msg, len, addr_len);
981    
982     @@ -706,6 +703,7 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
983     lsa->l2tp_conn_id = 0;
984     if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
985     lsa->l2tp_scope_id = inet6_iif(skb);
986     + *addr_len = sizeof(*lsa);
987     }
988    
989     if (np->rxopt.all)
990     diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
991     index b2adfa825363..5cf6d9f4761d 100644
992     --- a/net/rxrpc/conn_client.c
993     +++ b/net/rxrpc/conn_client.c
994     @@ -353,7 +353,7 @@ static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
995     * normally have to take channel_lock but we do this before anyone else
996     * can see the connection.
997     */
998     - list_add_tail(&call->chan_wait_link, &candidate->waiting_calls);
999     + list_add(&call->chan_wait_link, &candidate->waiting_calls);
1000    
1001     if (cp->exclusive) {
1002     call->conn = candidate;
1003     @@ -432,7 +432,7 @@ found_extant_conn:
1004     call->conn = conn;
1005     call->security_ix = conn->security_ix;
1006     call->service_id = conn->service_id;
1007     - list_add(&call->chan_wait_link, &conn->waiting_calls);
1008     + list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
1009     spin_unlock(&conn->channel_lock);
1010     _leave(" = 0 [extant %d]", conn->debug_id);
1011     return 0;
1012     diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
1013     index 12ca9d13db83..bf67ae5ac1c3 100644
1014     --- a/net/sched/cls_flower.c
1015     +++ b/net/sched/cls_flower.c
1016     @@ -1327,46 +1327,46 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
1017     if (err < 0)
1018     goto errout;
1019    
1020     - if (!handle) {
1021     - handle = 1;
1022     - err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1023     - INT_MAX, GFP_KERNEL);
1024     - } else if (!fold) {
1025     - /* user specifies a handle and it doesn't exist */
1026     - err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1027     - handle, GFP_KERNEL);
1028     - }
1029     - if (err)
1030     - goto errout;
1031     - fnew->handle = handle;
1032     -
1033     if (tb[TCA_FLOWER_FLAGS]) {
1034     fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
1035    
1036     if (!tc_flags_valid(fnew->flags)) {
1037     err = -EINVAL;
1038     - goto errout_idr;
1039     + goto errout;
1040     }
1041     }
1042    
1043     err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
1044     tp->chain->tmplt_priv, extack);
1045     if (err)
1046     - goto errout_idr;
1047     + goto errout;
1048    
1049     err = fl_check_assign_mask(head, fnew, fold, mask);
1050     if (err)
1051     - goto errout_idr;
1052     + goto errout;
1053     +
1054     + if (!handle) {
1055     + handle = 1;
1056     + err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1057     + INT_MAX, GFP_KERNEL);
1058     + } else if (!fold) {
1059     + /* user specifies a handle and it doesn't exist */
1060     + err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1061     + handle, GFP_KERNEL);
1062     + }
1063     + if (err)
1064     + goto errout_mask;
1065     + fnew->handle = handle;
1066    
1067     if (!fold && __fl_lookup(fnew->mask, &fnew->mkey)) {
1068     err = -EEXIST;
1069     - goto errout_mask;
1070     + goto errout_idr;
1071     }
1072    
1073     err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
1074     fnew->mask->filter_ht_params);
1075     if (err)
1076     - goto errout_mask;
1077     + goto errout_idr;
1078    
1079     if (!tc_skip_hw(fnew->flags)) {
1080     err = fl_hw_replace_filter(tp, fnew, extack);
1081     @@ -1405,12 +1405,13 @@ errout_mask_ht:
1082     rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
1083     fnew->mask->filter_ht_params);
1084    
1085     -errout_mask:
1086     - fl_mask_put(head, fnew->mask, false);
1087     -
1088     errout_idr:
1089     if (!fold)
1090     idr_remove(&head->handle_idr, fnew->handle);
1091     +
1092     +errout_mask:
1093     + fl_mask_put(head, fnew->mask, false);
1094     +
1095     errout:
1096     tcf_exts_destroy(&fnew->exts);
1097     kfree(fnew);
1098     diff --git a/net/sctp/stream.c b/net/sctp/stream.c
1099     index 2936ed17bf9e..3b47457862cc 100644
1100     --- a/net/sctp/stream.c
1101     +++ b/net/sctp/stream.c
1102     @@ -230,8 +230,6 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
1103     for (i = 0; i < stream->outcnt; i++)
1104     SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
1105    
1106     - sched->init(stream);
1107     -
1108     in:
1109     sctp_stream_interleave_init(stream);
1110     if (!incnt)
1111     diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
1112     index 3ae3a33da70b..602715fc9a75 100644
1113     --- a/net/vmw_vsock/virtio_transport_common.c
1114     +++ b/net/vmw_vsock/virtio_transport_common.c
1115     @@ -662,6 +662,8 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
1116     */
1117     static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
1118     {
1119     + const struct virtio_transport *t;
1120     + struct virtio_vsock_pkt *reply;
1121     struct virtio_vsock_pkt_info info = {
1122     .op = VIRTIO_VSOCK_OP_RST,
1123     .type = le16_to_cpu(pkt->hdr.type),
1124     @@ -672,15 +674,21 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
1125     if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
1126     return 0;
1127    
1128     - pkt = virtio_transport_alloc_pkt(&info, 0,
1129     - le64_to_cpu(pkt->hdr.dst_cid),
1130     - le32_to_cpu(pkt->hdr.dst_port),
1131     - le64_to_cpu(pkt->hdr.src_cid),
1132     - le32_to_cpu(pkt->hdr.src_port));
1133     - if (!pkt)
1134     + reply = virtio_transport_alloc_pkt(&info, 0,
1135     + le64_to_cpu(pkt->hdr.dst_cid),
1136     + le32_to_cpu(pkt->hdr.dst_port),
1137     + le64_to_cpu(pkt->hdr.src_cid),
1138     + le32_to_cpu(pkt->hdr.src_port));
1139     + if (!reply)
1140     return -ENOMEM;
1141    
1142     - return virtio_transport_get_ops()->send_pkt(pkt);
1143     + t = virtio_transport_get_ops();
1144     + if (!t) {
1145     + virtio_transport_free_pkt(reply);
1146     + return -ENOTCONN;
1147     + }
1148     +
1149     + return t->send_pkt(reply);
1150     }
1151    
1152     static void virtio_transport_wait_close(struct sock *sk, long timeout)
1153     diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
1154     index eff31348e20b..20a511398389 100644
1155     --- a/net/x25/af_x25.c
1156     +++ b/net/x25/af_x25.c
1157     @@ -820,8 +820,13 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
1158     sock->state = SS_CONNECTED;
1159     rc = 0;
1160     out_put_neigh:
1161     - if (rc)
1162     + if (rc) {
1163     + read_lock_bh(&x25_list_lock);
1164     x25_neigh_put(x25->neighbour);
1165     + x25->neighbour = NULL;
1166     + read_unlock_bh(&x25_list_lock);
1167     + x25->state = X25_STATE_0;
1168     + }
1169     out_put_route:
1170     x25_route_put(rt);
1171     out:
1172     diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
1173     index d91874275d2c..5b46e8dcc2dd 100644
1174     --- a/sound/firewire/bebob/bebob.c
1175     +++ b/sound/firewire/bebob/bebob.c
1176     @@ -448,7 +448,19 @@ static const struct ieee1394_device_id bebob_id_table[] = {
1177     /* Focusrite, SaffirePro 26 I/O */
1178     SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000003, &saffirepro_26_spec),
1179     /* Focusrite, SaffirePro 10 I/O */
1180     - SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000006, &saffirepro_10_spec),
1181     + {
1182     + // The combination of vendor_id and model_id is the same as the
1183     + // same as the one of Liquid Saffire 56.
1184     + .match_flags = IEEE1394_MATCH_VENDOR_ID |
1185     + IEEE1394_MATCH_MODEL_ID |
1186     + IEEE1394_MATCH_SPECIFIER_ID |
1187     + IEEE1394_MATCH_VERSION,
1188     + .vendor_id = VEN_FOCUSRITE,
1189     + .model_id = 0x000006,
1190     + .specifier_id = 0x00a02d,
1191     + .version = 0x010001,
1192     + .driver_data = (kernel_ulong_t)&saffirepro_10_spec,
1193     + },
1194     /* Focusrite, Saffire(no label and LE) */
1195     SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, MODEL_FOCUSRITE_SAFFIRE_BOTH,
1196     &saffire_spec),
1197     diff --git a/sound/firewire/motu/amdtp-motu.c b/sound/firewire/motu/amdtp-motu.c
1198     index f0555a24d90e..6c9b743ea74b 100644
1199     --- a/sound/firewire/motu/amdtp-motu.c
1200     +++ b/sound/firewire/motu/amdtp-motu.c
1201     @@ -136,7 +136,9 @@ static void read_pcm_s32(struct amdtp_stream *s,
1202     byte = (u8 *)buffer + p->pcm_byte_offset;
1203    
1204     for (c = 0; c < channels; ++c) {
1205     - *dst = (byte[0] << 24) | (byte[1] << 16) | byte[2];
1206     + *dst = (byte[0] << 24) |
1207     + (byte[1] << 16) |
1208     + (byte[2] << 8);
1209     byte += 3;
1210     dst++;
1211     }
1212     diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
1213     index 617ff1aa818f..27eb0270a711 100644
1214     --- a/sound/hda/hdac_i915.c
1215     +++ b/sound/hda/hdac_i915.c
1216     @@ -144,9 +144,9 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
1217     return -ENODEV;
1218     if (!acomp->ops) {
1219     request_module("i915");
1220     - /* 10s timeout */
1221     + /* 60s timeout */
1222     wait_for_completion_timeout(&bind_complete,
1223     - msecs_to_jiffies(10 * 1000));
1224     + msecs_to_jiffies(60 * 1000));
1225     }
1226     if (!acomp->ops) {
1227     dev_info(bus->dev, "couldn't bind with audio component\n");
1228     diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
1229     index a4ee7656d9ee..fb65ad31e86c 100644
1230     --- a/sound/pci/hda/patch_conexant.c
1231     +++ b/sound/pci/hda/patch_conexant.c
1232     @@ -936,6 +936,9 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
1233     SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
1234     SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
1235     SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
1236     + SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
1237     + SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
1238     + SND_PCI_QUIRK(0x103c, 0x8458, "HP Z2 G4 mini premium", CXT_FIXUP_HP_MIC_NO_PRESENCE),
1239     SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
1240     SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
1241     SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
1242     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1243     index 1ffa36e987b4..3a8568d3928f 100644
1244     --- a/sound/pci/hda/patch_realtek.c
1245     +++ b/sound/pci/hda/patch_realtek.c
1246     @@ -118,6 +118,7 @@ struct alc_spec {
1247     unsigned int has_alc5505_dsp:1;
1248     unsigned int no_depop_delay:1;
1249     unsigned int done_hp_init:1;
1250     + unsigned int no_shutup_pins:1;
1251    
1252     /* for PLL fix */
1253     hda_nid_t pll_nid;
1254     @@ -476,6 +477,14 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
1255     set_eapd(codec, *p, on);
1256     }
1257    
1258     +static void alc_shutup_pins(struct hda_codec *codec)
1259     +{
1260     + struct alc_spec *spec = codec->spec;
1261     +
1262     + if (!spec->no_shutup_pins)
1263     + snd_hda_shutup_pins(codec);
1264     +}
1265     +
1266     /* generic shutup callback;
1267     * just turning off EAPD and a little pause for avoiding pop-noise
1268     */
1269     @@ -486,7 +495,7 @@ static void alc_eapd_shutup(struct hda_codec *codec)
1270     alc_auto_setup_eapd(codec, false);
1271     if (!spec->no_depop_delay)
1272     msleep(200);
1273     - snd_hda_shutup_pins(codec);
1274     + alc_shutup_pins(codec);
1275     }
1276    
1277     /* generic EAPD initialization */
1278     @@ -814,7 +823,7 @@ static inline void alc_shutup(struct hda_codec *codec)
1279     if (spec && spec->shutup)
1280     spec->shutup(codec);
1281     else
1282     - snd_hda_shutup_pins(codec);
1283     + alc_shutup_pins(codec);
1284     }
1285    
1286     static void alc_reboot_notify(struct hda_codec *codec)
1287     @@ -2950,7 +2959,7 @@ static void alc269_shutup(struct hda_codec *codec)
1288     (alc_get_coef0(codec) & 0x00ff) == 0x018) {
1289     msleep(150);
1290     }
1291     - snd_hda_shutup_pins(codec);
1292     + alc_shutup_pins(codec);
1293     }
1294    
1295     static struct coef_fw alc282_coefs[] = {
1296     @@ -3053,14 +3062,15 @@ static void alc282_shutup(struct hda_codec *codec)
1297     if (hp_pin_sense)
1298     msleep(85);
1299    
1300     - snd_hda_codec_write(codec, hp_pin, 0,
1301     - AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1302     + if (!spec->no_shutup_pins)
1303     + snd_hda_codec_write(codec, hp_pin, 0,
1304     + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1305    
1306     if (hp_pin_sense)
1307     msleep(100);
1308    
1309     alc_auto_setup_eapd(codec, false);
1310     - snd_hda_shutup_pins(codec);
1311     + alc_shutup_pins(codec);
1312     alc_write_coef_idx(codec, 0x78, coef78);
1313     }
1314    
1315     @@ -3166,15 +3176,16 @@ static void alc283_shutup(struct hda_codec *codec)
1316     if (hp_pin_sense)
1317     msleep(100);
1318    
1319     - snd_hda_codec_write(codec, hp_pin, 0,
1320     - AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1321     + if (!spec->no_shutup_pins)
1322     + snd_hda_codec_write(codec, hp_pin, 0,
1323     + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1324    
1325     alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
1326    
1327     if (hp_pin_sense)
1328     msleep(100);
1329     alc_auto_setup_eapd(codec, false);
1330     - snd_hda_shutup_pins(codec);
1331     + alc_shutup_pins(codec);
1332     alc_write_coef_idx(codec, 0x43, 0x9614);
1333     }
1334    
1335     @@ -3240,14 +3251,15 @@ static void alc256_shutup(struct hda_codec *codec)
1336     /* NOTE: call this before clearing the pin, otherwise codec stalls */
1337     alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
1338    
1339     - snd_hda_codec_write(codec, hp_pin, 0,
1340     - AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1341     + if (!spec->no_shutup_pins)
1342     + snd_hda_codec_write(codec, hp_pin, 0,
1343     + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1344    
1345     if (hp_pin_sense)
1346     msleep(100);
1347    
1348     alc_auto_setup_eapd(codec, false);
1349     - snd_hda_shutup_pins(codec);
1350     + alc_shutup_pins(codec);
1351     }
1352    
1353     static void alc225_init(struct hda_codec *codec)
1354     @@ -3334,7 +3346,7 @@ static void alc225_shutup(struct hda_codec *codec)
1355     msleep(100);
1356    
1357     alc_auto_setup_eapd(codec, false);
1358     - snd_hda_shutup_pins(codec);
1359     + alc_shutup_pins(codec);
1360     }
1361    
1362     static void alc_default_init(struct hda_codec *codec)
1363     @@ -3388,14 +3400,15 @@ static void alc_default_shutup(struct hda_codec *codec)
1364     if (hp_pin_sense)
1365     msleep(85);
1366    
1367     - snd_hda_codec_write(codec, hp_pin, 0,
1368     - AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1369     + if (!spec->no_shutup_pins)
1370     + snd_hda_codec_write(codec, hp_pin, 0,
1371     + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1372    
1373     if (hp_pin_sense)
1374     msleep(100);
1375    
1376     alc_auto_setup_eapd(codec, false);
1377     - snd_hda_shutup_pins(codec);
1378     + alc_shutup_pins(codec);
1379     }
1380    
1381     static void alc294_hp_init(struct hda_codec *codec)
1382     @@ -3412,8 +3425,9 @@ static void alc294_hp_init(struct hda_codec *codec)
1383    
1384     msleep(100);
1385    
1386     - snd_hda_codec_write(codec, hp_pin, 0,
1387     - AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1388     + if (!spec->no_shutup_pins)
1389     + snd_hda_codec_write(codec, hp_pin, 0,
1390     + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1391    
1392     alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
1393     alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
1394     @@ -5007,16 +5021,12 @@ static void alc_fixup_auto_mute_via_amp(struct hda_codec *codec,
1395     }
1396     }
1397    
1398     -static void alc_no_shutup(struct hda_codec *codec)
1399     -{
1400     -}
1401     -
1402     static void alc_fixup_no_shutup(struct hda_codec *codec,
1403     const struct hda_fixup *fix, int action)
1404     {
1405     if (action == HDA_FIXUP_ACT_PRE_PROBE) {
1406     struct alc_spec *spec = codec->spec;
1407     - spec->shutup = alc_no_shutup;
1408     + spec->no_shutup_pins = 1;
1409     }
1410     }
1411    
1412     @@ -5661,6 +5671,7 @@ enum {
1413     ALC225_FIXUP_HEADSET_JACK,
1414     ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
1415     ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
1416     + ALC255_FIXUP_ACER_HEADSET_MIC,
1417     };
1418    
1419     static const struct hda_fixup alc269_fixups[] = {
1420     @@ -6627,6 +6638,16 @@ static const struct hda_fixup alc269_fixups[] = {
1421     .chained = true,
1422     .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
1423     },
1424     + [ALC255_FIXUP_ACER_HEADSET_MIC] = {
1425     + .type = HDA_FIXUP_PINS,
1426     + .v.pins = (const struct hda_pintbl[]) {
1427     + { 0x19, 0x03a11130 },
1428     + { 0x1a, 0x90a60140 }, /* use as internal mic */
1429     + { }
1430     + },
1431     + .chained = true,
1432     + .chain_id = ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC
1433     + },
1434     };
1435    
1436     static const struct snd_pci_quirk alc269_fixup_tbl[] = {
1437     @@ -6646,6 +6667,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
1438     SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
1439     SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
1440     SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
1441     + SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
1442     SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
1443     SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
1444     SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
1445     @@ -6677,6 +6699,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
1446     SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
1447     SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
1448     SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
1449     + SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
1450     SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
1451     SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
1452     SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
1453     @@ -6751,11 +6774,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
1454     SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
1455     SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
1456     SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
1457     + SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
1458     + SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
1459     SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
1460     SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
1461     SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
1462     - SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
1463     - SND_PCI_QUIRK(0x103c, 0x82c0, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
1464     + SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
1465     + SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
1466     SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
1467     SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
1468     SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
1469     @@ -6771,7 +6796,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
1470     SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
1471     SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
1472     SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
1473     - SND_PCI_QUIRK(0x1043, 0x14a1, "ASUS UX533FD", ALC294_FIXUP_ASUS_SPK),
1474     SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
1475     SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
1476     SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
1477     @@ -7388,6 +7412,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
1478     {0x14, 0x90170110},
1479     {0x1b, 0x90a70130},
1480     {0x21, 0x04211020}),
1481     + SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
1482     + {0x12, 0x90a60130},
1483     + {0x17, 0x90170110},
1484     + {0x21, 0x03211020}),
1485     SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
1486     {0x12, 0x90a60130},
1487     {0x17, 0x90170110},