Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0129-4.19.30-all-fixes.patch



Revision 3408
Fri Aug 2 11:47:40 2019 UTC by niro
File size: 61365 bytes
-linux-4.19.30
1 diff --git a/Makefile b/Makefile
2 index 6e526583291c..72e27c379eaf 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 19
9 -SUBLEVEL = 29
10 +SUBLEVEL = 30
11 EXTRAVERSION =
12 NAME = "People's Front"
13
14 diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
15 index 220b40b75e6f..12453cf7c11b 100644
16 --- a/arch/x86/events/intel/core.c
17 +++ b/arch/x86/events/intel/core.c
18 @@ -3282,7 +3282,7 @@ tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
19 /*
20 * Without TFA we must not use PMC3.
21 */
22 - if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
23 + if (!allow_tsx_force_abort && test_bit(3, c->idxmsk) && idx >= 0) {
24 c = dyn_constraint(cpuc, c, idx);
25 c->idxmsk64 &= ~(1ULL << 3);
26 c->weight--;
27 @@ -3989,7 +3989,7 @@ static struct attribute *intel_pmu_caps_attrs[] = {
28 NULL
29 };
30
31 -DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
32 +static DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
33
34 static struct attribute *intel_pmu_attrs[] = {
35 &dev_attr_freeze_on_smi.attr,
36 diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
37 index 5c424009b71f..42a36280d168 100644
38 --- a/arch/x86/events/perf_event.h
39 +++ b/arch/x86/events/perf_event.h
40 @@ -1030,12 +1030,12 @@ static inline int intel_pmu_init(void)
41 return 0;
42 }
43
44 -static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu)
45 +static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
46 {
47 return 0;
48 }
49
50 -static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc)
51 +static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
52 {
53 }
54
55 diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
56 index ed5e42461094..ad48fd52cb53 100644
57 --- a/drivers/connector/cn_proc.c
58 +++ b/drivers/connector/cn_proc.c
59 @@ -250,6 +250,7 @@ void proc_coredump_connector(struct task_struct *task)
60 {
61 struct cn_msg *msg;
62 struct proc_event *ev;
63 + struct task_struct *parent;
64 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
65
66 if (atomic_read(&proc_event_num_listeners) < 1)
67 @@ -262,8 +263,14 @@ void proc_coredump_connector(struct task_struct *task)
68 ev->what = PROC_EVENT_COREDUMP;
69 ev->event_data.coredump.process_pid = task->pid;
70 ev->event_data.coredump.process_tgid = task->tgid;
71 - ev->event_data.coredump.parent_pid = task->real_parent->pid;
72 - ev->event_data.coredump.parent_tgid = task->real_parent->tgid;
73 +
74 + rcu_read_lock();
75 + if (pid_alive(task)) {
76 + parent = rcu_dereference(task->real_parent);
77 + ev->event_data.coredump.parent_pid = parent->pid;
78 + ev->event_data.coredump.parent_tgid = parent->tgid;
79 + }
80 + rcu_read_unlock();
81
82 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
83 msg->ack = 0; /* not used */
84 @@ -276,6 +283,7 @@ void proc_exit_connector(struct task_struct *task)
85 {
86 struct cn_msg *msg;
87 struct proc_event *ev;
88 + struct task_struct *parent;
89 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
90
91 if (atomic_read(&proc_event_num_listeners) < 1)
92 @@ -290,8 +298,14 @@ void proc_exit_connector(struct task_struct *task)
93 ev->event_data.exit.process_tgid = task->tgid;
94 ev->event_data.exit.exit_code = task->exit_code;
95 ev->event_data.exit.exit_signal = task->exit_signal;
96 - ev->event_data.exit.parent_pid = task->real_parent->pid;
97 - ev->event_data.exit.parent_tgid = task->real_parent->tgid;
98 +
99 + rcu_read_lock();
100 + if (pid_alive(task)) {
101 + parent = rcu_dereference(task->real_parent);
102 + ev->event_data.exit.parent_pid = parent->pid;
103 + ev->event_data.exit.parent_tgid = parent->tgid;
104 + }
105 + rcu_read_unlock();
106
107 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
108 msg->ack = 0; /* not used */
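Both hunks above apply the standard pattern for reading task->real_parent, which is RCU-protected and can otherwise be freed while the notification is being built: take rcu_read_lock(), confirm the task is still live with pid_alive(), and only then rcu_dereference() the parent pointer. A minimal sketch of that pattern (read_parent_ids is a hypothetical helper, not part of the patch):

    /* Hypothetical helper illustrating the pattern used above: the
     * parent pointer is only valid inside the RCU read-side section,
     * and only while pid_alive() says the task has not been reaped. */
    static bool read_parent_ids(struct task_struct *task,
                                pid_t *ppid, pid_t *ptgid)
    {
        struct task_struct *parent;
        bool alive;

        rcu_read_lock();
        alive = pid_alive(task);
        if (alive) {
            parent = rcu_dereference(task->real_parent);
            *ppid = parent->pid;
            *ptgid = parent->tgid;
        }
        rcu_read_unlock();
        /* parent must not be touched after rcu_read_unlock() */
        return alive;
    }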
109 diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
110 index 1d74aed7e471..94f5c3646cb7 100644
111 --- a/drivers/gpu/drm/drm_atomic_helper.c
112 +++ b/drivers/gpu/drm/drm_atomic_helper.c
113 @@ -1573,6 +1573,15 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
114 if (old_plane_state->fb != new_plane_state->fb)
115 return -EINVAL;
116
117 + /*
118 + * FIXME: Since prepare_fb and cleanup_fb are always called on
119 + * the new_plane_state for async updates we need to block framebuffer
120 + * changes. This prevents use of a fb that's been cleaned up and
121 + * double cleanups from occurring.
122 + */
123 + if (old_plane_state->fb != new_plane_state->fb)
124 + return -EINVAL;
125 +
126 funcs = plane->helper_private;
127 if (!funcs->atomic_async_update)
128 return -EINVAL;
129 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
130 index 7033a2880771..9df1334608b7 100644
131 --- a/drivers/md/raid10.c
132 +++ b/drivers/md/raid10.c
133 @@ -4630,7 +4630,6 @@ read_more:
134 atomic_inc(&r10_bio->remaining);
135 read_bio->bi_next = NULL;
136 generic_make_request(read_bio);
137 - sector_nr += nr_sectors;
138 sectors_done += nr_sectors;
139 if (sector_nr <= last)
140 goto read_more;
141 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
142 index a6fcc5c96070..b2c42cae3081 100644
143 --- a/drivers/net/bonding/bond_main.c
144 +++ b/drivers/net/bonding/bond_main.c
145 @@ -1171,29 +1171,22 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
146 }
147 }
148
149 - /* Link-local multicast packets should be passed to the
150 - * stack on the link they arrive as well as pass them to the
151 - * bond-master device. These packets are mostly usable when
152 - * stack receives it with the link on which they arrive
153 - * (e.g. LLDP) they also must be available on master. Some of
154 - * the use cases include (but are not limited to): LLDP agents
155 - * that must be able to operate both on enslaved interfaces as
156 - * well as on bonds themselves; linux bridges that must be able
157 - * to process/pass BPDUs from attached bonds when any kind of
158 - * STP version is enabled on the network.
159 + /*
160 + * For packets determined by bond_should_deliver_exact_match() call to
161 + * be suppressed we want to make an exception for link-local packets.
162 + * This is necessary for e.g. LLDP daemons to be able to monitor
163 + * inactive slave links without being forced to bind to them
164 + * explicitly.
165 + *
166 + * At the same time, packets that are passed to the bonding master
167 + * (including link-local ones) can have their originating interface
168 + * determined via PACKET_ORIGDEV socket option.
169 */
170 - if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
171 - struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
172 -
173 - if (nskb) {
174 - nskb->dev = bond->dev;
175 - nskb->queue_mapping = 0;
176 - netif_rx(nskb);
177 - }
178 - return RX_HANDLER_PASS;
179 - }
180 - if (bond_should_deliver_exact_match(skb, slave, bond))
181 + if (bond_should_deliver_exact_match(skb, slave, bond)) {
182 + if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
183 + return RX_HANDLER_PASS;
184 return RX_HANDLER_EXACT;
185 + }
186
187 skb->dev = bond->dev;
188
189 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
190 index 41fa22c562c1..f81ad0aa8b09 100644
191 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
192 +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
193 @@ -424,9 +424,9 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
194 struct rtnl_link_stats64 *stats)
195 {
196 struct i40e_netdev_priv *np = netdev_priv(netdev);
197 - struct i40e_ring *tx_ring, *rx_ring;
198 struct i40e_vsi *vsi = np->vsi;
199 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
200 + struct i40e_ring *ring;
201 int i;
202
203 if (test_bit(__I40E_VSI_DOWN, vsi->state))
204 @@ -440,24 +440,26 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
205 u64 bytes, packets;
206 unsigned int start;
207
208 - tx_ring = READ_ONCE(vsi->tx_rings[i]);
209 - if (!tx_ring)
210 + ring = READ_ONCE(vsi->tx_rings[i]);
211 + if (!ring)
212 continue;
213 - i40e_get_netdev_stats_struct_tx(tx_ring, stats);
214 + i40e_get_netdev_stats_struct_tx(ring, stats);
215
216 - rx_ring = &tx_ring[1];
217 + if (i40e_enabled_xdp_vsi(vsi)) {
218 + ring++;
219 + i40e_get_netdev_stats_struct_tx(ring, stats);
220 + }
221
222 + ring++;
223 do {
224 - start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
225 - packets = rx_ring->stats.packets;
226 - bytes = rx_ring->stats.bytes;
227 - } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
228 + start = u64_stats_fetch_begin_irq(&ring->syncp);
229 + packets = ring->stats.packets;
230 + bytes = ring->stats.bytes;
231 + } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
232
233 stats->rx_packets += packets;
234 stats->rx_bytes += bytes;
235
236 - if (i40e_enabled_xdp_vsi(vsi))
237 - i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
238 }
239 rcu_read_unlock();
240
241 diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
242 index e65bc3c95630..857588e2488d 100644
243 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
244 +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
245 @@ -2645,6 +2645,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
246 if (!priv->cmd.context)
247 return -ENOMEM;
248
249 + if (mlx4_is_mfunc(dev))
250 + mutex_lock(&priv->cmd.slave_cmd_mutex);
251 down_write(&priv->cmd.switch_sem);
252 for (i = 0; i < priv->cmd.max_cmds; ++i) {
253 priv->cmd.context[i].token = i;
254 @@ -2670,6 +2672,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
255 down(&priv->cmd.poll_sem);
256 priv->cmd.use_events = 1;
257 up_write(&priv->cmd.switch_sem);
258 + if (mlx4_is_mfunc(dev))
259 + mutex_unlock(&priv->cmd.slave_cmd_mutex);
260
261 return err;
262 }
263 @@ -2682,6 +2686,8 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
264 struct mlx4_priv *priv = mlx4_priv(dev);
265 int i;
266
267 + if (mlx4_is_mfunc(dev))
268 + mutex_lock(&priv->cmd.slave_cmd_mutex);
269 down_write(&priv->cmd.switch_sem);
270 priv->cmd.use_events = 0;
271
272 @@ -2689,9 +2695,12 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
273 down(&priv->cmd.event_sem);
274
275 kfree(priv->cmd.context);
276 + priv->cmd.context = NULL;
277
278 up(&priv->cmd.poll_sem);
279 up_write(&priv->cmd.switch_sem);
280 + if (mlx4_is_mfunc(dev))
281 + mutex_unlock(&priv->cmd.slave_cmd_mutex);
282 }
283
284 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
285 diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
286 index 31bd56727022..676428a57662 100644
287 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
288 +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
289 @@ -2719,13 +2719,13 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
290 int total_pages;
291 int total_mem;
292 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
293 + int tot;
294
295 sq_size = 1 << (log_sq_size + log_sq_sride + 4);
296 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
297 total_mem = sq_size + rq_size;
298 - total_pages =
299 - roundup_pow_of_two((total_mem + (page_offset << 6)) >>
300 - page_shift);
301 + tot = (total_mem + (page_offset << 6)) >> page_shift;
302 + total_pages = !tot ? 1 : roundup_pow_of_two(tot);
303
304 return total_pages;
305 }
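The reshuffle above exists because roundup_pow_of_two() is only defined for arguments >= 1: for a variable n it expands to 1UL << fls_long(n - 1), so n == 0 turns into a full-width shift, which is undefined. When the QP's queues fit in less than one page, the old expression passed exactly that 0. A sketch of the clamp (the helper name is illustrative):

    /* Illustrative form of the fix: compute the raw page count first,
     * then clamp a zero count to one page before rounding up. */
    static int qp_total_pages(int total_mem, int page_offset, int page_shift)
    {
        int tot = (total_mem + (page_offset << 6)) >> page_shift;

        return tot ? roundup_pow_of_two(tot) : 1;
    }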
306 diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
307 index de5a6abda7e3..208341541087 100644
308 --- a/drivers/net/ethernet/microchip/lan743x_main.c
309 +++ b/drivers/net/ethernet/microchip/lan743x_main.c
310 @@ -585,8 +585,7 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
311
312 if (adapter->csr.flags &
313 LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
314 - flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
315 - LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
316 + flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
317 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
318 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
319 LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
320 @@ -599,12 +598,6 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
321 /* map TX interrupt to vector */
322 int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
323 lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
324 - if (flags &
325 - LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
326 - int_vec_en_auto_clr |= INT_VEC_EN_(vector);
327 - lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
328 - int_vec_en_auto_clr);
329 - }
330
331 /* Remove TX interrupt from shared mask */
332 intr->vector_list[0].int_mask &= ~int_bit;
333 @@ -1905,7 +1898,17 @@ static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
334 return ((++index) % rx->ring_size);
335 }
336
337 -static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
338 +static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx)
339 +{
340 + int length = 0;
341 +
342 + length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
343 + return __netdev_alloc_skb(rx->adapter->netdev,
344 + length, GFP_ATOMIC | GFP_DMA);
345 +}
346 +
347 +static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
348 + struct sk_buff *skb)
349 {
350 struct lan743x_rx_buffer_info *buffer_info;
351 struct lan743x_rx_descriptor *descriptor;
352 @@ -1914,9 +1917,7 @@ static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
353 length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
354 descriptor = &rx->ring_cpu_ptr[index];
355 buffer_info = &rx->buffer_info[index];
356 - buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev,
357 - length,
358 - GFP_ATOMIC | GFP_DMA);
359 + buffer_info->skb = skb;
360 if (!(buffer_info->skb))
361 return -ENOMEM;
362 buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,
363 @@ -2063,8 +2064,19 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
364 /* packet is available */
365 if (first_index == last_index) {
366 /* single buffer packet */
367 + struct sk_buff *new_skb = NULL;
368 int packet_length;
369
370 + new_skb = lan743x_rx_allocate_skb(rx);
371 + if (!new_skb) {
372 + /* failed to allocate next skb.
373 + * Memory is very low.
374 + * Drop this packet and reuse buffer.
375 + */
376 + lan743x_rx_reuse_ring_element(rx, first_index);
377 + goto process_extension;
378 + }
379 +
380 buffer_info = &rx->buffer_info[first_index];
381 skb = buffer_info->skb;
382 descriptor = &rx->ring_cpu_ptr[first_index];
383 @@ -2084,7 +2096,7 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
384 skb_put(skb, packet_length - 4);
385 skb->protocol = eth_type_trans(skb,
386 rx->adapter->netdev);
387 - lan743x_rx_allocate_ring_element(rx, first_index);
388 + lan743x_rx_init_ring_element(rx, first_index, new_skb);
389 } else {
390 int index = first_index;
391
392 @@ -2097,26 +2109,23 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
393 if (first_index <= last_index) {
394 while ((index >= first_index) &&
395 (index <= last_index)) {
396 - lan743x_rx_release_ring_element(rx,
397 - index);
398 - lan743x_rx_allocate_ring_element(rx,
399 - index);
400 + lan743x_rx_reuse_ring_element(rx,
401 + index);
402 index = lan743x_rx_next_index(rx,
403 index);
404 }
405 } else {
406 while ((index >= first_index) ||
407 (index <= last_index)) {
408 - lan743x_rx_release_ring_element(rx,
409 - index);
410 - lan743x_rx_allocate_ring_element(rx,
411 - index);
412 + lan743x_rx_reuse_ring_element(rx,
413 + index);
414 index = lan743x_rx_next_index(rx,
415 index);
416 }
417 }
418 }
419
420 +process_extension:
421 if (extension_index >= 0) {
422 descriptor = &rx->ring_cpu_ptr[extension_index];
423 buffer_info = &rx->buffer_info[extension_index];
424 @@ -2293,7 +2302,9 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
425
426 rx->last_head = 0;
427 for (index = 0; index < rx->ring_size; index++) {
428 - ret = lan743x_rx_allocate_ring_element(rx, index);
429 + struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx);
430 +
431 + ret = lan743x_rx_init_ring_element(rx, index, new_skb);
432 if (ret)
433 goto cleanup;
434 }
435 diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
436 index 8441c86d9f3b..5f092bbd0514 100644
437 --- a/drivers/net/ethernet/renesas/ravb_main.c
438 +++ b/drivers/net/ethernet/renesas/ravb_main.c
439 @@ -459,7 +459,7 @@ static int ravb_dmac_init(struct net_device *ndev)
440 RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
441
442 /* Set FIFO size */
443 - ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
444 + ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
445
446 /* Timestamp enable */
447 ravb_write(ndev, TCCR_TFEN, TCCR);
448 diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
449 index 5fb541897863..68b8007da82b 100644
450 --- a/drivers/net/ipvlan/ipvlan_main.c
451 +++ b/drivers/net/ipvlan/ipvlan_main.c
452 @@ -494,6 +494,8 @@ static int ipvlan_nl_changelink(struct net_device *dev,
453
454 if (!data)
455 return 0;
456 + if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
457 + return -EPERM;
458
459 if (data[IFLA_IPVLAN_MODE]) {
460 u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
461 @@ -596,6 +598,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
462 struct ipvl_dev *tmp = netdev_priv(phy_dev);
463
464 phy_dev = tmp->phy_dev;
465 + if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
466 + return -EPERM;
467 } else if (!netif_is_ipvlan_port(phy_dev)) {
468 /* Exit early if the underlying link is invalid or busy */
469 if (phy_dev->type != ARPHRD_ETHER ||
470 diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
471 index 15c5586d74ff..c5588d4508f9 100644
472 --- a/drivers/net/phy/mdio_bus.c
473 +++ b/drivers/net/phy/mdio_bus.c
474 @@ -380,7 +380,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
475 err = device_register(&bus->dev);
476 if (err) {
477 pr_err("mii_bus %s failed to register\n", bus->id);
478 - put_device(&bus->dev);
479 return -EINVAL;
480 }
481
482 diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
483 index 67ffe74747a1..7321a4eca235 100644
484 --- a/drivers/net/ppp/pptp.c
485 +++ b/drivers/net/ppp/pptp.c
486 @@ -537,6 +537,7 @@ static void pptp_sock_destruct(struct sock *sk)
487 pppox_unbind_sock(sk);
488 }
489 skb_queue_purge(&sk->sk_receive_queue);
490 + dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
491 }
492
493 static int pptp_create(struct net *net, struct socket *sock, int kern)
494 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
495 index 723814d84b7d..95ee9d815d76 100644
496 --- a/drivers/net/team/team.c
497 +++ b/drivers/net/team/team.c
498 @@ -1259,7 +1259,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
499 list_add_tail_rcu(&port->list, &team->port_list);
500 team_port_enable(team, port);
501 __team_compute_features(team);
502 - __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
503 + __team_port_change_port_added(port, !!netif_oper_up(port_dev));
504 __team_options_change_check(team);
505
506 netdev_info(dev, "Port device %s added\n", portname);
507 @@ -2918,7 +2918,7 @@ static int team_device_event(struct notifier_block *unused,
508
509 switch (event) {
510 case NETDEV_UP:
511 - if (netif_carrier_ok(dev))
512 + if (netif_oper_up(dev))
513 team_port_change_check(port, true);
514 break;
515 case NETDEV_DOWN:
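The switch from netif_carrier_ok() to netif_oper_up() makes the team driver honor the RFC 2863 operational state rather than just the physical carrier bit, so a port that is dormant or whose lower layer is down is no longer treated as usable. For reference, netif_oper_up() as defined in include/linux/netdevice.h:

    /* netif_carrier_ok() only tests the carrier bit; netif_oper_up()
     * tests the operational state, which can be down (e.g. dormant)
     * even while the carrier is physically up. */
    static inline bool netif_oper_up(const struct net_device *dev)
    {
        return (dev->operstate == IF_OPER_UP ||
                dev->operstate == IF_OPER_UNKNOWN);
    }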
516 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
517 index 9fc9aed6ca9a..52387f7f12ed 100644
518 --- a/drivers/net/vxlan.c
519 +++ b/drivers/net/vxlan.c
520 @@ -1469,6 +1469,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
521 goto drop;
522 }
523
524 + rcu_read_lock();
525 +
526 + if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
527 + rcu_read_unlock();
528 + atomic_long_inc(&vxlan->dev->rx_dropped);
529 + goto drop;
530 + }
531 +
532 stats = this_cpu_ptr(vxlan->dev->tstats);
533 u64_stats_update_begin(&stats->syncp);
534 stats->rx_packets++;
535 @@ -1476,6 +1484,9 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
536 u64_stats_update_end(&stats->syncp);
537
538 gro_cells_receive(&vxlan->gro_cells, skb);
539 +
540 + rcu_read_unlock();
541 +
542 return 0;
543
544 drop:
545 @@ -2460,6 +2471,8 @@ static void vxlan_uninit(struct net_device *dev)
546 {
547 struct vxlan_dev *vxlan = netdev_priv(dev);
548
549 + gro_cells_destroy(&vxlan->gro_cells);
550 +
551 vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
552
553 free_percpu(dev->tstats);
554 @@ -3526,7 +3539,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
555
556 vxlan_flush(vxlan, true);
557
558 - gro_cells_destroy(&vxlan->gro_cells);
559 list_del(&vxlan->next);
560 unregister_netdevice_queue(dev, head);
561 }
562 diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
563 index c70f0c5237ea..58d8cbc3f921 100644
564 --- a/drivers/staging/erofs/internal.h
565 +++ b/drivers/staging/erofs/internal.h
566 @@ -260,6 +260,7 @@ repeat:
567 }
568
569 #define __erofs_workgroup_get(grp) atomic_inc(&(grp)->refcount)
570 +#define __erofs_workgroup_put(grp) atomic_dec(&(grp)->refcount)
571
572 extern int erofs_workgroup_put(struct erofs_workgroup *grp);
573
574 diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
575 index dd2ac9dbc4b4..2d96820da62e 100644
576 --- a/drivers/staging/erofs/utils.c
577 +++ b/drivers/staging/erofs/utils.c
578 @@ -87,12 +87,21 @@ int erofs_register_workgroup(struct super_block *sb,
579 grp = (void *)((unsigned long)grp |
580 1UL << RADIX_TREE_EXCEPTIONAL_SHIFT);
581
582 - err = radix_tree_insert(&sbi->workstn_tree,
583 - grp->index, grp);
584 + /*
585 + * Bump up reference count before making this workgroup
586 + * visible to other users in order to avoid potential UAF
587 + * without being serialized by erofs_workstn_lock.
588 + */
589 + __erofs_workgroup_get(grp);
590
591 - if (!err) {
592 - __erofs_workgroup_get(grp);
593 - }
594 + err = radix_tree_insert(&sbi->workstn_tree,
595 + grp->index, grp);
596 + if (unlikely(err))
597 + /*
598 + * it's safe to decrease since the workgroup isn't visible
599 + * and refcount >= 2 (cannot be frozen).
600 + */
601 + __erofs_workgroup_put(grp);
602
603 erofs_workstn_unlock(sbi);
604 radix_tree_preload_end();
605 @@ -101,19 +110,99 @@ int erofs_register_workgroup(struct super_block *sb,
606
607 extern void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
608
609 +static void __erofs_workgroup_free(struct erofs_workgroup *grp)
610 +{
611 + atomic_long_dec(&erofs_global_shrink_cnt);
612 + erofs_workgroup_free_rcu(grp);
613 +}
614 +
615 int erofs_workgroup_put(struct erofs_workgroup *grp)
616 {
617 int count = atomic_dec_return(&grp->refcount);
618
619 if (count == 1)
620 atomic_long_inc(&erofs_global_shrink_cnt);
621 - else if (!count) {
622 - atomic_long_dec(&erofs_global_shrink_cnt);
623 - erofs_workgroup_free_rcu(grp);
624 - }
625 + else if (!count)
626 + __erofs_workgroup_free(grp);
627 return count;
628 }
629
630 +#ifdef EROFS_FS_HAS_MANAGED_CACHE
631 +/* for cache-managed case, customized reclaim paths exist */
632 +static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
633 +{
634 + erofs_workgroup_unfreeze(grp, 0);
635 + __erofs_workgroup_free(grp);
636 +}
637 +
638 +bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
639 + struct erofs_workgroup *grp,
640 + bool cleanup)
641 +{
642 + void *entry;
643 +
644 + /*
645 + * when managed cache is enabled, the refcount of workgroups
646 + * themselves could be < 0 (frozen). So there is no guarantee
647 + * that all refcount > 0 if managed cache is enabled.
648 + */
649 + if (!erofs_workgroup_try_to_freeze(grp, 1))
650 + return false;
651 +
652 + /*
653 + * note that all cached pages should be unlinked
654 + * before deleting it from the radix tree.
655 + * Otherwise some cached pages of an orphan old workgroup
656 + * could still be linked after the new one is available.
657 + */
658 + if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
659 + erofs_workgroup_unfreeze(grp, 1);
660 + return false;
661 + }
662 +
663 + /*
664 + * it is impossible to fail after the workgroup is frozen,
665 + * however in order to avoid some race conditions, add a
666 + * DBG_BUGON to observe this in advance.
667 + */
668 + entry = radix_tree_delete(&sbi->workstn_tree, grp->index);
669 + DBG_BUGON((void *)((unsigned long)entry &
670 + ~RADIX_TREE_EXCEPTIONAL_ENTRY) != grp);
671 +
672 + /*
673 + * if managed cache is enabled, the last refcount
674 + * should indicate the related workstation.
675 + */
676 + erofs_workgroup_unfreeze_final(grp);
677 + return true;
678 +}
679 +
680 +#else
681 +/* for nocache case, no customized reclaim path at all */
682 +bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
683 + struct erofs_workgroup *grp,
684 + bool cleanup)
685 +{
686 + int cnt = atomic_read(&grp->refcount);
687 + void *entry;
688 +
689 + DBG_BUGON(cnt <= 0);
690 + DBG_BUGON(cleanup && cnt != 1);
691 +
692 + if (cnt > 1)
693 + return false;
694 +
695 + entry = radix_tree_delete(&sbi->workstn_tree, grp->index);
696 + DBG_BUGON((void *)((unsigned long)entry &
697 + ~RADIX_TREE_EXCEPTIONAL_ENTRY) != grp);
698 +
699 + /* (rarely) could be grabbed again when freeing */
700 + erofs_workgroup_put(grp);
701 + return true;
702 +}
703 +
704 +#endif
705 +
706 unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
707 unsigned long nr_shrink,
708 bool cleanup)
709 @@ -130,44 +219,16 @@ repeat:
710 batch, first_index, PAGEVEC_SIZE);
711
712 for (i = 0; i < found; ++i) {
713 - int cnt;
714 struct erofs_workgroup *grp = (void *)
715 ((unsigned long)batch[i] &
716 ~RADIX_TREE_EXCEPTIONAL_ENTRY);
717
718 first_index = grp->index + 1;
719
720 - cnt = atomic_read(&grp->refcount);
721 - BUG_ON(cnt <= 0);
722 -
723 - if (cleanup)
724 - BUG_ON(cnt != 1);
725 -
726 -#ifndef EROFS_FS_HAS_MANAGED_CACHE
727 - else if (cnt > 1)
728 -#else
729 - if (!erofs_workgroup_try_to_freeze(grp, 1))
730 -#endif
731 + /* try to shrink each valid workgroup */
732 + if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
733 continue;
734
735 - if (radix_tree_delete(&sbi->workstn_tree,
736 - grp->index) != grp) {
737 -#ifdef EROFS_FS_HAS_MANAGED_CACHE
738 -skip:
739 - erofs_workgroup_unfreeze(grp, 1);
740 -#endif
741 - continue;
742 - }
743 -
744 -#ifdef EROFS_FS_HAS_MANAGED_CACHE
745 - if (erofs_try_to_free_all_cached_pages(sbi, grp))
746 - goto skip;
747 -
748 - erofs_workgroup_unfreeze(grp, 1);
749 -#endif
750 - /* (rarely) grabbed again when freeing */
751 - erofs_workgroup_put(grp);
752 -
753 ++freed;
754 if (unlikely(!--nr_shrink))
755 break;
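The register/shrink changes above all follow one rule: a workgroup's reference count is raised before the object becomes reachable through the radix tree, and dropped again if insertion fails, so no other CPU can ever observe a published object whose refcount is still in flux. A generic sketch of this publish pattern (function name is hypothetical; the get/put macros are the ones added in internal.h):

    /* "Get before publish": pinning the object first means a lookup
     * that races with the insert can never see a stale refcount. */
    static int publish_workgroup(struct radix_tree_root *tree,
                                 struct erofs_workgroup *grp)
    {
        int err;

        __erofs_workgroup_get(grp);            /* pin before publishing */
        err = radix_tree_insert(tree, grp->index, grp);
        if (err)
            __erofs_workgroup_put(grp);        /* never visible: safe to drop */
        return err;
    }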
756 diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
757 index fa93f6711d8d..e440f87ae1d6 100644
758 --- a/drivers/vhost/vsock.c
759 +++ b/drivers/vhost/vsock.c
760 @@ -642,7 +642,7 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
761 hash_del_rcu(&vsock->hash);
762
763 vsock->guest_cid = guest_cid;
764 - hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
765 + hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
766 spin_unlock_bh(&vhost_vsock_lock);
767
768 return 0;
769 diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
770 index fd36aa6569dc..81c1dd635a8d 100644
771 --- a/fs/f2fs/file.c
772 +++ b/fs/f2fs/file.c
773 @@ -1736,10 +1736,12 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
774
775 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
776
777 - if (!get_dirty_pages(inode))
778 - goto skip_flush;
779 -
780 - f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
781 + /*
782 + * Should wait for end_io to count F2FS_WB_CP_DATA correctly by
783 + * f2fs_is_atomic_file.
784 + */
785 + if (get_dirty_pages(inode))
786 + f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
787 "Unexpected flush for atomic writes: ino=%lu, npages=%u",
788 inode->i_ino, get_dirty_pages(inode));
789 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
790 @@ -1747,7 +1749,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
791 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
792 goto out;
793 }
794 -skip_flush:
795 +
796 set_inode_flag(inode, FI_ATOMIC_FILE);
797 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
798 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
799 diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
800 index acf45ddbe924..e095fb871d91 100644
801 --- a/net/core/gro_cells.c
802 +++ b/net/core/gro_cells.c
803 @@ -13,22 +13,36 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
804 {
805 struct net_device *dev = skb->dev;
806 struct gro_cell *cell;
807 + int res;
808
809 - if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev))
810 - return netif_rx(skb);
811 + rcu_read_lock();
812 + if (unlikely(!(dev->flags & IFF_UP)))
813 + goto drop;
814 +
815 + if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
816 + res = netif_rx(skb);
817 + goto unlock;
818 + }
819
820 cell = this_cpu_ptr(gcells->cells);
821
822 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
823 +drop:
824 atomic_long_inc(&dev->rx_dropped);
825 kfree_skb(skb);
826 - return NET_RX_DROP;
827 + res = NET_RX_DROP;
828 + goto unlock;
829 }
830
831 __skb_queue_tail(&cell->napi_skbs, skb);
832 if (skb_queue_len(&cell->napi_skbs) == 1)
833 napi_schedule(&cell->napi);
834 - return NET_RX_SUCCESS;
835 +
836 + res = NET_RX_SUCCESS;
837 +
838 +unlock:
839 + rcu_read_unlock();
840 + return res;
841 }
842 EXPORT_SYMBOL(gro_cells_receive);
843
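This change and the vxlan_rcv() change earlier share one idea: test IFF_UP and touch the receive state inside a single RCU read-side section. Teardown clears IFF_UP first and, via the companion vxlan change moving gro_cells_destroy() into ndo_uninit, frees the cells only after an RCU grace period, so a receiver that saw the device up cannot have the state freed underneath it. Schematically (sketch_rx is hypothetical):

    /* Hypothetical receive path showing the shape of the guard: */
    static int sketch_rx(struct net_device *dev, struct sk_buff *skb)
    {
        int res = NET_RX_SUCCESS;

        rcu_read_lock();
        if (unlikely(!(dev->flags & IFF_UP))) {
            atomic_long_inc(&dev->rx_dropped);
            kfree_skb(skb);
            res = NET_RX_DROP;
        } else {
            /* ... queue skb; per-cpu state is safe to touch here ... */
        }
        rcu_read_unlock();
        return res;
    }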
844 diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
845 index b8cd43c9ed5b..a97bf326b231 100644
846 --- a/net/hsr/hsr_device.c
847 +++ b/net/hsr/hsr_device.c
848 @@ -94,9 +94,8 @@ static void hsr_check_announce(struct net_device *hsr_dev,
849 && (old_operstate != IF_OPER_UP)) {
850 /* Went up */
851 hsr->announce_count = 0;
852 - hsr->announce_timer.expires = jiffies +
853 - msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
854 - add_timer(&hsr->announce_timer);
855 + mod_timer(&hsr->announce_timer,
856 + jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
857 }
858
859 if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP))
860 @@ -332,6 +331,7 @@ static void hsr_announce(struct timer_list *t)
861 {
862 struct hsr_priv *hsr;
863 struct hsr_port *master;
864 + unsigned long interval;
865
866 hsr = from_timer(hsr, t, announce_timer);
867
868 @@ -343,18 +343,16 @@ static void hsr_announce(struct timer_list *t)
869 hsr->protVersion);
870 hsr->announce_count++;
871
872 - hsr->announce_timer.expires = jiffies +
873 - msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
874 + interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
875 } else {
876 send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK,
877 hsr->protVersion);
878
879 - hsr->announce_timer.expires = jiffies +
880 - msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
881 + interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
882 }
883
884 if (is_admin_up(master->dev))
885 - add_timer(&hsr->announce_timer);
886 + mod_timer(&hsr->announce_timer, jiffies + interval);
887
888 rcu_read_unlock();
889 }
890 @@ -486,7 +484,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
891
892 res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
893 if (res)
894 - return res;
895 + goto err_add_port;
896
897 res = register_netdevice(hsr_dev);
898 if (res)
899 @@ -506,6 +504,8 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
900 fail:
901 hsr_for_each_port(hsr, port)
902 hsr_del_port(port);
903 +err_add_port:
904 + hsr_del_node(&hsr->self_node_db);
905
906 return res;
907 }
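Replacing the expires-assignment-plus-add_timer() pair with mod_timer() is the idiomatic fix here: add_timer() must only be used on an inactive timer (the kernel BUGs on a pending one), whereas mod_timer() atomically re-arms a timer whether or not it is pending. In short (timer stands in for &hsr->announce_timer):

    /* Only safe when the timer is known to be inactive: */
    timer->expires = jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
    add_timer(timer);    /* BUG_ON(timer_pending(timer)) inside */

    /* Safe whether or not the timer is already pending: */
    mod_timer(timer, jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));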
908 diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
909 index 286ceb41ac0c..9af16cb68f76 100644
910 --- a/net/hsr/hsr_framereg.c
911 +++ b/net/hsr/hsr_framereg.c
912 @@ -124,6 +124,18 @@ int hsr_create_self_node(struct list_head *self_node_db,
913 return 0;
914 }
915
916 +void hsr_del_node(struct list_head *self_node_db)
917 +{
918 + struct hsr_node *node;
919 +
920 + rcu_read_lock();
921 + node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
922 + rcu_read_unlock();
923 + if (node) {
924 + list_del_rcu(&node->mac_list);
925 + kfree(node);
926 + }
927 +}
928
929 /* Allocate an hsr_node and add it to node_db. 'addr' is the node's AddressA;
930 * seq_out is used to initialize filtering of outgoing duplicate frames
931 diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
932 index 370b45998121..531fd3dfcac1 100644
933 --- a/net/hsr/hsr_framereg.h
934 +++ b/net/hsr/hsr_framereg.h
935 @@ -16,6 +16,7 @@
936
937 struct hsr_node;
938
939 +void hsr_del_node(struct list_head *self_node_db);
940 struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
941 u16 seq_out);
942 struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
943 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
944 index ca87bb6784e5..7a556e459375 100644
945 --- a/net/ipv4/route.c
946 +++ b/net/ipv4/route.c
947 @@ -1308,6 +1308,10 @@ static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
948 if (fnhe->fnhe_daddr == daddr) {
949 rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
950 fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
951 + /* set fnhe_daddr to 0 to ensure it won't bind with
952 + * new dsts in rt_bind_exception().
953 + */
954 + fnhe->fnhe_daddr = 0;
955 fnhe_flush_routes(fnhe);
956 kfree_rcu(fnhe, rcu);
957 break;
958 @@ -2155,12 +2159,13 @@ int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
959 int our = 0;
960 int err = -EINVAL;
961
962 - if (in_dev)
963 - our = ip_check_mc_rcu(in_dev, daddr, saddr,
964 - ip_hdr(skb)->protocol);
965 + if (!in_dev)
966 + return err;
967 + our = ip_check_mc_rcu(in_dev, daddr, saddr,
968 + ip_hdr(skb)->protocol);
969
970 /* check l3 master if no match yet */
971 - if ((!in_dev || !our) && netif_is_l3_slave(dev)) {
972 + if (!our && netif_is_l3_slave(dev)) {
973 struct in_device *l3_in_dev;
974
975 l3_in_dev = __in_dev_get_rcu(skb->dev);
976 diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
977 index c3387dfd725b..f66b2e6d97a7 100644
978 --- a/net/ipv4/syncookies.c
979 +++ b/net/ipv4/syncookies.c
980 @@ -216,7 +216,12 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
981 refcount_set(&req->rsk_refcnt, 1);
982 tcp_sk(child)->tsoffset = tsoff;
983 sock_rps_save_rxhash(child, skb);
984 - inet_csk_reqsk_queue_add(sk, req, child);
985 + if (!inet_csk_reqsk_queue_add(sk, req, child)) {
986 + bh_unlock_sock(child);
987 + sock_put(child);
988 + child = NULL;
989 + reqsk_put(req);
990 + }
991 } else {
992 reqsk_free(req);
993 }
994 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
995 index 432dc9af1172..30c6e94b06c4 100644
996 --- a/net/ipv4/tcp.c
997 +++ b/net/ipv4/tcp.c
998 @@ -1901,6 +1901,11 @@ static int tcp_inq_hint(struct sock *sk)
999 inq = tp->rcv_nxt - tp->copied_seq;
1000 release_sock(sk);
1001 }
1002 + /* After receiving a FIN, tell user space to continue reading
1003 + * by returning a non-zero inq.
1004 + */
1005 + if (inq == 0 && sock_flag(sk, SOCK_DONE))
1006 + inq = 1;
1007 return inq;
1008 }
1009
1010 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
1011 index 664fa7d8f7d9..572f79abd393 100644
1012 --- a/net/ipv4/tcp_input.c
1013 +++ b/net/ipv4/tcp_input.c
1014 @@ -6514,7 +6514,13 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
1015 af_ops->send_synack(fastopen_sk, dst, &fl, req,
1016 &foc, TCP_SYNACK_FASTOPEN);
1017 /* Add the child socket directly into the accept queue */
1018 - inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
1019 + if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
1020 + reqsk_fastopen_remove(fastopen_sk, req, false);
1021 + bh_unlock_sock(fastopen_sk);
1022 + sock_put(fastopen_sk);
1023 + reqsk_put(req);
1024 + goto drop;
1025 + }
1026 sk->sk_data_ready(sk);
1027 bh_unlock_sock(fastopen_sk);
1028 sock_put(fastopen_sk);
1029 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
1030 index 3b83b157b0a1..30fdf891940b 100644
1031 --- a/net/ipv4/tcp_ipv4.c
1032 +++ b/net/ipv4/tcp_ipv4.c
1033 @@ -1646,15 +1646,8 @@ EXPORT_SYMBOL(tcp_add_backlog);
1034 int tcp_filter(struct sock *sk, struct sk_buff *skb)
1035 {
1036 struct tcphdr *th = (struct tcphdr *)skb->data;
1037 - unsigned int eaten = skb->len;
1038 - int err;
1039
1040 - err = sk_filter_trim_cap(sk, skb, th->doff * 4);
1041 - if (!err) {
1042 - eaten -= skb->len;
1043 - TCP_SKB_CB(skb)->end_seq -= eaten;
1044 - }
1045 - return err;
1046 + return sk_filter_trim_cap(sk, skb, th->doff * 4);
1047 }
1048 EXPORT_SYMBOL(tcp_filter);
1049
1050 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
1051 index ba59a9c14e02..66cc94427437 100644
1052 --- a/net/ipv6/route.c
1053 +++ b/net/ipv6/route.c
1054 @@ -1282,18 +1282,29 @@ static DEFINE_SPINLOCK(rt6_exception_lock);
1055 static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
1056 struct rt6_exception *rt6_ex)
1057 {
1058 + struct fib6_info *from;
1059 struct net *net;
1060
1061 if (!bucket || !rt6_ex)
1062 return;
1063
1064 net = dev_net(rt6_ex->rt6i->dst.dev);
1065 + net->ipv6.rt6_stats->fib_rt_cache--;
1066 +
1067 + /* completely purge the exception to allow releasing the held resources:
1068 + * some [sk] cache may keep the dst around for unlimited time
1069 + */
1070 + from = rcu_dereference_protected(rt6_ex->rt6i->from,
1071 + lockdep_is_held(&rt6_exception_lock));
1072 + rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
1073 + fib6_info_release(from);
1074 + dst_dev_put(&rt6_ex->rt6i->dst);
1075 +
1076 hlist_del_rcu(&rt6_ex->hlist);
1077 dst_release(&rt6_ex->rt6i->dst);
1078 kfree_rcu(rt6_ex, rcu);
1079 WARN_ON_ONCE(!bucket->depth);
1080 bucket->depth--;
1081 - net->ipv6.rt6_stats->fib_rt_cache--;
1082 }
1083
1084 /* Remove oldest rt6_ex in bucket and free the memory
1085 @@ -1612,15 +1623,15 @@ static int rt6_remove_exception_rt(struct rt6_info *rt)
1086 static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1087 {
1088 struct rt6_exception_bucket *bucket;
1089 - struct fib6_info *from = rt->from;
1090 struct in6_addr *src_key = NULL;
1091 struct rt6_exception *rt6_ex;
1092 -
1093 - if (!from ||
1094 - !(rt->rt6i_flags & RTF_CACHE))
1095 - return;
1096 + struct fib6_info *from;
1097
1098 rcu_read_lock();
1099 + from = rcu_dereference(rt->from);
1100 + if (!from || !(rt->rt6i_flags & RTF_CACHE))
1101 + goto unlock;
1102 +
1103 bucket = rcu_dereference(from->rt6i_exception_bucket);
1104
1105 #ifdef CONFIG_IPV6_SUBTREES
1106 @@ -1639,6 +1650,7 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1107 if (rt6_ex)
1108 rt6_ex->stamp = jiffies;
1109
1110 +unlock:
1111 rcu_read_unlock();
1112 }
1113
1114 @@ -2796,20 +2808,24 @@ static int ip6_route_check_nh_onlink(struct net *net,
1115 u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
1116 const struct in6_addr *gw_addr = &cfg->fc_gateway;
1117 u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
1118 + struct fib6_info *from;
1119 struct rt6_info *grt;
1120 int err;
1121
1122 err = 0;
1123 grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
1124 if (grt) {
1125 + rcu_read_lock();
1126 + from = rcu_dereference(grt->from);
1127 if (!grt->dst.error &&
1128 /* ignore match if it is the default route */
1129 - grt->from && !ipv6_addr_any(&grt->from->fib6_dst.addr) &&
1130 + from && !ipv6_addr_any(&from->fib6_dst.addr) &&
1131 (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
1132 NL_SET_ERR_MSG(extack,
1133 "Nexthop has invalid gateway or device mismatch");
1134 err = -EINVAL;
1135 }
1136 + rcu_read_unlock();
1137
1138 ip6_rt_put(grt);
1139 }
1140 @@ -4710,7 +4726,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
1141 table = rt->fib6_table->tb6_id;
1142 else
1143 table = RT6_TABLE_UNSPEC;
1144 - rtm->rtm_table = table;
1145 + rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
1146 if (nla_put_u32(skb, RTA_TABLE, table))
1147 goto nla_put_failure;
1148
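The rtm_table clamp above is needed because struct rtmsg carries the table id in an 8-bit field: ids of 256 and above cannot be represented there, so they are reported as RT_TABLE_COMPAT while the full 32-bit id continues to travel in the RTA_TABLE attribute emitted just below. The resulting netlink layout, schematically:

    /* rtm_table is an unsigned char in struct rtmsg: */
    rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
    /* userspace that cares about large ids reads RTA_TABLE instead: */
    if (nla_put_u32(skb, RTA_TABLE, table))
        goto nla_put_failure;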
1149 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
1150 index 868d7da7a0cb..de9aa5cb295c 100644
1151 --- a/net/ipv6/sit.c
1152 +++ b/net/ipv6/sit.c
1153 @@ -778,8 +778,9 @@ static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
1154 pbw0 = tunnel->ip6rd.prefixlen >> 5;
1155 pbi0 = tunnel->ip6rd.prefixlen & 0x1f;
1156
1157 - d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
1158 - tunnel->ip6rd.relay_prefixlen;
1159 + d = tunnel->ip6rd.relay_prefixlen < 32 ?
1160 + (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
1161 + tunnel->ip6rd.relay_prefixlen : 0;
1162
1163 pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
1164 if (pbi1 > 0)
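The new conditional avoids undefined behaviour: in C, shifting a 32-bit value right by 32 or more positions is undefined, and relay_prefixlen can legitimately be 32 (a full IPv4 relay prefix), in which case there are no destination bits left to extract and d is simply 0. The generic form of the guard (illustrative helper):

    /* shift counts must stay below the operand width (32 here): */
    static u32 safe_shr(u32 v, unsigned int n)
    {
        return n < 32 ? v >> n : 0;    /* n >= 32 would be UB */
    }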
1165 diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
1166 index 0ae6899edac0..37a69df17cab 100644
1167 --- a/net/l2tp/l2tp_ip6.c
1168 +++ b/net/l2tp/l2tp_ip6.c
1169 @@ -674,9 +674,6 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
1170 if (flags & MSG_OOB)
1171 goto out;
1172
1173 - if (addr_len)
1174 - *addr_len = sizeof(*lsa);
1175 -
1176 if (flags & MSG_ERRQUEUE)
1177 return ipv6_recv_error(sk, msg, len, addr_len);
1178
1179 @@ -706,6 +703,7 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
1180 lsa->l2tp_conn_id = 0;
1181 if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
1182 lsa->l2tp_scope_id = inet6_iif(skb);
1183 + *addr_len = sizeof(*lsa);
1184 }
1185
1186 if (np->rxopt.all)
1187 diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
1188 index 521189f4b666..6e419b15a9f8 100644
1189 --- a/net/rxrpc/conn_client.c
1190 +++ b/net/rxrpc/conn_client.c
1191 @@ -353,7 +353,7 @@ static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
1192 * normally have to take channel_lock but we do this before anyone else
1193 * can see the connection.
1194 */
1195 - list_add_tail(&call->chan_wait_link, &candidate->waiting_calls);
1196 + list_add(&call->chan_wait_link, &candidate->waiting_calls);
1197
1198 if (cp->exclusive) {
1199 call->conn = candidate;
1200 @@ -432,7 +432,7 @@ found_extant_conn:
1201 call->conn = conn;
1202 call->security_ix = conn->security_ix;
1203 call->service_id = conn->service_id;
1204 - list_add(&call->chan_wait_link, &conn->waiting_calls);
1205 + list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
1206 spin_unlock(&conn->channel_lock);
1207 _leave(" = 0 [extant %d]", conn->debug_id);
1208 return 0;
1209 diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
1210 index 84893bc67531..09b359784629 100644
1211 --- a/net/sched/cls_flower.c
1212 +++ b/net/sched/cls_flower.c
1213 @@ -1213,47 +1213,47 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
1214 if (err < 0)
1215 goto errout;
1216
1217 - if (!handle) {
1218 - handle = 1;
1219 - err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1220 - INT_MAX, GFP_KERNEL);
1221 - } else if (!fold) {
1222 - /* user specifies a handle and it doesn't exist */
1223 - err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1224 - handle, GFP_KERNEL);
1225 - }
1226 - if (err)
1227 - goto errout;
1228 - fnew->handle = handle;
1229 -
1230 if (tb[TCA_FLOWER_FLAGS]) {
1231 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
1232
1233 if (!tc_flags_valid(fnew->flags)) {
1234 err = -EINVAL;
1235 - goto errout_idr;
1236 + goto errout;
1237 }
1238 }
1239
1240 err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
1241 tp->chain->tmplt_priv, extack);
1242 if (err)
1243 - goto errout_idr;
1244 + goto errout;
1245
1246 err = fl_check_assign_mask(head, fnew, fold, mask);
1247 if (err)
1248 - goto errout_idr;
1249 + goto errout;
1250 +
1251 + if (!handle) {
1252 + handle = 1;
1253 + err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1254 + INT_MAX, GFP_KERNEL);
1255 + } else if (!fold) {
1256 + /* user specifies a handle and it doesn't exist */
1257 + err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1258 + handle, GFP_KERNEL);
1259 + }
1260 + if (err)
1261 + goto errout_mask;
1262 + fnew->handle = handle;
1263
1264 if (!tc_skip_sw(fnew->flags)) {
1265 if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) {
1266 err = -EEXIST;
1267 - goto errout_mask;
1268 + goto errout_idr;
1269 }
1270
1271 err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
1272 fnew->mask->filter_ht_params);
1273 if (err)
1274 - goto errout_mask;
1275 + goto errout_idr;
1276 }
1277
1278 if (!tc_skip_hw(fnew->flags)) {
1279 @@ -1290,12 +1290,13 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
1280 kfree(mask);
1281 return 0;
1282
1283 -errout_mask:
1284 - fl_mask_put(head, fnew->mask, false);
1285 -
1286 errout_idr:
1287 if (!fold)
1288 idr_remove(&head->handle_idr, fnew->handle);
1289 +
1290 +errout_mask:
1291 + fl_mask_put(head, fnew->mask, false);
1292 +
1293 errout:
1294 tcf_exts_destroy(&fnew->exts);
1295 kfree(fnew);
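The reordering above restores the usual goto-unwind discipline: resources are now acquired in the order flags, parms, mask, handle, hashtable entry, and the error labels release them in exactly the reverse order (errout_idr frees the handle, then falls through to errout_mask, then errout). A skeletal version of the pattern, with placeholder functions standing in for the real flower operations:

    static int acquire_mask(void), acquire_handle(void), insert_hashtable(void);
    static void release_handle(void), release_mask(void);

    /* Skeleton of the acquire/unwind ordering used above: each
     * error label undoes one step and falls through to the next. */
    static int change_filter(void)
    {
        int err;

        err = acquire_mask();
        if (err)
            goto errout;
        err = acquire_handle();
        if (err)
            goto errout_mask;
        err = insert_hashtable();
        if (err)
            goto errout_idr;
        return 0;

    errout_idr:
        release_handle();
    errout_mask:
        release_mask();
    errout:
        return err;
    }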
1296 diff --git a/net/sctp/stream.c b/net/sctp/stream.c
1297 index 2936ed17bf9e..3b47457862cc 100644
1298 --- a/net/sctp/stream.c
1299 +++ b/net/sctp/stream.c
1300 @@ -230,8 +230,6 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
1301 for (i = 0; i < stream->outcnt; i++)
1302 SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
1303
1304 - sched->init(stream);
1305 -
1306 in:
1307 sctp_stream_interleave_init(stream);
1308 if (!incnt)
1309 diff --git a/net/smc/smc.h b/net/smc/smc.h
1310 index 5721416d0605..adbdf195eb08 100644
1311 --- a/net/smc/smc.h
1312 +++ b/net/smc/smc.h
1313 @@ -113,9 +113,9 @@ struct smc_host_cdc_msg { /* Connection Data Control message */
1314 } __aligned(8);
1315
1316 enum smc_urg_state {
1317 - SMC_URG_VALID, /* data present */
1318 - SMC_URG_NOTYET, /* data pending */
1319 - SMC_URG_READ /* data was already read */
1320 + SMC_URG_VALID = 1, /* data present */
1321 + SMC_URG_NOTYET = 2, /* data pending */
1322 + SMC_URG_READ = 3, /* data was already read */
1323 };
1324
1325 struct smc_connection {
1326 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
1327 index c754f3a90a2e..f601933ad728 100644
1328 --- a/net/unix/af_unix.c
1329 +++ b/net/unix/af_unix.c
1330 @@ -888,7 +888,7 @@ retry:
1331 addr->hash ^= sk->sk_type;
1332
1333 __unix_remove_socket(sk);
1334 - u->addr = addr;
1335 + smp_store_release(&u->addr, addr);
1336 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
1337 spin_unlock(&unix_table_lock);
1338 err = 0;
1339 @@ -1058,7 +1058,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1340
1341 err = 0;
1342 __unix_remove_socket(sk);
1343 - u->addr = addr;
1344 + smp_store_release(&u->addr, addr);
1345 __unix_insert_socket(list, sk);
1346
1347 out_unlock:
1348 @@ -1329,15 +1329,29 @@ restart:
1349 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1350 otheru = unix_sk(other);
1351
1352 - /* copy address information from listening to new sock*/
1353 - if (otheru->addr) {
1354 - refcount_inc(&otheru->addr->refcnt);
1355 - newu->addr = otheru->addr;
1356 - }
1357 + /* copy address information from listening to new sock
1358 + *
1359 + * The contents of *(otheru->addr) and otheru->path
1360 + * are seen fully set up here, since we have found
1361 + * otheru in hash under unix_table_lock. Insertion
1362 + * into the hash chain we'd found it in had been done
1363 + * in an earlier critical area protected by unix_table_lock,
1364 + * the same one where we'd set *(otheru->addr) contents,
1365 + * as well as otheru->path and otheru->addr itself.
1366 + *
1367 + * Using smp_store_release() here to set newu->addr
1368 + * is enough to make those stores, as well as stores
1369 + * to newu->path visible to anyone who gets newu->addr
1370 + * by smp_load_acquire(). IOW, the same guarantees
1371 + * as for unix_sock instances bound in unix_bind() or
1372 + * in unix_autobind().
1373 + */
1374 if (otheru->path.dentry) {
1375 path_get(&otheru->path);
1376 newu->path = otheru->path;
1377 }
1378 + refcount_inc(&otheru->addr->refcnt);
1379 + smp_store_release(&newu->addr, otheru->addr);
1380
1381 /* Set credentials */
1382 copy_peercred(sk, other);
1383 @@ -1451,7 +1465,7 @@ out:
1384 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1385 {
1386 struct sock *sk = sock->sk;
1387 - struct unix_sock *u;
1388 + struct unix_address *addr;
1389 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1390 int err = 0;
1391
1392 @@ -1466,19 +1480,15 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1393 sock_hold(sk);
1394 }
1395
1396 - u = unix_sk(sk);
1397 - unix_state_lock(sk);
1398 - if (!u->addr) {
1399 + addr = smp_load_acquire(&unix_sk(sk)->addr);
1400 + if (!addr) {
1401 sunaddr->sun_family = AF_UNIX;
1402 sunaddr->sun_path[0] = 0;
1403 err = sizeof(short);
1404 } else {
1405 - struct unix_address *addr = u->addr;
1406 -
1407 err = addr->len;
1408 memcpy(sunaddr, addr->name, addr->len);
1409 }
1410 - unix_state_unlock(sk);
1411 sock_put(sk);
1412 out:
1413 return err;
1414 @@ -2071,11 +2081,11 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
1415
1416 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1417 {
1418 - struct unix_sock *u = unix_sk(sk);
1419 + struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
1420
1421 - if (u->addr) {
1422 - msg->msg_namelen = u->addr->len;
1423 - memcpy(msg->msg_name, u->addr->name, u->addr->len);
1424 + if (addr) {
1425 + msg->msg_namelen = addr->len;
1426 + memcpy(msg->msg_name, addr->name, addr->len);
1427 }
1428 }
1429
1430 @@ -2579,15 +2589,14 @@ static int unix_open_file(struct sock *sk)
1431 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1432 return -EPERM;
1433
1434 - unix_state_lock(sk);
1435 + if (!smp_load_acquire(&unix_sk(sk)->addr))
1436 + return -ENOENT;
1437 +
1438 path = unix_sk(sk)->path;
1439 - if (!path.dentry) {
1440 - unix_state_unlock(sk);
1441 + if (!path.dentry)
1442 return -ENOENT;
1443 - }
1444
1445 path_get(&path);
1446 - unix_state_unlock(sk);
1447
1448 fd = get_unused_fd_flags(O_CLOEXEC);
1449 if (fd < 0)
1450 @@ -2828,7 +2837,7 @@ static int unix_seq_show(struct seq_file *seq, void *v)
1451 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
1452 sock_i_ino(s));
1453
1454 - if (u->addr) {
1455 + if (u->addr) { // under unix_table_lock here
1456 int i, len;
1457 seq_putc(seq, ' ');
1458
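All of the u->addr accesses above (and the follow-ups in diag.c and lsm_audit.c below) switch to a release/acquire pairing so lock-free readers are safe: the publisher fully initializes the unix_address and only then makes the pointer visible with smp_store_release(); any reader that obtains the pointer via smp_load_acquire() is guaranteed to see the initialized contents. In miniature (struct fields and use() are hypothetical):

    /* writer: initialize fully, then publish */
    addr->len = len;
    memcpy(addr->name, name, len);
    smp_store_release(&u->addr, addr);

    /* reader: no lock needed */
    struct unix_address *a = smp_load_acquire(&u->addr);
    if (a)
        use(a->name, a->len);    /* contents guaranteed visible */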
1459 diff --git a/net/unix/diag.c b/net/unix/diag.c
1460 index 384c84e83462..3183d9b8ab33 100644
1461 --- a/net/unix/diag.c
1462 +++ b/net/unix/diag.c
1463 @@ -10,7 +10,8 @@
1464
1465 static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
1466 {
1467 - struct unix_address *addr = unix_sk(sk)->addr;
1468 + /* might or might not have unix_table_lock */
1469 + struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
1470
1471 if (!addr)
1472 return 0;
1473 diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
1474 index fef473c736fa..f7f53f9ae7ef 100644
1475 --- a/net/x25/af_x25.c
1476 +++ b/net/x25/af_x25.c
1477 @@ -679,8 +679,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1478 struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
1479 int len, i, rc = 0;
1480
1481 - if (!sock_flag(sk, SOCK_ZAPPED) ||
1482 - addr_len != sizeof(struct sockaddr_x25) ||
1483 + if (addr_len != sizeof(struct sockaddr_x25) ||
1484 addr->sx25_family != AF_X25) {
1485 rc = -EINVAL;
1486 goto out;
1487 @@ -695,9 +694,13 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1488 }
1489
1490 lock_sock(sk);
1491 - x25_sk(sk)->source_addr = addr->sx25_addr;
1492 - x25_insert_socket(sk);
1493 - sock_reset_flag(sk, SOCK_ZAPPED);
1494 + if (sock_flag(sk, SOCK_ZAPPED)) {
1495 + x25_sk(sk)->source_addr = addr->sx25_addr;
1496 + x25_insert_socket(sk);
1497 + sock_reset_flag(sk, SOCK_ZAPPED);
1498 + } else {
1499 + rc = -EINVAL;
1500 + }
1501 release_sock(sk);
1502 SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
1503 out:
1504 @@ -813,8 +816,13 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
1505 sock->state = SS_CONNECTED;
1506 rc = 0;
1507 out_put_neigh:
1508 - if (rc)
1509 + if (rc) {
1510 + read_lock_bh(&x25_list_lock);
1511 x25_neigh_put(x25->neighbour);
1512 + x25->neighbour = NULL;
1513 + read_unlock_bh(&x25_list_lock);
1514 + x25->state = X25_STATE_0;
1515 + }
1516 out_put_route:
1517 x25_route_put(rt);
1518 out:
1519 diff --git a/security/lsm_audit.c b/security/lsm_audit.c
1520 index f84001019356..33028c098ef3 100644
1521 --- a/security/lsm_audit.c
1522 +++ b/security/lsm_audit.c
1523 @@ -321,6 +321,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
1524 if (a->u.net->sk) {
1525 struct sock *sk = a->u.net->sk;
1526 struct unix_sock *u;
1527 + struct unix_address *addr;
1528 int len = 0;
1529 char *p = NULL;
1530
1531 @@ -351,14 +352,15 @@ static void dump_common_audit_data(struct audit_buffer *ab,
1532 #endif
1533 case AF_UNIX:
1534 u = unix_sk(sk);
1535 + addr = smp_load_acquire(&u->addr);
1536 + if (!addr)
1537 + break;
1538 if (u->path.dentry) {
1539 audit_log_d_path(ab, " path=", &u->path);
1540 break;
1541 }
1542 - if (!u->addr)
1543 - break;
1544 - len = u->addr->len-sizeof(short);
1545 - p = &u->addr->name->sun_path[0];
1546 + len = addr->len-sizeof(short);
1547 + p = &addr->name->sun_path[0];
1548 audit_log_format(ab, " path=");
1549 if (*p)
1550 audit_log_untrustedstring(ab, p);
1551 diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
1552 index de4af8a41ff0..5636e89ce5c7 100644
1553 --- a/sound/firewire/bebob/bebob.c
1554 +++ b/sound/firewire/bebob/bebob.c
1555 @@ -474,7 +474,19 @@ static const struct ieee1394_device_id bebob_id_table[] = {
1556 /* Focusrite, SaffirePro 26 I/O */
1557 SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000003, &saffirepro_26_spec),
1558 /* Focusrite, SaffirePro 10 I/O */
1559 - SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000006, &saffirepro_10_spec),
1560 + {
1561 + // The combination of vendor_id and model_id is the same as
1562 + // the one of Liquid Saffire 56.
1563 + .match_flags = IEEE1394_MATCH_VENDOR_ID |
1564 + IEEE1394_MATCH_MODEL_ID |
1565 + IEEE1394_MATCH_SPECIFIER_ID |
1566 + IEEE1394_MATCH_VERSION,
1567 + .vendor_id = VEN_FOCUSRITE,
1568 + .model_id = 0x000006,
1569 + .specifier_id = 0x00a02d,
1570 + .version = 0x010001,
1571 + .driver_data = (kernel_ulong_t)&saffirepro_10_spec,
1572 + },
1573 /* Focusrite, Saffire(no label and LE) */
1574 SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, MODEL_FOCUSRITE_SAFFIRE_BOTH,
1575 &saffire_spec),
1576 diff --git a/sound/firewire/motu/amdtp-motu.c b/sound/firewire/motu/amdtp-motu.c
1577 index f0555a24d90e..6c9b743ea74b 100644
1578 --- a/sound/firewire/motu/amdtp-motu.c
1579 +++ b/sound/firewire/motu/amdtp-motu.c
1580 @@ -136,7 +136,9 @@ static void read_pcm_s32(struct amdtp_stream *s,
1581 byte = (u8 *)buffer + p->pcm_byte_offset;
1582
1583 for (c = 0; c < channels; ++c) {
1584 - *dst = (byte[0] << 24) | (byte[1] << 16) | byte[2];
1585 + *dst = (byte[0] << 24) |
1586 + (byte[1] << 16) |
1587 + (byte[2] << 8);
1588 byte += 3;
1589 dst++;
1590 }
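The corrected expression places the third byte in bits 8-15 so the big-endian 24-bit sample occupies the top 24 bits of the 32-bit slot, as the S32 PCM layout expects. A quick check with the hypothetical helper below: for input bytes 0x12 0x34 0x56 the fixed code yields 0x12345600, while the old expression yielded 0x12340056 (the low byte landed in bits 0-7 and bits 8-15 stayed zero).

    /* Hypothetical standalone version of the fixed conversion: */
    static s32 be24_to_s32(const u8 *byte)
    {
        return (byte[0] << 24) | (byte[1] << 16) | (byte[2] << 8);
    }
    /* be24_to_s32((const u8 *)"\x12\x34\x56") == 0x12345600 */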
1591 diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
1592 index 617ff1aa818f..27eb0270a711 100644
1593 --- a/sound/hda/hdac_i915.c
1594 +++ b/sound/hda/hdac_i915.c
1595 @@ -144,9 +144,9 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
1596 return -ENODEV;
1597 if (!acomp->ops) {
1598 request_module("i915");
1599 - /* 10s timeout */
1600 + /* 60s timeout */
1601 wait_for_completion_timeout(&bind_complete,
1602 - msecs_to_jiffies(10 * 1000));
1603 + msecs_to_jiffies(60 * 1000));
1604 }
1605 if (!acomp->ops) {
1606 dev_info(bus->dev, "couldn't bind with audio component\n");
1607 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
1608 index fead0acb29f7..3cbd2119e148 100644
1609 --- a/sound/pci/hda/patch_conexant.c
1610 +++ b/sound/pci/hda/patch_conexant.c
1611 @@ -936,6 +936,9 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
1612 SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
1613 SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
1614 SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
1615 + SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
1616 + SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
1617 + SND_PCI_QUIRK(0x103c, 0x8458, "HP Z2 G4 mini premium", CXT_FIXUP_HP_MIC_NO_PRESENCE),
1618 SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
1619 SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
1620 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
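
The three new rows simply extend the quirk table; entries in tables like cxt5066_fixups[] are matched against the codec's PCI subsystem vendor/device IDs and the first hit selects the fixup. A hypothetical userspace sketch of that lookup shape (struct and function names are not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    struct quirk { uint16_t subvendor, subdevice; const char *name; int fixup; };

    static const struct quirk table[] = {
        { 0x103c, 0x8455, "HP Z2 G4",     1 /* CXT_FIXUP_HP_MIC_NO_PRESENCE */ },
        { 0x103c, 0x8456, "HP Z2 G4 SFF", 1 },
        { 0 }                              /* terminator, like the driver tables */
    };

    static const struct quirk *lookup(uint16_t sv, uint16_t sd)
    {
        for (const struct quirk *q = table; q->subvendor; q++)
            if (q->subvendor == sv && q->subdevice == sd)
                return q;                  /* first match wins */
        return NULL;
    }

    int main(void)
    {
        const struct quirk *q = lookup(0x103c, 0x8456);
        printf("%s\n", q ? q->name : "no quirk");
        return 0;
    }
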
1621 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1622 index bf1ffcaab23f..877293149e3a 100644
1623 --- a/sound/pci/hda/patch_realtek.c
1624 +++ b/sound/pci/hda/patch_realtek.c
1625 @@ -118,6 +118,7 @@ struct alc_spec {
1626 unsigned int has_alc5505_dsp:1;
1627 unsigned int no_depop_delay:1;
1628 unsigned int done_hp_init:1;
1629 + unsigned int no_shutup_pins:1;
1630
1631 /* for PLL fix */
1632 hda_nid_t pll_nid;
1633 @@ -476,6 +477,14 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
1634 set_eapd(codec, *p, on);
1635 }
1636
1637 +static void alc_shutup_pins(struct hda_codec *codec)
1638 +{
1639 + struct alc_spec *spec = codec->spec;
1640 +
1641 + if (!spec->no_shutup_pins)
1642 + snd_hda_shutup_pins(codec);
1643 +}
1644 +
1645 /* generic shutup callback;
1646 * just turning off EAPD and a little pause for avoiding pop-noise
1647 */
1648 @@ -486,7 +495,7 @@ static void alc_eapd_shutup(struct hda_codec *codec)
1649 alc_auto_setup_eapd(codec, false);
1650 if (!spec->no_depop_delay)
1651 msleep(200);
1652 - snd_hda_shutup_pins(codec);
1653 + alc_shutup_pins(codec);
1654 }
1655
1656 /* generic EAPD initialization */
1657 @@ -814,7 +823,7 @@ static inline void alc_shutup(struct hda_codec *codec)
1658 if (spec && spec->shutup)
1659 spec->shutup(codec);
1660 else
1661 - snd_hda_shutup_pins(codec);
1662 + alc_shutup_pins(codec);
1663 }
1664
1665 static void alc_reboot_notify(struct hda_codec *codec)
1666 @@ -2950,7 +2959,7 @@ static void alc269_shutup(struct hda_codec *codec)
1667 (alc_get_coef0(codec) & 0x00ff) == 0x018) {
1668 msleep(150);
1669 }
1670 - snd_hda_shutup_pins(codec);
1671 + alc_shutup_pins(codec);
1672 }
1673
1674 static struct coef_fw alc282_coefs[] = {
1675 @@ -3053,14 +3062,15 @@ static void alc282_shutup(struct hda_codec *codec)
1676 if (hp_pin_sense)
1677 msleep(85);
1678
1679 - snd_hda_codec_write(codec, hp_pin, 0,
1680 - AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1681 + if (!spec->no_shutup_pins)
1682 + snd_hda_codec_write(codec, hp_pin, 0,
1683 + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1684
1685 if (hp_pin_sense)
1686 msleep(100);
1687
1688 alc_auto_setup_eapd(codec, false);
1689 - snd_hda_shutup_pins(codec);
1690 + alc_shutup_pins(codec);
1691 alc_write_coef_idx(codec, 0x78, coef78);
1692 }
1693
1694 @@ -3166,15 +3176,16 @@ static void alc283_shutup(struct hda_codec *codec)
1695 if (hp_pin_sense)
1696 msleep(100);
1697
1698 - snd_hda_codec_write(codec, hp_pin, 0,
1699 - AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1700 + if (!spec->no_shutup_pins)
1701 + snd_hda_codec_write(codec, hp_pin, 0,
1702 + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1703
1704 alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
1705
1706 if (hp_pin_sense)
1707 msleep(100);
1708 alc_auto_setup_eapd(codec, false);
1709 - snd_hda_shutup_pins(codec);
1710 + alc_shutup_pins(codec);
1711 alc_write_coef_idx(codec, 0x43, 0x9614);
1712 }
1713
1714 @@ -3240,14 +3251,15 @@ static void alc256_shutup(struct hda_codec *codec)
1715 /* NOTE: call this before clearing the pin, otherwise codec stalls */
1716 alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
1717
1718 - snd_hda_codec_write(codec, hp_pin, 0,
1719 - AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1720 + if (!spec->no_shutup_pins)
1721 + snd_hda_codec_write(codec, hp_pin, 0,
1722 + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1723
1724 if (hp_pin_sense)
1725 msleep(100);
1726
1727 alc_auto_setup_eapd(codec, false);
1728 - snd_hda_shutup_pins(codec);
1729 + alc_shutup_pins(codec);
1730 }
1731
1732 static void alc225_init(struct hda_codec *codec)
1733 @@ -3334,7 +3346,7 @@ static void alc225_shutup(struct hda_codec *codec)
1734 msleep(100);
1735
1736 alc_auto_setup_eapd(codec, false);
1737 - snd_hda_shutup_pins(codec);
1738 + alc_shutup_pins(codec);
1739 }
1740
1741 static void alc_default_init(struct hda_codec *codec)
1742 @@ -3388,14 +3400,15 @@ static void alc_default_shutup(struct hda_codec *codec)
1743 if (hp_pin_sense)
1744 msleep(85);
1745
1746 - snd_hda_codec_write(codec, hp_pin, 0,
1747 - AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1748 + if (!spec->no_shutup_pins)
1749 + snd_hda_codec_write(codec, hp_pin, 0,
1750 + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1751
1752 if (hp_pin_sense)
1753 msleep(100);
1754
1755 alc_auto_setup_eapd(codec, false);
1756 - snd_hda_shutup_pins(codec);
1757 + alc_shutup_pins(codec);
1758 }
1759
1760 static void alc294_hp_init(struct hda_codec *codec)
1761 @@ -3412,8 +3425,9 @@ static void alc294_hp_init(struct hda_codec *codec)
1762
1763 msleep(100);
1764
1765 - snd_hda_codec_write(codec, hp_pin, 0,
1766 - AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1767 + if (!spec->no_shutup_pins)
1768 + snd_hda_codec_write(codec, hp_pin, 0,
1769 + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1770
1771 alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
1772 alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
1773 @@ -5007,16 +5021,12 @@ static void alc_fixup_auto_mute_via_amp(struct hda_codec *codec,
1774 }
1775 }
1776
1777 -static void alc_no_shutup(struct hda_codec *codec)
1778 -{
1779 -}
1780 -
1781 static void alc_fixup_no_shutup(struct hda_codec *codec,
1782 const struct hda_fixup *fix, int action)
1783 {
1784 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
1785 struct alc_spec *spec = codec->spec;
1786 - spec->shutup = alc_no_shutup;
1787 + spec->no_shutup_pins = 1;
1788 }
1789 }
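
This hunk replaces the empty alc_no_shutup() callback, which suppressed the whole shutdown sequence, with a 1-bit flag that the new alc_shutup_pins() wrapper checks, so EAPD handling and the depop delays still run and only the pin power-down is skipped. A minimal userspace sketch of that flag-gated pattern (types and printouts are stand-ins, not the driver's API):

    #include <stdio.h>

    struct codec_spec {
        unsigned int no_shutup_pins:1;   /* mirrors alc_spec.no_shutup_pins */
    };

    static void shutup_pins(struct codec_spec *spec)
    {
        if (!spec->no_shutup_pins)
            printf("pins powered down\n");  /* stands in for snd_hda_shutup_pins() */
    }

    static void full_shutdown(struct codec_spec *spec)
    {
        printf("EAPD off, depop delay\n");  /* still runs even with the flag set */
        shutup_pins(spec);                  /* only this step is gated           */
    }

    int main(void)
    {
        struct codec_spec quirky = { .no_shutup_pins = 1 };
        full_shutdown(&quirky);
        return 0;
    }

Gating one step keeps the per-codec shutup routines on their normal path, which is why the same "if (!spec->no_shutup_pins)" check appears in each of the alc2xx_shutup hunks above.
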
1790
1791 @@ -5602,6 +5612,7 @@ enum {
1792 ALC294_FIXUP_ASUS_SPK,
1793 ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
1794 ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
1795 + ALC255_FIXUP_ACER_HEADSET_MIC,
1796 };
1797
1798 static const struct hda_fixup alc269_fixups[] = {
1799 @@ -6546,6 +6557,16 @@ static const struct hda_fixup alc269_fixups[] = {
1800 .chained = true,
1801 .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
1802 },
1803 + [ALC255_FIXUP_ACER_HEADSET_MIC] = {
1804 + .type = HDA_FIXUP_PINS,
1805 + .v.pins = (const struct hda_pintbl[]) {
1806 + { 0x19, 0x03a11130 },
1807 + { 0x1a, 0x90a60140 }, /* use as internal mic */
1808 + { }
1809 + },
1810 + .chained = true,
1811 + .chain_id = ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC
1812 + },
1813 };
1814
1815 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
1816 @@ -6565,6 +6586,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
1817 SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
1818 SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
1819 SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
1820 + SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
1821 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
1822 SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
1823 SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
1824 @@ -6596,6 +6618,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
1825 SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
1826 SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
1827 SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
1828 + SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
1829 SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
1830 SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
1831 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
1832 @@ -6670,11 +6693,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
1833 SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
1834 SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
1835 SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
1836 + SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
1837 + SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
1838 SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
1839 SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
1840 SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
1841 - SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
1842 - SND_PCI_QUIRK(0x103c, 0x82c0, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
1843 + SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
1844 + SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
1845 SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
1846 SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
1847 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
1848 @@ -6690,7 +6715,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
1849 SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
1850 SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
1851 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
1852 - SND_PCI_QUIRK(0x1043, 0x14a1, "ASUS UX533FD", ALC294_FIXUP_ASUS_SPK),
1853 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
1854 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
1855 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
1856 @@ -7303,6 +7327,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
1857 {0x14, 0x90170110},
1858 {0x1b, 0x90a70130},
1859 {0x21, 0x04211020}),
1860 + SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
1861 + {0x12, 0x90a60130},
1862 + {0x17, 0x90170110},
1863 + {0x21, 0x03211020}),
1864 SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
1865 {0x12, 0x90a60130},
1866 {0x17, 0x90170110},