Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0154-5.4.55-all-fixes.patch

Parent Directory | Revision Log


Revision 3554 - (hide annotations) (download)
Tue Aug 4 14:58:22 2020 UTC (3 years, 10 months ago) by niro
File size: 17779 byte(s)
-linux-5.4.55
1 niro 3554 diff --git a/Makefile b/Makefile
2     index ea711f30de29..072fe0eaa740 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 5
8     PATCHLEVEL = 4
9     -SUBLEVEL = 54
10     +SUBLEVEL = 55
11     EXTRAVERSION =
12     NAME = Kleptomaniac Octopus
13    
14     diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
15     index 0bd9b291bb29..92f0960e9014 100644
16     --- a/drivers/base/power/wakeup.c
17     +++ b/drivers/base/power/wakeup.c
18     @@ -1073,6 +1073,9 @@ static void *wakeup_sources_stats_seq_next(struct seq_file *m,
19     break;
20     }
21    
22     + if (!next_ws)
23     + print_wakeup_source_stats(m, &deleted_ws);
24     +
25     return next_ws;
26     }
27    
28     diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
29     index e16afa27700d..f58baff2be0a 100644
30     --- a/drivers/base/regmap/regmap-debugfs.c
31     +++ b/drivers/base/regmap/regmap-debugfs.c
32     @@ -227,6 +227,9 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
33     if (*ppos < 0 || !count)
34     return -EINVAL;
35    
36     + if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
37     + count = PAGE_SIZE << (MAX_ORDER - 1);
38     +
39     buf = kmalloc(count, GFP_KERNEL);
40     if (!buf)
41     return -ENOMEM;
42     @@ -371,6 +374,9 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
43     if (*ppos < 0 || !count)
44     return -EINVAL;
45    
46     + if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
47     + count = PAGE_SIZE << (MAX_ORDER - 1);
48     +
49     buf = kmalloc(count, GFP_KERNEL);
50     if (!buf)
51     return -ENOMEM;
52     diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
53     index cd9d08695cc1..00c4beb760c3 100644
54     --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
55     +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
56     @@ -2802,7 +2802,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
57     }
58    
59     /* Do this here, so we can be verbose early */
60     - SET_NETDEV_DEV(net_dev, dev->parent);
61     + SET_NETDEV_DEV(net_dev, dev);
62     dev_set_drvdata(dev, net_dev);
63    
64     priv = netdev_priv(net_dev);
65     diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
66     index 914be5847386..cdcc380b4c26 100644
67     --- a/drivers/net/wan/x25_asy.c
68     +++ b/drivers/net/wan/x25_asy.c
69     @@ -183,7 +183,7 @@ static inline void x25_asy_unlock(struct x25_asy *sl)
70     netif_wake_queue(sl->dev);
71     }
72    
73     -/* Send one completely decapsulated IP datagram to the IP layer. */
74     +/* Send an LAPB frame to the LAPB module to process. */
75    
76     static void x25_asy_bump(struct x25_asy *sl)
77     {
78     @@ -195,13 +195,12 @@ static void x25_asy_bump(struct x25_asy *sl)
79     count = sl->rcount;
80     dev->stats.rx_bytes += count;
81    
82     - skb = dev_alloc_skb(count+1);
83     + skb = dev_alloc_skb(count);
84     if (skb == NULL) {
85     netdev_warn(sl->dev, "memory squeeze, dropping packet\n");
86     dev->stats.rx_dropped++;
87     return;
88     }
89     - skb_push(skb, 1); /* LAPB internal control */
90     skb_put_data(skb, sl->rbuff, count);
91     skb->protocol = x25_type_trans(skb, sl->dev);
92     err = lapb_data_received(skb->dev, skb);
93     @@ -209,7 +208,6 @@ static void x25_asy_bump(struct x25_asy *sl)
94     kfree_skb(skb);
95     printk(KERN_DEBUG "x25_asy: data received err - %d\n", err);
96     } else {
97     - netif_rx(skb);
98     dev->stats.rx_packets++;
99     }
100     }
101     @@ -356,12 +354,21 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
102     */
103    
104     /*
105     - * Called when I frame data arrives. We did the work above - throw it
106     - * at the net layer.
107     + * Called when I frame data arrive. We add a pseudo header for upper
108     + * layers and pass it to upper layers.
109     */
110    
111     static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
112     {
113     + if (skb_cow(skb, 1)) {
114     + kfree_skb(skb);
115     + return NET_RX_DROP;
116     + }
117     + skb_push(skb, 1);
118     + skb->data[0] = X25_IFACE_DATA;
119     +
120     + skb->protocol = x25_type_trans(skb, dev);
121     +
122     return netif_rx(skb);
123     }
124    
125     @@ -657,7 +664,7 @@ static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
126     switch (s) {
127     case X25_END:
128     if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
129     - sl->rcount > 2)
130     + sl->rcount >= 2)
131     x25_asy_bump(sl);
132     clear_bit(SLF_ESCAPE, &sl->flags);
133     sl->rcount = 0;
134     diff --git a/include/linux/tcp.h b/include/linux/tcp.h
135     index 668e25a76d69..358deb4ff830 100644
136     --- a/include/linux/tcp.h
137     +++ b/include/linux/tcp.h
138     @@ -216,6 +216,8 @@ struct tcp_sock {
139     } rack;
140     u16 advmss; /* Advertised MSS */
141     u8 compressed_ack;
142     + u8 tlp_retrans:1, /* TLP is a retransmission */
143     + unused_1:7;
144     u32 chrono_start; /* Start time in jiffies of a TCP chrono */
145     u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
146     u8 chrono_type:2, /* current chronograph type */
147     @@ -238,7 +240,7 @@ struct tcp_sock {
148     save_syn:1, /* Save headers of SYN packet */
149     is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */
150     syn_smc:1; /* SYN includes SMC */
151     - u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
152     + u32 tlp_high_seq; /* snd_nxt at the time of TLP */
153    
154     u32 tcp_tx_delay; /* delay (in usec) added to TX packets */
155     u64 tcp_wstamp_ns; /* departure time for next sent data packet */
156     diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
157     index e5a3dc28116d..2fdb1b573e8c 100644
158     --- a/net/ax25/af_ax25.c
159     +++ b/net/ax25/af_ax25.c
160     @@ -1187,7 +1187,10 @@ static int __must_check ax25_connect(struct socket *sock,
161     if (addr_len > sizeof(struct sockaddr_ax25) &&
162     fsa->fsa_ax25.sax25_ndigis != 0) {
163     /* Valid number of digipeaters ? */
164     - if (fsa->fsa_ax25.sax25_ndigis < 1 || fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS) {
165     + if (fsa->fsa_ax25.sax25_ndigis < 1 ||
166     + fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS ||
167     + addr_len < sizeof(struct sockaddr_ax25) +
168     + sizeof(ax25_address) * fsa->fsa_ax25.sax25_ndigis) {
169     err = -EINVAL;
170     goto out_release;
171     }
172     @@ -1507,7 +1510,10 @@ static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
173     struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax;
174    
175     /* Valid number of digipeaters ? */
176     - if (usax->sax25_ndigis < 1 || usax->sax25_ndigis > AX25_MAX_DIGIS) {
177     + if (usax->sax25_ndigis < 1 ||
178     + usax->sax25_ndigis > AX25_MAX_DIGIS ||
179     + addr_len < sizeof(struct sockaddr_ax25) +
180     + sizeof(ax25_address) * usax->sax25_ndigis) {
181     err = -EINVAL;
182     goto out;
183     }
184     diff --git a/net/core/dev.c b/net/core/dev.c
185     index 727965565d31..25858f1f67cf 100644
186     --- a/net/core/dev.c
187     +++ b/net/core/dev.c
188     @@ -5229,7 +5229,7 @@ static void flush_backlog(struct work_struct *work)
189     skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
190     if (skb->dev->reg_state == NETREG_UNREGISTERING) {
191     __skb_unlink(skb, &sd->input_pkt_queue);
192     - kfree_skb(skb);
193     + dev_kfree_skb_irq(skb);
194     input_queue_head_incr(sd);
195     }
196     }
197     diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
198     index 4c826b8bf9b1..2ebf9b252779 100644
199     --- a/net/core/net-sysfs.c
200     +++ b/net/core/net-sysfs.c
201     @@ -1036,7 +1036,7 @@ static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
202     trans_timeout = queue->trans_timeout;
203     spin_unlock_irq(&queue->_xmit_lock);
204    
205     - return sprintf(buf, "%lu", trans_timeout);
206     + return sprintf(buf, fmt_ulong, trans_timeout);
207     }
208    
209     static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
210     diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
211     index 944acb1a9f29..b0c06a063776 100644
212     --- a/net/core/rtnetlink.c
213     +++ b/net/core/rtnetlink.c
214     @@ -3231,7 +3231,8 @@ replay:
215     */
216     if (err < 0) {
217     /* If device is not registered at all, free it now */
218     - if (dev->reg_state == NETREG_UNINITIALIZED)
219     + if (dev->reg_state == NETREG_UNINITIALIZED ||
220     + dev->reg_state == NETREG_UNREGISTERED)
221     free_netdev(dev);
222     goto out;
223     }
224     diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
225     index f3ceec93f392..40829111fe00 100644
226     --- a/net/core/sock_reuseport.c
227     +++ b/net/core/sock_reuseport.c
228     @@ -112,6 +112,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
229     more_reuse->prog = reuse->prog;
230     more_reuse->reuseport_id = reuse->reuseport_id;
231     more_reuse->bind_inany = reuse->bind_inany;
232     + more_reuse->has_conns = reuse->has_conns;
233    
234     memcpy(more_reuse->socks, reuse->socks,
235     reuse->num_socks * sizeof(struct sock *));
236     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
237     index 6d331307beca..5040f7ca37ec 100644
238     --- a/net/ipv4/tcp_input.c
239     +++ b/net/ipv4/tcp_input.c
240     @@ -3505,10 +3505,8 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
241     }
242     }
243    
244     -/* This routine deals with acks during a TLP episode.
245     - * We mark the end of a TLP episode on receiving TLP dupack or when
246     - * ack is after tlp_high_seq.
247     - * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe.
248     +/* This routine deals with acks during a TLP episode and ends an episode by
249     + * resetting tlp_high_seq. Ref: TLP algorithm in draft-ietf-tcpm-rack
250     */
251     static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
252     {
253     @@ -3517,7 +3515,10 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
254     if (before(ack, tp->tlp_high_seq))
255     return;
256    
257     - if (flag & FLAG_DSACKING_ACK) {
258     + if (!tp->tlp_retrans) {
259     + /* TLP of new data has been acknowledged */
260     + tp->tlp_high_seq = 0;
261     + } else if (flag & FLAG_DSACKING_ACK) {
262     /* This DSACK means original and TLP probe arrived; no loss */
263     tp->tlp_high_seq = 0;
264     } else if (after(ack, tp->tlp_high_seq)) {
265     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
266     index 5dc7485c4076..4407193bd702 100644
267     --- a/net/ipv4/tcp_output.c
268     +++ b/net/ipv4/tcp_output.c
269     @@ -2564,6 +2564,11 @@ void tcp_send_loss_probe(struct sock *sk)
270     int pcount;
271     int mss = tcp_current_mss(sk);
272    
273     + /* At most one outstanding TLP */
274     + if (tp->tlp_high_seq)
275     + goto rearm_timer;
276     +
277     + tp->tlp_retrans = 0;
278     skb = tcp_send_head(sk);
279     if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
280     pcount = tp->packets_out;
281     @@ -2581,10 +2586,6 @@ void tcp_send_loss_probe(struct sock *sk)
282     return;
283     }
284    
285     - /* At most one outstanding TLP retransmission. */
286     - if (tp->tlp_high_seq)
287     - goto rearm_timer;
288     -
289     if (skb_still_in_host_queue(sk, skb))
290     goto rearm_timer;
291    
292     @@ -2606,10 +2607,12 @@ void tcp_send_loss_probe(struct sock *sk)
293     if (__tcp_retransmit_skb(sk, skb, 1))
294     goto rearm_timer;
295    
296     + tp->tlp_retrans = 1;
297     +
298     +probe_sent:
299     /* Record snd_nxt for loss detection. */
300     tp->tlp_high_seq = tp->snd_nxt;
301    
302     -probe_sent:
303     NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
304     /* Reset s.t. tcp_rearm_rto will restart timer from now */
305     inet_csk(sk)->icsk_pending = 0;
306     diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
307     index f3b7cb725c1b..5d016bbdf16e 100644
308     --- a/net/ipv4/udp.c
309     +++ b/net/ipv4/udp.c
310     @@ -413,7 +413,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
311     struct udp_hslot *hslot2,
312     struct sk_buff *skb)
313     {
314     - struct sock *sk, *result;
315     + struct sock *sk, *result, *reuseport_result;
316     int score, badness;
317     u32 hash = 0;
318    
319     @@ -423,17 +423,20 @@ static struct sock *udp4_lib_lookup2(struct net *net,
320     score = compute_score(sk, net, saddr, sport,
321     daddr, hnum, dif, sdif);
322     if (score > badness) {
323     + reuseport_result = NULL;
324     +
325     if (sk->sk_reuseport &&
326     sk->sk_state != TCP_ESTABLISHED) {
327     hash = udp_ehashfn(net, daddr, hnum,
328     saddr, sport);
329     - result = reuseport_select_sock(sk, hash, skb,
330     - sizeof(struct udphdr));
331     - if (result && !reuseport_has_conns(sk, false))
332     - return result;
333     + reuseport_result = reuseport_select_sock(sk, hash, skb,
334     + sizeof(struct udphdr));
335     + if (reuseport_result && !reuseport_has_conns(sk, false))
336     + return reuseport_result;
337     }
338     +
339     + result = reuseport_result ? : sk;
340     badness = score;
341     - result = sk;
342     }
343     }
344     return result;
345     @@ -2045,7 +2048,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
346     /*
347     * UDP-Lite specific tests, ignored on UDP sockets
348     */
349     - if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
350     + if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
351    
352     /*
353     * MIB statistics other than incrementing the error count are
354     diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
355     index 04d76f043e18..44876509d215 100644
356     --- a/net/ipv6/ip6_gre.c
357     +++ b/net/ipv6/ip6_gre.c
358     @@ -1560,17 +1560,18 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
359     static int __net_init ip6gre_init_net(struct net *net)
360     {
361     struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
362     + struct net_device *ndev;
363     int err;
364    
365     if (!net_has_fallback_tunnels(net))
366     return 0;
367     - ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
368     - NET_NAME_UNKNOWN,
369     - ip6gre_tunnel_setup);
370     - if (!ign->fb_tunnel_dev) {
371     + ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
372     + NET_NAME_UNKNOWN, ip6gre_tunnel_setup);
373     + if (!ndev) {
374     err = -ENOMEM;
375     goto err_alloc_dev;
376     }
377     + ign->fb_tunnel_dev = ndev;
378     dev_net_set(ign->fb_tunnel_dev, net);
379     /* FB netdevice is special: we have one, and only one per netns.
380     * Allowing to move it to another netns is clearly unsafe.
381     @@ -1590,7 +1591,7 @@ static int __net_init ip6gre_init_net(struct net *net)
382     return 0;
383    
384     err_reg_dev:
385     - free_netdev(ign->fb_tunnel_dev);
386     + free_netdev(ndev);
387     err_alloc_dev:
388     return err;
389     }
390     diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
391     index 9fec580c968e..6762430280f5 100644
392     --- a/net/ipv6/udp.c
393     +++ b/net/ipv6/udp.c
394     @@ -148,7 +148,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
395     int dif, int sdif, struct udp_hslot *hslot2,
396     struct sk_buff *skb)
397     {
398     - struct sock *sk, *result;
399     + struct sock *sk, *result, *reuseport_result;
400     int score, badness;
401     u32 hash = 0;
402    
403     @@ -158,17 +158,20 @@ static struct sock *udp6_lib_lookup2(struct net *net,
404     score = compute_score(sk, net, saddr, sport,
405     daddr, hnum, dif, sdif);
406     if (score > badness) {
407     + reuseport_result = NULL;
408     +
409     if (sk->sk_reuseport &&
410     sk->sk_state != TCP_ESTABLISHED) {
411     hash = udp6_ehashfn(net, daddr, hnum,
412     saddr, sport);
413    
414     - result = reuseport_select_sock(sk, hash, skb,
415     - sizeof(struct udphdr));
416     - if (result && !reuseport_has_conns(sk, false))
417     - return result;
418     + reuseport_result = reuseport_select_sock(sk, hash, skb,
419     + sizeof(struct udphdr));
420     + if (reuseport_result && !reuseport_has_conns(sk, false))
421     + return reuseport_result;
422     }
423     - result = sk;
424     +
425     + result = reuseport_result ? : sk;
426     badness = score;
427     }
428     }
429     @@ -643,7 +646,7 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
430     /*
431     * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
432     */
433     - if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
434     + if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
435    
436     if (up->pcrlen == 0) { /* full coverage was set */
437     net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
438     diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
439     index 14c101e104ce..1ce1e710d025 100644
440     --- a/net/qrtr/qrtr.c
441     +++ b/net/qrtr/qrtr.c
442     @@ -1004,6 +1004,7 @@ static int qrtr_release(struct socket *sock)
443     sk->sk_state_change(sk);
444    
445     sock_set_flag(sk, SOCK_DEAD);
446     + sock_orphan(sk);
447     sock->sk = NULL;
448    
449     if (!sock_flag(sk, SOCK_ZAPPED))
450     diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
451     index 8578c39ec839..6896a33ef842 100644
452     --- a/net/rxrpc/recvmsg.c
453     +++ b/net/rxrpc/recvmsg.c
454     @@ -464,7 +464,7 @@ try_again:
455     list_empty(&rx->recvmsg_q) &&
456     rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
457     release_sock(&rx->sk);
458     - return -ENODATA;
459     + return -EAGAIN;
460     }
461    
462     if (list_empty(&rx->recvmsg_q)) {
463     diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
464     index 5e9c43d4a314..49d03c8c64da 100644
465     --- a/net/rxrpc/sendmsg.c
466     +++ b/net/rxrpc/sendmsg.c
467     @@ -306,7 +306,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
468     /* this should be in poll */
469     sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
470    
471     - if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
472     + if (sk->sk_shutdown & SEND_SHUTDOWN)
473     return -EPIPE;
474    
475     more = msg->msg_flags & MSG_MORE;
476     diff --git a/net/sctp/stream.c b/net/sctp/stream.c
477     index c1a100d2fed3..e13cbd5c0193 100644
478     --- a/net/sctp/stream.c
479     +++ b/net/sctp/stream.c
480     @@ -22,17 +22,11 @@
481     #include <net/sctp/sm.h>
482     #include <net/sctp/stream_sched.h>
483    
484     -/* Migrates chunks from stream queues to new stream queues if needed,
485     - * but not across associations. Also, removes those chunks to streams
486     - * higher than the new max.
487     - */
488     -static void sctp_stream_outq_migrate(struct sctp_stream *stream,
489     - struct sctp_stream *new, __u16 outcnt)
490     +static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt)
491     {
492     struct sctp_association *asoc;
493     struct sctp_chunk *ch, *temp;
494     struct sctp_outq *outq;
495     - int i;
496    
497     asoc = container_of(stream, struct sctp_association, stream);
498     outq = &asoc->outqueue;
499     @@ -56,6 +50,19 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
500    
501     sctp_chunk_free(ch);
502     }
503     +}
504     +
505     +/* Migrates chunks from stream queues to new stream queues if needed,
506     + * but not across associations. Also, removes those chunks to streams
507     + * higher than the new max.
508     + */
509     +static void sctp_stream_outq_migrate(struct sctp_stream *stream,
510     + struct sctp_stream *new, __u16 outcnt)
511     +{
512     + int i;
513     +
514     + if (stream->outcnt > outcnt)
515     + sctp_stream_shrink_out(stream, outcnt);
516    
517     if (new) {
518     /* Here we actually move the old ext stuff into the new
519     @@ -1038,11 +1045,13 @@ struct sctp_chunk *sctp_process_strreset_resp(
520     nums = ntohs(addstrm->number_of_streams);
521     number = stream->outcnt - nums;
522    
523     - if (result == SCTP_STRRESET_PERFORMED)
524     + if (result == SCTP_STRRESET_PERFORMED) {
525     for (i = number; i < stream->outcnt; i++)
526     SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
527     - else
528     + } else {
529     + sctp_stream_shrink_out(stream, number);
530     stream->outcnt = number;
531     + }
532    
533     *evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
534     0, nums, GFP_ATOMIC);