/[pkg-src]/trunk/kernel26-magellan/patches-2.6.16-r12/0129-2.6.16.15-SCTP-deadlock-CVE-2006-2275.patch |
Contents of /trunk/kernel26-magellan/patches-2.6.16-r12/0129-2.6.16.15-SCTP-deadlock-CVE-2006-2275.patch
Parent Directory | Revision Log
Revision 72 -
(show annotations)
(download)
Mon Jun 5 09:25:38 2006 UTC (18 years, 3 months ago) by niro
File size: 5187 byte(s)
ver bump to 2.6.16-r12: - updated to linux-2.6.16.19 - updated to ck11
1 | From: Neil Horman <nhorman@tuxdriver.com> |
2 | Date: Sat, 6 May 2006 00:02:09 +0000 (-0700) |
3 | Subject: [PATCH] SCTP: Allow spillover of receive buffer to avoid deadlock. (CVE-2006-2275) |
4 | X-Git-Url: http://www.kernel.org/git/?p=linux/kernel/git/stable/linux-2.6.16.y.git;a=commitdiff;h=2e2a2cd09dd7b3fbc99a1879a54090fd6db16f0c |
5 | |
6 | [PATCH] SCTP: Allow spillover of receive buffer to avoid deadlock. (CVE-2006-2275) |
7 | |
8 | This patch fixes a deadlock situation in the receive path by allowing |
9 | temporary spillover of the receive buffer. |
10 | |
11 | - If the chunk we receive has a tsn that immediately follows the ctsn, |
12 | accept it even if we run out of receive buffer space and renege data with |
13 | higher TSNs. |
14 | - Once we accept one chunk in a packet, accept all the remaining chunks |
15 | even if we run out of receive buffer space. |
16 | |
17 | Signed-off-by: Neil Horman <nhorman@tuxdriver.com> |
18 | Acked-by: Mark Butler <butlerm@middle.net> |
19 | Acked-by: Vlad Yasevich <vladislav.yasevich@hp.com> |
20 | Signed-off-by: Sridhar Samudrala <sri@us.ibm.com> |
21 | Signed-off-by: David S. Miller <davem@davemloft.net> |
22 | Signed-off-by: Chris Wright <chrisw@sous-sol.org> |
23 | --- |
24 | |
25 | --- a/include/net/sctp/structs.h |
26 | +++ b/include/net/sctp/structs.h |
27 | @@ -702,6 +702,7 @@ struct sctp_chunk { |
28 | __u8 tsn_gap_acked; /* Is this chunk acked by a GAP ACK? */ |
29 | __s8 fast_retransmit; /* Is this chunk fast retransmitted? */ |
30 | __u8 tsn_missing_report; /* Data chunk missing counter. */ |
31 | + __u8 data_accepted; /* At least 1 chunk in this packet accepted */ |
32 | }; |
33 | |
34 | void sctp_chunk_hold(struct sctp_chunk *); |
35 | --- a/net/sctp/inqueue.c |
36 | +++ b/net/sctp/inqueue.c |
37 | @@ -149,6 +149,7 @@ struct sctp_chunk *sctp_inq_pop(struct s |
38 | /* This is the first chunk in the packet. */ |
39 | chunk->singleton = 1; |
40 | ch = (sctp_chunkhdr_t *) chunk->skb->data; |
41 | + chunk->data_accepted = 0; |
42 | } |
43 | |
44 | chunk->chunk_hdr = ch; |
45 | --- a/net/sctp/sm_statefuns.c |
46 | +++ b/net/sctp/sm_statefuns.c |
47 | @@ -5154,7 +5154,9 @@ static int sctp_eat_data(const struct sc |
48 | int tmp; |
49 | __u32 tsn; |
50 | int account_value; |
51 | + struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; |
52 | struct sock *sk = asoc->base.sk; |
53 | + int rcvbuf_over = 0; |
54 | |
55 | data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data; |
56 | skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); |
57 | @@ -5165,10 +5167,16 @@ static int sctp_eat_data(const struct sc |
58 | /* ASSERT: Now skb->data is really the user data. */ |
59 | |
60 | /* |
61 | - * if we are established, and we have used up our receive |
62 | - * buffer memory, drop the frame |
63 | + * If we are established, and we have used up our receive buffer |
64 | + * memory, think about dropping the frame. |
65 | + * Note that we have an opportunity to improve performance here. |
66 | + * If we accept one chunk from an skbuff, we have to keep all the |
67 | + * memory of that skbuff around until the chunk is read into user |
68 | + * space. Therefore, once we accept 1 chunk we may as well accept all |
69 | + * remaining chunks in the skbuff. The data_accepted flag helps us do |
70 | + * that. |
71 | */ |
72 | - if (asoc->state == SCTP_STATE_ESTABLISHED) { |
73 | + if ((asoc->state == SCTP_STATE_ESTABLISHED) && (!chunk->data_accepted)) { |
74 | /* |
75 | * If the receive buffer policy is 1, then each |
76 | * association can allocate up to sk_rcvbuf bytes |
77 | @@ -5179,9 +5187,25 @@ static int sctp_eat_data(const struct sc |
78 | account_value = atomic_read(&asoc->rmem_alloc); |
79 | else |
80 | account_value = atomic_read(&sk->sk_rmem_alloc); |
81 | - |
82 | - if (account_value > sk->sk_rcvbuf) |
83 | - return SCTP_IERROR_IGNORE_TSN; |
84 | + if (account_value > sk->sk_rcvbuf) { |
85 | + /* |
86 | + * We need to make forward progress, even when we are |
87 | + * under memory pressure, so we always allow the |
88 | + * next tsn after the ctsn ack point to be accepted. |
89 | + * This lets us avoid deadlocks in which we have to |
90 | + * drop frames that would otherwise let us drain the |
91 | + * receive queue. |
92 | + */ |
93 | + if ((sctp_tsnmap_get_ctsn(map) + 1) != tsn) |
94 | + return SCTP_IERROR_IGNORE_TSN; |
95 | + |
96 | + /* |
97 | + * We're going to accept the frame but we should renege |
98 | + * to make space for it. This will send us down that |
99 | + * path later in this function. |
100 | + */ |
101 | + rcvbuf_over = 1; |
102 | + } |
103 | } |
104 | |
105 | /* Process ECN based congestion. |
106 | @@ -5229,6 +5253,7 @@ static int sctp_eat_data(const struct sc |
107 | datalen -= sizeof(sctp_data_chunk_t); |
108 | |
109 | deliver = SCTP_CMD_CHUNK_ULP; |
110 | + chunk->data_accepted = 1; |
111 | |
112 | /* Think about partial delivery. */ |
113 | if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) { |
114 | @@ -5245,7 +5270,8 @@ static int sctp_eat_data(const struct sc |
115 | * large spill over. |
116 | */ |
117 | if (!asoc->rwnd || asoc->rwnd_over || |
118 | - (datalen > asoc->rwnd + asoc->frag_point)) { |
119 | + (datalen > asoc->rwnd + asoc->frag_point) || |
120 | + rcvbuf_over) { |
121 | |
122 | /* If this is the next TSN, consider reneging to make |
123 | * room. Note: Playing nice with a confused sender. A |
124 | @@ -5253,8 +5279,8 @@ static int sctp_eat_data(const struct sc |
125 | * space and in the future we may want to detect and |
126 | * do more drastic reneging. |
127 | */ |
128 | - if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) && |
129 | - (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) { |
130 | + if (sctp_tsnmap_has_gap(map) && |
131 | + (sctp_tsnmap_get_ctsn(map) + 1) == tsn) { |
132 | SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn); |
133 | deliver = SCTP_CMD_RENEGE; |
134 | } else { |