Contents of /trunk/kernel-lts/patches-3.4/0150-3.4.51-all-fixes.patch
Parent Directory | Revision Log
Revision 2226 -
(show annotations)
(download)
Mon Jul 1 09:56:29 2013 UTC (11 years, 2 months ago) by niro
File size: 32156 byte(s)
-linux-3.4.51
1 | diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c |
2 | index 2a81d32..e51e5cd 100644 |
3 | --- a/arch/tile/lib/exports.c |
4 | +++ b/arch/tile/lib/exports.c |
5 | @@ -90,4 +90,6 @@ uint64_t __ashrdi3(uint64_t, unsigned int); |
6 | EXPORT_SYMBOL(__ashrdi3); |
7 | uint64_t __ashldi3(uint64_t, unsigned int); |
8 | EXPORT_SYMBOL(__ashldi3); |
9 | +int __ffsdi2(uint64_t); |
10 | +EXPORT_SYMBOL(__ffsdi2); |
11 | #endif |
12 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
13 | index b27b452..3663e0b 100644 |
14 | --- a/arch/x86/kvm/x86.c |
15 | +++ b/arch/x86/kvm/x86.c |
16 | @@ -555,8 +555,6 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) |
17 | if (index != XCR_XFEATURE_ENABLED_MASK) |
18 | return 1; |
19 | xcr0 = xcr; |
20 | - if (kvm_x86_ops->get_cpl(vcpu) != 0) |
21 | - return 1; |
22 | if (!(xcr0 & XSTATE_FP)) |
23 | return 1; |
24 | if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) |
25 | @@ -570,7 +568,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) |
26 | |
27 | int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) |
28 | { |
29 | - if (__kvm_set_xcr(vcpu, index, xcr)) { |
30 | + if (kvm_x86_ops->get_cpl(vcpu) != 0 || |
31 | + __kvm_set_xcr(vcpu, index, xcr)) { |
32 | kvm_inject_gp(vcpu, 0); |
33 | return 1; |
34 | } |
35 | diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c |
36 | index 7f1ea56..4c63665 100644 |
37 | --- a/drivers/clk/clk.c |
38 | +++ b/drivers/clk/clk.c |
39 | @@ -1453,6 +1453,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) |
40 | /* XXX the notifier code should handle this better */ |
41 | if (!cn->notifier_head.head) { |
42 | srcu_cleanup_notifier_head(&cn->notifier_head); |
43 | + list_del(&cn->node); |
44 | kfree(cn); |
45 | } |
46 | |
47 | diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c |
48 | index f030d9e..3f505d5 100644 |
49 | --- a/drivers/input/touchscreen/cyttsp_core.c |
50 | +++ b/drivers/input/touchscreen/cyttsp_core.c |
51 | @@ -133,7 +133,7 @@ static int cyttsp_exit_bl_mode(struct cyttsp *ts) |
52 | memcpy(bl_cmd, bl_command, sizeof(bl_command)); |
53 | if (ts->pdata->bl_keys) |
54 | memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS], |
55 | - ts->pdata->bl_keys, sizeof(bl_command)); |
56 | + ts->pdata->bl_keys, CY_NUM_BL_KEYS); |
57 | |
58 | error = ttsp_write_block_data(ts, CY_REG_BASE, |
59 | sizeof(bl_cmd), bl_cmd); |
60 | diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c |
61 | index 2e1f806..b6ed7e9 100644 |
62 | --- a/drivers/net/bonding/bond_alb.c |
63 | +++ b/drivers/net/bonding/bond_alb.c |
64 | @@ -704,6 +704,12 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) |
65 | struct arp_pkt *arp = arp_pkt(skb); |
66 | struct slave *tx_slave = NULL; |
67 | |
68 | + /* Don't modify or load balance ARPs that do not originate locally |
69 | + * (e.g.,arrive via a bridge). |
70 | + */ |
71 | + if (!bond_slave_has_mac(bond, arp->mac_src)) |
72 | + return NULL; |
73 | + |
74 | if (arp->op_code == htons(ARPOP_REPLY)) { |
75 | /* the arp must be sent on the selected |
76 | * rx channel |
77 | diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h |
78 | index 4581aa5..51f1766 100644 |
79 | --- a/drivers/net/bonding/bonding.h |
80 | +++ b/drivers/net/bonding/bonding.h |
81 | @@ -18,6 +18,7 @@ |
82 | #include <linux/timer.h> |
83 | #include <linux/proc_fs.h> |
84 | #include <linux/if_bonding.h> |
85 | +#include <linux/etherdevice.h> |
86 | #include <linux/cpumask.h> |
87 | #include <linux/in6.h> |
88 | #include <linux/netpoll.h> |
89 | @@ -450,6 +451,18 @@ static inline void bond_destroy_proc_dir(struct bond_net *bn) |
90 | } |
91 | #endif |
92 | |
93 | +static inline struct slave *bond_slave_has_mac(struct bonding *bond, |
94 | + const u8 *mac) |
95 | +{ |
96 | + int i = 0; |
97 | + struct slave *tmp; |
98 | + |
99 | + bond_for_each_slave(bond, tmp, i) |
100 | + if (!compare_ether_addr_64bits(mac, tmp->dev->dev_addr)) |
101 | + return tmp; |
102 | + |
103 | + return NULL; |
104 | +} |
105 | |
106 | /* exported from bond_main.c */ |
107 | extern int bond_net_id; |
108 | diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c |
109 | index ca2748a..8de54e5 100644 |
110 | --- a/drivers/net/ethernet/freescale/gianfar_ptp.c |
111 | +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c |
112 | @@ -520,6 +520,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) |
113 | return 0; |
114 | |
115 | no_clock: |
116 | + iounmap(etsects->regs); |
117 | no_ioremap: |
118 | release_resource(etsects->rsrc); |
119 | no_resource: |
120 | diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c |
121 | index b3287c0..4ce981c 100644 |
122 | --- a/drivers/net/ethernet/realtek/8139cp.c |
123 | +++ b/drivers/net/ethernet/realtek/8139cp.c |
124 | @@ -1097,6 +1097,7 @@ static void cp_clean_rings (struct cp_private *cp) |
125 | cp->dev->stats.tx_dropped++; |
126 | } |
127 | } |
128 | + netdev_reset_queue(cp->dev); |
129 | |
130 | memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); |
131 | memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); |
132 | diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c |
133 | index cf20388..2a59e7a 100644 |
134 | --- a/drivers/net/ethernet/realtek/r8169.c |
135 | +++ b/drivers/net/ethernet/realtek/r8169.c |
136 | @@ -5126,7 +5126,20 @@ err_out: |
137 | return -EIO; |
138 | } |
139 | |
140 | -static inline void rtl8169_tso_csum(struct rtl8169_private *tp, |
141 | +static bool rtl_skb_pad(struct sk_buff *skb) |
142 | +{ |
143 | + if (skb_padto(skb, ETH_ZLEN)) |
144 | + return false; |
145 | + skb_put(skb, ETH_ZLEN - skb->len); |
146 | + return true; |
147 | +} |
148 | + |
149 | +static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb) |
150 | +{ |
151 | + return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34; |
152 | +} |
153 | + |
154 | +static inline bool rtl8169_tso_csum(struct rtl8169_private *tp, |
155 | struct sk_buff *skb, u32 *opts) |
156 | { |
157 | const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version; |
158 | @@ -5139,13 +5152,20 @@ static inline void rtl8169_tso_csum(struct rtl8169_private *tp, |
159 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { |
160 | const struct iphdr *ip = ip_hdr(skb); |
161 | |
162 | + if (unlikely(rtl_test_hw_pad_bug(tp, skb))) |
163 | + return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb); |
164 | + |
165 | if (ip->protocol == IPPROTO_TCP) |
166 | opts[offset] |= info->checksum.tcp; |
167 | else if (ip->protocol == IPPROTO_UDP) |
168 | opts[offset] |= info->checksum.udp; |
169 | else |
170 | WARN_ON_ONCE(1); |
171 | + } else { |
172 | + if (unlikely(rtl_test_hw_pad_bug(tp, skb))) |
173 | + return rtl_skb_pad(skb); |
174 | } |
175 | + return true; |
176 | } |
177 | |
178 | static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, |
179 | @@ -5166,17 +5186,15 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, |
180 | goto err_stop_0; |
181 | } |
182 | |
183 | - /* 8168evl does not automatically pad to minimum length. */ |
184 | - if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 && |
185 | - skb->len < ETH_ZLEN)) { |
186 | - if (skb_padto(skb, ETH_ZLEN)) |
187 | - goto err_update_stats; |
188 | - skb_put(skb, ETH_ZLEN - skb->len); |
189 | - } |
190 | - |
191 | if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) |
192 | goto err_stop_0; |
193 | |
194 | + opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb)); |
195 | + opts[0] = DescOwn; |
196 | + |
197 | + if (!rtl8169_tso_csum(tp, skb, opts)) |
198 | + goto err_update_stats; |
199 | + |
200 | len = skb_headlen(skb); |
201 | mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE); |
202 | if (unlikely(dma_mapping_error(d, mapping))) { |
203 | @@ -5188,11 +5206,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, |
204 | tp->tx_skb[entry].len = len; |
205 | txd->addr = cpu_to_le64(mapping); |
206 | |
207 | - opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb)); |
208 | - opts[0] = DescOwn; |
209 | - |
210 | - rtl8169_tso_csum(tp, skb, opts); |
211 | - |
212 | frags = rtl8169_xmit_frags(tp, skb, opts); |
213 | if (frags < 0) |
214 | goto err_dma_1; |
215 | diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c |
216 | index a0e8f80..bf6a818 100644 |
217 | --- a/drivers/net/team/team_mode_roundrobin.c |
218 | +++ b/drivers/net/team/team_mode_roundrobin.c |
219 | @@ -52,6 +52,8 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb) |
220 | |
221 | port_index = rr_priv(team)->sent_packets++ % team->port_count; |
222 | port = team_get_port_by_index_rcu(team, port_index); |
223 | + if (unlikely(!port)) |
224 | + goto drop; |
225 | port = __get_first_port_up(team, port); |
226 | if (unlikely(!port)) |
227 | goto drop; |
228 | diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c |
229 | index 1ab0560..a7c4324 100644 |
230 | --- a/drivers/target/iscsi/iscsi_target_erl0.c |
231 | +++ b/drivers/target/iscsi/iscsi_target_erl0.c |
232 | @@ -831,11 +831,11 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *sess) |
233 | return 0; |
234 | |
235 | sess->time2retain_timer_flags |= ISCSI_TF_STOP; |
236 | - spin_unlock_bh(&se_tpg->session_lock); |
237 | + spin_unlock(&se_tpg->session_lock); |
238 | |
239 | del_timer_sync(&sess->time2retain_timer); |
240 | |
241 | - spin_lock_bh(&se_tpg->session_lock); |
242 | + spin_lock(&se_tpg->session_lock); |
243 | sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING; |
244 | pr_debug("Stopped Time2Retain Timer for SID: %u\n", |
245 | sess->sid); |
246 | diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c |
247 | index 3377437..a39a08c 100644 |
248 | --- a/drivers/usb/serial/ti_usb_3410_5052.c |
249 | +++ b/drivers/usb/serial/ti_usb_3410_5052.c |
250 | @@ -179,7 +179,8 @@ static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = { |
251 | { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, |
252 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, |
253 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, |
254 | - { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, |
255 | + { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) }, |
256 | + { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, |
257 | { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, |
258 | }; |
259 | |
260 | diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h |
261 | index b353e7e..4a2423e 100644 |
262 | --- a/drivers/usb/serial/ti_usb_3410_5052.h |
263 | +++ b/drivers/usb/serial/ti_usb_3410_5052.h |
264 | @@ -52,7 +52,9 @@ |
265 | |
266 | /* Abbott Diabetics vendor and product ids */ |
267 | #define ABBOTT_VENDOR_ID 0x1a61 |
268 | -#define ABBOTT_PRODUCT_ID 0x3410 |
269 | +#define ABBOTT_STEREO_PLUG_ID 0x3410 |
270 | +#define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID |
271 | +#define ABBOTT_STRIP_PORT_ID 0x3420 |
272 | |
273 | /* Commands */ |
274 | #define TI_GET_VERSION 0x01 |
275 | diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c |
276 | index 6f292dd..f255d37 100644 |
277 | --- a/fs/notify/inotify/inotify_user.c |
278 | +++ b/fs/notify/inotify/inotify_user.c |
279 | @@ -577,7 +577,6 @@ static int inotify_update_existing_watch(struct fsnotify_group *group, |
280 | int add = (arg & IN_MASK_ADD); |
281 | int ret; |
282 | |
283 | - /* don't allow invalid bits: we don't want flags set */ |
284 | mask = inotify_arg_to_mask(arg); |
285 | |
286 | fsn_mark = fsnotify_find_inode_mark(group, inode); |
287 | @@ -628,7 +627,6 @@ static int inotify_new_watch(struct fsnotify_group *group, |
288 | struct idr *idr = &group->inotify_data.idr; |
289 | spinlock_t *idr_lock = &group->inotify_data.idr_lock; |
290 | |
291 | - /* don't allow invalid bits: we don't want flags set */ |
292 | mask = inotify_arg_to_mask(arg); |
293 | |
294 | tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL); |
295 | @@ -757,6 +755,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname, |
296 | int ret, fput_needed; |
297 | unsigned flags = 0; |
298 | |
299 | + /* don't allow invalid bits: we don't want flags set */ |
300 | + if (unlikely(!(mask & ALL_INOTIFY_BITS))) |
301 | + return -EINVAL; |
302 | + |
303 | filp = fget_light(fd, &fput_needed); |
304 | if (unlikely(!filp)) |
305 | return -EBADF; |
306 | diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h |
307 | index 2ae1371..1c33dd7 100644 |
308 | --- a/include/linux/rculist_nulls.h |
309 | +++ b/include/linux/rculist_nulls.h |
310 | @@ -105,9 +105,14 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, |
311 | * @head: the head for your list. |
312 | * @member: the name of the hlist_nulls_node within the struct. |
313 | * |
314 | + * The barrier() is needed to make sure compiler doesn't cache first element [1], |
315 | + * as this loop can be restarted [2] |
316 | + * [1] Documentation/atomic_ops.txt around line 114 |
317 | + * [2] Documentation/RCU/rculist_nulls.txt around line 146 |
318 | */ |
319 | #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ |
320 | - for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ |
321 | + for (({barrier();}), \ |
322 | + pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ |
323 | (!is_a_nulls(pos)) && \ |
324 | ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ |
325 | pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos))) |
326 | diff --git a/include/linux/socket.h b/include/linux/socket.h |
327 | index 8f15b1d..9b54ebe 100644 |
328 | --- a/include/linux/socket.h |
329 | +++ b/include/linux/socket.h |
330 | @@ -336,6 +336,9 @@ extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); |
331 | |
332 | struct timespec; |
333 | |
334 | +/* The __sys_...msg variants allow MSG_CMSG_COMPAT */ |
335 | +extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); |
336 | +extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); |
337 | extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, |
338 | unsigned int flags, struct timespec *timeout); |
339 | extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, |
340 | diff --git a/net/compat.c b/net/compat.c |
341 | index ae6d67a..014e1c7 100644 |
342 | --- a/net/compat.c |
343 | +++ b/net/compat.c |
344 | @@ -743,19 +743,25 @@ static unsigned char nas[21] = { |
345 | |
346 | asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags) |
347 | { |
348 | - return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); |
349 | + if (flags & MSG_CMSG_COMPAT) |
350 | + return -EINVAL; |
351 | + return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); |
352 | } |
353 | |
354 | asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, |
355 | unsigned vlen, unsigned int flags) |
356 | { |
357 | + if (flags & MSG_CMSG_COMPAT) |
358 | + return -EINVAL; |
359 | return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, |
360 | flags | MSG_CMSG_COMPAT); |
361 | } |
362 | |
363 | asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags) |
364 | { |
365 | - return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); |
366 | + if (flags & MSG_CMSG_COMPAT) |
367 | + return -EINVAL; |
368 | + return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); |
369 | } |
370 | |
371 | asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned flags) |
372 | @@ -777,6 +783,9 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, |
373 | int datagrams; |
374 | struct timespec ktspec; |
375 | |
376 | + if (flags & MSG_CMSG_COMPAT) |
377 | + return -EINVAL; |
378 | + |
379 | if (COMPAT_USE_64BIT_TIME) |
380 | return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, |
381 | flags | MSG_CMSG_COMPAT, |
382 | diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c |
383 | index b57532d..a16509c 100644 |
384 | --- a/net/ipv4/ip_gre.c |
385 | +++ b/net/ipv4/ip_gre.c |
386 | @@ -722,6 +722,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev |
387 | tiph = &tunnel->parms.iph; |
388 | } |
389 | |
390 | + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
391 | if ((dst = tiph->daddr) == 0) { |
392 | /* NBMA tunnel */ |
393 | |
394 | @@ -865,7 +866,6 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev |
395 | skb_reset_transport_header(skb); |
396 | skb_push(skb, gre_hlen); |
397 | skb_reset_network_header(skb); |
398 | - memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
399 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | |
400 | IPSKB_REROUTED); |
401 | skb_dst_drop(skb); |
402 | diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c |
403 | index ae1413e..d2f6348 100644 |
404 | --- a/net/ipv4/ipip.c |
405 | +++ b/net/ipv4/ipip.c |
406 | @@ -448,6 +448,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) |
407 | if (tos & 1) |
408 | tos = old_iph->tos; |
409 | |
410 | + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
411 | if (!dst) { |
412 | /* NBMA tunnel */ |
413 | if ((rt = skb_rtable(skb)) == NULL) { |
414 | @@ -530,7 +531,6 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) |
415 | skb->transport_header = skb->network_header; |
416 | skb_push(skb, sizeof(struct iphdr)); |
417 | skb_reset_network_header(skb); |
418 | - memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
419 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | |
420 | IPSKB_REROUTED); |
421 | skb_dst_drop(skb); |
422 | diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c |
423 | index dcb19f5..0b91c30 100644 |
424 | --- a/net/ipv4/tcp.c |
425 | +++ b/net/ipv4/tcp.c |
426 | @@ -3055,8 +3055,11 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, |
427 | |
428 | for (i = 0; i < shi->nr_frags; ++i) { |
429 | const struct skb_frag_struct *f = &shi->frags[i]; |
430 | - struct page *page = skb_frag_page(f); |
431 | - sg_set_page(&sg, page, skb_frag_size(f), f->page_offset); |
432 | + unsigned int offset = f->page_offset; |
433 | + struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT); |
434 | + |
435 | + sg_set_page(&sg, page, skb_frag_size(f), |
436 | + offset_in_page(offset)); |
437 | if (crypto_hash_update(desc, &sg, skb_frag_size(f))) |
438 | return 1; |
439 | } |
440 | diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c |
441 | index 762c78f..55d96c3 100644 |
442 | --- a/net/ipv4/tcp_input.c |
443 | +++ b/net/ipv4/tcp_input.c |
444 | @@ -3038,8 +3038,8 @@ static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked, |
445 | * tcp_xmit_retransmit_queue(). |
446 | */ |
447 | static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, |
448 | - int prior_sacked, bool is_dupack, |
449 | - int flag) |
450 | + int prior_sacked, int prior_packets, |
451 | + bool is_dupack, int flag) |
452 | { |
453 | struct inet_connection_sock *icsk = inet_csk(sk); |
454 | struct tcp_sock *tp = tcp_sk(sk); |
455 | @@ -3105,7 +3105,8 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, |
456 | tcp_add_reno_sack(sk); |
457 | } else |
458 | do_lost = tcp_try_undo_partial(sk, pkts_acked); |
459 | - newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; |
460 | + newly_acked_sacked = prior_packets - tp->packets_out + |
461 | + tp->sacked_out - prior_sacked; |
462 | break; |
463 | case TCP_CA_Loss: |
464 | if (flag & FLAG_DATA_ACKED) |
465 | @@ -3127,7 +3128,8 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, |
466 | if (is_dupack) |
467 | tcp_add_reno_sack(sk); |
468 | } |
469 | - newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; |
470 | + newly_acked_sacked = prior_packets - tp->packets_out + |
471 | + tp->sacked_out - prior_sacked; |
472 | |
473 | if (icsk->icsk_ca_state <= TCP_CA_Disorder) |
474 | tcp_try_undo_dsack(sk); |
475 | @@ -3740,9 +3742,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) |
476 | bool is_dupack = false; |
477 | u32 prior_in_flight; |
478 | u32 prior_fackets; |
479 | - int prior_packets; |
480 | + int prior_packets = tp->packets_out; |
481 | int prior_sacked = tp->sacked_out; |
482 | int pkts_acked = 0; |
483 | + int previous_packets_out = 0; |
484 | int frto_cwnd = 0; |
485 | |
486 | /* If the ack is older than previous acks |
487 | @@ -3819,14 +3822,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) |
488 | sk->sk_err_soft = 0; |
489 | icsk->icsk_probes_out = 0; |
490 | tp->rcv_tstamp = tcp_time_stamp; |
491 | - prior_packets = tp->packets_out; |
492 | if (!prior_packets) |
493 | goto no_queue; |
494 | |
495 | /* See if we can take anything off of the retransmit queue. */ |
496 | + previous_packets_out = tp->packets_out; |
497 | flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); |
498 | |
499 | - pkts_acked = prior_packets - tp->packets_out; |
500 | + pkts_acked = previous_packets_out - tp->packets_out; |
501 | |
502 | if (tp->frto_counter) |
503 | frto_cwnd = tcp_process_frto(sk, flag); |
504 | @@ -3841,7 +3844,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) |
505 | tcp_cong_avoid(sk, ack, prior_in_flight); |
506 | is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); |
507 | tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, |
508 | - is_dupack, flag); |
509 | + prior_packets, is_dupack, flag); |
510 | } else { |
511 | if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) |
512 | tcp_cong_avoid(sk, ack, prior_in_flight); |
513 | @@ -3856,7 +3859,7 @@ no_queue: |
514 | /* If data was DSACKed, see if we can undo a cwnd reduction. */ |
515 | if (flag & FLAG_DSACKING_ACK) |
516 | tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, |
517 | - is_dupack, flag); |
518 | + prior_packets, is_dupack, flag); |
519 | /* If this ack opens up a zero window, clear backoff. It was |
520 | * being used to time the probes, and is probably far higher than |
521 | * it needs to be for normal retransmission. |
522 | @@ -3876,7 +3879,7 @@ old_ack: |
523 | if (TCP_SKB_CB(skb)->sacked) { |
524 | flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); |
525 | tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, |
526 | - is_dupack, flag); |
527 | + prior_packets, is_dupack, flag); |
528 | } |
529 | |
530 | SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); |
531 | diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c |
532 | index 9db21e3..12999a3 100644 |
533 | --- a/net/ipv4/tcp_output.c |
534 | +++ b/net/ipv4/tcp_output.c |
535 | @@ -835,11 +835,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, |
536 | &md5); |
537 | tcp_header_size = tcp_options_size + sizeof(struct tcphdr); |
538 | |
539 | - if (tcp_packets_in_flight(tp) == 0) { |
540 | + if (tcp_packets_in_flight(tp) == 0) |
541 | tcp_ca_event(sk, CA_EVENT_TX_START); |
542 | - skb->ooo_okay = 1; |
543 | - } else |
544 | - skb->ooo_okay = 0; |
545 | + |
546 | + /* if no packet is in qdisc/device queue, then allow XPS to select |
547 | + * another queue. |
548 | + */ |
549 | + skb->ooo_okay = sk_wmem_alloc_get(sk) == 0; |
550 | |
551 | skb_push(skb, tcp_header_size); |
552 | skb_reset_transport_header(skb); |
553 | diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c |
554 | index 2c496d6..f4fe3c0 100644 |
555 | --- a/net/ipv6/addrconf.c |
556 | +++ b/net/ipv6/addrconf.c |
557 | @@ -2432,8 +2432,10 @@ static void init_loopback(struct net_device *dev) |
558 | sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); |
559 | |
560 | /* Failure cases are ignored */ |
561 | - if (!IS_ERR(sp_rt)) |
562 | + if (!IS_ERR(sp_rt)) { |
563 | + sp_ifa->rt = sp_rt; |
564 | ip6_ins_rt(sp_rt); |
565 | + } |
566 | } |
567 | read_unlock_bh(&idev->lock); |
568 | } |
569 | diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c |
570 | index ce661ba..bf290ce 100644 |
571 | --- a/net/ipv6/ip6_output.c |
572 | +++ b/net/ipv6/ip6_output.c |
573 | @@ -1236,7 +1236,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, |
574 | if (WARN_ON(np->cork.opt)) |
575 | return -EINVAL; |
576 | |
577 | - np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation); |
578 | + np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation); |
579 | if (unlikely(np->cork.opt == NULL)) |
580 | return -ENOBUFS; |
581 | |
582 | diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c |
583 | index 9728a75..c6dee80 100644 |
584 | --- a/net/l2tp/l2tp_ppp.c |
585 | +++ b/net/l2tp/l2tp_ppp.c |
586 | @@ -350,19 +350,19 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh |
587 | skb_put(skb, 2); |
588 | |
589 | /* Copy user data into skb */ |
590 | - error = memcpy_fromiovec(skb->data, m->msg_iov, total_len); |
591 | + error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov, |
592 | + total_len); |
593 | if (error < 0) { |
594 | kfree_skb(skb); |
595 | goto error_put_sess_tun; |
596 | } |
597 | - skb_put(skb, total_len); |
598 | |
599 | l2tp_xmit_skb(session, skb, session->hdr_len); |
600 | |
601 | sock_put(ps->tunnel_sock); |
602 | sock_put(sk); |
603 | |
604 | - return error; |
605 | + return total_len; |
606 | |
607 | error_put_sess_tun: |
608 | sock_put(ps->tunnel_sock); |
609 | diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c |
610 | index d8d4243..6bb1d42 100644 |
611 | --- a/net/netlabel/netlabel_domainhash.c |
612 | +++ b/net/netlabel/netlabel_domainhash.c |
613 | @@ -245,6 +245,71 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry, |
614 | } |
615 | } |
616 | |
617 | +/** |
618 | + * netlbl_domhsh_validate - Validate a new domain mapping entry |
619 | + * @entry: the entry to validate |
620 | + * |
621 | + * This function validates the new domain mapping entry to ensure that it is |
622 | + * a valid entry. Returns zero on success, negative values on failure. |
623 | + * |
624 | + */ |
625 | +static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry) |
626 | +{ |
627 | + struct netlbl_af4list *iter4; |
628 | + struct netlbl_domaddr4_map *map4; |
629 | +#if IS_ENABLED(CONFIG_IPV6) |
630 | + struct netlbl_af6list *iter6; |
631 | + struct netlbl_domaddr6_map *map6; |
632 | +#endif /* IPv6 */ |
633 | + |
634 | + if (entry == NULL) |
635 | + return -EINVAL; |
636 | + |
637 | + switch (entry->type) { |
638 | + case NETLBL_NLTYPE_UNLABELED: |
639 | + if (entry->type_def.cipsov4 != NULL || |
640 | + entry->type_def.addrsel != NULL) |
641 | + return -EINVAL; |
642 | + break; |
643 | + case NETLBL_NLTYPE_CIPSOV4: |
644 | + if (entry->type_def.cipsov4 == NULL) |
645 | + return -EINVAL; |
646 | + break; |
647 | + case NETLBL_NLTYPE_ADDRSELECT: |
648 | + netlbl_af4list_foreach(iter4, &entry->type_def.addrsel->list4) { |
649 | + map4 = netlbl_domhsh_addr4_entry(iter4); |
650 | + switch (map4->type) { |
651 | + case NETLBL_NLTYPE_UNLABELED: |
652 | + if (map4->type_def.cipsov4 != NULL) |
653 | + return -EINVAL; |
654 | + break; |
655 | + case NETLBL_NLTYPE_CIPSOV4: |
656 | + if (map4->type_def.cipsov4 == NULL) |
657 | + return -EINVAL; |
658 | + break; |
659 | + default: |
660 | + return -EINVAL; |
661 | + } |
662 | + } |
663 | +#if IS_ENABLED(CONFIG_IPV6) |
664 | + netlbl_af6list_foreach(iter6, &entry->type_def.addrsel->list6) { |
665 | + map6 = netlbl_domhsh_addr6_entry(iter6); |
666 | + switch (map6->type) { |
667 | + case NETLBL_NLTYPE_UNLABELED: |
668 | + break; |
669 | + default: |
670 | + return -EINVAL; |
671 | + } |
672 | + } |
673 | +#endif /* IPv6 */ |
674 | + break; |
675 | + default: |
676 | + return -EINVAL; |
677 | + } |
678 | + |
679 | + return 0; |
680 | +} |
681 | + |
682 | /* |
683 | * Domain Hash Table Functions |
684 | */ |
685 | @@ -311,6 +376,10 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, |
686 | struct netlbl_af6list *tmp6; |
687 | #endif /* IPv6 */ |
688 | |
689 | + ret_val = netlbl_domhsh_validate(entry); |
690 | + if (ret_val != 0) |
691 | + return ret_val; |
692 | + |
693 | /* XXX - we can remove this RCU read lock as the spinlock protects the |
694 | * entire function, but before we do we need to fixup the |
695 | * netlbl_af[4,6]list RCU functions to do "the right thing" with |
696 | diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c |
697 | index cfcd783..8ed5d93 100644 |
698 | --- a/net/packet/af_packet.c |
699 | +++ b/net/packet/af_packet.c |
700 | @@ -2848,12 +2848,11 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, |
701 | return -EOPNOTSUPP; |
702 | |
703 | uaddr->sa_family = AF_PACKET; |
704 | + memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); |
705 | rcu_read_lock(); |
706 | dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); |
707 | if (dev) |
708 | - strncpy(uaddr->sa_data, dev->name, 14); |
709 | - else |
710 | - memset(uaddr->sa_data, 0, 14); |
711 | + strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); |
712 | rcu_read_unlock(); |
713 | *uaddr_len = sizeof(*uaddr); |
714 | |
715 | diff --git a/net/sctp/socket.c b/net/sctp/socket.c |
716 | index 9fd05ed..4bc6e0b 100644 |
717 | --- a/net/sctp/socket.c |
718 | +++ b/net/sctp/socket.c |
719 | @@ -3929,6 +3929,12 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk) |
720 | |
721 | /* Release our hold on the endpoint. */ |
722 | sp = sctp_sk(sk); |
723 | + /* This could happen during socket init, thus we bail out |
724 | + * early, since the rest of the below is not setup either. |
725 | + */ |
726 | + if (sp->ep == NULL) |
727 | + return; |
728 | + |
729 | if (sp->do_auto_asconf) { |
730 | sp->do_auto_asconf = 0; |
731 | list_del(&sp->auto_asconf_list); |
732 | diff --git a/net/socket.c b/net/socket.c |
733 | index dab3176..47ce3ea 100644 |
734 | --- a/net/socket.c |
735 | +++ b/net/socket.c |
736 | @@ -1899,9 +1899,9 @@ struct used_address { |
737 | unsigned int name_len; |
738 | }; |
739 | |
740 | -static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, |
741 | - struct msghdr *msg_sys, unsigned flags, |
742 | - struct used_address *used_address) |
743 | +static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, |
744 | + struct msghdr *msg_sys, unsigned flags, |
745 | + struct used_address *used_address) |
746 | { |
747 | struct compat_msghdr __user *msg_compat = |
748 | (struct compat_msghdr __user *)msg; |
749 | @@ -2017,22 +2017,30 @@ out: |
750 | * BSD sendmsg interface |
751 | */ |
752 | |
753 | -SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags) |
754 | +long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags) |
755 | { |
756 | int fput_needed, err; |
757 | struct msghdr msg_sys; |
758 | - struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed); |
759 | + struct socket *sock; |
760 | |
761 | + sock = sockfd_lookup_light(fd, &err, &fput_needed); |
762 | if (!sock) |
763 | goto out; |
764 | |
765 | - err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL); |
766 | + err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL); |
767 | |
768 | fput_light(sock->file, fput_needed); |
769 | out: |
770 | return err; |
771 | } |
772 | |
773 | +SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) |
774 | +{ |
775 | + if (flags & MSG_CMSG_COMPAT) |
776 | + return -EINVAL; |
777 | + return __sys_sendmsg(fd, msg, flags); |
778 | +} |
779 | + |
780 | /* |
781 | * Linux sendmmsg interface |
782 | */ |
783 | @@ -2063,15 +2071,16 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, |
784 | |
785 | while (datagrams < vlen) { |
786 | if (MSG_CMSG_COMPAT & flags) { |
787 | - err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry, |
788 | - &msg_sys, flags, &used_address); |
789 | + err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry, |
790 | + &msg_sys, flags, &used_address); |
791 | if (err < 0) |
792 | break; |
793 | err = __put_user(err, &compat_entry->msg_len); |
794 | ++compat_entry; |
795 | } else { |
796 | - err = __sys_sendmsg(sock, (struct msghdr __user *)entry, |
797 | - &msg_sys, flags, &used_address); |
798 | + err = ___sys_sendmsg(sock, |
799 | + (struct msghdr __user *)entry, |
800 | + &msg_sys, flags, &used_address); |
801 | if (err < 0) |
802 | break; |
803 | err = put_user(err, &entry->msg_len); |
804 | @@ -2095,11 +2104,13 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, |
805 | SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, |
806 | unsigned int, vlen, unsigned int, flags) |
807 | { |
808 | + if (flags & MSG_CMSG_COMPAT) |
809 | + return -EINVAL; |
810 | return __sys_sendmmsg(fd, mmsg, vlen, flags); |
811 | } |
812 | |
813 | -static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, |
814 | - struct msghdr *msg_sys, unsigned flags, int nosec) |
815 | +static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, |
816 | + struct msghdr *msg_sys, unsigned flags, int nosec) |
817 | { |
818 | struct compat_msghdr __user *msg_compat = |
819 | (struct compat_msghdr __user *)msg; |
820 | @@ -2192,23 +2203,31 @@ out: |
821 | * BSD recvmsg interface |
822 | */ |
823 | |
824 | -SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, |
825 | - unsigned int, flags) |
826 | +long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags) |
827 | { |
828 | int fput_needed, err; |
829 | struct msghdr msg_sys; |
830 | - struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed); |
831 | + struct socket *sock; |
832 | |
833 | + sock = sockfd_lookup_light(fd, &err, &fput_needed); |
834 | if (!sock) |
835 | goto out; |
836 | |
837 | - err = __sys_recvmsg(sock, msg, &msg_sys, flags, 0); |
838 | + err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0); |
839 | |
840 | fput_light(sock->file, fput_needed); |
841 | out: |
842 | return err; |
843 | } |
844 | |
845 | +SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, |
846 | + unsigned int, flags) |
847 | +{ |
848 | + if (flags & MSG_CMSG_COMPAT) |
849 | + return -EINVAL; |
850 | + return __sys_recvmsg(fd, msg, flags); |
851 | +} |
852 | + |
853 | /* |
854 | * Linux recvmmsg interface |
855 | */ |
856 | @@ -2246,17 +2265,18 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, |
857 | * No need to ask LSM for more than the first datagram. |
858 | */ |
859 | if (MSG_CMSG_COMPAT & flags) { |
860 | - err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry, |
861 | - &msg_sys, flags & ~MSG_WAITFORONE, |
862 | - datagrams); |
863 | + err = ___sys_recvmsg(sock, (struct msghdr __user *)compat_entry, |
864 | + &msg_sys, flags & ~MSG_WAITFORONE, |
865 | + datagrams); |
866 | if (err < 0) |
867 | break; |
868 | err = __put_user(err, &compat_entry->msg_len); |
869 | ++compat_entry; |
870 | } else { |
871 | - err = __sys_recvmsg(sock, (struct msghdr __user *)entry, |
872 | - &msg_sys, flags & ~MSG_WAITFORONE, |
873 | - datagrams); |
874 | + err = ___sys_recvmsg(sock, |
875 | + (struct msghdr __user *)entry, |
876 | + &msg_sys, flags & ~MSG_WAITFORONE, |
877 | + datagrams); |
878 | if (err < 0) |
879 | break; |
880 | err = put_user(err, &entry->msg_len); |
881 | @@ -2323,6 +2343,9 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg, |
882 | int datagrams; |
883 | struct timespec timeout_sys; |
884 | |
885 | + if (flags & MSG_CMSG_COMPAT) |
886 | + return -EINVAL; |
887 | + |
888 | if (!timeout) |
889 | return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL); |
890 | |
891 | diff --git a/sound/usb/card.c b/sound/usb/card.c |
892 | index b41730d..658ea11 100644 |
893 | --- a/sound/usb/card.c |
894 | +++ b/sound/usb/card.c |
895 | @@ -149,14 +149,32 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int |
896 | return -EINVAL; |
897 | } |
898 | |
899 | + alts = &iface->altsetting[0]; |
900 | + altsd = get_iface_desc(alts); |
901 | + |
902 | + /* |
903 | + * Android with both accessory and audio interfaces enabled gets the |
904 | + * interface numbers wrong. |
905 | + */ |
906 | + if ((chip->usb_id == USB_ID(0x18d1, 0x2d04) || |
907 | + chip->usb_id == USB_ID(0x18d1, 0x2d05)) && |
908 | + interface == 0 && |
909 | + altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC && |
910 | + altsd->bInterfaceSubClass == USB_SUBCLASS_VENDOR_SPEC) { |
911 | + interface = 2; |
912 | + iface = usb_ifnum_to_if(dev, interface); |
913 | + if (!iface) |
914 | + return -EINVAL; |
915 | + alts = &iface->altsetting[0]; |
916 | + altsd = get_iface_desc(alts); |
917 | + } |
918 | + |
919 | if (usb_interface_claimed(iface)) { |
920 | snd_printdd(KERN_INFO "%d:%d:%d: skipping, already claimed\n", |
921 | dev->devnum, ctrlif, interface); |
922 | return -EINVAL; |
923 | } |
924 | |
925 | - alts = &iface->altsetting[0]; |
926 | - altsd = get_iface_desc(alts); |
927 | if ((altsd->bInterfaceClass == USB_CLASS_AUDIO || |
928 | altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC) && |
929 | altsd->bInterfaceSubClass == USB_SUBCLASS_MIDISTREAMING) { |
930 | diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c |
931 | index e075a67..5ca4652 100644 |
932 | --- a/sound/usb/mixer.c |
933 | +++ b/sound/usb/mixer.c |
934 | @@ -821,6 +821,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, |
935 | |
936 | case USB_ID(0x046d, 0x0808): |
937 | case USB_ID(0x046d, 0x0809): |
938 | + case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */ |
939 | case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */ |
940 | case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */ |
941 | case USB_ID(0x046d, 0x0991): |