Magellan Linux

Annotation of /trunk/kernel-lts/patches-3.4/0161-3.4.62-all-fixes.patch



Revision 2303
Mon Oct 14 07:15:13 2013 UTC by niro
File size: 18199 byte(s)
-linux-3.4.62
diff --git a/arch/m32r/boot/compressed/Makefile b/arch/m32r/boot/compressed/Makefile
index 177716b..01729c2 100644
--- a/arch/m32r/boot/compressed/Makefile
+++ b/arch/m32r/boot/compressed/Makefile
@@ -43,9 +43,9 @@ endif

OBJCOPYFLAGS += -R .empty_zero_page

-suffix_$(CONFIG_KERNEL_GZIP) = gz
-suffix_$(CONFIG_KERNEL_BZIP2) = bz2
-suffix_$(CONFIG_KERNEL_LZMA) = lzma
+suffix-$(CONFIG_KERNEL_GZIP) = gz
+suffix-$(CONFIG_KERNEL_BZIP2) = bz2
+suffix-$(CONFIG_KERNEL_LZMA) = lzma

$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) FORCE
$(call if_changed,ld)
diff --git a/arch/m32r/boot/compressed/misc.c b/arch/m32r/boot/compressed/misc.c
index 370d608..28a0952 100644
--- a/arch/m32r/boot/compressed/misc.c
+++ b/arch/m32r/boot/compressed/misc.c
@@ -28,7 +28,7 @@ static unsigned long free_mem_ptr;
static unsigned long free_mem_end_ptr;

#ifdef CONFIG_KERNEL_BZIP2
-static void *memset(void *s, int c, size_t n)
+void *memset(void *s, int c, size_t n)
{
char *ss = s;

@@ -39,6 +39,16 @@ static void *memset(void *s, int c, size_t n)
#endif

#ifdef CONFIG_KERNEL_GZIP
+void *memcpy(void *dest, const void *src, size_t n)
+{
+ char *d = dest;
+ const char *s = src;
+ while (n--)
+ *d++ = *s++;
+
+ return dest;
+}
+
#define BOOT_HEAP_SIZE 0x10000
#include "../../../../lib/decompress_inflate.c"
#endif
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 8c45818..8375622 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3737,10 +3737,6 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
break;
case OpMem8:
ctxt->memop.bytes = 1;
- if (ctxt->memop.type == OP_REG) {
- ctxt->memop.addr.reg = decode_register(ctxt, ctxt->modrm_rm, 1);
- fetch_register_operand(&ctxt->memop);
- }
goto mem_common;
case OpMem16:
ctxt->memop.bytes = 2;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d9f8358..80103bb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3750,11 +3750,17 @@ static int bond_neigh_init(struct neighbour *n)
* The bonding ndo_neigh_setup is called at init time beofre any
* slave exists. So we must declare proxy setup function which will
* be used at run time to resolve the actual slave neigh param setup.
+ *
+ * It's also called by master devices (such as vlans) to setup their
+ * underlying devices. In that case - do nothing, we're already set up from
+ * our init.
*/
static int bond_neigh_setup(struct net_device *dev,
struct neigh_parms *parms)
{
- parms->neigh_setup = bond_neigh_init;
+ /* modify only our neigh_parms */
+ if (parms->dev == dev)
+ parms->neigh_setup = bond_neigh_init;

return 0;
}
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 2205db7..1b44047 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -524,6 +524,7 @@ rx_status_loop:
PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
dev->stats.rx_dropped++;
+ kfree_skb(new_skb);
goto rx_next;
}

diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 5151f06..77ce8b2 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -642,6 +642,28 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
return 0;
}

+static unsigned long iov_pages(const struct iovec *iv, int offset,
+ unsigned long nr_segs)
+{
+ unsigned long seg, base;
+ int pages = 0, len, size;
+
+ while (nr_segs && (offset >= iv->iov_len)) {
+ offset -= iv->iov_len;
+ ++iv;
+ --nr_segs;
+ }
+
+ for (seg = 0; seg < nr_segs; seg++) {
+ base = (unsigned long)iv[seg].iov_base + offset;
+ len = iv[seg].iov_len - offset;
+ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+ pages += size;
+ offset = 0;
+ }
+
+ return pages;
+}

/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
@@ -688,31 +710,15 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
if (unlikely(count > UIO_MAXIOV))
goto err;

- if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
- zerocopy = true;
-
- if (zerocopy) {
- /* Userspace may produce vectors with count greater than
- * MAX_SKB_FRAGS, so we need to linearize parts of the skb
- * to let the rest of data to be fit in the frags.
- */
- if (count > MAX_SKB_FRAGS) {
- copylen = iov_length(iv, count - MAX_SKB_FRAGS);
- if (copylen < vnet_hdr_len)
- copylen = 0;
- else
- copylen -= vnet_hdr_len;
- }
- /* There are 256 bytes to be copied in skb, so there is enough
- * room for skb expand head in case it is used.
- * The rest buffer is mapped from userspace.
- */
- if (copylen < vnet_hdr.hdr_len)
- copylen = vnet_hdr.hdr_len;
- if (!copylen)
- copylen = GOODCOPY_LEN;
+ if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
+ copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
linear = copylen;
- } else {
+ if (iov_pages(iv, vnet_hdr_len + copylen, count)
+ <= MAX_SKB_FRAGS)
+ zerocopy = true;
+ }
+
+ if (!zerocopy) {
copylen = len;
linear = vnet_hdr.hdr_len;
}
@@ -724,9 +730,15 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,

if (zerocopy)
err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
- else
+ else {
err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
len);
+ if (!err && m && m->msg_control) {
+ struct ubuf_info *uarg = m->msg_control;
+ uarg->callback(uarg);
+ }
+ }
+
if (err)
goto err_kfree;

diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index c896b8f..194f879 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -615,8 +615,9 @@ static ssize_t tun_get_user(struct tun_struct *tun,
int offset = 0;

if (!(tun->flags & TUN_NO_PI)) {
- if ((len -= sizeof(pi)) > count)
+ if (len < sizeof(pi))
return -EINVAL;
+ len -= sizeof(pi);

if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
return -EFAULT;
@@ -624,8 +625,9 @@ static ssize_t tun_get_user(struct tun_struct *tun,
}

if (tun->flags & TUN_VNET_HDR) {
- if ((len -= tun->vnet_hdr_sz) > count)
+ if (len < tun->vnet_hdr_sz)
return -EINVAL;
+ len -= tun->vnet_hdr_sz;

if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
return -EFAULT;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 1a9e2a9..a50cb9c 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1603,6 +1603,7 @@ void vhost_zerocopy_callback(struct ubuf_info *ubuf)
struct vhost_ubuf_ref *ubufs = ubuf->ctx;
struct vhost_virtqueue *vq = ubufs->vq;

+ vhost_poll_queue(&vq->poll);
/* set len = 1 to mark this desc buffers done DMA */
vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index ba45e6b..f5a21d0 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -123,6 +123,8 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
#define ICMPV6_NOT_NEIGHBOUR 2
#define ICMPV6_ADDR_UNREACH 3
#define ICMPV6_PORT_UNREACH 4
+#define ICMPV6_POLICY_FAIL 5
+#define ICMPV6_REJECT_ROUTE 6

/*
* Codes for Time Exceeded
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 8260ef7..e6412ee 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -260,6 +260,7 @@ struct inet6_skb_parm {
#define IP6SKB_XFRM_TRANSFORMED 1
#define IP6SKB_FORWARDED 2
#define IP6SKB_REROUTED 4
+#define IP6SKB_FRAGMENTED 16
};

#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 9069071..ca670d9 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1155,7 +1155,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
mld2q = (struct mld2_query *)icmp6_hdr(skb);
if (!mld2q->mld2q_nsrcs)
group = &mld2q->mld2q_mca;
- max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1;
+
+ max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))), 1UL);
}

if (!group)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 69b7ca3..ebd4b21 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1442,16 +1442,18 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
atomic_set(&p->refcnt, 1);
p->reachable_time =
neigh_rand_reach_time(p->base_reachable_time);
+ dev_hold(dev);
+ p->dev = dev;
+ write_pnet(&p->net, hold_net(net));
+ p->sysctl_table = NULL;

if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
+ release_net(net);
+ dev_put(dev);
kfree(p);
return NULL;
}

- dev_hold(dev);
- p->dev = dev;
- write_pnet(&p->net, hold_net(net));
- p->sysctl_table = NULL;
write_lock_bh(&tbl->lock);
p->next = tbl->parms.next;
tbl->parms.next = p;
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 0c28508..77d1550 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -19,6 +19,9 @@
#include <net/sock.h>
#include <net/net_ratelimit.h>

+static int zero = 0;
+static int ushort_max = USHRT_MAX;
+
#ifdef CONFIG_RPS
static int rps_sock_flow_sysctl(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -197,7 +200,9 @@ static struct ctl_table netns_core_table[] = {
.data = &init_net.core.sysctl_somaxconn,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .extra1 = &zero,
+ .extra2 = &ushort_max,
+ .proc_handler = proc_dointvec_minmax
},
{ }
};
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 30b88d7..424704a 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -71,7 +71,6 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ip.h>
@@ -1772,10 +1771,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
if (!c)
continue;

- if (IS_LEAF(c)) {
- prefetch(rcu_dereference_rtnl(p->child[idx]));
+ if (IS_LEAF(c))
return (struct leaf *) c;
- }

/* Rescan start scanning in new node */
p = (struct tnode *) c;
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index a9077f4..b6ae92a 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -206,8 +206,8 @@ static u32 cubic_root(u64 a)
*/
static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
{
- u64 offs;
- u32 delta, t, bic_target, max_cnt;
+ u32 delta, bic_target, max_cnt;
+ u64 offs, t;

ca->ack_cnt++; /* count the number of ACKs */

@@ -250,9 +250,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
* if the cwnd < 1 million packets !!!
*/

+ t = (s32)(tcp_time_stamp - ca->epoch_start);
+ t += msecs_to_jiffies(ca->delay_min >> 3);
/* change the unit from HZ to bictcp_HZ */
- t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3)
- - ca->epoch_start) << BICTCP_HZ) / HZ;
+ t <<= BICTCP_HZ;
+ do_div(t, HZ);

if (t < ca->bic_K) /* t - K */
offs = ca->bic_K - t;
@@ -414,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
return;

/* Discard delay samples right after fast recovery */
- if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+ if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
return;

delay = (rtt_us << 3) / USEC_PER_MSEC;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d427f1b..abfa007 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -910,12 +910,10 @@ retry:
if (ifp->flags & IFA_F_OPTIMISTIC)
addr_flags |= IFA_F_OPTIMISTIC;

- ift = !max_addresses ||
- ipv6_count_addresses(idev) < max_addresses ?
- ipv6_add_addr(idev, &addr, tmp_plen,
- ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
- addr_flags) : NULL;
- if (!ift || IS_ERR(ift)) {
+ ift = ipv6_add_addr(idev, &addr, tmp_plen,
+ ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
+ addr_flags);
+ if (IS_ERR(ift)) {
in6_ifa_put(ifp);
in6_dev_put(idev);
printk(KERN_INFO
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 27ac95a..dbf20f6 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -917,6 +917,14 @@ static const struct icmp6_err {
.err = ECONNREFUSED,
.fatal = 1,
},
+ { /* POLICY_FAIL */
+ .err = EACCES,
+ .fatal = 1,
+ },
+ { /* REJECT_ROUTE */
+ .err = EACCES,
+ .fatal = 1,
+ },
};

int icmpv6_err_convert(u8 type, u8 code, int *err)
@@ -928,7 +936,7 @@ int icmpv6_err_convert(u8 type, u8 code, int *err)
switch (type) {
case ICMPV6_DEST_UNREACH:
fatal = 1;
- if (code <= ICMPV6_PORT_UNREACH) {
+ if (code < ARRAY_SIZE(tab_unreach)) {
*err = tab_unreach[code].err;
fatal = tab_unreach[code].fatal;
}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index c3a007d..5bb77a6 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -949,14 +949,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,

if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
#ifdef CONFIG_IPV6_SUBTREES
- if (fn->subtree)
- fn = fib6_lookup_1(fn->subtree, args + 1);
+ if (fn->subtree) {
+ struct fib6_node *sfn;
+ sfn = fib6_lookup_1(fn->subtree,
+ args + 1);
+ if (!sfn)
+ goto backtrack;
+ fn = sfn;
+ }
#endif
- if (!fn || fn->fn_flags & RTN_RTINFO)
+ if (fn->fn_flags & RTN_RTINFO)
return fn;
}
}
-
+#ifdef CONFIG_IPV6_SUBTREES
+backtrack:
+#endif
if (fn->fn_flags & RTN_ROOT)
break;

diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 843d6eb..5cc78e6 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -441,7 +441,6 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
int hlen = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
int len;
- int err;
u8 *opt;

if (!dev->addr_len)
@@ -451,14 +450,12 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
if (llinfo)
len += ndisc_opt_addr_space(dev);

- skb = sock_alloc_send_skb(sk,
- (MAX_HEADER + sizeof(struct ipv6hdr) +
- len + hlen + tlen),
- 1, &err);
+ skb = alloc_skb((MAX_HEADER + sizeof(struct ipv6hdr) +
+ len + hlen + tlen), GFP_ATOMIC);
if (!skb) {
ND_PRINTK0(KERN_ERR
- "ICMPv6 ND: %s() failed to allocate an skb, err=%d.\n",
- __func__, err);
+ "ICMPv6 ND: %s() failed to allocate an skb.\n",
+ __func__);
return NULL;
}

@@ -486,6 +483,11 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
csum_partial(hdr,
len, 0));

+ /* Manually assign socket ownership as we avoid calling
+ * sock_alloc_send_pskb() to bypass wmem buffer limits
+ */
+ skb_set_owner_w(skb, sk);
+
return skb;
}

diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 6bda7aa..ff900ac 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -516,6 +516,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
head->tstamp = fq->q.stamp;
ipv6_hdr(head)->payload_len = htons(payload_len);
IP6CB(head)->nhoff = nhoff;
+ IP6CB(head)->flags |= IP6SKB_FRAGMENTED;

/* Yes, and fold redundant checksum back. 8) */
if (head->ip_summed == CHECKSUM_COMPLETE)
@@ -551,6 +552,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
const struct ipv6hdr *hdr = ipv6_hdr(skb);
struct net *net = dev_net(skb_dst(skb)->dev);

+ if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
+ goto fail_hdr;
+
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

/* Jumbo payload inhibits frag. header */
@@ -571,6 +575,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
+ IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
return 1;
}

diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7ee7121..c4717e8 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1571,7 +1571,7 @@ ipv6_pktoptions:
if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
if (np->rxopt.bits.rxtclass)
- np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
+ np->rcv_tclass = ipv6_tclass(ipv6_hdr(opt_skb));
if (ipv6_opt_accepted(sk, opt_skb)) {
skb_set_owner_r(opt_skb, sk);
opt_skb = xchg(&np->pktoptions, opt_skb);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index f08b9166..caa5aff 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -86,7 +86,7 @@ struct htb_class {
unsigned int children;
struct htb_class *parent; /* parent class */

- int prio; /* these two are used only by leaves... */
+ u32 prio; /* these two are used only by leaves... */
int quantum; /* but stored for parent-to-leaf return */

union {
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 527e3f0..dd625af 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -53,6 +53,7 @@ struct eth_bearer {
struct tipc_bearer *bearer;
struct net_device *dev;
struct packet_type tipc_packet_type;
+ struct work_struct setup;
struct work_struct cleanup;
};

@@ -138,6 +139,17 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
}

/**
+ * setup_bearer - setup association between Ethernet bearer and interface
+ */
+static void setup_bearer(struct work_struct *work)
+{
+ struct eth_bearer *eb_ptr =
+ container_of(work, struct eth_bearer, setup);
+
+ dev_add_pack(&eb_ptr->tipc_packet_type);
+}
+
+/**
* enable_bearer - attach TIPC bearer to an Ethernet interface
*/

@@ -181,7 +193,8 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
eb_ptr->tipc_packet_type.func = recv_msg;
eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
- dev_add_pack(&eb_ptr->tipc_packet_type);
+ INIT_WORK(&eb_ptr->setup, setup_bearer);
+ schedule_work(&eb_ptr->setup);

/* Associate TIPC bearer with Ethernet bearer */
