Magellan Linux

Annotation of /trunk/kernel-magellan/patches-3.16/0100-3.16.1-all-fixes.patch

Revision 2497
Tue Sep 9 06:58:13 2014 UTC by niro
File size: 17219 bytes
-linux-3.16.1
1 niro 2497 diff --git a/Makefile b/Makefile
2     index d0901b46b4bf..87663a2d1d10 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,8 +1,8 @@
6     VERSION = 3
7     PATCHLEVEL = 16
8     -SUBLEVEL = 0
9     +SUBLEVEL = 1
10     EXTRAVERSION =
11     -NAME = Shuffling Zombie Juror
12     +NAME = Museum of Fishiegoodies
13    
14     # *DOCUMENTATION*
15     # To see a list of typical targets execute "make help"
16     diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
17     index 816d8202fa0a..dea1cfa2122b 100644
18     --- a/arch/sparc/include/asm/tlbflush_64.h
19     +++ b/arch/sparc/include/asm/tlbflush_64.h
20     @@ -34,6 +34,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
21     {
22     }
23    
24     +void flush_tlb_kernel_range(unsigned long start, unsigned long end);
25     +
26     #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
27    
28     void flush_tlb_pending(void);
29     @@ -48,11 +50,6 @@ void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
30    
31     #ifndef CONFIG_SMP
32    
33     -#define flush_tlb_kernel_range(start,end) \
34     -do { flush_tsb_kernel_range(start,end); \
35     - __flush_tlb_kernel_range(start,end); \
36     -} while (0)
37     -
38     static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
39     {
40     __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
41     @@ -63,11 +60,6 @@ static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vad
42     void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
43     void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
44    
45     -#define flush_tlb_kernel_range(start, end) \
46     -do { flush_tsb_kernel_range(start,end); \
47     - smp_flush_tlb_kernel_range(start, end); \
48     -} while (0)
49     -
50     #define global_flush_tlb_page(mm, vaddr) \
51     smp_flush_tlb_page(mm, vaddr)
52    
53     diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
54     index e01d75d40329..66dacd56bb10 100644
55     --- a/arch/sparc/kernel/ldc.c
56     +++ b/arch/sparc/kernel/ldc.c
57     @@ -1336,7 +1336,7 @@ int ldc_connect(struct ldc_channel *lp)
58     if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
59     !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
60     lp->hs_state != LDC_HS_OPEN)
61     - err = -EINVAL;
62     + err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL);
63     else
64     err = start_handshake(lp);
65    
66     diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c
67     index aa4d55b0bdf0..5ce8f2f64604 100644
68     --- a/arch/sparc/math-emu/math_32.c
69     +++ b/arch/sparc/math-emu/math_32.c
70     @@ -499,7 +499,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
71     case 0: fsr = *pfsr;
72     if (IR == -1) IR = 2;
73     /* fcc is always fcc0 */
74     - fsr &= ~0xc00; fsr |= (IR << 10); break;
75     + fsr &= ~0xc00; fsr |= (IR << 10);
76     *pfsr = fsr;
77     break;
78     case 1: rd->s = IR; break;
79     diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
80     index 16b58ff11e65..2cfb0f25e0ed 100644
81     --- a/arch/sparc/mm/init_64.c
82     +++ b/arch/sparc/mm/init_64.c
83     @@ -351,6 +351,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
84    
85     mm = vma->vm_mm;
86    
87     + /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */
88     + if (!pte_accessible(mm, pte))
89     + return;
90     +
91     spin_lock_irqsave(&mm->context.lock, flags);
92    
93     #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
94     @@ -2619,6 +2623,10 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
95    
96     pte = pmd_val(entry);
97    
98     + /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */
99     + if (!(pte & _PAGE_VALID))
100     + return;
101     +
102     /* We are fabricating 8MB pages using 4MB real hw pages. */
103     pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
104    
105     @@ -2699,3 +2707,26 @@ void hugetlb_setup(struct pt_regs *regs)
106     }
107     }
108     #endif
109     +
110     +#ifdef CONFIG_SMP
111     +#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
112     +#else
113     +#define do_flush_tlb_kernel_range __flush_tlb_kernel_range
114     +#endif
115     +
116     +void flush_tlb_kernel_range(unsigned long start, unsigned long end)
117     +{
118     + if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
119     + if (start < LOW_OBP_ADDRESS) {
120     + flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
121     + do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
122     + }
123     + if (end > HI_OBP_ADDRESS) {
124     + flush_tsb_kernel_range(end, HI_OBP_ADDRESS);
125     + do_flush_tlb_kernel_range(end, HI_OBP_ADDRESS);
126     + }
127     + } else {
128     + flush_tsb_kernel_range(start, end);
129     + do_flush_tlb_kernel_range(start, end);
130     + }
131     +}
132     diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
133     index 8afa579e7c40..a3dd5dc64f4c 100644
134     --- a/drivers/net/ethernet/broadcom/tg3.c
135     +++ b/drivers/net/ethernet/broadcom/tg3.c
136     @@ -7830,17 +7830,18 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
137    
138     static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
139    
140     -/* Use GSO to workaround a rare TSO bug that may be triggered when the
141     - * TSO header is greater than 80 bytes.
142     +/* Use GSO to workaround all TSO packets that meet HW bug conditions
143     + * indicated in tg3_tx_frag_set()
144     */
145     -static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
146     +static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
147     + struct netdev_queue *txq, struct sk_buff *skb)
148     {
149     struct sk_buff *segs, *nskb;
150     u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
151    
152     /* Estimate the number of fragments in the worst case */
153     - if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
154     - netif_stop_queue(tp->dev);
155     + if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
156     + netif_tx_stop_queue(txq);
157    
158     /* netif_tx_stop_queue() must be done before checking
159     * checking tx index in tg3_tx_avail() below, because in
160     @@ -7848,13 +7849,14 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
161     * netif_tx_queue_stopped().
162     */
163     smp_mb();
164     - if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
165     + if (tg3_tx_avail(tnapi) <= frag_cnt_est)
166     return NETDEV_TX_BUSY;
167    
168     - netif_wake_queue(tp->dev);
169     + netif_tx_wake_queue(txq);
170     }
171    
172     - segs = skb_gso_segment(skb, tp->dev->features & ~(NETIF_F_TSO | NETIF_F_TSO6));
173     + segs = skb_gso_segment(skb, tp->dev->features &
174     + ~(NETIF_F_TSO | NETIF_F_TSO6));
175     if (IS_ERR(segs) || !segs)
176     goto tg3_tso_bug_end;
177    
178     @@ -7930,7 +7932,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
179     if (!skb_is_gso_v6(skb)) {
180     if (unlikely((ETH_HLEN + hdr_len) > 80) &&
181     tg3_flag(tp, TSO_BUG))
182     - return tg3_tso_bug(tp, skb);
183     + return tg3_tso_bug(tp, tnapi, txq, skb);
184    
185     ip_csum = iph->check;
186     ip_tot_len = iph->tot_len;
187     @@ -8061,7 +8063,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
188     iph->tot_len = ip_tot_len;
189     }
190     tcph->check = tcp_csum;
191     - return tg3_tso_bug(tp, skb);
192     + return tg3_tso_bug(tp, tnapi, txq, skb);
193     }
194    
195     /* If the workaround fails due to memory/mapping
196     diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
197     index 3a77f9ead004..556aab75f490 100644
198     --- a/drivers/net/ethernet/brocade/bna/bnad.c
199     +++ b/drivers/net/ethernet/brocade/bna/bnad.c
200     @@ -600,9 +600,9 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
201     prefetch(bnad->netdev);
202    
203     cq = ccb->sw_q;
204     - cmpl = &cq[ccb->producer_index];
205    
206     while (packets < budget) {
207     + cmpl = &cq[ccb->producer_index];
208     if (!cmpl->valid)
209     break;
210     /* The 'valid' field is set by the adapter, only after writing
211     diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
212     index 958df383068a..ef8a5c20236a 100644
213     --- a/drivers/net/macvlan.c
214     +++ b/drivers/net/macvlan.c
215     @@ -646,6 +646,7 @@ static int macvlan_init(struct net_device *dev)
216     (lowerdev->state & MACVLAN_STATE_MASK);
217     dev->features = lowerdev->features & MACVLAN_FEATURES;
218     dev->features |= ALWAYS_ON_FEATURES;
219     + dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES;
220     dev->gso_max_size = lowerdev->gso_max_size;
221     dev->iflink = lowerdev->ifindex;
222     dev->hard_header_len = lowerdev->hard_header_len;
223     diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
224     index 203651ebccb0..4eaadcfcb0fe 100644
225     --- a/drivers/net/phy/mdio_bus.c
226     +++ b/drivers/net/phy/mdio_bus.c
227     @@ -255,7 +255,6 @@ int mdiobus_register(struct mii_bus *bus)
228    
229     bus->dev.parent = bus->parent;
230     bus->dev.class = &mdio_bus_class;
231     - bus->dev.driver = bus->parent->driver;
232     bus->dev.groups = NULL;
233     dev_set_name(&bus->dev, "%s", bus->id);
234    
235     diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
236     index 160e7510aca6..0787b9756165 100644
237     --- a/drivers/sbus/char/bbc_envctrl.c
238     +++ b/drivers/sbus/char/bbc_envctrl.c
239     @@ -452,6 +452,9 @@ static void attach_one_temp(struct bbc_i2c_bus *bp, struct platform_device *op,
240     if (!tp)
241     return;
242    
243     + INIT_LIST_HEAD(&tp->bp_list);
244     + INIT_LIST_HEAD(&tp->glob_list);
245     +
246     tp->client = bbc_i2c_attach(bp, op);
247     if (!tp->client) {
248     kfree(tp);
249     @@ -497,6 +500,9 @@ static void attach_one_fan(struct bbc_i2c_bus *bp, struct platform_device *op,
250     if (!fp)
251     return;
252    
253     + INIT_LIST_HEAD(&fp->bp_list);
254     + INIT_LIST_HEAD(&fp->glob_list);
255     +
256     fp->client = bbc_i2c_attach(bp, op);
257     if (!fp->client) {
258     kfree(fp);
259     diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
260     index c7763e482eb2..812b5f0361b6 100644
261     --- a/drivers/sbus/char/bbc_i2c.c
262     +++ b/drivers/sbus/char/bbc_i2c.c
263     @@ -300,13 +300,18 @@ static struct bbc_i2c_bus * attach_one_i2c(struct platform_device *op, int index
264     if (!bp)
265     return NULL;
266    
267     + INIT_LIST_HEAD(&bp->temps);
268     + INIT_LIST_HEAD(&bp->fans);
269     +
270     bp->i2c_control_regs = of_ioremap(&op->resource[0], 0, 0x2, "bbc_i2c_regs");
271     if (!bp->i2c_control_regs)
272     goto fail;
273    
274     - bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel");
275     - if (!bp->i2c_bussel_reg)
276     - goto fail;
277     + if (op->num_resources == 2) {
278     + bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel");
279     + if (!bp->i2c_bussel_reg)
280     + goto fail;
281     + }
282    
283     bp->waiting = 0;
284     init_waitqueue_head(&bp->wq);
285     diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
286     index 2f57df9a71d9..a1e09c0d46f2 100644
287     --- a/drivers/tty/serial/sunsab.c
288     +++ b/drivers/tty/serial/sunsab.c
289     @@ -157,6 +157,15 @@ receive_chars(struct uart_sunsab_port *up,
290     (up->port.line == up->port.cons->index))
291     saw_console_brk = 1;
292    
293     + if (count == 0) {
294     + if (unlikely(stat->sreg.isr1 & SAB82532_ISR1_BRK)) {
295     + stat->sreg.isr0 &= ~(SAB82532_ISR0_PERR |
296     + SAB82532_ISR0_FERR);
297     + up->port.icount.brk++;
298     + uart_handle_break(&up->port);
299     + }
300     + }
301     +
302     for (i = 0; i < count; i++) {
303     unsigned char ch = buf[i], flag;
304    
305     diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
306     index a4daf9eb8562..8dd8cab88b87 100644
307     --- a/include/net/ip_tunnels.h
308     +++ b/include/net/ip_tunnels.h
309     @@ -40,6 +40,7 @@ struct ip_tunnel_prl_entry {
310    
311     struct ip_tunnel_dst {
312     struct dst_entry __rcu *dst;
313     + __be32 saddr;
314     };
315    
316     struct ip_tunnel {
317     diff --git a/lib/iovec.c b/lib/iovec.c
318     index 7a7c2da4cddf..df3abd1eaa4a 100644
319     --- a/lib/iovec.c
320     +++ b/lib/iovec.c
321     @@ -85,6 +85,10 @@ EXPORT_SYMBOL(memcpy_toiovecend);
322     int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
323     int offset, int len)
324     {
325     + /* No data? Done! */
326     + if (len == 0)
327     + return 0;
328     +
329     /* Skip over the finished iovecs */
330     while (offset >= iov->iov_len) {
331     offset -= iov->iov_len;
332     diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
333     index f14e54a05691..022d18ab27a6 100644
334     --- a/net/batman-adv/fragmentation.c
335     +++ b/net/batman-adv/fragmentation.c
336     @@ -128,6 +128,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
337     {
338     struct batadv_frag_table_entry *chain;
339     struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
340     + struct batadv_frag_list_entry *frag_entry_last = NULL;
341     struct batadv_frag_packet *frag_packet;
342     uint8_t bucket;
343     uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet);
344     @@ -180,11 +181,14 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
345     ret = true;
346     goto out;
347     }
348     +
349     + /* store current entry because it could be the last in list */
350     + frag_entry_last = frag_entry_curr;
351     }
352    
353     - /* Reached the end of the list, so insert after 'frag_entry_curr'. */
354     - if (likely(frag_entry_curr)) {
355     - hlist_add_after(&frag_entry_curr->list, &frag_entry_new->list);
356     + /* Reached the end of the list, so insert after 'frag_entry_last'. */
357     + if (likely(frag_entry_last)) {
358     + hlist_add_after(&frag_entry_last->list, &frag_entry_new->list);
359     chain->size += skb->len - hdr_size;
360     chain->timestamp = jiffies;
361     ret = true;
362     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
363     index c1a33033cbe2..58ff88edbefd 100644
364     --- a/net/core/skbuff.c
365     +++ b/net/core/skbuff.c
366     @@ -2976,9 +2976,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
367     tail = nskb;
368    
369     __copy_skb_header(nskb, head_skb);
370     - nskb->mac_len = head_skb->mac_len;
371    
372     skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
373     + skb_reset_mac_len(nskb);
374    
375     skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
376     nskb->data - tnl_hlen,
377     diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
378     index 6f9de61dce5f..45920d928341 100644
379     --- a/net/ipv4/ip_tunnel.c
380     +++ b/net/ipv4/ip_tunnel.c
381     @@ -69,23 +69,25 @@ static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
382     }
383    
384     static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
385     - struct dst_entry *dst)
386     + struct dst_entry *dst, __be32 saddr)
387     {
388     struct dst_entry *old_dst;
389    
390     dst_clone(dst);
391     old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
392     dst_release(old_dst);
393     + idst->saddr = saddr;
394     }
395    
396     -static void tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
397     +static void tunnel_dst_set(struct ip_tunnel *t,
398     + struct dst_entry *dst, __be32 saddr)
399     {
400     - __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst);
401     + __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst, saddr);
402     }
403    
404     static void tunnel_dst_reset(struct ip_tunnel *t)
405     {
406     - tunnel_dst_set(t, NULL);
407     + tunnel_dst_set(t, NULL, 0);
408     }
409    
410     void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
411     @@ -93,20 +95,25 @@ void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
412     int i;
413    
414     for_each_possible_cpu(i)
415     - __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
416     + __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL, 0);
417     }
418     EXPORT_SYMBOL(ip_tunnel_dst_reset_all);
419    
420     -static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
421     +static struct rtable *tunnel_rtable_get(struct ip_tunnel *t,
422     + u32 cookie, __be32 *saddr)
423     {
424     + struct ip_tunnel_dst *idst;
425     struct dst_entry *dst;
426    
427     rcu_read_lock();
428     - dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
429     + idst = this_cpu_ptr(t->dst_cache);
430     + dst = rcu_dereference(idst->dst);
431     if (dst && !atomic_inc_not_zero(&dst->__refcnt))
432     dst = NULL;
433     if (dst) {
434     - if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
435     + if (!dst->obsolete || dst->ops->check(dst, cookie)) {
436     + *saddr = idst->saddr;
437     + } else {
438     tunnel_dst_reset(t);
439     dst_release(dst);
440     dst = NULL;
441     @@ -367,7 +374,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
442    
443     if (!IS_ERR(rt)) {
444     tdev = rt->dst.dev;
445     - tunnel_dst_set(tunnel, &rt->dst);
446     + tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);
447     ip_rt_put(rt);
448     }
449     if (dev->type != ARPHRD_ETHER)
450     @@ -610,7 +617,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
451     init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
452     tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);
453    
454     - rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL;
455     + rt = connected ? tunnel_rtable_get(tunnel, 0, &fl4.saddr) : NULL;
456    
457     if (!rt) {
458     rt = ip_route_output_key(tunnel->net, &fl4);
459     @@ -620,7 +627,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
460     goto tx_error;
461     }
462     if (connected)
463     - tunnel_dst_set(tunnel, &rt->dst);
464     + tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);
465     }
466    
467     if (rt->dst.dev == dev) {
468     diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
469     index 9a5e05f27f4f..b40ad897f945 100644
470     --- a/net/ipv4/tcp_vegas.c
471     +++ b/net/ipv4/tcp_vegas.c
472     @@ -218,7 +218,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
473     * This is:
474     * (actual rate in segments) * baseRTT
475     */
476     - target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt;
477     + target_cwnd = (u64)tp->snd_cwnd * vegas->baseRTT;
478     + do_div(target_cwnd, rtt);
479    
480     /* Calculate the difference between the window we had,
481     * and the window we would like to have. This quantity
482     diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
483     index 27b9825753d1..8276977d2c85 100644
484     --- a/net/ipv4/tcp_veno.c
485     +++ b/net/ipv4/tcp_veno.c
486     @@ -144,7 +144,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
487    
488     rtt = veno->minrtt;
489    
490     - target_cwnd = (tp->snd_cwnd * veno->basertt);
491     + target_cwnd = (u64)tp->snd_cwnd * veno->basertt;
492     target_cwnd <<= V_PARAM_SHIFT;
493     do_div(target_cwnd, rtt);
494    
495     diff --git a/net/sctp/output.c b/net/sctp/output.c
496     index 01ab8e0723f0..407ae2bf97b0 100644
497     --- a/net/sctp/output.c
498     +++ b/net/sctp/output.c
499     @@ -599,7 +599,7 @@ out:
500     return err;
501     no_route:
502     kfree_skb(nskb);
503     - IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
504     + IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
505    
506     /* FIXME: Returning the 'err' will effect all the associations
507     * associated with a socket, although only one of the paths of the