Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.4/0145-4.4.46-all-fixes.patch



Revision 2881
Mon Mar 27 13:49:20 2017 UTC by niro
File size: 28783 byte(s)
linux-4.4.46
1 niro 2881 diff --git a/Makefile b/Makefile
2     index a3dfc73da722..2dd5cb2fe182 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 4
8     -SUBLEVEL = 45
9     +SUBLEVEL = 46
10     EXTRAVERSION =
11     NAME = Blurry Fish Butt
12    
13     diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
14     index a36e8601114d..d5da2115d78a 100644
15     --- a/arch/arc/include/asm/delay.h
16     +++ b/arch/arc/include/asm/delay.h
17     @@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops)
18     " lp 1f \n"
19     " nop \n"
20     "1: \n"
21     - : : "r"(loops));
22     + :
23     + : "r"(loops)
24     + : "lp_count");
25     }
26    
27     extern void __bad_udelay(void);
28     diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
29     index abd961f3e763..91ebe382147f 100644
30     --- a/arch/arc/kernel/unaligned.c
31     +++ b/arch/arc/kernel/unaligned.c
32     @@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
33     if (state.fault)
34     goto fault;
35    
36     + /* clear any remanants of delay slot */
37     if (delay_mode(regs)) {
38     - regs->ret = regs->bta;
39     + regs->ret = regs->bta & ~1U;
40     regs->status32 &= ~STATUS_DE_MASK;
41     } else {
42     regs->ret += state.instr_len;
43     diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
44     index 3f9406d9b9d6..da87943328a5 100644
45     --- a/arch/parisc/include/asm/bitops.h
46     +++ b/arch/parisc/include/asm/bitops.h
47     @@ -6,7 +6,7 @@
48     #endif
49    
50     #include <linux/compiler.h>
51     -#include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */
52     +#include <asm/types.h>
53     #include <asm/byteorder.h>
54     #include <asm/barrier.h>
55     #include <linux/atomic.h>
56     @@ -17,6 +17,12 @@
57     * to include/asm-i386/bitops.h or kerneldoc
58     */
59    
60     +#if __BITS_PER_LONG == 64
61     +#define SHIFT_PER_LONG 6
62     +#else
63     +#define SHIFT_PER_LONG 5
64     +#endif
65     +
66     #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
67    
68    
69     diff --git a/arch/parisc/include/uapi/asm/bitsperlong.h b/arch/parisc/include/uapi/asm/bitsperlong.h
70     index e0a23c7bdd43..07fa7e50bdc0 100644
71     --- a/arch/parisc/include/uapi/asm/bitsperlong.h
72     +++ b/arch/parisc/include/uapi/asm/bitsperlong.h
73     @@ -3,10 +3,8 @@
74    
75     #if defined(__LP64__)
76     #define __BITS_PER_LONG 64
77     -#define SHIFT_PER_LONG 6
78     #else
79     #define __BITS_PER_LONG 32
80     -#define SHIFT_PER_LONG 5
81     #endif
82    
83     #include <asm-generic/bitsperlong.h>
84     diff --git a/arch/parisc/include/uapi/asm/swab.h b/arch/parisc/include/uapi/asm/swab.h
85     index e78403b129ef..928e1bbac98f 100644
86     --- a/arch/parisc/include/uapi/asm/swab.h
87     +++ b/arch/parisc/include/uapi/asm/swab.h
88     @@ -1,6 +1,7 @@
89     #ifndef _PARISC_SWAB_H
90     #define _PARISC_SWAB_H
91    
92     +#include <asm/bitsperlong.h>
93     #include <linux/types.h>
94     #include <linux/compiler.h>
95    
96     @@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
97     }
98     #define __arch_swab32 __arch_swab32
99    
100     -#if BITS_PER_LONG > 32
101     +#if __BITS_PER_LONG > 32
102     /*
103     ** From "PA-RISC 2.0 Architecture", HP Professional Books.
104     ** See Appendix I page 8 , "Endian Byte Swapping".
105     @@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
106     return x;
107     }
108     #define __arch_swab64 __arch_swab64
109     -#endif /* BITS_PER_LONG > 32 */
110     +#endif /* __BITS_PER_LONG > 32 */
111    
112     #endif /* _PARISC_SWAB_H */
113     diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
114     index 01c37b36caf9..02bd587b610b 100644
115     --- a/arch/s390/kernel/ptrace.c
116     +++ b/arch/s390/kernel/ptrace.c
117     @@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
118     if (target == current)
119     save_fpu_regs();
120    
121     + if (MACHINE_HAS_VX)
122     + convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
123     + else
124     + memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
125     +
126     /* If setting FPC, must validate it first. */
127     if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
128     u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
129     @@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
130     if (target == current)
131     save_fpu_regs();
132    
133     + for (i = 0; i < __NUM_VXRS_LOW; i++)
134     + vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
135     +
136     rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
137     if (rc == 0)
138     for (i = 0; i < __NUM_VXRS_LOW; i++)
139     diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
140     index bdc126faf741..6239aa155f6d 100644
141     --- a/arch/tile/kernel/ptrace.c
142     +++ b/arch/tile/kernel/ptrace.c
143     @@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target,
144     const void *kbuf, const void __user *ubuf)
145     {
146     int ret;
147     - struct pt_regs regs;
148     + struct pt_regs regs = *task_pt_regs(target);
149    
150     ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
151     sizeof(regs));
152     diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
153     index 39e30abddf08..71a10f08522e 100644
154     --- a/drivers/gpu/drm/drm_modes.c
155     +++ b/drivers/gpu/drm/drm_modes.c
156     @@ -1401,6 +1401,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
157     return NULL;
158    
159     mode->type |= DRM_MODE_TYPE_USERDEF;
160     + /* fix up 1368x768: GFT/CVT can't express 1366 width due to alignment */
161     + if (cmd->xres == 1366 && mode->hdisplay == 1368) {
162     + mode->hdisplay = 1366;
163     + mode->hsync_start--;
164     + mode->hsync_end--;
165     + drm_mode_set_name(mode);
166     + }
167     drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
168     return mode;
169     }
170     diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
171     index 97d1ed20418b..63fea6a2869c 100644
172     --- a/drivers/gpu/drm/i915/intel_crt.c
173     +++ b/drivers/gpu/drm/i915/intel_crt.c
174     @@ -445,6 +445,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
175     struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
176     struct edid *edid;
177     struct i2c_adapter *i2c;
178     + bool ret = false;
179    
180     BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
181    
182     @@ -461,17 +462,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
183     */
184     if (!is_digital) {
185     DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
186     - return true;
187     + ret = true;
188     + } else {
189     + DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
190     }
191     -
192     - DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
193     } else {
194     DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
195     }
196    
197     kfree(edid);
198    
199     - return false;
200     + return ret;
201     }
202    
203     static enum drm_connector_status
204     diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
205     index 17a15c56028c..c9dcad6a53bf 100644
206     --- a/drivers/infiniband/core/cma.c
207     +++ b/drivers/infiniband/core/cma.c
208     @@ -2578,7 +2578,8 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
209     if (!src_addr || !src_addr->sa_family) {
210     src_addr = (struct sockaddr *) &id->route.addr.src_addr;
211     src_addr->sa_family = dst_addr->sa_family;
212     - if (dst_addr->sa_family == AF_INET6) {
213     + if (IS_ENABLED(CONFIG_IPV6) &&
214     + dst_addr->sa_family == AF_INET6) {
215     struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
216     struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
217     src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
218     diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
219     index 04f3c0db9126..0ae337bec4f2 100644
220     --- a/drivers/infiniband/core/umem.c
221     +++ b/drivers/infiniband/core/umem.c
222     @@ -134,6 +134,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
223     IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
224    
225     if (access & IB_ACCESS_ON_DEMAND) {
226     + put_pid(umem->pid);
227     ret = ib_umem_odp_get(context, umem);
228     if (ret) {
229     kfree(umem);
230     @@ -149,6 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
231    
232     page_list = (struct page **) __get_free_page(GFP_KERNEL);
233     if (!page_list) {
234     + put_pid(umem->pid);
235     kfree(umem);
236     return ERR_PTR(-ENOMEM);
237     }
238     diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
239     index 69a151ae8261..07cfcc326863 100644
240     --- a/drivers/infiniband/ulp/ipoib/ipoib.h
241     +++ b/drivers/infiniband/ulp/ipoib/ipoib.h
242     @@ -63,6 +63,8 @@ enum ipoib_flush_level {
243    
244     enum {
245     IPOIB_ENCAP_LEN = 4,
246     + IPOIB_PSEUDO_LEN = 20,
247     + IPOIB_HARD_LEN = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN,
248    
249     IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
250     IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
251     @@ -131,15 +133,21 @@ struct ipoib_header {
252     u16 reserved;
253     };
254    
255     -struct ipoib_cb {
256     - struct qdisc_skb_cb qdisc_cb;
257     - u8 hwaddr[INFINIBAND_ALEN];
258     +struct ipoib_pseudo_header {
259     + u8 hwaddr[INFINIBAND_ALEN];
260     };
261    
262     -static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
263     +static inline void skb_add_pseudo_hdr(struct sk_buff *skb)
264     {
265     - BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
266     - return (struct ipoib_cb *)skb->cb;
267     + char *data = skb_push(skb, IPOIB_PSEUDO_LEN);
268     +
269     + /*
270     + * only the ipoib header is present now, make room for a dummy
271     + * pseudo header and set skb field accordingly
272     + */
273     + memset(data, 0, IPOIB_PSEUDO_LEN);
274     + skb_reset_mac_header(skb);
275     + skb_pull(skb, IPOIB_HARD_LEN);
276     }
277    
278     /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
279     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
280     index de5e2b01ab05..3ba7de5f9379 100644
281     --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
282     +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
283     @@ -63,6 +63,8 @@ MODULE_PARM_DESC(cm_data_debug_level,
284     #define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
285     #define IPOIB_CM_RX_UPDATE_MASK (0x3)
286    
287     +#define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
288     +
289     static struct ib_qp_attr ipoib_cm_err_attr = {
290     .qp_state = IB_QPS_ERR
291     };
292     @@ -147,15 +149,15 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
293     struct sk_buff *skb;
294     int i;
295    
296     - skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
297     + skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
298     if (unlikely(!skb))
299     return NULL;
300    
301     /*
302     - * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
303     + * IPoIB adds a IPOIB_ENCAP_LEN byte header, this will align the
304     * IP header to a multiple of 16.
305     */
306     - skb_reserve(skb, 12);
307     + skb_reserve(skb, IPOIB_CM_RX_RESERVE);
308    
309     mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
310     DMA_FROM_DEVICE);
311     @@ -624,9 +626,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
312     if (wc->byte_len < IPOIB_CM_COPYBREAK) {
313     int dlen = wc->byte_len;
314    
315     - small_skb = dev_alloc_skb(dlen + 12);
316     + small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
317     if (small_skb) {
318     - skb_reserve(small_skb, 12);
319     + skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
320     ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
321     dlen, DMA_FROM_DEVICE);
322     skb_copy_from_linear_data(skb, small_skb->data, dlen);
323     @@ -663,8 +665,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
324    
325     copied:
326     skb->protocol = ((struct ipoib_header *) skb->data)->proto;
327     - skb_reset_mac_header(skb);
328     - skb_pull(skb, IPOIB_ENCAP_LEN);
329     + skb_add_pseudo_hdr(skb);
330    
331     ++dev->stats.rx_packets;
332     dev->stats.rx_bytes += skb->len;
333     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
334     index 85de078fb0ce..8f8c3af9f4e8 100644
335     --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
336     +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
337     @@ -130,16 +130,15 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
338    
339     buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
340    
341     - skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
342     + skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
343     if (unlikely(!skb))
344     return NULL;
345    
346     /*
347     - * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
348     - * header. So we need 4 more bytes to get to 48 and align the
349     - * IP header to a multiple of 16.
350     + * the IP header will be at IPOIP_HARD_LEN + IB_GRH_BYTES, that is
351     + * 64 bytes aligned
352     */
353     - skb_reserve(skb, 4);
354     + skb_reserve(skb, sizeof(struct ipoib_pseudo_header));
355    
356     mapping = priv->rx_ring[id].mapping;
357     mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
358     @@ -242,8 +241,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
359     skb_pull(skb, IB_GRH_BYTES);
360    
361     skb->protocol = ((struct ipoib_header *) skb->data)->proto;
362     - skb_reset_mac_header(skb);
363     - skb_pull(skb, IPOIB_ENCAP_LEN);
364     + skb_add_pseudo_hdr(skb);
365    
366     ++dev->stats.rx_packets;
367     dev->stats.rx_bytes += skb->len;
368     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
369     index 5f7681b975d0..8a4d10452d61 100644
370     --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
371     +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
372     @@ -850,9 +850,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
373     ipoib_neigh_free(neigh);
374     goto err_drop;
375     }
376     - if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
377     + if (skb_queue_len(&neigh->queue) <
378     + IPOIB_MAX_PATH_REC_QUEUE) {
379     + /* put pseudoheader back on for next time */
380     + skb_push(skb, IPOIB_PSEUDO_LEN);
381     __skb_queue_tail(&neigh->queue, skb);
382     - else {
383     + } else {
384     ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
385     skb_queue_len(&neigh->queue));
386     goto err_drop;
387     @@ -889,7 +892,7 @@ err_drop:
388     }
389    
390     static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
391     - struct ipoib_cb *cb)
392     + struct ipoib_pseudo_header *phdr)
393     {
394     struct ipoib_dev_priv *priv = netdev_priv(dev);
395     struct ipoib_path *path;
396     @@ -897,16 +900,18 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
397    
398     spin_lock_irqsave(&priv->lock, flags);
399    
400     - path = __path_find(dev, cb->hwaddr + 4);
401     + path = __path_find(dev, phdr->hwaddr + 4);
402     if (!path || !path->valid) {
403     int new_path = 0;
404    
405     if (!path) {
406     - path = path_rec_create(dev, cb->hwaddr + 4);
407     + path = path_rec_create(dev, phdr->hwaddr + 4);
408     new_path = 1;
409     }
410     if (path) {
411     if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
412     + /* put pseudoheader back on for next time */
413     + skb_push(skb, IPOIB_PSEUDO_LEN);
414     __skb_queue_tail(&path->queue, skb);
415     } else {
416     ++dev->stats.tx_dropped;
417     @@ -934,10 +939,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
418     be16_to_cpu(path->pathrec.dlid));
419    
420     spin_unlock_irqrestore(&priv->lock, flags);
421     - ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
422     + ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
423     return;
424     } else if ((path->query || !path_rec_start(dev, path)) &&
425     skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
426     + /* put pseudoheader back on for next time */
427     + skb_push(skb, IPOIB_PSEUDO_LEN);
428     __skb_queue_tail(&path->queue, skb);
429     } else {
430     ++dev->stats.tx_dropped;
431     @@ -951,13 +958,15 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
432     {
433     struct ipoib_dev_priv *priv = netdev_priv(dev);
434     struct ipoib_neigh *neigh;
435     - struct ipoib_cb *cb = ipoib_skb_cb(skb);
436     + struct ipoib_pseudo_header *phdr;
437     struct ipoib_header *header;
438     unsigned long flags;
439    
440     + phdr = (struct ipoib_pseudo_header *) skb->data;
441     + skb_pull(skb, sizeof(*phdr));
442     header = (struct ipoib_header *) skb->data;
443    
444     - if (unlikely(cb->hwaddr[4] == 0xff)) {
445     + if (unlikely(phdr->hwaddr[4] == 0xff)) {
446     /* multicast, arrange "if" according to probability */
447     if ((header->proto != htons(ETH_P_IP)) &&
448     (header->proto != htons(ETH_P_IPV6)) &&
449     @@ -970,13 +979,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
450     return NETDEV_TX_OK;
451     }
452     /* Add in the P_Key for multicast*/
453     - cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
454     - cb->hwaddr[9] = priv->pkey & 0xff;
455     + phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
456     + phdr->hwaddr[9] = priv->pkey & 0xff;
457    
458     - neigh = ipoib_neigh_get(dev, cb->hwaddr);
459     + neigh = ipoib_neigh_get(dev, phdr->hwaddr);
460     if (likely(neigh))
461     goto send_using_neigh;
462     - ipoib_mcast_send(dev, cb->hwaddr, skb);
463     + ipoib_mcast_send(dev, phdr->hwaddr, skb);
464     return NETDEV_TX_OK;
465     }
466    
467     @@ -985,16 +994,16 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
468     case htons(ETH_P_IP):
469     case htons(ETH_P_IPV6):
470     case htons(ETH_P_TIPC):
471     - neigh = ipoib_neigh_get(dev, cb->hwaddr);
472     + neigh = ipoib_neigh_get(dev, phdr->hwaddr);
473     if (unlikely(!neigh)) {
474     - neigh_add_path(skb, cb->hwaddr, dev);
475     + neigh_add_path(skb, phdr->hwaddr, dev);
476     return NETDEV_TX_OK;
477     }
478     break;
479     case htons(ETH_P_ARP):
480     case htons(ETH_P_RARP):
481     /* for unicast ARP and RARP should always perform path find */
482     - unicast_arp_send(skb, dev, cb);
483     + unicast_arp_send(skb, dev, phdr);
484     return NETDEV_TX_OK;
485     default:
486     /* ethertype not supported by IPoIB */
487     @@ -1011,11 +1020,13 @@ send_using_neigh:
488     goto unref;
489     }
490     } else if (neigh->ah) {
491     - ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
492     + ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(phdr->hwaddr));
493     goto unref;
494     }
495    
496     if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
497     + /* put pseudoheader back on for next time */
498     + skb_push(skb, sizeof(*phdr));
499     spin_lock_irqsave(&priv->lock, flags);
500     __skb_queue_tail(&neigh->queue, skb);
501     spin_unlock_irqrestore(&priv->lock, flags);
502     @@ -1047,8 +1058,8 @@ static int ipoib_hard_header(struct sk_buff *skb,
503     unsigned short type,
504     const void *daddr, const void *saddr, unsigned len)
505     {
506     + struct ipoib_pseudo_header *phdr;
507     struct ipoib_header *header;
508     - struct ipoib_cb *cb = ipoib_skb_cb(skb);
509    
510     header = (struct ipoib_header *) skb_push(skb, sizeof *header);
511    
512     @@ -1057,12 +1068,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
513    
514     /*
515     * we don't rely on dst_entry structure, always stuff the
516     - * destination address into skb->cb so we can figure out where
517     + * destination address into skb hard header so we can figure out where
518     * to send the packet later.
519     */
520     - memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
521     + phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
522     + memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
523    
524     - return sizeof *header;
525     + return IPOIB_HARD_LEN;
526     }
527    
528     static void ipoib_set_mcast_list(struct net_device *dev)
529     @@ -1638,7 +1650,7 @@ void ipoib_setup(struct net_device *dev)
530    
531     dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
532    
533     - dev->hard_header_len = IPOIB_ENCAP_LEN;
534     + dev->hard_header_len = IPOIB_HARD_LEN;
535     dev->addr_len = INFINIBAND_ALEN;
536     dev->type = ARPHRD_INFINIBAND;
537     dev->tx_queue_len = ipoib_sendq_size * 2;
538     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
539     index 8ec99bdea76b..5580ab0b5781 100644
540     --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
541     +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
542     @@ -756,9 +756,11 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
543     __ipoib_mcast_add(dev, mcast);
544     list_add_tail(&mcast->list, &priv->multicast_list);
545     }
546     - if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
547     + if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
548     + /* put pseudoheader back on for next time */
549     + skb_push(skb, sizeof(struct ipoib_pseudo_header));
550     skb_queue_tail(&mcast->pkt_queue, skb);
551     - else {
552     + } else {
553     ++dev->stats.tx_dropped;
554     dev_kfree_skb_any(skb);
555     }
556     diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
557     index d7c286656a25..7b4ddf0a39ec 100644
558     --- a/drivers/isdn/hardware/eicon/message.c
559     +++ b/drivers/isdn/hardware/eicon/message.c
560     @@ -11304,7 +11304,8 @@ static void mixer_notify_update(PLCI *plci, byte others)
561     ((CAPI_MSG *) msg)->header.ncci = 0;
562     ((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
563     ((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
564     - PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE);
565     + ((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff;
566     + ((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8;
567     ((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
568     w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
569     if (w != _QUEUE_FULL)
570     diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
571     index 521bbf1b29bc..670240c0ece8 100644
572     --- a/drivers/media/i2c/Kconfig
573     +++ b/drivers/media/i2c/Kconfig
574     @@ -607,6 +607,7 @@ config VIDEO_S5K6A3
575     config VIDEO_S5K4ECGX
576     tristate "Samsung S5K4ECGX sensor support"
577     depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
578     + select CRC32
579     ---help---
580     This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
581     camera sensor with an embedded SoC image signal processor.
582     diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
583     index 7be393c96b1a..cf7c18947189 100644
584     --- a/drivers/net/can/c_can/c_can_pci.c
585     +++ b/drivers/net/can/c_can/c_can_pci.c
586     @@ -161,6 +161,7 @@ static int c_can_pci_probe(struct pci_dev *pdev,
587    
588     dev->irq = pdev->irq;
589     priv->base = addr;
590     + priv->device = &pdev->dev;
591    
592     if (!c_can_pci_data->freq) {
593     dev_err(&pdev->dev, "no clock frequency defined\n");
594     diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
595     index 680d1ff07a55..6749b1829469 100644
596     --- a/drivers/net/can/ti_hecc.c
597     +++ b/drivers/net/can/ti_hecc.c
598     @@ -948,7 +948,12 @@ static int ti_hecc_probe(struct platform_device *pdev)
599     netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
600     HECC_DEF_NAPI_WEIGHT);
601    
602     - clk_enable(priv->clk);
603     + err = clk_prepare_enable(priv->clk);
604     + if (err) {
605     + dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
606     + goto probe_exit_clk;
607     + }
608     +
609     err = register_candev(ndev);
610     if (err) {
611     dev_err(&pdev->dev, "register_candev() failed\n");
612     @@ -981,7 +986,7 @@ static int ti_hecc_remove(struct platform_device *pdev)
613     struct ti_hecc_priv *priv = netdev_priv(ndev);
614    
615     unregister_candev(ndev);
616     - clk_disable(priv->clk);
617     + clk_disable_unprepare(priv->clk);
618     clk_put(priv->clk);
619     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
620     iounmap(priv->base);
621     @@ -1006,7 +1011,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
622     hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
623     priv->can.state = CAN_STATE_SLEEPING;
624    
625     - clk_disable(priv->clk);
626     + clk_disable_unprepare(priv->clk);
627    
628     return 0;
629     }
630     @@ -1015,8 +1020,11 @@ static int ti_hecc_resume(struct platform_device *pdev)
631     {
632     struct net_device *dev = platform_get_drvdata(pdev);
633     struct ti_hecc_priv *priv = netdev_priv(dev);
634     + int err;
635    
636     - clk_enable(priv->clk);
637     + err = clk_prepare_enable(priv->clk);
638     + if (err)
639     + return err;
640    
641     hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
642     priv->can.state = CAN_STATE_ERROR_ACTIVE;
643     diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
644     index 5979d38c46b2..7329500943a3 100644
645     --- a/drivers/pinctrl/intel/pinctrl-broxton.c
646     +++ b/drivers/pinctrl/intel/pinctrl-broxton.c
647     @@ -19,7 +19,7 @@
648    
649     #define BXT_PAD_OWN 0x020
650     #define BXT_HOSTSW_OWN 0x080
651     -#define BXT_PADCFGLOCK 0x090
652     +#define BXT_PADCFGLOCK 0x060
653     #define BXT_GPI_IE 0x110
654    
655     #define BXT_COMMUNITY(s, e) \
656     diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
657     index 1fc0de870ff8..361770568ad0 100644
658     --- a/drivers/platform/x86/intel_mid_powerbtn.c
659     +++ b/drivers/platform/x86/intel_mid_powerbtn.c
660     @@ -77,7 +77,7 @@ static int mfld_pb_probe(struct platform_device *pdev)
661    
662     input_set_capability(input, EV_KEY, KEY_POWER);
663    
664     - error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
665     + error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_ONESHOT,
666     DRIVER_NAME, input);
667     if (error) {
668     dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
669     diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
670     index f89245b8ba8e..68a113594808 100644
671     --- a/drivers/video/fbdev/core/fbcmap.c
672     +++ b/drivers/video/fbdev/core/fbcmap.c
673     @@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap)
674    
675     int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
676     {
677     - int tooff = 0, fromoff = 0;
678     - int size;
679     + unsigned int tooff = 0, fromoff = 0;
680     + size_t size;
681    
682     if (to->start > from->start)
683     fromoff = to->start - from->start;
684     else
685     tooff = from->start - to->start;
686     - size = to->len - tooff;
687     - if (size > (int) (from->len - fromoff))
688     - size = from->len - fromoff;
689     - if (size <= 0)
690     + if (fromoff >= from->len || tooff >= to->len)
691     + return -EINVAL;
692     +
693     + size = min_t(size_t, to->len - tooff, from->len - fromoff);
694     + if (size == 0)
695     return -EINVAL;
696     size *= sizeof(u16);
697    
698     @@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
699    
700     int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
701     {
702     - int tooff = 0, fromoff = 0;
703     - int size;
704     + unsigned int tooff = 0, fromoff = 0;
705     + size_t size;
706    
707     if (to->start > from->start)
708     fromoff = to->start - from->start;
709     else
710     tooff = from->start - to->start;
711     - size = to->len - tooff;
712     - if (size > (int) (from->len - fromoff))
713     - size = from->len - fromoff;
714     - if (size <= 0)
715     + if (fromoff >= from->len || tooff >= to->len)
716     + return -EINVAL;
717     +
718     + size = min_t(size_t, to->len - tooff, from->len - fromoff);
719     + if (size == 0)
720     return -EINVAL;
721     size *= sizeof(u16);
722    
723     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
724     index 3c69299c01ab..9a524e763c3e 100644
725     --- a/fs/nfs/nfs4proc.c
726     +++ b/fs/nfs/nfs4proc.c
727     @@ -2422,7 +2422,8 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
728     sattr->ia_valid |= ATTR_MTIME;
729    
730     /* Except MODE, it seems harmless of setting twice. */
731     - if ((attrset[1] & FATTR4_WORD1_MODE))
732     + if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE &&
733     + attrset[1] & FATTR4_WORD1_MODE)
734     sattr->ia_valid &= ~ATTR_MODE;
735    
736     if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
737     diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
738     index e7e78537aea2..63a817631f06 100644
739     --- a/include/linux/nfs4.h
740     +++ b/include/linux/nfs4.h
741     @@ -266,7 +266,7 @@ enum nfsstat4 {
742    
743     static inline bool seqid_mutating_err(u32 err)
744     {
745     - /* rfc 3530 section 8.1.5: */
746     + /* See RFC 7530, section 9.1.7 */
747     switch (err) {
748     case NFS4ERR_STALE_CLIENTID:
749     case NFS4ERR_STALE_STATEID:
750     @@ -275,6 +275,7 @@ static inline bool seqid_mutating_err(u32 err)
751     case NFS4ERR_BADXDR:
752     case NFS4ERR_RESOURCE:
753     case NFS4ERR_NOFILEHANDLE:
754     + case NFS4ERR_MOVED:
755     return false;
756     };
757     return true;
758     diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
759     index 9b6027c51736..316a5525b730 100644
760     --- a/include/linux/sunrpc/clnt.h
761     +++ b/include/linux/sunrpc/clnt.h
762     @@ -180,5 +180,6 @@ const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
763     int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t);
764    
765     const char *rpc_proc_name(const struct rpc_task *task);
766     +void rpc_cleanup_clids(void);
767     #endif /* __KERNEL__ */
768     #endif /* _LINUX_SUNRPC_CLNT_H */
769     diff --git a/kernel/sysctl.c b/kernel/sysctl.c
770     index 999e025bf68e..2f0d157258a2 100644
771     --- a/kernel/sysctl.c
772     +++ b/kernel/sysctl.c
773     @@ -2414,6 +2414,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
774     break;
775     if (neg)
776     continue;
777     + val = convmul * val / convdiv;
778     if ((min && val < *min) || (max && val > *max))
779     continue;
780     *i = val;
781     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
782     index 5d9c8a3136bc..43eefe9d834c 100644
783     --- a/mm/memcontrol.c
784     +++ b/mm/memcontrol.c
785     @@ -4496,9 +4496,9 @@ static int mem_cgroup_do_precharge(unsigned long count)
786     return ret;
787     }
788    
789     - /* Try charges one by one with reclaim */
790     + /* Try charges one by one with reclaim, but do not retry */
791     while (count--) {
792     - ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
793     + ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
794     if (ret)
795     return ret;
796     mc.precharge++;
797     diff --git a/mm/mempolicy.c b/mm/mempolicy.c
798     index 87a177917cb2..a4217fe60dff 100644
799     --- a/mm/mempolicy.c
800     +++ b/mm/mempolicy.c
801     @@ -2006,8 +2006,8 @@ retry_cpuset:
802    
803     nmask = policy_nodemask(gfp, pol);
804     zl = policy_zonelist(gfp, pol, node);
805     - mpol_cond_put(pol);
806     page = __alloc_pages_nodemask(gfp, order, zl, nmask);
807     + mpol_cond_put(pol);
808     out:
809     if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
810     goto retry_cpuset;
811     diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
812     index 7a93922457ff..f28aeb2cfd32 100644
813     --- a/net/sunrpc/clnt.c
814     +++ b/net/sunrpc/clnt.c
815     @@ -337,6 +337,11 @@ out:
816    
817     static DEFINE_IDA(rpc_clids);
818    
819     +void rpc_cleanup_clids(void)
820     +{
821     + ida_destroy(&rpc_clids);
822     +}
823     +
824     static int rpc_alloc_clid(struct rpc_clnt *clnt)
825     {
826     int clid;
827     diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
828     index ee5d3d253102..3142f38d1104 100644
829     --- a/net/sunrpc/sunrpc_syms.c
830     +++ b/net/sunrpc/sunrpc_syms.c
831     @@ -119,6 +119,7 @@ out:
832     static void __exit
833     cleanup_sunrpc(void)
834     {
835     + rpc_cleanup_clids();
836     rpcauth_remove_module();
837     cleanup_socket_xprt();
838     svc_cleanup_xprt_sock();