Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.4/0134-4.4.35-all-fixes.patch

Revision 2870
Mon Mar 27 13:49:13 2017 UTC by niro
File size: 38198 byte(s)
linux-4.4.35
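
This file is the stock 4.4.34 -> 4.4.35 stable update (note the SUBLEVEL bump in the first Makefile hunk) as carried in the kernel-alx patch series. As a minimal sketch only, assuming a pristine linux-4.4.34 source tree and this patch saved next to it (the directory layout here is illustrative, not part of the repository), it would normally be applied with strip level 1, matching the a/ and b/ path prefixes used throughout the diff:

    # dry run first to confirm every hunk applies cleanly to a 4.4.34 tree
    cd linux-4.4.34
    patch -p1 --dry-run < ../0134-4.4.35-all-fixes.patch
    # apply for real once the dry run reports no failures
    patch -p1 < ../0134-4.4.35-all-fixes.patch
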
1 niro 2870 diff --git a/Makefile b/Makefile
2     index 30924aabf1b4..f88830af1533 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 4
8     -SUBLEVEL = 34
9     +SUBLEVEL = 35
10     EXTRAVERSION =
11     NAME = Blurry Fish Butt
12    
13     @@ -395,11 +395,12 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
14     -fno-strict-aliasing -fno-common \
15     -Werror-implicit-function-declaration \
16     -Wno-format-security \
17     - -std=gnu89
18     + -std=gnu89 $(call cc-option,-fno-PIE)
19     +
20    
21     KBUILD_AFLAGS_KERNEL :=
22     KBUILD_CFLAGS_KERNEL :=
23     -KBUILD_AFLAGS := -D__ASSEMBLY__
24     +KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
25     KBUILD_AFLAGS_MODULE := -DMODULE
26     KBUILD_CFLAGS_MODULE := -DMODULE
27     KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
28     diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
29     index 6cb5834062a3..e2defc7593a4 100644
30     --- a/arch/x86/kernel/cpu/amd.c
31     +++ b/arch/x86/kernel/cpu/amd.c
32     @@ -352,7 +352,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
33     #ifdef CONFIG_SMP
34     unsigned bits;
35     int cpu = smp_processor_id();
36     - unsigned int socket_id, core_complex_id;
37    
38     bits = c->x86_coreid_bits;
39     /* Low order bits define the core id (index of core in socket) */
40     @@ -370,10 +369,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
41     if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
42     return;
43    
44     - socket_id = (c->apicid >> bits) - 1;
45     - core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3;
46     -
47     - per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id;
48     + per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
49     #endif
50     }
51    
52     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
53     index 685ef431a41d..7429d481a311 100644
54     --- a/arch/x86/kvm/x86.c
55     +++ b/arch/x86/kvm/x86.c
56     @@ -199,7 +199,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
57     struct kvm_shared_msrs *locals
58     = container_of(urn, struct kvm_shared_msrs, urn);
59     struct kvm_shared_msr_values *values;
60     + unsigned long flags;
61    
62     + /*
63     + * Disabling irqs at this point since the following code could be
64     + * interrupted and executed through kvm_arch_hardware_disable()
65     + */
66     + local_irq_save(flags);
67     + if (locals->registered) {
68     + locals->registered = false;
69     + user_return_notifier_unregister(urn);
70     + }
71     + local_irq_restore(flags);
72     for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
73     values = &locals->values[slot];
74     if (values->host != values->curr) {
75     @@ -207,8 +218,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
76     values->curr = values->host;
77     }
78     }
79     - locals->registered = false;
80     - user_return_notifier_unregister(urn);
81     }
82    
83     static void shared_msr_update(unsigned slot, u32 msr)
84     @@ -3317,6 +3326,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
85     };
86     case KVM_SET_VAPIC_ADDR: {
87     struct kvm_vapic_addr va;
88     + int idx;
89    
90     r = -EINVAL;
91     if (!lapic_in_kernel(vcpu))
92     @@ -3324,7 +3334,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
93     r = -EFAULT;
94     if (copy_from_user(&va, argp, sizeof va))
95     goto out;
96     + idx = srcu_read_lock(&vcpu->kvm->srcu);
97     r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
98     + srcu_read_unlock(&vcpu->kvm->srcu, idx);
99     break;
100     }
101     case KVM_X86_SETUP_MCE: {
102     diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
103     index 2c835e356349..d445c5f1aeb1 100644
104     --- a/arch/x86/purgatory/Makefile
105     +++ b/arch/x86/purgatory/Makefile
106     @@ -12,6 +12,7 @@ targets += purgatory.ro
107    
108     KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large
109     KBUILD_CFLAGS += -m$(BITS)
110     +KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
111    
112     $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
113     $(call if_changed,ld)
114     diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
115     index ae60e6357d7b..e9b713675c7c 100644
116     --- a/drivers/base/power/main.c
117     +++ b/drivers/base/power/main.c
118     @@ -1022,6 +1022,8 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
119     TRACE_DEVICE(dev);
120     TRACE_SUSPEND(0);
121    
122     + dpm_wait_for_children(dev, async);
123     +
124     if (async_error)
125     goto Complete;
126    
127     @@ -1033,8 +1035,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
128     if (dev->power.syscore || dev->power.direct_complete)
129     goto Complete;
130    
131     - dpm_wait_for_children(dev, async);
132     -
133     if (dev->pm_domain) {
134     info = "noirq power domain ";
135     callback = pm_noirq_op(&dev->pm_domain->ops, state);
136     @@ -1169,6 +1169,8 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
137    
138     __pm_runtime_disable(dev, false);
139    
140     + dpm_wait_for_children(dev, async);
141     +
142     if (async_error)
143     goto Complete;
144    
145     @@ -1180,8 +1182,6 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
146     if (dev->power.syscore || dev->power.direct_complete)
147     goto Complete;
148    
149     - dpm_wait_for_children(dev, async);
150     -
151     if (dev->pm_domain) {
152     info = "late power domain ";
153     callback = pm_late_early_op(&dev->pm_domain->ops, state);
154     diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
155     index 251533d87c65..f261b1d292c7 100644
156     --- a/drivers/clk/mmp/clk-of-mmp2.c
157     +++ b/drivers/clk/mmp/clk-of-mmp2.c
158     @@ -313,7 +313,7 @@ static void __init mmp2_clk_init(struct device_node *np)
159     }
160    
161     pxa_unit->apmu_base = of_iomap(np, 1);
162     - if (!pxa_unit->mpmu_base) {
163     + if (!pxa_unit->apmu_base) {
164     pr_err("failed to map apmu registers\n");
165     return;
166     }
167     diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
168     index 64eaf4141c69..427f4bb08665 100644
169     --- a/drivers/clk/mmp/clk-of-pxa168.c
170     +++ b/drivers/clk/mmp/clk-of-pxa168.c
171     @@ -262,7 +262,7 @@ static void __init pxa168_clk_init(struct device_node *np)
172     }
173    
174     pxa_unit->apmu_base = of_iomap(np, 1);
175     - if (!pxa_unit->mpmu_base) {
176     + if (!pxa_unit->apmu_base) {
177     pr_err("failed to map apmu registers\n");
178     return;
179     }
180     diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
181     index 13d6173326a4..cdf5ba566d3b 100644
182     --- a/drivers/clk/mmp/clk-of-pxa910.c
183     +++ b/drivers/clk/mmp/clk-of-pxa910.c
184     @@ -282,7 +282,7 @@ static void __init pxa910_clk_init(struct device_node *np)
185     }
186    
187     pxa_unit->apmu_base = of_iomap(np, 1);
188     - if (!pxa_unit->mpmu_base) {
189     + if (!pxa_unit->apmu_base) {
190     pr_err("failed to map apmu registers\n");
191     return;
192     }
193     @@ -294,7 +294,7 @@ static void __init pxa910_clk_init(struct device_node *np)
194     }
195    
196     pxa_unit->apbcp_base = of_iomap(np, 3);
197     - if (!pxa_unit->mpmu_base) {
198     + if (!pxa_unit->apbcp_base) {
199     pr_err("failed to map apbcp registers\n");
200     return;
201     }
202     diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
203     index b3044219772c..2cde3796cb82 100644
204     --- a/drivers/crypto/caam/caamalg.c
205     +++ b/drivers/crypto/caam/caamalg.c
206     @@ -4542,6 +4542,15 @@ static int __init caam_algapi_init(void)
207     if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
208     continue;
209    
210     + /*
211     + * Check support for AES modes not available
212     + * on LP devices.
213     + */
214     + if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
215     + if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
216     + OP_ALG_AAI_XTS)
217     + continue;
218     +
219     t_alg = caam_alg_alloc(alg);
220     if (IS_ERR(t_alg)) {
221     err = PTR_ERR(t_alg);
222     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
223     index ff5566c69f7d..e8e962f7b5cb 100644
224     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
225     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
226     @@ -532,6 +532,7 @@ struct amdgpu_bo {
227     u64 metadata_flags;
228     void *metadata;
229     u32 metadata_size;
230     + unsigned prime_shared_count;
231     /* list of all virtual address to which this bo
232     * is associated to
233     */
234     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
235     index f82a2dd83874..3c7a7235988d 100644
236     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
237     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
238     @@ -117,7 +117,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
239     entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
240     }
241     entry->tv.bo = &entry->robj->tbo;
242     - entry->tv.shared = true;
243     + entry->tv.shared = !entry->robj->prime_shared_count;
244    
245     if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
246     gds_obj = entry->robj;
247     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
248     index 59f735a933a9..e6a7d30c3747 100644
249     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
250     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
251     @@ -77,20 +77,36 @@ struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
252     list_add_tail(&bo->list, &adev->gem.objects);
253     mutex_unlock(&adev->gem.mutex);
254    
255     + bo->prime_shared_count = 1;
256     return &bo->gem_base;
257     }
258    
259     int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
260     {
261     struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
262     - int ret = 0;
263     + long ret = 0;
264    
265     ret = amdgpu_bo_reserve(bo, false);
266     if (unlikely(ret != 0))
267     return ret;
268    
269     + /*
270     + * Wait for all shared fences to complete before we switch to future
271     + * use of exclusive fence on this prime shared bo.
272     + */
273     + ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
274     + MAX_SCHEDULE_TIMEOUT);
275     + if (unlikely(ret < 0)) {
276     + DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
277     + amdgpu_bo_unreserve(bo);
278     + return ret;
279     + }
280     +
281     /* pin buffer into GTT */
282     ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
283     + if (likely(ret == 0))
284     + bo->prime_shared_count++;
285     +
286     amdgpu_bo_unreserve(bo);
287     return ret;
288     }
289     @@ -105,6 +121,8 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
290     return;
291    
292     amdgpu_bo_unpin(bo);
293     + if (bo->prime_shared_count)
294     + bo->prime_shared_count--;
295     amdgpu_bo_unreserve(bo);
296     }
297    
298     diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
299     index 78fbee463628..65dbde778181 100644
300     --- a/drivers/i2c/Kconfig
301     +++ b/drivers/i2c/Kconfig
302     @@ -59,7 +59,6 @@ config I2C_CHARDEV
303    
304     config I2C_MUX
305     tristate "I2C bus multiplexing support"
306     - depends on HAS_IOMEM
307     help
308     Say Y here if you want the I2C core to support the ability to
309     handle multiplexed I2C bus topologies, by presenting each
310     diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
311     index f06b0e24673b..af2a63cb4056 100644
312     --- a/drivers/i2c/muxes/Kconfig
313     +++ b/drivers/i2c/muxes/Kconfig
314     @@ -63,6 +63,7 @@ config I2C_MUX_PINCTRL
315    
316     config I2C_MUX_REG
317     tristate "Register-based I2C multiplexer"
318     + depends on HAS_IOMEM
319     help
320     If you say yes to this option, support will be included for a
321     register based I2C multiplexer. This driver provides access to
322     diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
323     index 4d8e7f18a9af..941cd9b83941 100644
324     --- a/drivers/infiniband/core/cm.c
325     +++ b/drivers/infiniband/core/cm.c
326     @@ -80,6 +80,8 @@ static struct ib_cm {
327     __be32 random_id_operand;
328     struct list_head timewait_list;
329     struct workqueue_struct *wq;
330     + /* Sync on cm change port state */
331     + spinlock_t state_lock;
332     } cm;
333    
334     /* Counter indexes ordered by attribute ID */
335     @@ -161,6 +163,8 @@ struct cm_port {
336     struct ib_mad_agent *mad_agent;
337     struct kobject port_obj;
338     u8 port_num;
339     + struct list_head cm_priv_prim_list;
340     + struct list_head cm_priv_altr_list;
341     struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
342     };
343    
344     @@ -241,6 +245,12 @@ struct cm_id_private {
345     u8 service_timeout;
346     u8 target_ack_delay;
347    
348     + struct list_head prim_list;
349     + struct list_head altr_list;
350     + /* Indicates that the send port mad is registered and av is set */
351     + int prim_send_port_not_ready;
352     + int altr_send_port_not_ready;
353     +
354     struct list_head work_list;
355     atomic_t work_count;
356     };
357     @@ -259,20 +269,47 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
358     struct ib_mad_agent *mad_agent;
359     struct ib_mad_send_buf *m;
360     struct ib_ah *ah;
361     + struct cm_av *av;
362     + unsigned long flags, flags2;
363     + int ret = 0;
364    
365     + /* don't let the port to be released till the agent is down */
366     + spin_lock_irqsave(&cm.state_lock, flags2);
367     + spin_lock_irqsave(&cm.lock, flags);
368     + if (!cm_id_priv->prim_send_port_not_ready)
369     + av = &cm_id_priv->av;
370     + else if (!cm_id_priv->altr_send_port_not_ready &&
371     + (cm_id_priv->alt_av.port))
372     + av = &cm_id_priv->alt_av;
373     + else {
374     + pr_info("%s: not valid CM id\n", __func__);
375     + ret = -ENODEV;
376     + spin_unlock_irqrestore(&cm.lock, flags);
377     + goto out;
378     + }
379     + spin_unlock_irqrestore(&cm.lock, flags);
380     + /* Make sure the port haven't released the mad yet */
381     mad_agent = cm_id_priv->av.port->mad_agent;
382     - ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
383     - if (IS_ERR(ah))
384     - return PTR_ERR(ah);
385     + if (!mad_agent) {
386     + pr_info("%s: not a valid MAD agent\n", __func__);
387     + ret = -ENODEV;
388     + goto out;
389     + }
390     + ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
391     + if (IS_ERR(ah)) {
392     + ret = PTR_ERR(ah);
393     + goto out;
394     + }
395    
396     m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
397     - cm_id_priv->av.pkey_index,
398     + av->pkey_index,
399     0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
400     GFP_ATOMIC,
401     IB_MGMT_BASE_VERSION);
402     if (IS_ERR(m)) {
403     ib_destroy_ah(ah);
404     - return PTR_ERR(m);
405     + ret = PTR_ERR(m);
406     + goto out;
407     }
408    
409     /* Timeout set by caller if response is expected. */
410     @@ -282,7 +319,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
411     atomic_inc(&cm_id_priv->refcount);
412     m->context[0] = cm_id_priv;
413     *msg = m;
414     - return 0;
415     +
416     +out:
417     + spin_unlock_irqrestore(&cm.state_lock, flags2);
418     + return ret;
419     }
420    
421     static int cm_alloc_response_msg(struct cm_port *port,
422     @@ -352,7 +392,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
423     grh, &av->ah_attr);
424     }
425    
426     -static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
427     +static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
428     + struct cm_id_private *cm_id_priv)
429     {
430     struct cm_device *cm_dev;
431     struct cm_port *port = NULL;
432     @@ -387,7 +428,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
433     &av->ah_attr);
434     av->timeout = path->packet_life_time + 1;
435    
436     - return 0;
437     + spin_lock_irqsave(&cm.lock, flags);
438     + if (&cm_id_priv->av == av)
439     + list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
440     + else if (&cm_id_priv->alt_av == av)
441     + list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
442     + else
443     + ret = -EINVAL;
444     +
445     + spin_unlock_irqrestore(&cm.lock, flags);
446     +
447     + return ret;
448     }
449    
450     static int cm_alloc_id(struct cm_id_private *cm_id_priv)
451     @@ -677,6 +728,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
452     spin_lock_init(&cm_id_priv->lock);
453     init_completion(&cm_id_priv->comp);
454     INIT_LIST_HEAD(&cm_id_priv->work_list);
455     + INIT_LIST_HEAD(&cm_id_priv->prim_list);
456     + INIT_LIST_HEAD(&cm_id_priv->altr_list);
457     atomic_set(&cm_id_priv->work_count, -1);
458     atomic_set(&cm_id_priv->refcount, 1);
459     return &cm_id_priv->id;
460     @@ -892,6 +945,15 @@ retest:
461     break;
462     }
463    
464     + spin_lock_irq(&cm.lock);
465     + if (!list_empty(&cm_id_priv->altr_list) &&
466     + (!cm_id_priv->altr_send_port_not_ready))
467     + list_del(&cm_id_priv->altr_list);
468     + if (!list_empty(&cm_id_priv->prim_list) &&
469     + (!cm_id_priv->prim_send_port_not_ready))
470     + list_del(&cm_id_priv->prim_list);
471     + spin_unlock_irq(&cm.lock);
472     +
473     cm_free_id(cm_id->local_id);
474     cm_deref_id(cm_id_priv);
475     wait_for_completion(&cm_id_priv->comp);
476     @@ -1192,12 +1254,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
477     goto out;
478     }
479    
480     - ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
481     + ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
482     + cm_id_priv);
483     if (ret)
484     goto error1;
485     if (param->alternate_path) {
486     ret = cm_init_av_by_path(param->alternate_path,
487     - &cm_id_priv->alt_av);
488     + &cm_id_priv->alt_av, cm_id_priv);
489     if (ret)
490     goto error1;
491     }
492     @@ -1639,7 +1702,8 @@ static int cm_req_handler(struct cm_work *work)
493     cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
494    
495     memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
496     - ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
497     + ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
498     + cm_id_priv);
499     if (ret) {
500     ib_get_cached_gid(work->port->cm_dev->ib_device,
501     work->port->port_num, 0, &work->path[0].sgid,
502     @@ -1650,7 +1714,8 @@ static int cm_req_handler(struct cm_work *work)
503     goto rejected;
504     }
505     if (req_msg->alt_local_lid) {
506     - ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
507     + ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
508     + cm_id_priv);
509     if (ret) {
510     ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
511     &work->path[0].sgid,
512     @@ -2705,7 +2770,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
513     goto out;
514     }
515    
516     - ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
517     + ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
518     + cm_id_priv);
519     if (ret)
520     goto out;
521     cm_id_priv->alt_av.timeout =
522     @@ -2817,7 +2883,8 @@ static int cm_lap_handler(struct cm_work *work)
523     cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
524     work->mad_recv_wc->recv_buf.grh,
525     &cm_id_priv->av);
526     - cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
527     + cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
528     + cm_id_priv);
529     ret = atomic_inc_and_test(&cm_id_priv->work_count);
530     if (!ret)
531     list_add_tail(&work->list, &cm_id_priv->work_list);
532     @@ -3009,7 +3076,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
533     return -EINVAL;
534    
535     cm_id_priv = container_of(cm_id, struct cm_id_private, id);
536     - ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
537     + ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
538     if (ret)
539     goto out;
540    
541     @@ -3446,7 +3513,9 @@ out:
542     static int cm_migrate(struct ib_cm_id *cm_id)
543     {
544     struct cm_id_private *cm_id_priv;
545     + struct cm_av tmp_av;
546     unsigned long flags;
547     + int tmp_send_port_not_ready;
548     int ret = 0;
549    
550     cm_id_priv = container_of(cm_id, struct cm_id_private, id);
551     @@ -3455,7 +3524,14 @@ static int cm_migrate(struct ib_cm_id *cm_id)
552     (cm_id->lap_state == IB_CM_LAP_UNINIT ||
553     cm_id->lap_state == IB_CM_LAP_IDLE)) {
554     cm_id->lap_state = IB_CM_LAP_IDLE;
555     + /* Swap address vector */
556     + tmp_av = cm_id_priv->av;
557     cm_id_priv->av = cm_id_priv->alt_av;
558     + cm_id_priv->alt_av = tmp_av;
559     + /* Swap port send ready state */
560     + tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
561     + cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
562     + cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
563     } else
564     ret = -EINVAL;
565     spin_unlock_irqrestore(&cm_id_priv->lock, flags);
566     @@ -3875,6 +3951,9 @@ static void cm_add_one(struct ib_device *ib_device)
567     port->cm_dev = cm_dev;
568     port->port_num = i;
569    
570     + INIT_LIST_HEAD(&port->cm_priv_prim_list);
571     + INIT_LIST_HEAD(&port->cm_priv_altr_list);
572     +
573     ret = cm_create_port_fs(port);
574     if (ret)
575     goto error1;
576     @@ -3932,6 +4011,8 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
577     {
578     struct cm_device *cm_dev = client_data;
579     struct cm_port *port;
580     + struct cm_id_private *cm_id_priv;
581     + struct ib_mad_agent *cur_mad_agent;
582     struct ib_port_modify port_modify = {
583     .clr_port_cap_mask = IB_PORT_CM_SUP
584     };
585     @@ -3955,15 +4036,27 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
586    
587     port = cm_dev->port[i-1];
588     ib_modify_port(ib_device, port->port_num, 0, &port_modify);
589     + /* Mark all the cm_id's as not valid */
590     + spin_lock_irq(&cm.lock);
591     + list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
592     + cm_id_priv->altr_send_port_not_ready = 1;
593     + list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
594     + cm_id_priv->prim_send_port_not_ready = 1;
595     + spin_unlock_irq(&cm.lock);
596     /*
597     * We flush the queue here after the going_down set, this
598     * verify that no new works will be queued in the recv handler,
599     * after that we can call the unregister_mad_agent
600     */
601     flush_workqueue(cm.wq);
602     - ib_unregister_mad_agent(port->mad_agent);
603     + spin_lock_irq(&cm.state_lock);
604     + cur_mad_agent = port->mad_agent;
605     + port->mad_agent = NULL;
606     + spin_unlock_irq(&cm.state_lock);
607     + ib_unregister_mad_agent(cur_mad_agent);
608     cm_remove_port_fs(port);
609     }
610     +
611     device_unregister(cm_dev->device);
612     kfree(cm_dev);
613     }
614     @@ -3976,6 +4069,7 @@ static int __init ib_cm_init(void)
615     INIT_LIST_HEAD(&cm.device_list);
616     rwlock_init(&cm.device_lock);
617     spin_lock_init(&cm.lock);
618     + spin_lock_init(&cm.state_lock);
619     cm.listen_service_table = RB_ROOT;
620     cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
621     cm.remote_id_table = RB_ROOT;
622     diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
623     index 38acb3cfc545..04f3c0db9126 100644
624     --- a/drivers/infiniband/core/umem.c
625     +++ b/drivers/infiniband/core/umem.c
626     @@ -175,7 +175,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
627    
628     cur_base = addr & PAGE_MASK;
629    
630     - if (npages == 0) {
631     + if (npages == 0 || npages > UINT_MAX) {
632     ret = -EINVAL;
633     goto out;
634     }
635     diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
636     index d625c82d6c82..7becef27cbbe 100644
637     --- a/drivers/infiniband/core/uverbs_main.c
638     +++ b/drivers/infiniband/core/uverbs_main.c
639     @@ -244,12 +244,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
640     container_of(uobj, struct ib_uqp_object, uevent.uobject);
641    
642     idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
643     - if (qp != qp->real_qp) {
644     - ib_close_qp(qp);
645     - } else {
646     + if (qp == qp->real_qp)
647     ib_uverbs_detach_umcast(qp, uqp);
648     - ib_destroy_qp(qp);
649     - }
650     + ib_destroy_qp(qp);
651     ib_uverbs_release_uevent(file, &uqp->uevent);
652     kfree(uqp);
653     }
654     diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
655     index 06da56bda201..c007c766c61e 100644
656     --- a/drivers/infiniband/hw/mlx4/ah.c
657     +++ b/drivers/infiniband/hw/mlx4/ah.c
658     @@ -102,7 +102,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
659     if (vlan_tag < 0x1000)
660     vlan_tag |= (ah_attr->sl & 7) << 13;
661     ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
662     - ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
663     + ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
664     + if (ret < 0)
665     + return ERR_PTR(ret);
666     + ah->av.eth.gid_index = ret;
667     ah->av.eth.vlan = cpu_to_be16(vlan_tag);
668     if (ah_attr->static_rate) {
669     ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
670     diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
671     index b88fc8f5ab18..57e1a08925d9 100644
672     --- a/drivers/infiniband/hw/mlx4/cq.c
673     +++ b/drivers/infiniband/hw/mlx4/cq.c
674     @@ -253,11 +253,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
675     if (context)
676     if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
677     err = -EFAULT;
678     - goto err_dbmap;
679     + goto err_cq_free;
680     }
681    
682     return &cq->ibcq;
683    
684     +err_cq_free:
685     + mlx4_cq_free(dev->dev, &cq->mcq);
686     +
687     err_dbmap:
688     if (context)
689     mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
690     diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
691     index 8184267c7901..02c8deab1fff 100644
692     --- a/drivers/infiniband/hw/mlx5/cq.c
693     +++ b/drivers/infiniband/hw/mlx5/cq.c
694     @@ -787,8 +787,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
695     if (err)
696     goto err_create;
697     } else {
698     - /* for now choose 64 bytes till we have a proper interface */
699     - cqe_size = 64;
700     + cqe_size = cache_line_size() == 128 ? 128 : 64;
701     err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
702     &index, &inlen);
703     if (err)
704     diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
705     index bfc940ff9c8a..2a1fdcaa3044 100644
706     --- a/drivers/infiniband/hw/mlx5/main.c
707     +++ b/drivers/infiniband/hw/mlx5/main.c
708     @@ -947,13 +947,13 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
709     {
710     struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
711     struct ib_event ibev;
712     -
713     + bool fatal = false;
714     u8 port = 0;
715    
716     switch (event) {
717     case MLX5_DEV_EVENT_SYS_ERROR:
718     - ibdev->ib_active = false;
719     ibev.event = IB_EVENT_DEVICE_FATAL;
720     + fatal = true;
721     break;
722    
723     case MLX5_DEV_EVENT_PORT_UP:
724     @@ -998,6 +998,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
725    
726     if (ibdev->ib_active)
727     ib_dispatch_event(&ibev);
728     +
729     + if (fatal)
730     + ibdev->ib_active = false;
731     }
732    
733     static void get_ext_port_caps(struct mlx5_ib_dev *dev)
734     diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
735     index 88e80ec772f6..fe89e5e337d5 100644
736     --- a/drivers/mfd/intel-lpss.c
737     +++ b/drivers/mfd/intel-lpss.c
738     @@ -494,9 +494,6 @@ int intel_lpss_suspend(struct device *dev)
739     for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
740     lpss->priv_ctx[i] = readl(lpss->priv + i * 4);
741    
742     - /* Put the device into reset state */
743     - writel(0, lpss->priv + LPSS_PRIV_RESETS);
744     -
745     return 0;
746     }
747     EXPORT_SYMBOL_GPL(intel_lpss_suspend);
748     diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
749     index 60b60dc63ddd..022c9374ce8b 100644
750     --- a/drivers/mfd/mfd-core.c
751     +++ b/drivers/mfd/mfd-core.c
752     @@ -354,6 +354,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
753     clones[i]);
754     }
755    
756     + put_device(dev);
757     +
758     return 0;
759     }
760     EXPORT_SYMBOL(mfd_clone_cell);
761     diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
762     index d58c094f2f04..f7e6a09926dd 100644
763     --- a/drivers/net/wireless/iwlwifi/pcie/drv.c
764     +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
765     @@ -475,48 +475,64 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
766     MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
767    
768     #ifdef CONFIG_ACPI
769     -#define SPL_METHOD "SPLC"
770     -#define SPL_DOMAINTYPE_MODULE BIT(0)
771     -#define SPL_DOMAINTYPE_WIFI BIT(1)
772     -#define SPL_DOMAINTYPE_WIGIG BIT(2)
773     -#define SPL_DOMAINTYPE_RFEM BIT(3)
774     +#define ACPI_SPLC_METHOD "SPLC"
775     +#define ACPI_SPLC_DOMAIN_WIFI (0x07)
776    
777     -static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
778     +static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc)
779     {
780     - union acpi_object *limits, *domain_type, *power_limit;
781     -
782     - if (splx->type != ACPI_TYPE_PACKAGE ||
783     - splx->package.count != 2 ||
784     - splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
785     - splx->package.elements[0].integer.value != 0) {
786     - IWL_ERR(trans, "Unsupported splx structure\n");
787     + union acpi_object *data_pkg, *dflt_pwr_limit;
788     + int i;
789     +
790     + /* We need at least two elements, one for the revision and one
791     + * for the data itself. Also check that the revision is
792     + * supported (currently only revision 0).
793     + */
794     + if (splc->type != ACPI_TYPE_PACKAGE ||
795     + splc->package.count < 2 ||
796     + splc->package.elements[0].type != ACPI_TYPE_INTEGER ||
797     + splc->package.elements[0].integer.value != 0) {
798     + IWL_DEBUG_INFO(trans,
799     + "Unsupported structure returned by the SPLC method. Ignoring.\n");
800     return 0;
801     }
802    
803     - limits = &splx->package.elements[1];
804     - if (limits->type != ACPI_TYPE_PACKAGE ||
805     - limits->package.count < 2 ||
806     - limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
807     - limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
808     - IWL_ERR(trans, "Invalid limits element\n");
809     - return 0;
810     + /* loop through all the packages to find the one for WiFi */
811     + for (i = 1; i < splc->package.count; i++) {
812     + union acpi_object *domain;
813     +
814     + data_pkg = &splc->package.elements[i];
815     +
816     + /* Skip anything that is not a package with the right
817     + * amount of elements (i.e. at least 2 integers).
818     + */
819     + if (data_pkg->type != ACPI_TYPE_PACKAGE ||
820     + data_pkg->package.count < 2 ||
821     + data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
822     + data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
823     + continue;
824     +
825     + domain = &data_pkg->package.elements[0];
826     + if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI)
827     + break;
828     +
829     + data_pkg = NULL;
830     }
831    
832     - domain_type = &limits->package.elements[0];
833     - power_limit = &limits->package.elements[1];
834     - if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
835     - IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
836     + if (!data_pkg) {
837     + IWL_DEBUG_INFO(trans,
838     + "No element for the WiFi domain returned by the SPLC method.\n");
839     return 0;
840     }
841    
842     - return power_limit->integer.value;
843     + dflt_pwr_limit = &data_pkg->package.elements[1];
844     + return dflt_pwr_limit->integer.value;
845     }
846    
847     static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
848     {
849     acpi_handle pxsx_handle;
850     acpi_handle handle;
851     - struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
852     + struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL};
853     acpi_status status;
854    
855     pxsx_handle = ACPI_HANDLE(&pdev->dev);
856     @@ -527,23 +543,24 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
857     }
858    
859     /* Get the method's handle */
860     - status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
861     + status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD,
862     + &handle);
863     if (ACPI_FAILURE(status)) {
864     - IWL_DEBUG_INFO(trans, "SPL method not found\n");
865     + IWL_DEBUG_INFO(trans, "SPLC method not found\n");
866     return;
867     }
868    
869     /* Call SPLC with no arguments */
870     - status = acpi_evaluate_object(handle, NULL, NULL, &splx);
871     + status = acpi_evaluate_object(handle, NULL, NULL, &splc);
872     if (ACPI_FAILURE(status)) {
873     IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
874     return;
875     }
876    
877     - trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
878     + trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer);
879     IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
880     trans->dflt_pwr_limit);
881     - kfree(splx.pointer);
882     + kfree(splc.pointer);
883     }
884    
885     #else /* CONFIG_ACPI */
886     diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
887     index ec2e9c5fb993..22394fe30579 100644
888     --- a/drivers/rtc/rtc-omap.c
889     +++ b/drivers/rtc/rtc-omap.c
890     @@ -109,6 +109,7 @@
891     /* OMAP_RTC_OSC_REG bit fields: */
892     #define OMAP_RTC_OSC_32KCLK_EN BIT(6)
893     #define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3)
894     +#define OMAP_RTC_OSC_OSC32K_GZ_DISABLE BIT(4)
895    
896     /* OMAP_RTC_IRQWAKEEN bit fields: */
897     #define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1)
898     @@ -646,8 +647,9 @@ static int omap_rtc_probe(struct platform_device *pdev)
899     */
900     if (rtc->has_ext_clk) {
901     reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
902     - rtc_write(rtc, OMAP_RTC_OSC_REG,
903     - reg | OMAP_RTC_OSC_SEL_32KCLK_SRC);
904     + reg &= ~OMAP_RTC_OSC_OSC32K_GZ_DISABLE;
905     + reg |= OMAP_RTC_OSC_32KCLK_EN | OMAP_RTC_OSC_SEL_32KCLK_SRC;
906     + rtc_writel(rtc, OMAP_RTC_OSC_REG, reg);
907     }
908    
909     rtc->type->lock(rtc);
910     diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c
911     index d059ad4d0dbd..97ee1b46db69 100644
912     --- a/drivers/uwb/lc-rc.c
913     +++ b/drivers/uwb/lc-rc.c
914     @@ -56,8 +56,11 @@ static struct uwb_rc *uwb_rc_find_by_index(int index)
915     struct uwb_rc *rc = NULL;
916    
917     dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
918     - if (dev)
919     + if (dev) {
920     rc = dev_get_drvdata(dev);
921     + put_device(dev);
922     + }
923     +
924     return rc;
925     }
926    
927     @@ -467,7 +470,9 @@ struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
928     if (dev) {
929     rc = dev_get_drvdata(dev);
930     __uwb_rc_get(rc);
931     + put_device(dev);
932     }
933     +
934     return rc;
935     }
936     EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
937     @@ -520,8 +525,11 @@ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
938    
939     dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev,
940     find_rc_grandpa);
941     - if (dev)
942     + if (dev) {
943     rc = dev_get_drvdata(dev);
944     + put_device(dev);
945     + }
946     +
947     return rc;
948     }
949     EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
950     @@ -553,8 +561,10 @@ struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
951     struct uwb_rc *rc = NULL;
952    
953     dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev);
954     - if (dev)
955     + if (dev) {
956     rc = dev_get_drvdata(dev);
957     + put_device(dev);
958     + }
959    
960     return rc;
961     }
962     diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c
963     index c1304b8d4985..678e93741ae1 100644
964     --- a/drivers/uwb/pal.c
965     +++ b/drivers/uwb/pal.c
966     @@ -97,6 +97,8 @@ static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc)
967    
968     dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc);
969    
970     + put_device(dev);
971     +
972     return (dev != NULL);
973     }
974    
975     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
976     index b7e921d207fb..cd5914495ad7 100644
977     --- a/fs/ext4/ext4.h
978     +++ b/fs/ext4/ext4.h
979     @@ -221,6 +221,7 @@ struct ext4_io_submit {
980     #define EXT4_MAX_BLOCK_SIZE 65536
981     #define EXT4_MIN_BLOCK_LOG_SIZE 10
982     #define EXT4_MAX_BLOCK_LOG_SIZE 16
983     +#define EXT4_MAX_CLUSTER_LOG_SIZE 30
984     #ifdef __KERNEL__
985     # define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize)
986     #else
987     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
988     index 5bab28caa9d4..127155b82e6e 100644
989     --- a/fs/ext4/super.c
990     +++ b/fs/ext4/super.c
991     @@ -3394,7 +3394,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
992     if (blocksize < EXT4_MIN_BLOCK_SIZE ||
993     blocksize > EXT4_MAX_BLOCK_SIZE) {
994     ext4_msg(sb, KERN_ERR,
995     - "Unsupported filesystem blocksize %d", blocksize);
996     + "Unsupported filesystem blocksize %d (%d log_block_size)",
997     + blocksize, le32_to_cpu(es->s_log_block_size));
998     + goto failed_mount;
999     + }
1000     + if (le32_to_cpu(es->s_log_block_size) >
1001     + (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
1002     + ext4_msg(sb, KERN_ERR,
1003     + "Invalid log block size: %u",
1004     + le32_to_cpu(es->s_log_block_size));
1005     goto failed_mount;
1006     }
1007    
1008     @@ -3533,6 +3541,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1009     "block size (%d)", clustersize, blocksize);
1010     goto failed_mount;
1011     }
1012     + if (le32_to_cpu(es->s_log_cluster_size) >
1013     + (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
1014     + ext4_msg(sb, KERN_ERR,
1015     + "Invalid log cluster size: %u",
1016     + le32_to_cpu(es->s_log_cluster_size));
1017     + goto failed_mount;
1018     + }
1019     sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
1020     le32_to_cpu(es->s_log_block_size);
1021     sbi->s_clusters_per_group =
1022     diff --git a/fs/fuse/file.c b/fs/fuse/file.c
1023     index 682e79965c16..8821c380a71a 100644
1024     --- a/fs/fuse/file.c
1025     +++ b/fs/fuse/file.c
1026     @@ -1997,6 +1997,10 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
1027     {
1028     struct inode *inode = page->mapping->host;
1029    
1030     + /* Haven't copied anything? Skip zeroing, size extending, dirtying. */
1031     + if (!copied)
1032     + goto unlock;
1033     +
1034     if (!PageUptodate(page)) {
1035     /* Zero any unwritten bytes at the end of the page */
1036     size_t endoff = (pos + copied) & ~PAGE_CACHE_MASK;
1037     @@ -2007,6 +2011,8 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
1038    
1039     fuse_write_update_size(inode, pos + copied);
1040     set_page_dirty(page);
1041     +
1042     +unlock:
1043     unlock_page(page);
1044     page_cache_release(page);
1045    
1046     diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
1047     index 084452e34a12..bdff5ed57f10 100644
1048     --- a/kernel/power/suspend_test.c
1049     +++ b/kernel/power/suspend_test.c
1050     @@ -203,8 +203,10 @@ static int __init test_suspend(void)
1051    
1052     /* RTCs have initialized by now too ... can we use one? */
1053     dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm);
1054     - if (dev)
1055     + if (dev) {
1056     rtc = rtc_class_open(dev_name(dev));
1057     + put_device(dev);
1058     + }
1059     if (!rtc) {
1060     printk(warn_no_rtc);
1061     return 0;
1062     diff --git a/net/can/bcm.c b/net/can/bcm.c
1063     index 6863310d6973..8ef1afacad82 100644
1064     --- a/net/can/bcm.c
1065     +++ b/net/can/bcm.c
1066     @@ -1500,24 +1500,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1067     struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
1068     struct sock *sk = sock->sk;
1069     struct bcm_sock *bo = bcm_sk(sk);
1070     + int ret = 0;
1071    
1072     if (len < sizeof(*addr))
1073     return -EINVAL;
1074    
1075     - if (bo->bound)
1076     - return -EISCONN;
1077     + lock_sock(sk);
1078     +
1079     + if (bo->bound) {
1080     + ret = -EISCONN;
1081     + goto fail;
1082     + }
1083    
1084     /* bind a device to this socket */
1085     if (addr->can_ifindex) {
1086     struct net_device *dev;
1087    
1088     dev = dev_get_by_index(&init_net, addr->can_ifindex);
1089     - if (!dev)
1090     - return -ENODEV;
1091     -
1092     + if (!dev) {
1093     + ret = -ENODEV;
1094     + goto fail;
1095     + }
1096     if (dev->type != ARPHRD_CAN) {
1097     dev_put(dev);
1098     - return -ENODEV;
1099     + ret = -ENODEV;
1100     + goto fail;
1101     }
1102    
1103     bo->ifindex = dev->ifindex;
1104     @@ -1528,17 +1535,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1105     bo->ifindex = 0;
1106     }
1107    
1108     - bo->bound = 1;
1109     -
1110     if (proc_dir) {
1111     /* unique socket address as filename */
1112     sprintf(bo->procname, "%lu", sock_i_ino(sk));
1113     bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
1114     proc_dir,
1115     &bcm_proc_fops, sk);
1116     + if (!bo->bcm_proc_read) {
1117     + ret = -ENOMEM;
1118     + goto fail;
1119     + }
1120     }
1121    
1122     - return 0;
1123     + bo->bound = 1;
1124     +
1125     +fail:
1126     + release_sock(sk);
1127     +
1128     + return ret;
1129     }
1130    
1131     static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1132     diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
1133     index 9dec3bd1b63c..0a5df0cbaa28 100644
1134     --- a/net/netfilter/nft_dynset.c
1135     +++ b/net/netfilter/nft_dynset.c
1136     @@ -140,7 +140,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
1137     if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
1138     if (!(set->flags & NFT_SET_TIMEOUT))
1139     return -EINVAL;
1140     - timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT]));
1141     + timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
1142     + tb[NFTA_DYNSET_TIMEOUT])));
1143     }
1144    
1145     priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
1146     @@ -227,7 +228,8 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
1147     goto nla_put_failure;
1148     if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
1149     goto nla_put_failure;
1150     - if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout)))
1151     + if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
1152     + cpu_to_be64(jiffies_to_msecs(priv->timeout))))
1153     goto nla_put_failure;
1154     if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
1155     goto nla_put_failure;
1156     diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
1157     index 973e8c141567..17867e723a51 100755
1158     --- a/scripts/gcc-x86_64-has-stack-protector.sh
1159     +++ b/scripts/gcc-x86_64-has-stack-protector.sh
1160     @@ -1,6 +1,6 @@
1161     #!/bin/sh
1162    
1163     -echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
1164     +echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
1165     if [ "$?" -eq "0" ] ; then
1166     echo y
1167     else