Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.16/0104-4.16.5-all-fixes.patch



Revision 3108
Wed May 16 14:24:26 2018 UTC by niro
File size: 33982 byte(s)
-linux-4.16.5
1 niro 3108 diff --git a/Makefile b/Makefile
2     index d51175192ac1..6678a90f355b 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 16
9     -SUBLEVEL = 4
10     +SUBLEVEL = 5
11     EXTRAVERSION =
12     NAME = Fearless Coyote
13    
14     diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
15     index 5ee33a6e33bb..9bf2a1a4bd22 100644
16     --- a/arch/x86/kernel/acpi/boot.c
17     +++ b/arch/x86/kernel/acpi/boot.c
18     @@ -215,6 +215,10 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
19     apic_id = processor->local_apic_id;
20     enabled = processor->lapic_flags & ACPI_MADT_ENABLED;
21    
22     + /* Ignore invalid ID */
23     + if (apic_id == 0xffffffff)
24     + return 0;
25     +
26     /*
27     * We need to register disabled CPU as well to permit
28     * counting disabled CPUs. This allows us to size
29     diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
30     index fb4302738410..3615c0f255e9 100644
31     --- a/arch/x86/kernel/tsc.c
32     +++ b/arch/x86/kernel/tsc.c
33     @@ -317,7 +317,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
34     hpet2 -= hpet1;
35     tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
36     do_div(tmp, 1000000);
37     - do_div(deltatsc, tmp);
38     + deltatsc = div64_u64(deltatsc, tmp);
39    
40     return (unsigned long) deltatsc;
41     }
42     diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
43     index 763bb3bade63..8494dbae41b9 100644
44     --- a/arch/x86/kvm/mmu.c
45     +++ b/arch/x86/kvm/mmu.c
46     @@ -3031,7 +3031,7 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
47     return RET_PF_RETRY;
48     }
49    
50     - return RET_PF_EMULATE;
51     + return -EFAULT;
52     }
53    
54     static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
55     diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
56     index 21bffdcb2f20..557ed25b42e3 100644
57     --- a/drivers/clocksource/timer-imx-tpm.c
58     +++ b/drivers/clocksource/timer-imx-tpm.c
59     @@ -105,7 +105,7 @@ static int tpm_set_next_event(unsigned long delta,
60     * of writing CNT registers which may cause the min_delta event got
61     * missed, so we need add a ETIME check here in case it happened.
62     */
63     - return (int)((next - now) <= 0) ? -ETIME : 0;
64     + return (int)(next - now) <= 0 ? -ETIME : 0;
65     }
66    
67     static int tpm_set_state_oneshot(struct clock_event_device *evt)
68     diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
69     index 02a50929af67..e7f4fe2848a5 100644
70     --- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
71     +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
72     @@ -350,19 +350,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
73     {
74     uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
75     ssize_t ret;
76     + int retry;
77    
78     if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
79     return 0;
80    
81     - ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
82     - &tmds_oen, sizeof(tmds_oen));
83     - if (ret) {
84     - DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
85     - enable ? "enable" : "disable");
86     - return ret;
87     + /*
88     + * LSPCON adapters in low-power state may ignore the first write, so
89     + * read back and verify the written value a few times.
90     + */
91     + for (retry = 0; retry < 3; retry++) {
92     + uint8_t tmp;
93     +
94     + ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
95     + &tmds_oen, sizeof(tmds_oen));
96     + if (ret) {
97     + DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n",
98     + enable ? "enable" : "disable",
99     + retry + 1);
100     + return ret;
101     + }
102     +
103     + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
104     + &tmp, sizeof(tmp));
105     + if (ret) {
106     + DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n",
107     + enable ? "enabling" : "disabling",
108     + retry + 1);
109     + return ret;
110     + }
111     +
112     + if (tmp == tmds_oen)
113     + return 0;
114     }
115    
116     - return 0;
117     + DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n",
118     + enable ? "enabling" : "disabling");
119     +
120     + return -EIO;
121     }
122     EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
123    
124     diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
125     index 2fb7b34ef561..82cd2fbe2cb3 100644
126     --- a/drivers/gpu/drm/i915/gvt/dmabuf.c
127     +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
128     @@ -323,6 +323,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
129     struct intel_vgpu_fb_info *fb_info)
130     {
131     gvt_dmabuf->drm_format = fb_info->drm_format;
132     + gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
133     gvt_dmabuf->width = fb_info->width;
134     gvt_dmabuf->height = fb_info->height;
135     gvt_dmabuf->stride = fb_info->stride;
136     diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
137     index 021f722e2481..f34d7f1e6c4e 100644
138     --- a/drivers/gpu/drm/i915/gvt/kvmgt.c
139     +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
140     @@ -1284,7 +1284,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
141    
142     }
143    
144     - return 0;
145     + return -ENOTTY;
146     }
147    
148     static ssize_t
149     diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
150     index 3ab1ace2a6bd..df505868d65a 100644
151     --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
152     +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
153     @@ -728,7 +728,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
154    
155     err = radix_tree_insert(handles_vma, handle, vma);
156     if (unlikely(err)) {
157     - kfree(lut);
158     + kmem_cache_free(eb->i915->luts, lut);
159     goto err_obj;
160     }
161    
162     diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
163     index 4a01f62a392d..0ef7856d8155 100644
164     --- a/drivers/gpu/drm/i915/intel_audio.c
165     +++ b/drivers/gpu/drm/i915/intel_audio.c
166     @@ -729,7 +729,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
167     struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
168     u32 tmp;
169    
170     - if (!IS_GEN9_BC(dev_priv))
171     + if (!IS_GEN9(dev_priv))
172     return;
173    
174     i915_audio_component_get_power(kdev);
175     diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
176     index b49a2df44430..9b992e1b5996 100644
177     --- a/drivers/gpu/drm/i915/intel_bios.c
178     +++ b/drivers/gpu/drm/i915/intel_bios.c
179     @@ -1255,7 +1255,6 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
180     return;
181    
182     aux_channel = child->aux_channel;
183     - ddc_pin = child->ddc_pin;
184    
185     is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
186     is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
187     @@ -1302,9 +1301,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
188     DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
189    
190     if (is_dvi) {
191     - info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin);
192     -
193     - sanitize_ddc_pin(dev_priv, port);
194     + ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin);
195     + if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) {
196     + info->alternate_ddc_pin = ddc_pin;
197     + sanitize_ddc_pin(dev_priv, port);
198     + } else {
199     + DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, "
200     + "sticking to defaults\n",
201     + port_name(port), ddc_pin);
202     + }
203     }
204    
205     if (is_dp) {
206     diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
207     index 2decc8e2c79f..add9cc97a3b6 100644
208     --- a/drivers/gpu/drm/vc4/vc4_bo.c
209     +++ b/drivers/gpu/drm/vc4/vc4_bo.c
210     @@ -195,6 +195,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
211     vc4_bo_set_label(obj, -1);
212    
213     if (bo->validated_shader) {
214     + kfree(bo->validated_shader->uniform_addr_offsets);
215     kfree(bo->validated_shader->texture_samples);
216     kfree(bo->validated_shader);
217     bo->validated_shader = NULL;
218     @@ -591,6 +592,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
219     }
220    
221     if (bo->validated_shader) {
222     + kfree(bo->validated_shader->uniform_addr_offsets);
223     kfree(bo->validated_shader->texture_samples);
224     kfree(bo->validated_shader);
225     bo->validated_shader = NULL;
226     diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
227     index d3f15bf60900..7cf82b071de2 100644
228     --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
229     +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
230     @@ -942,6 +942,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
231     fail:
232     kfree(validation_state.branch_targets);
233     if (validated_shader) {
234     + kfree(validated_shader->uniform_addr_offsets);
235     kfree(validated_shader->texture_samples);
236     kfree(validated_shader);
237     }
238     diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
239     index a2e1aa86e133..6c424afea25f 100644
240     --- a/drivers/infiniband/hw/mlx5/qp.c
241     +++ b/drivers/infiniband/hw/mlx5/qp.c
242     @@ -3157,7 +3157,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
243     * If we moved a kernel QP to RESET, clean up all old CQ
244     * entries and reinitialize the QP.
245     */
246     - if (new_state == IB_QPS_RESET && !ibqp->uobject) {
247     + if (new_state == IB_QPS_RESET &&
248     + !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
249     mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
250     ibqp->srq ? to_msrq(ibqp->srq) : NULL);
251     if (send_cq != recv_cq)
252     diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
253     index 35b21f8152bb..20af54378cc0 100644
254     --- a/drivers/net/wireless/mac80211_hwsim.c
255     +++ b/drivers/net/wireless/mac80211_hwsim.c
256     @@ -3484,8 +3484,11 @@ static void __net_exit hwsim_exit_net(struct net *net)
257     list_del(&data->list);
258     rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
259     hwsim_rht_params);
260     - INIT_WORK(&data->destroy_work, destroy_radio);
261     - queue_work(hwsim_wq, &data->destroy_work);
262     + spin_unlock_bh(&hwsim_radio_lock);
263     + mac80211_hwsim_del_radio(data,
264     + wiphy_name(data->hw->wiphy),
265     + NULL);
266     + spin_lock_bh(&hwsim_radio_lock);
267     }
268     spin_unlock_bh(&hwsim_radio_lock);
269     }
270     diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
271     index 7ab5e0128f0c..1e9a20a4c06c 100644
272     --- a/fs/btrfs/delayed-ref.c
273     +++ b/fs/btrfs/delayed-ref.c
274     @@ -553,8 +553,10 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
275     struct btrfs_delayed_ref_head *head_ref,
276     struct btrfs_qgroup_extent_record *qrecord,
277     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
278     - int action, int is_data, int *qrecord_inserted_ret,
279     + int action, int is_data, int is_system,
280     + int *qrecord_inserted_ret,
281     int *old_ref_mod, int *new_ref_mod)
282     +
283     {
284     struct btrfs_delayed_ref_head *existing;
285     struct btrfs_delayed_ref_root *delayed_refs;
286     @@ -598,6 +600,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
287     head_ref->ref_mod = count_mod;
288     head_ref->must_insert_reserved = must_insert_reserved;
289     head_ref->is_data = is_data;
290     + head_ref->is_system = is_system;
291     head_ref->ref_tree = RB_ROOT;
292     INIT_LIST_HEAD(&head_ref->ref_add_list);
293     RB_CLEAR_NODE(&head_ref->href_node);
294     @@ -785,6 +788,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
295     struct btrfs_delayed_ref_root *delayed_refs;
296     struct btrfs_qgroup_extent_record *record = NULL;
297     int qrecord_inserted;
298     + int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
299    
300     BUG_ON(extent_op && extent_op->is_data);
301     ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
302     @@ -813,8 +817,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
303     */
304     head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
305     bytenr, num_bytes, 0, 0, action, 0,
306     - &qrecord_inserted, old_ref_mod,
307     - new_ref_mod);
308     + is_system, &qrecord_inserted,
309     + old_ref_mod, new_ref_mod);
310    
311     add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
312     num_bytes, parent, ref_root, level, action);
313     @@ -881,7 +885,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
314     */
315     head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
316     bytenr, num_bytes, ref_root, reserved,
317     - action, 1, &qrecord_inserted,
318     + action, 1, 0, &qrecord_inserted,
319     old_ref_mod, new_ref_mod);
320    
321     add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
322     @@ -911,9 +915,14 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
323     delayed_refs = &trans->transaction->delayed_refs;
324     spin_lock(&delayed_refs->lock);
325    
326     + /*
327     + * extent_ops just modify the flags of an extent and they don't result
328     + * in ref count changes, hence it's safe to pass false/0 for is_system
329     + * argument
330     + */
331     add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
332     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
333     - extent_op->is_data, NULL, NULL, NULL);
334     + extent_op->is_data, 0, NULL, NULL, NULL);
335    
336     spin_unlock(&delayed_refs->lock);
337     return 0;
338     diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
339     index c4f625e5a691..ba97d18cc168 100644
340     --- a/fs/btrfs/delayed-ref.h
341     +++ b/fs/btrfs/delayed-ref.h
342     @@ -139,6 +139,7 @@ struct btrfs_delayed_ref_head {
343     */
344     unsigned int must_insert_reserved:1;
345     unsigned int is_data:1;
346     + unsigned int is_system:1;
347     unsigned int processing:1;
348     };
349    
350     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
351     index c1618ab9fecf..16b54b1ff20e 100644
352     --- a/fs/btrfs/extent-tree.c
353     +++ b/fs/btrfs/extent-tree.c
354     @@ -2615,13 +2615,19 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
355     trace_run_delayed_ref_head(fs_info, head, 0);
356    
357     if (head->total_ref_mod < 0) {
358     - struct btrfs_block_group_cache *cache;
359     + struct btrfs_space_info *space_info;
360     + u64 flags;
361    
362     - cache = btrfs_lookup_block_group(fs_info, head->bytenr);
363     - ASSERT(cache);
364     - percpu_counter_add(&cache->space_info->total_bytes_pinned,
365     + if (head->is_data)
366     + flags = BTRFS_BLOCK_GROUP_DATA;
367     + else if (head->is_system)
368     + flags = BTRFS_BLOCK_GROUP_SYSTEM;
369     + else
370     + flags = BTRFS_BLOCK_GROUP_METADATA;
371     + space_info = __find_space_info(fs_info, flags);
372     + ASSERT(space_info);
373     + percpu_counter_add(&space_info->total_bytes_pinned,
374     -head->num_bytes);
375     - btrfs_put_block_group(cache);
376    
377     if (head->is_data) {
378     spin_lock(&delayed_refs->lock);
379     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
380     index c7b75dd58fad..ef1cf323832a 100644
381     --- a/fs/btrfs/inode.c
382     +++ b/fs/btrfs/inode.c
383     @@ -44,6 +44,7 @@
384     #include <linux/uio.h>
385     #include <linux/magic.h>
386     #include <linux/iversion.h>
387     +#include <asm/unaligned.h>
388     #include "ctree.h"
389     #include "disk-io.h"
390     #include "transaction.h"
391     @@ -5951,11 +5952,13 @@ static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
392     struct dir_entry *entry = addr;
393     char *name = (char *)(entry + 1);
394    
395     - ctx->pos = entry->offset;
396     - if (!dir_emit(ctx, name, entry->name_len, entry->ino,
397     - entry->type))
398     + ctx->pos = get_unaligned(&entry->offset);
399     + if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
400     + get_unaligned(&entry->ino),
401     + get_unaligned(&entry->type)))
402     return 1;
403     - addr += sizeof(struct dir_entry) + entry->name_len;
404     + addr += sizeof(struct dir_entry) +
405     + get_unaligned(&entry->name_len);
406     ctx->pos++;
407     }
408     return 0;
409     @@ -6045,14 +6048,15 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
410     }
411    
412     entry = addr;
413     - entry->name_len = name_len;
414     + put_unaligned(name_len, &entry->name_len);
415     name_ptr = (char *)(entry + 1);
416     read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
417     name_len);
418     - entry->type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
419     + put_unaligned(btrfs_filetype_table[btrfs_dir_type(leaf, di)],
420     + &entry->type);
421     btrfs_dir_item_key_to_cpu(leaf, di, &location);
422     - entry->ino = location.objectid;
423     - entry->offset = found_key.offset;
424     + put_unaligned(location.objectid, &entry->ino);
425     + put_unaligned(found_key.offset, &entry->offset);
426     entries++;
427     addr += sizeof(struct dir_entry) + name_len;
428     total_len += sizeof(struct dir_entry) + name_len;
429     diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
430     index 81ba6e0d88d8..925844343038 100644
431     --- a/fs/cifs/dir.c
432     +++ b/fs/cifs/dir.c
433     @@ -684,6 +684,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
434     goto mknod_out;
435     }
436    
437     + if (!S_ISCHR(mode) && !S_ISBLK(mode))
438     + goto mknod_out;
439     +
440     if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
441     goto mknod_out;
442    
443     @@ -692,10 +695,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
444    
445     buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
446     if (buf == NULL) {
447     - kfree(full_path);
448     rc = -ENOMEM;
449     - free_xid(xid);
450     - return rc;
451     + goto mknod_out;
452     }
453    
454     if (backup_cred(cifs_sb))
455     @@ -742,7 +743,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
456     pdev->minor = cpu_to_le64(MINOR(device_number));
457     rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
458     &bytes_written, iov, 1);
459     - } /* else if (S_ISFIFO) */
460     + }
461     tcon->ses->server->ops->close(xid, tcon, &fid);
462     d_drop(direntry);
463    
464     diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
465     index 52cccdbb7e14..34be5c5d027f 100644
466     --- a/fs/cifs/smbdirect.c
467     +++ b/fs/cifs/smbdirect.c
468     @@ -2194,6 +2194,8 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
469     goto done;
470     }
471     i++;
472     + if (i == rqst->rq_nvec)
473     + break;
474     }
475     start = i;
476     buflen = 0;
477     diff --git a/fs/super.c b/fs/super.c
478     index 672538ca9831..afbf4d220c27 100644
479     --- a/fs/super.c
480     +++ b/fs/super.c
481     @@ -166,6 +166,7 @@ static void destroy_unused_super(struct super_block *s)
482     security_sb_free(s);
483     put_user_ns(s->s_user_ns);
484     kfree(s->s_subtype);
485     + free_prealloced_shrinker(&s->s_shrink);
486     /* no delays needed */
487     destroy_super_work(&s->destroy_work);
488     }
489     @@ -251,6 +252,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
490     s->s_shrink.count_objects = super_cache_count;
491     s->s_shrink.batch = 1024;
492     s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
493     + if (prealloc_shrinker(&s->s_shrink))
494     + goto fail;
495     return s;
496    
497     fail:
498     @@ -517,11 +520,7 @@ struct super_block *sget_userns(struct file_system_type *type,
499     hlist_add_head(&s->s_instances, &type->fs_supers);
500     spin_unlock(&sb_lock);
501     get_filesystem(type);
502     - err = register_shrinker(&s->s_shrink);
503     - if (err) {
504     - deactivate_locked_super(s);
505     - s = ERR_PTR(err);
506     - }
507     + register_shrinker_prepared(&s->s_shrink);
508     return s;
509     }
510    
511     diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
512     index 14529511c4b8..065d605adea0 100644
513     --- a/include/linux/netfilter/x_tables.h
514     +++ b/include/linux/netfilter/x_tables.h
515     @@ -301,6 +301,7 @@ int xt_data_to_user(void __user *dst, const void *src,
516    
517     void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
518     struct xt_counters_info *info, bool compat);
519     +struct xt_counters *xt_counters_alloc(unsigned int counters);
520    
521     struct xt_table *xt_register_table(struct net *net,
522     const struct xt_table *table,
523     @@ -509,7 +510,7 @@ void xt_compat_unlock(u_int8_t af);
524    
525     int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
526     void xt_compat_flush_offsets(u_int8_t af);
527     -void xt_compat_init_offsets(u_int8_t af, unsigned int number);
528     +int xt_compat_init_offsets(u8 af, unsigned int number);
529     int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
530    
531     int xt_compat_match_offset(const struct xt_match *match);
532     diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
533     index 388ff2936a87..6794490f25b2 100644
534     --- a/include/linux/shrinker.h
535     +++ b/include/linux/shrinker.h
536     @@ -75,6 +75,9 @@ struct shrinker {
537     #define SHRINKER_NUMA_AWARE (1 << 0)
538     #define SHRINKER_MEMCG_AWARE (1 << 1)
539    
540     -extern int register_shrinker(struct shrinker *);
541     -extern void unregister_shrinker(struct shrinker *);
542     +extern int prealloc_shrinker(struct shrinker *shrinker);
543     +extern void register_shrinker_prepared(struct shrinker *shrinker);
544     +extern int register_shrinker(struct shrinker *shrinker);
545     +extern void unregister_shrinker(struct shrinker *shrinker);
546     +extern void free_prealloced_shrinker(struct shrinker *shrinker);
547     #endif
548     diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
549     index 772a43fea825..73cc26e321de 100644
550     --- a/kernel/events/callchain.c
551     +++ b/kernel/events/callchain.c
552     @@ -119,19 +119,22 @@ int get_callchain_buffers(int event_max_stack)
553     goto exit;
554     }
555    
556     + /*
557     + * If requesting per event more than the global cap,
558     + * return a different error to help userspace figure
559     + * this out.
560     + *
561     + * And also do it here so that we have &callchain_mutex held.
562     + */
563     + if (event_max_stack > sysctl_perf_event_max_stack) {
564     + err = -EOVERFLOW;
565     + goto exit;
566     + }
567     +
568     if (count > 1) {
569     /* If the allocation failed, give up */
570     if (!callchain_cpus_entries)
571     err = -ENOMEM;
572     - /*
573     - * If requesting per event more than the global cap,
574     - * return a different error to help userspace figure
575     - * this out.
576     - *
577     - * And also do it here so that we have &callchain_mutex held.
578     - */
579     - if (event_max_stack > sysctl_perf_event_max_stack)
580     - err = -EOVERFLOW;
581     goto exit;
582     }
583    
584     diff --git a/kernel/events/core.c b/kernel/events/core.c
585     index b32bc0698a2a..ca7298760c83 100644
586     --- a/kernel/events/core.c
587     +++ b/kernel/events/core.c
588     @@ -9730,9 +9730,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
589     * __u16 sample size limit.
590     */
591     if (attr->sample_stack_user >= USHRT_MAX)
592     - ret = -EINVAL;
593     + return -EINVAL;
594     else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
595     - ret = -EINVAL;
596     + return -EINVAL;
597     }
598    
599     if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
600     diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
601     index ec09ce9a6012..639321bf2e39 100644
602     --- a/kernel/time/alarmtimer.c
603     +++ b/kernel/time/alarmtimer.c
604     @@ -326,6 +326,17 @@ static int alarmtimer_resume(struct device *dev)
605     }
606     #endif
607    
608     +static void
609     +__alarm_init(struct alarm *alarm, enum alarmtimer_type type,
610     + enum alarmtimer_restart (*function)(struct alarm *, ktime_t))
611     +{
612     + timerqueue_init(&alarm->node);
613     + alarm->timer.function = alarmtimer_fired;
614     + alarm->function = function;
615     + alarm->type = type;
616     + alarm->state = ALARMTIMER_STATE_INACTIVE;
617     +}
618     +
619     /**
620     * alarm_init - Initialize an alarm structure
621     * @alarm: ptr to alarm to be initialized
622     @@ -335,13 +346,9 @@ static int alarmtimer_resume(struct device *dev)
623     void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
624     enum alarmtimer_restart (*function)(struct alarm *, ktime_t))
625     {
626     - timerqueue_init(&alarm->node);
627     hrtimer_init(&alarm->timer, alarm_bases[type].base_clockid,
628     - HRTIMER_MODE_ABS);
629     - alarm->timer.function = alarmtimer_fired;
630     - alarm->function = function;
631     - alarm->type = type;
632     - alarm->state = ALARMTIMER_STATE_INACTIVE;
633     + HRTIMER_MODE_ABS);
634     + __alarm_init(alarm, type, function);
635     }
636     EXPORT_SYMBOL_GPL(alarm_init);
637    
638     @@ -719,6 +726,8 @@ static int alarmtimer_do_nsleep(struct alarm *alarm, ktime_t absexp,
639    
640     __set_current_state(TASK_RUNNING);
641    
642     + destroy_hrtimer_on_stack(&alarm->timer);
643     +
644     if (!alarm->data)
645     return 0;
646    
647     @@ -740,6 +749,15 @@ static int alarmtimer_do_nsleep(struct alarm *alarm, ktime_t absexp,
648     return -ERESTART_RESTARTBLOCK;
649     }
650    
651     +static void
652     +alarm_init_on_stack(struct alarm *alarm, enum alarmtimer_type type,
653     + enum alarmtimer_restart (*function)(struct alarm *, ktime_t))
654     +{
655     + hrtimer_init_on_stack(&alarm->timer, alarm_bases[type].base_clockid,
656     + HRTIMER_MODE_ABS);
657     + __alarm_init(alarm, type, function);
658     +}
659     +
660     /**
661     * alarm_timer_nsleep_restart - restartblock alarmtimer nsleep
662     * @restart: ptr to restart block
663     @@ -752,7 +770,7 @@ static long __sched alarm_timer_nsleep_restart(struct restart_block *restart)
664     ktime_t exp = restart->nanosleep.expires;
665     struct alarm alarm;
666    
667     - alarm_init(&alarm, type, alarmtimer_nsleep_wakeup);
668     + alarm_init_on_stack(&alarm, type, alarmtimer_nsleep_wakeup);
669    
670     return alarmtimer_do_nsleep(&alarm, exp, type);
671     }
672     @@ -784,7 +802,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
673     if (!capable(CAP_WAKE_ALARM))
674     return -EPERM;
675    
676     - alarm_init(&alarm, type, alarmtimer_nsleep_wakeup);
677     + alarm_init_on_stack(&alarm, type, alarmtimer_nsleep_wakeup);
678    
679     exp = timespec64_to_ktime(*tsreq);
680     /* Convert (if necessary) to absolute time */
681     diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
682     index 2541bd89f20e..5a6251ac6f7a 100644
683     --- a/kernel/time/posix-cpu-timers.c
684     +++ b/kernel/time/posix-cpu-timers.c
685     @@ -1205,10 +1205,12 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
686     u64 *newval, u64 *oldval)
687     {
688     u64 now;
689     + int ret;
690    
691     WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
692     + ret = cpu_timer_sample_group(clock_idx, tsk, &now);
693    
694     - if (oldval && cpu_timer_sample_group(clock_idx, tsk, &now) != -EINVAL) {
695     + if (oldval && ret != -EINVAL) {
696     /*
697     * We are setting itimer. The *oldval is absolute and we update
698     * it to be relative, *newval argument is relative and we update
699     diff --git a/mm/vmscan.c b/mm/vmscan.c
700     index cd5dc3faaa57..f6a1587f9f31 100644
701     --- a/mm/vmscan.c
702     +++ b/mm/vmscan.c
703     @@ -258,7 +258,7 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone
704     /*
705     * Add a shrinker callback to be called from the vm.
706     */
707     -int register_shrinker(struct shrinker *shrinker)
708     +int prealloc_shrinker(struct shrinker *shrinker)
709     {
710     size_t size = sizeof(*shrinker->nr_deferred);
711    
712     @@ -268,10 +268,29 @@ int register_shrinker(struct shrinker *shrinker)
713     shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
714     if (!shrinker->nr_deferred)
715     return -ENOMEM;
716     + return 0;
717     +}
718     +
719     +void free_prealloced_shrinker(struct shrinker *shrinker)
720     +{
721     + kfree(shrinker->nr_deferred);
722     + shrinker->nr_deferred = NULL;
723     +}
724    
725     +void register_shrinker_prepared(struct shrinker *shrinker)
726     +{
727     down_write(&shrinker_rwsem);
728     list_add_tail(&shrinker->list, &shrinker_list);
729     up_write(&shrinker_rwsem);
730     +}
731     +
732     +int register_shrinker(struct shrinker *shrinker)
733     +{
734     + int err = prealloc_shrinker(shrinker);
735     +
736     + if (err)
737     + return err;
738     + register_shrinker_prepared(shrinker);
739     return 0;
740     }
741     EXPORT_SYMBOL(register_shrinker);
742     diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
743     index a94d23b0a9af..752112539753 100644
744     --- a/net/bridge/netfilter/ebtables.c
745     +++ b/net/bridge/netfilter/ebtables.c
746     @@ -1821,10 +1821,14 @@ static int compat_table_info(const struct ebt_table_info *info,
747     {
748     unsigned int size = info->entries_size;
749     const void *entries = info->entries;
750     + int ret;
751    
752     newinfo->entries_size = size;
753    
754     - xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
755     + ret = xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
756     + if (ret)
757     + return ret;
758     +
759     return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
760     entries, newinfo);
761     }
762     @@ -2268,7 +2272,9 @@ static int compat_do_replace(struct net *net, void __user *user,
763    
764     xt_compat_lock(NFPROTO_BRIDGE);
765    
766     - xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
767     + ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
768     + if (ret < 0)
769     + goto out_unlock;
770     ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
771     if (ret < 0)
772     goto out_unlock;
773     diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
774     index e3e420f3ba7b..b940d6aaa94f 100644
775     --- a/net/ipv4/netfilter/arp_tables.c
776     +++ b/net/ipv4/netfilter/arp_tables.c
777     @@ -781,7 +781,9 @@ static int compat_table_info(const struct xt_table_info *info,
778     memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
779     newinfo->initial_entries = 0;
780     loc_cpu_entry = info->entries;
781     - xt_compat_init_offsets(NFPROTO_ARP, info->number);
782     + ret = xt_compat_init_offsets(NFPROTO_ARP, info->number);
783     + if (ret)
784     + return ret;
785     xt_entry_foreach(iter, loc_cpu_entry, info->size) {
786     ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
787     if (ret != 0)
788     @@ -895,7 +897,7 @@ static int __do_replace(struct net *net, const char *name,
789     struct arpt_entry *iter;
790    
791     ret = 0;
792     - counters = vzalloc(num_counters * sizeof(struct xt_counters));
793     + counters = xt_counters_alloc(num_counters);
794     if (!counters) {
795     ret = -ENOMEM;
796     goto out;
797     @@ -1167,7 +1169,7 @@ static int translate_compat_table(struct xt_table_info **pinfo,
798     struct compat_arpt_entry *iter0;
799     struct arpt_replace repl;
800     unsigned int size;
801     - int ret = 0;
802     + int ret;
803    
804     info = *pinfo;
805     entry0 = *pentry0;
806     @@ -1176,7 +1178,9 @@ static int translate_compat_table(struct xt_table_info **pinfo,
807    
808     j = 0;
809     xt_compat_lock(NFPROTO_ARP);
810     - xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
811     + ret = xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
812     + if (ret)
813     + goto out_unlock;
814     /* Walk through entries, checking offsets. */
815     xt_entry_foreach(iter0, entry0, compatr->size) {
816     ret = check_compat_entry_size_and_hooks(iter0, info, &size,
817     diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
818     index e38395a8dcf2..34f22450da5b 100644
819     --- a/net/ipv4/netfilter/ip_tables.c
820     +++ b/net/ipv4/netfilter/ip_tables.c
821     @@ -945,7 +945,9 @@ static int compat_table_info(const struct xt_table_info *info,
822     memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
823     newinfo->initial_entries = 0;
824     loc_cpu_entry = info->entries;
825     - xt_compat_init_offsets(AF_INET, info->number);
826     + ret = xt_compat_init_offsets(AF_INET, info->number);
827     + if (ret)
828     + return ret;
829     xt_entry_foreach(iter, loc_cpu_entry, info->size) {
830     ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
831     if (ret != 0)
832     @@ -1057,7 +1059,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
833     struct ipt_entry *iter;
834    
835     ret = 0;
836     - counters = vzalloc(num_counters * sizeof(struct xt_counters));
837     + counters = xt_counters_alloc(num_counters);
838     if (!counters) {
839     ret = -ENOMEM;
840     goto out;
841     @@ -1418,7 +1420,9 @@ translate_compat_table(struct net *net,
842    
843     j = 0;
844     xt_compat_lock(AF_INET);
845     - xt_compat_init_offsets(AF_INET, compatr->num_entries);
846     + ret = xt_compat_init_offsets(AF_INET, compatr->num_entries);
847     + if (ret)
848     + goto out_unlock;
849     /* Walk through entries, checking offsets. */
850     xt_entry_foreach(iter0, entry0, compatr->size) {
851     ret = check_compat_entry_size_and_hooks(iter0, info, &size,
852     diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
853     index 62358b93bbac..41db3c8f469f 100644
854     --- a/net/ipv6/netfilter/ip6_tables.c
855     +++ b/net/ipv6/netfilter/ip6_tables.c
856     @@ -962,7 +962,9 @@ static int compat_table_info(const struct xt_table_info *info,
857     memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
858     newinfo->initial_entries = 0;
859     loc_cpu_entry = info->entries;
860     - xt_compat_init_offsets(AF_INET6, info->number);
861     + ret = xt_compat_init_offsets(AF_INET6, info->number);
862     + if (ret)
863     + return ret;
864     xt_entry_foreach(iter, loc_cpu_entry, info->size) {
865     ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
866     if (ret != 0)
867     @@ -1075,7 +1077,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
868     struct ip6t_entry *iter;
869    
870     ret = 0;
871     - counters = vzalloc(num_counters * sizeof(struct xt_counters));
872     + counters = xt_counters_alloc(num_counters);
873     if (!counters) {
874     ret = -ENOMEM;
875     goto out;
876     @@ -1425,7 +1427,7 @@ translate_compat_table(struct net *net,
877     struct compat_ip6t_entry *iter0;
878     struct ip6t_replace repl;
879     unsigned int size;
880     - int ret = 0;
881     + int ret;
882    
883     info = *pinfo;
884     entry0 = *pentry0;
885     @@ -1434,7 +1436,9 @@ translate_compat_table(struct net *net,
886    
887     j = 0;
888     xt_compat_lock(AF_INET6);
889     - xt_compat_init_offsets(AF_INET6, compatr->num_entries);
890     + ret = xt_compat_init_offsets(AF_INET6, compatr->num_entries);
891     + if (ret)
892     + goto out_unlock;
893     /* Walk through entries, checking offsets. */
894     xt_entry_foreach(iter0, entry0, compatr->size) {
895     ret = check_compat_entry_size_and_hooks(iter0, info, &size,
896     diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
897     index 4aa01c90e9d1..a94c0e3cdcf0 100644
898     --- a/net/netfilter/x_tables.c
899     +++ b/net/netfilter/x_tables.c
900     @@ -40,6 +40,7 @@ MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
901     MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
902    
903     #define XT_PCPU_BLOCK_SIZE 4096
904     +#define XT_MAX_TABLE_SIZE (512 * 1024 * 1024)
905    
906     struct compat_delta {
907     unsigned int offset; /* offset in kernel */
908     @@ -553,14 +554,8 @@ int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
909     {
910     struct xt_af *xp = &xt[af];
911    
912     - if (!xp->compat_tab) {
913     - if (!xp->number)
914     - return -EINVAL;
915     - xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
916     - if (!xp->compat_tab)
917     - return -ENOMEM;
918     - xp->cur = 0;
919     - }
920     + if (WARN_ON(!xp->compat_tab))
921     + return -ENOMEM;
922    
923     if (xp->cur >= xp->number)
924     return -EINVAL;
925     @@ -603,10 +598,28 @@ int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
926     }
927     EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
928    
929     -void xt_compat_init_offsets(u_int8_t af, unsigned int number)
930     +int xt_compat_init_offsets(u8 af, unsigned int number)
931     {
932     + size_t mem;
933     +
934     + if (!number || number > (INT_MAX / sizeof(struct compat_delta)))
935     + return -EINVAL;
936     +
937     + if (WARN_ON(xt[af].compat_tab))
938     + return -EINVAL;
939     +
940     + mem = sizeof(struct compat_delta) * number;
941     + if (mem > XT_MAX_TABLE_SIZE)
942     + return -ENOMEM;
943     +
944     + xt[af].compat_tab = vmalloc(mem);
945     + if (!xt[af].compat_tab)
946     + return -ENOMEM;
947     +
948     xt[af].number = number;
949     xt[af].cur = 0;
950     +
951     + return 0;
952     }
953     EXPORT_SYMBOL(xt_compat_init_offsets);
954    
955     @@ -805,6 +818,9 @@ EXPORT_SYMBOL(xt_check_entry_offsets);
956     */
957     unsigned int *xt_alloc_entry_offsets(unsigned int size)
958     {
959     + if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
960     + return NULL;
961     +
962     return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO);
963    
964     }
965     @@ -1029,7 +1045,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
966     struct xt_table_info *info = NULL;
967     size_t sz = sizeof(*info) + size;
968    
969     - if (sz < sizeof(*info))
970     + if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
971     return NULL;
972    
973     /* __GFP_NORETRY is not fully supported by kvmalloc but it should
974     @@ -1198,6 +1214,21 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
975     return 0;
976     }
977    
978     +struct xt_counters *xt_counters_alloc(unsigned int counters)
979     +{
980     + struct xt_counters *mem;
981     +
982     + if (counters == 0 || counters > INT_MAX / sizeof(*mem))
983     + return NULL;
984     +
985     + counters *= sizeof(*mem);
986     + if (counters > XT_MAX_TABLE_SIZE)
987     + return NULL;
988     +
989     + return vzalloc(counters);
990     +}
991     +EXPORT_SYMBOL(xt_counters_alloc);
992     +
993     struct xt_table_info *
994     xt_replace_table(struct xt_table *table,
995     unsigned int num_counters,