Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0243-4.9.144-all-fixes.patch

Parent Directory | Revision Log


Revision 3296 - (hide annotations) (download)
Tue Mar 12 10:43:06 2019 UTC (5 years, 3 months ago) by niro
File size: 191186 byte(s)
-linux-4.9.144
1 niro 3296 diff --git a/Makefile b/Makefile
2     index 8ec52cd19526..c62b2b529724 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 143
9     +SUBLEVEL = 144
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     @@ -802,6 +802,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
14     # disable pointer signed / unsigned warnings in gcc 4.0
15     KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
16    
17     +# disable stringop warnings in gcc 8+
18     +KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
19     +
20     # disable invalid "can't wrap" optimizations for signed / pointers
21     KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
22    
23     diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
24     index b7b78cb09a37..c7a081c583b9 100644
25     --- a/arch/arc/Kconfig
26     +++ b/arch/arc/Kconfig
27     @@ -105,7 +105,7 @@ endmenu
28    
29     choice
30     prompt "ARC Instruction Set"
31     - default ISA_ARCOMPACT
32     + default ISA_ARCV2
33    
34     config ISA_ARCOMPACT
35     bool "ARCompact ISA"
36     diff --git a/arch/arc/Makefile b/arch/arc/Makefile
37     index a3b456008201..fd79faab7892 100644
38     --- a/arch/arc/Makefile
39     +++ b/arch/arc/Makefile
40     @@ -8,7 +8,7 @@
41    
42     UTS_MACHINE := arc
43    
44     -KBUILD_DEFCONFIG := nsim_700_defconfig
45     +KBUILD_DEFCONFIG := nsim_hs_defconfig
46    
47     cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
48     cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
49     diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
50     index dd623199bb48..cdb00af5aeac 100644
51     --- a/arch/arc/configs/axs101_defconfig
52     +++ b/arch/arc/configs/axs101_defconfig
53     @@ -15,6 +15,7 @@ CONFIG_PERF_EVENTS=y
54     # CONFIG_VM_EVENT_COUNTERS is not set
55     # CONFIG_SLUB_DEBUG is not set
56     # CONFIG_COMPAT_BRK is not set
57     +CONFIG_ISA_ARCOMPACT=y
58     CONFIG_MODULES=y
59     CONFIG_MODULE_FORCE_LOAD=y
60     CONFIG_MODULE_UNLOAD=y
61     @@ -96,6 +97,7 @@ CONFIG_VFAT_FS=y
62     CONFIG_NTFS_FS=y
63     CONFIG_TMPFS=y
64     CONFIG_NFS_FS=y
65     +CONFIG_NFS_V3_ACL=y
66     CONFIG_NLS_CODEPAGE_437=y
67     CONFIG_NLS_ISO8859_1=y
68     # CONFIG_ENABLE_WARN_DEPRECATED is not set
69     diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
70     index 2e0d7d74b8ee..02c766d2c1e0 100644
71     --- a/arch/arc/configs/axs103_defconfig
72     +++ b/arch/arc/configs/axs103_defconfig
73     @@ -97,6 +97,7 @@ CONFIG_VFAT_FS=y
74     CONFIG_NTFS_FS=y
75     CONFIG_TMPFS=y
76     CONFIG_NFS_FS=y
77     +CONFIG_NFS_V3_ACL=y
78     CONFIG_NLS_CODEPAGE_437=y
79     CONFIG_NLS_ISO8859_1=y
80     # CONFIG_ENABLE_WARN_DEPRECATED is not set
81     diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
82     index ec188fca2cc9..8c16093d639f 100644
83     --- a/arch/arc/configs/axs103_smp_defconfig
84     +++ b/arch/arc/configs/axs103_smp_defconfig
85     @@ -98,6 +98,7 @@ CONFIG_VFAT_FS=y
86     CONFIG_NTFS_FS=y
87     CONFIG_TMPFS=y
88     CONFIG_NFS_FS=y
89     +CONFIG_NFS_V3_ACL=y
90     CONFIG_NLS_CODEPAGE_437=y
91     CONFIG_NLS_ISO8859_1=y
92     # CONFIG_ENABLE_WARN_DEPRECATED is not set
93     diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig
94     index ede625c76216..397742c6c84e 100644
95     --- a/arch/arc/configs/nps_defconfig
96     +++ b/arch/arc/configs/nps_defconfig
97     @@ -15,6 +15,7 @@ CONFIG_SYSCTL_SYSCALL=y
98     CONFIG_EMBEDDED=y
99     CONFIG_PERF_EVENTS=y
100     # CONFIG_COMPAT_BRK is not set
101     +CONFIG_ISA_ARCOMPACT=y
102     CONFIG_KPROBES=y
103     CONFIG_MODULES=y
104     CONFIG_MODULE_FORCE_LOAD=y
105     @@ -75,6 +76,7 @@ CONFIG_PROC_KCORE=y
106     CONFIG_TMPFS=y
107     # CONFIG_MISC_FILESYSTEMS is not set
108     CONFIG_NFS_FS=y
109     +CONFIG_NFS_V3_ACL=y
110     CONFIG_ROOT_NFS=y
111     CONFIG_DEBUG_INFO=y
112     # CONFIG_ENABLE_WARN_DEPRECATED is not set
113     diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
114     index df609fce999b..cbc6d068d1f4 100644
115     --- a/arch/arc/configs/nsim_700_defconfig
116     +++ b/arch/arc/configs/nsim_700_defconfig
117     @@ -16,6 +16,7 @@ CONFIG_EMBEDDED=y
118     CONFIG_PERF_EVENTS=y
119     # CONFIG_SLUB_DEBUG is not set
120     # CONFIG_COMPAT_BRK is not set
121     +CONFIG_ISA_ARCOMPACT=y
122     CONFIG_KPROBES=y
123     CONFIG_MODULES=y
124     # CONFIG_LBDAF is not set
125     diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
126     index 5680daa65471..d34b838a71c1 100644
127     --- a/arch/arc/configs/nsimosci_defconfig
128     +++ b/arch/arc/configs/nsimosci_defconfig
129     @@ -16,6 +16,7 @@ CONFIG_EMBEDDED=y
130     CONFIG_PERF_EVENTS=y
131     # CONFIG_SLUB_DEBUG is not set
132     # CONFIG_COMPAT_BRK is not set
133     +CONFIG_ISA_ARCOMPACT=y
134     CONFIG_KPROBES=y
135     CONFIG_MODULES=y
136     # CONFIG_LBDAF is not set
137     @@ -70,5 +71,6 @@ CONFIG_EXT2_FS_XATTR=y
138     CONFIG_TMPFS=y
139     # CONFIG_MISC_FILESYSTEMS is not set
140     CONFIG_NFS_FS=y
141     +CONFIG_NFS_V3_ACL=y
142     # CONFIG_ENABLE_WARN_DEPRECATED is not set
143     # CONFIG_ENABLE_MUST_CHECK is not set
144     diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
145     index 87decc491c58..e8c7dd703f13 100644
146     --- a/arch/arc/configs/nsimosci_hs_defconfig
147     +++ b/arch/arc/configs/nsimosci_hs_defconfig
148     @@ -69,5 +69,6 @@ CONFIG_EXT2_FS_XATTR=y
149     CONFIG_TMPFS=y
150     # CONFIG_MISC_FILESYSTEMS is not set
151     CONFIG_NFS_FS=y
152     +CONFIG_NFS_V3_ACL=y
153     # CONFIG_ENABLE_WARN_DEPRECATED is not set
154     # CONFIG_ENABLE_MUST_CHECK is not set
155     diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
156     index 4d14684dc74a..100d7bf0035b 100644
157     --- a/arch/arc/configs/nsimosci_hs_smp_defconfig
158     +++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
159     @@ -80,6 +80,7 @@ CONFIG_EXT2_FS_XATTR=y
160     CONFIG_TMPFS=y
161     # CONFIG_MISC_FILESYSTEMS is not set
162     CONFIG_NFS_FS=y
163     +CONFIG_NFS_V3_ACL=y
164     # CONFIG_ENABLE_WARN_DEPRECATED is not set
165     # CONFIG_ENABLE_MUST_CHECK is not set
166     CONFIG_FTRACE=y
167     diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
168     index 4c5118384eb5..493966c0dcbe 100644
169     --- a/arch/arc/configs/tb10x_defconfig
170     +++ b/arch/arc/configs/tb10x_defconfig
171     @@ -19,6 +19,7 @@ CONFIG_KALLSYMS_ALL=y
172     # CONFIG_AIO is not set
173     CONFIG_EMBEDDED=y
174     # CONFIG_COMPAT_BRK is not set
175     +CONFIG_ISA_ARCOMPACT=y
176     CONFIG_SLAB=y
177     CONFIG_MODULES=y
178     CONFIG_MODULE_FORCE_LOAD=y
179     diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
180     index c0d6a010751a..b1d38afeba70 100644
181     --- a/arch/arc/configs/vdk_hs38_defconfig
182     +++ b/arch/arc/configs/vdk_hs38_defconfig
183     @@ -88,6 +88,7 @@ CONFIG_NTFS_FS=y
184     CONFIG_TMPFS=y
185     CONFIG_JFFS2_FS=y
186     CONFIG_NFS_FS=y
187     +CONFIG_NFS_V3_ACL=y
188     CONFIG_NLS_CODEPAGE_437=y
189     CONFIG_NLS_ISO8859_1=y
190     # CONFIG_ENABLE_WARN_DEPRECATED is not set
191     diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
192     index 969b206d6c67..2d103f73a265 100644
193     --- a/arch/arc/configs/vdk_hs38_smp_defconfig
194     +++ b/arch/arc/configs/vdk_hs38_smp_defconfig
195     @@ -87,6 +87,7 @@ CONFIG_NTFS_FS=y
196     CONFIG_TMPFS=y
197     CONFIG_JFFS2_FS=y
198     CONFIG_NFS_FS=y
199     +CONFIG_NFS_V3_ACL=y
200     CONFIG_NLS_CODEPAGE_437=y
201     CONFIG_NLS_ISO8859_1=y
202     # CONFIG_ENABLE_WARN_DEPRECATED is not set
203     diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
204     index d87882513ee3..deee3ac8c29f 100644
205     --- a/arch/mips/include/asm/syscall.h
206     +++ b/arch/mips/include/asm/syscall.h
207     @@ -51,7 +51,7 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
208     #ifdef CONFIG_64BIT
209     case 4: case 5: case 6: case 7:
210     #ifdef CONFIG_MIPS32_O32
211     - if (test_thread_flag(TIF_32BIT_REGS))
212     + if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
213     return get_user(*arg, (int *)usp + n);
214     else
215     #endif
216     diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
217     index 0696142048d5..9194b04cb689 100644
218     --- a/arch/mips/ralink/mt7620.c
219     +++ b/arch/mips/ralink/mt7620.c
220     @@ -81,7 +81,7 @@ static struct rt2880_pmx_func pcie_rst_grp[] = {
221     };
222     static struct rt2880_pmx_func nd_sd_grp[] = {
223     FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15),
224     - FUNC("sd", MT7620_GPIO_MODE_SD, 45, 15)
225     + FUNC("sd", MT7620_GPIO_MODE_SD, 47, 13)
226     };
227    
228     static struct rt2880_pmx_group mt7620a_pinmux_data[] = {
229     diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
230     index 6136a18152af..2bd96b4df140 100644
231     --- a/arch/x86/include/asm/suspend_64.h
232     +++ b/arch/x86/include/asm/suspend_64.h
233     @@ -42,8 +42,7 @@ struct saved_context {
234     set_debugreg((thread)->debugreg##register, register)
235    
236     /* routines for saving/restoring kernel state */
237     -extern int acpi_save_state_mem(void);
238     -extern char core_restore_code;
239     -extern char restore_registers;
240     +extern char core_restore_code[];
241     +extern char restore_registers[];
242    
243     #endif /* _ASM_X86_SUSPEND_64_H */
244     diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
245     index 4bc35ac28d11..fa1b0e3c8a06 100644
246     --- a/arch/x86/kvm/svm.c
247     +++ b/arch/x86/kvm/svm.c
248     @@ -1333,20 +1333,23 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu, int index)
249     static int avic_init_access_page(struct kvm_vcpu *vcpu)
250     {
251     struct kvm *kvm = vcpu->kvm;
252     - int ret;
253     + int ret = 0;
254    
255     + mutex_lock(&kvm->slots_lock);
256     if (kvm->arch.apic_access_page_done)
257     - return 0;
258     + goto out;
259    
260     - ret = x86_set_memory_region(kvm,
261     - APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
262     - APIC_DEFAULT_PHYS_BASE,
263     - PAGE_SIZE);
264     + ret = __x86_set_memory_region(kvm,
265     + APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
266     + APIC_DEFAULT_PHYS_BASE,
267     + PAGE_SIZE);
268     if (ret)
269     - return ret;
270     + goto out;
271    
272     kvm->arch.apic_access_page_done = true;
273     - return 0;
274     +out:
275     + mutex_unlock(&kvm->slots_lock);
276     + return ret;
277     }
278    
279     static int avic_init_backing_page(struct kvm_vcpu *vcpu)
280     diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
281     index 0cb1dd461529..fef485b789ca 100644
282     --- a/arch/x86/power/hibernate_64.c
283     +++ b/arch/x86/power/hibernate_64.c
284     @@ -126,7 +126,7 @@ static int relocate_restore_code(void)
285     if (!relocated_restore_code)
286     return -ENOMEM;
287    
288     - memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);
289     + memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);
290    
291     /* Make the page containing the relocated code executable */
292     pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
293     @@ -197,8 +197,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
294    
295     if (max_size < sizeof(struct restore_data_record))
296     return -EOVERFLOW;
297     - rdr->jump_address = (unsigned long)&restore_registers;
298     - rdr->jump_address_phys = __pa_symbol(&restore_registers);
299     + rdr->jump_address = (unsigned long)restore_registers;
300     + rdr->jump_address_phys = __pa_symbol(restore_registers);
301     rdr->cr3 = restore_cr3;
302     rdr->magic = RESTORE_MAGIC;
303     return 0;
304     diff --git a/drivers/android/binder.c b/drivers/android/binder.c
305     index 49199bd2ab93..80499f421a29 100644
306     --- a/drivers/android/binder.c
307     +++ b/drivers/android/binder.c
308     @@ -302,6 +302,7 @@ struct binder_proc {
309     struct mm_struct *vma_vm_mm;
310     struct task_struct *tsk;
311     struct files_struct *files;
312     + struct mutex files_lock;
313     struct hlist_node deferred_work_node;
314     int deferred_work;
315     void *buffer;
316     @@ -375,20 +376,26 @@ binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
317    
318     static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
319     {
320     - struct files_struct *files = proc->files;
321     unsigned long rlim_cur;
322     unsigned long irqs;
323     + int ret;
324    
325     - if (files == NULL)
326     - return -ESRCH;
327     -
328     - if (!lock_task_sighand(proc->tsk, &irqs))
329     - return -EMFILE;
330     -
331     + mutex_lock(&proc->files_lock);
332     + if (proc->files == NULL) {
333     + ret = -ESRCH;
334     + goto err;
335     + }
336     + if (!lock_task_sighand(proc->tsk, &irqs)) {
337     + ret = -EMFILE;
338     + goto err;
339     + }
340     rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
341     unlock_task_sighand(proc->tsk, &irqs);
342    
343     - return __alloc_fd(files, 0, rlim_cur, flags);
344     + ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
345     +err:
346     + mutex_unlock(&proc->files_lock);
347     + return ret;
348     }
349    
350     /*
351     @@ -397,8 +404,10 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
352     static void task_fd_install(
353     struct binder_proc *proc, unsigned int fd, struct file *file)
354     {
355     + mutex_lock(&proc->files_lock);
356     if (proc->files)
357     __fd_install(proc->files, fd, file);
358     + mutex_unlock(&proc->files_lock);
359     }
360    
361     /*
362     @@ -408,9 +417,11 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
363     {
364     int retval;
365    
366     - if (proc->files == NULL)
367     - return -ESRCH;
368     -
369     + mutex_lock(&proc->files_lock);
370     + if (proc->files == NULL) {
371     + retval = -ESRCH;
372     + goto err;
373     + }
374     retval = __close_fd(proc->files, fd);
375     /* can't restart close syscall because file table entry was cleared */
376     if (unlikely(retval == -ERESTARTSYS ||
377     @@ -418,7 +429,8 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
378     retval == -ERESTARTNOHAND ||
379     retval == -ERESTART_RESTARTBLOCK))
380     retval = -EINTR;
381     -
382     +err:
383     + mutex_unlock(&proc->files_lock);
384     return retval;
385     }
386    
387     @@ -2946,7 +2958,9 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
388     binder_insert_free_buffer(proc, buffer);
389     proc->free_async_space = proc->buffer_size / 2;
390     barrier();
391     + mutex_lock(&proc->files_lock);
392     proc->files = get_files_struct(current);
393     + mutex_unlock(&proc->files_lock);
394     proc->vma = vma;
395     proc->vma_vm_mm = vma->vm_mm;
396    
397     @@ -2982,6 +2996,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
398     return -ENOMEM;
399     get_task_struct(current->group_leader);
400     proc->tsk = current->group_leader;
401     + mutex_init(&proc->files_lock);
402     INIT_LIST_HEAD(&proc->todo);
403     init_waitqueue_head(&proc->wait);
404     proc->default_priority = task_nice(current);
405     @@ -3220,9 +3235,11 @@ static void binder_deferred_func(struct work_struct *work)
406    
407     files = NULL;
408     if (defer & BINDER_DEFERRED_PUT_FILES) {
409     + mutex_lock(&proc->files_lock);
410     files = proc->files;
411     if (files)
412     proc->files = NULL;
413     + mutex_unlock(&proc->files_lock);
414     }
415    
416     if (defer & BINDER_DEFERRED_FLUSH)
417     diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
418     index fb9976254224..fabfeeb537ae 100644
419     --- a/drivers/gpu/drm/ast/ast_main.c
420     +++ b/drivers/gpu/drm/ast/ast_main.c
421     @@ -556,7 +556,8 @@ int ast_driver_unload(struct drm_device *dev)
422     drm_mode_config_cleanup(dev);
423    
424     ast_mm_fini(ast);
425     - pci_iounmap(dev->pdev, ast->ioregs);
426     + if (ast->ioregs != ast->regs + AST_IO_MM_OFFSET)
427     + pci_iounmap(dev->pdev, ast->ioregs);
428     pci_iounmap(dev->pdev, ast->regs);
429     kfree(ast);
430     return 0;
431     diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
432     index 6b143514a566..56b2dd9a5b68 100644
433     --- a/drivers/gpu/drm/drm_auth.c
434     +++ b/drivers/gpu/drm/drm_auth.c
435     @@ -133,6 +133,7 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
436    
437     lockdep_assert_held_once(&dev->master_mutex);
438    
439     + WARN_ON(fpriv->is_master);
440     old_master = fpriv->master;
441     fpriv->master = drm_master_create(dev);
442     if (!fpriv->master) {
443     @@ -161,6 +162,7 @@ out_err:
444     /* drop references and restore old master on failure */
445     drm_master_put(&fpriv->master);
446     fpriv->master = old_master;
447     + fpriv->is_master = 0;
448    
449     return ret;
450     }
451     diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
452     index 92e3f93ee682..06d61e654f59 100644
453     --- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
454     +++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
455     @@ -99,7 +99,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
456     /* Wait for for the pipe enable to take effect. */
457     for (count = 0; count < COUNT_MAX; count++) {
458     temp = REG_READ(map->conf);
459     - if ((temp & PIPEACONF_PIPE_STATE) == 1)
460     + if (temp & PIPEACONF_PIPE_STATE)
461     break;
462     }
463     }
464     diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
465     index e097780752f6..863d030786e5 100644
466     --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
467     +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
468     @@ -1446,8 +1446,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
469     }
470    
471     /* The CEC module handles HDMI hotplug detection */
472     - cec_np = of_find_compatible_node(np->parent, NULL,
473     - "mediatek,mt8173-cec");
474     + cec_np = of_get_compatible_child(np->parent, "mediatek,mt8173-cec");
475     if (!cec_np) {
476     dev_err(dev, "Failed to find CEC node\n");
477     return -EINVAL;
478     @@ -1457,8 +1456,10 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
479     if (!cec_pdev) {
480     dev_err(hdmi->dev, "Waiting for CEC device %s\n",
481     cec_np->full_name);
482     + of_node_put(cec_np);
483     return -EPROBE_DEFER;
484     }
485     + of_node_put(cec_np);
486     hdmi->cec_dev = &cec_pdev->dev;
487    
488     /*
489     diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
490     index d7da1dca765f..b1daf5c16117 100644
491     --- a/drivers/infiniband/hw/mlx5/main.c
492     +++ b/drivers/infiniband/hw/mlx5/main.c
493     @@ -710,31 +710,26 @@ enum mlx5_ib_width {
494     MLX5_IB_WIDTH_12X = 1 << 4
495     };
496    
497     -static int translate_active_width(struct ib_device *ibdev, u8 active_width,
498     +static void translate_active_width(struct ib_device *ibdev, u8 active_width,
499     u8 *ib_width)
500     {
501     struct mlx5_ib_dev *dev = to_mdev(ibdev);
502     - int err = 0;
503    
504     - if (active_width & MLX5_IB_WIDTH_1X) {
505     + if (active_width & MLX5_IB_WIDTH_1X)
506     *ib_width = IB_WIDTH_1X;
507     - } else if (active_width & MLX5_IB_WIDTH_2X) {
508     - mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
509     - (int)active_width);
510     - err = -EINVAL;
511     - } else if (active_width & MLX5_IB_WIDTH_4X) {
512     + else if (active_width & MLX5_IB_WIDTH_4X)
513     *ib_width = IB_WIDTH_4X;
514     - } else if (active_width & MLX5_IB_WIDTH_8X) {
515     + else if (active_width & MLX5_IB_WIDTH_8X)
516     *ib_width = IB_WIDTH_8X;
517     - } else if (active_width & MLX5_IB_WIDTH_12X) {
518     + else if (active_width & MLX5_IB_WIDTH_12X)
519     *ib_width = IB_WIDTH_12X;
520     - } else {
521     - mlx5_ib_dbg(dev, "Invalid active_width %d\n",
522     + else {
523     + mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
524     (int)active_width);
525     - err = -EINVAL;
526     + *ib_width = IB_WIDTH_4X;
527     }
528    
529     - return err;
530     + return;
531     }
532    
533     static int mlx5_mtu_to_ib_mtu(int mtu)
534     @@ -842,10 +837,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
535     if (err)
536     goto out;
537    
538     - err = translate_active_width(ibdev, ib_link_width_oper,
539     - &props->active_width);
540     - if (err)
541     - goto out;
542     + translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
543     +
544     err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
545     if (err)
546     goto out;
547     diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
548     index bc6f5bb6c524..d46424d4b71e 100644
549     --- a/drivers/infiniband/ulp/iser/iser_verbs.c
550     +++ b/drivers/infiniband/ulp/iser/iser_verbs.c
551     @@ -1110,7 +1110,9 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
552     IB_MR_CHECK_SIG_STATUS, &mr_status);
553     if (ret) {
554     pr_err("ib_check_mr_status failed, ret %d\n", ret);
555     - goto err;
556     + /* Not a lot we can do, return ambiguous guard error */
557     + *sector = 0;
558     + return 0x1;
559     }
560    
561     if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
562     @@ -1138,9 +1140,6 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
563     }
564    
565     return 0;
566     -err:
567     - /* Not alot we can do here, return ambiguous guard error */
568     - return 0x1;
569     }
570    
571     void iser_err_comp(struct ib_wc *wc, const char *type)
572     diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
573     index 2e52015634f9..f55dcdf99bc5 100644
574     --- a/drivers/input/joystick/xpad.c
575     +++ b/drivers/input/joystick/xpad.c
576     @@ -483,18 +483,18 @@ static const u8 xboxone_hori_init[] = {
577     };
578    
579     /*
580     - * This packet is required for some of the PDP pads to start
581     + * This packet is required for most (all?) of the PDP pads to start
582     * sending input reports. These pads include: (0x0e6f:0x02ab),
583     - * (0x0e6f:0x02a4).
584     + * (0x0e6f:0x02a4), (0x0e6f:0x02a6).
585     */
586     static const u8 xboxone_pdp_init1[] = {
587     0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
588     };
589    
590     /*
591     - * This packet is required for some of the PDP pads to start
592     + * This packet is required for most (all?) of the PDP pads to start
593     * sending input reports. These pads include: (0x0e6f:0x02ab),
594     - * (0x0e6f:0x02a4).
595     + * (0x0e6f:0x02a4), (0x0e6f:0x02a6).
596     */
597     static const u8 xboxone_pdp_init2[] = {
598     0x06, 0x20, 0x00, 0x02, 0x01, 0x00
599     @@ -530,12 +530,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
600     XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
601     XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
602     XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
603     - XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1),
604     - XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
605     - XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1),
606     - XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2),
607     - XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1),
608     - XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2),
609     + XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1),
610     + XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2),
611     XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
612     XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
613     XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
614     diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
615     index 795fa353de7c..c64d87442a62 100644
616     --- a/drivers/input/keyboard/matrix_keypad.c
617     +++ b/drivers/input/keyboard/matrix_keypad.c
618     @@ -405,7 +405,7 @@ matrix_keypad_parse_dt(struct device *dev)
619     struct matrix_keypad_platform_data *pdata;
620     struct device_node *np = dev->of_node;
621     unsigned int *gpios;
622     - int i, nrow, ncol;
623     + int ret, i, nrow, ncol;
624    
625     if (!np) {
626     dev_err(dev, "device lacks DT data\n");
627     @@ -447,12 +447,19 @@ matrix_keypad_parse_dt(struct device *dev)
628     return ERR_PTR(-ENOMEM);
629     }
630    
631     - for (i = 0; i < pdata->num_row_gpios; i++)
632     - gpios[i] = of_get_named_gpio(np, "row-gpios", i);
633     + for (i = 0; i < nrow; i++) {
634     + ret = of_get_named_gpio(np, "row-gpios", i);
635     + if (ret < 0)
636     + return ERR_PTR(ret);
637     + gpios[i] = ret;
638     + }
639    
640     - for (i = 0; i < pdata->num_col_gpios; i++)
641     - gpios[pdata->num_row_gpios + i] =
642     - of_get_named_gpio(np, "col-gpios", i);
643     + for (i = 0; i < ncol; i++) {
644     + ret = of_get_named_gpio(np, "col-gpios", i);
645     + if (ret < 0)
646     + return ERR_PTR(ret);
647     + gpios[nrow + i] = ret;
648     + }
649    
650     pdata->row_gpios = gpios;
651     pdata->col_gpios = &gpios[pdata->num_row_gpios];
652     @@ -479,10 +486,8 @@ static int matrix_keypad_probe(struct platform_device *pdev)
653     pdata = dev_get_platdata(&pdev->dev);
654     if (!pdata) {
655     pdata = matrix_keypad_parse_dt(&pdev->dev);
656     - if (IS_ERR(pdata)) {
657     - dev_err(&pdev->dev, "no platform data defined\n");
658     + if (IS_ERR(pdata))
659     return PTR_ERR(pdata);
660     - }
661     } else if (!pdata->keymap_data) {
662     dev_err(&pdev->dev, "no keymap data defined\n");
663     return -EINVAL;
664     diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
665     index b3119589a444..471984ec2db0 100644
666     --- a/drivers/input/mouse/elan_i2c_core.c
667     +++ b/drivers/input/mouse/elan_i2c_core.c
668     @@ -1253,6 +1253,9 @@ static const struct acpi_device_id elan_acpi_id[] = {
669     { "ELAN0618", 0 },
670     { "ELAN061C", 0 },
671     { "ELAN061D", 0 },
672     + { "ELAN061E", 0 },
673     + { "ELAN0620", 0 },
674     + { "ELAN0621", 0 },
675     { "ELAN0622", 0 },
676     { "ELAN1000", 0 },
677     { }
678     diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
679     index 68f19ca57f96..245eb02d0c4e 100644
680     --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
681     +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
682     @@ -3039,10 +3039,10 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
683     s_big_ram_defs[big_ram_id].num_of_blocks[dev_data->chip_id];
684     ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
685    
686     - strncpy(type_name, s_big_ram_defs[big_ram_id].instance_name,
687     - strlen(s_big_ram_defs[big_ram_id].instance_name));
688     - strncpy(mem_name, s_big_ram_defs[big_ram_id].instance_name,
689     - strlen(s_big_ram_defs[big_ram_id].instance_name));
690     + strscpy(type_name, s_big_ram_defs[big_ram_id].instance_name,
691     + sizeof(type_name));
692     + strscpy(mem_name, s_big_ram_defs[big_ram_id].instance_name,
693     + sizeof(mem_name));
694    
695     /* Dump memory header */
696     offset += qed_grc_dump_mem_hdr(p_hwfn,
697     diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
698     index 94a356bbb6b9..61419d1b4543 100644
699     --- a/drivers/net/wireless/ath/wil6210/wmi.c
700     +++ b/drivers/net/wireless/ath/wil6210/wmi.c
701     @@ -1302,8 +1302,14 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
702     };
703     int rc;
704     u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
705     - struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
706     + struct wmi_set_appie_cmd *cmd;
707    
708     + if (len < ie_len) {
709     + rc = -EINVAL;
710     + goto out;
711     + }
712     +
713     + cmd = kzalloc(len, GFP_KERNEL);
714     if (!cmd) {
715     rc = -ENOMEM;
716     goto out;
717     diff --git a/drivers/reset/core.c b/drivers/reset/core.c
718     index b8ae1dbd4c17..188205a55261 100644
719     --- a/drivers/reset/core.c
720     +++ b/drivers/reset/core.c
721     @@ -135,11 +135,16 @@ EXPORT_SYMBOL_GPL(devm_reset_controller_register);
722     * @rstc: reset controller
723     *
724     * Calling this on a shared reset controller is an error.
725     + *
726     + * If rstc is NULL it is an optional reset and the function will just
727     + * return 0.
728     */
729     int reset_control_reset(struct reset_control *rstc)
730     {
731     - if (WARN_ON(IS_ERR_OR_NULL(rstc)) ||
732     - WARN_ON(rstc->shared))
733     + if (!rstc)
734     + return 0;
735     +
736     + if (WARN_ON(IS_ERR(rstc)))
737     return -EINVAL;
738    
739     if (rstc->rcdev->ops->reset)
740     @@ -159,10 +164,16 @@ EXPORT_SYMBOL_GPL(reset_control_reset);
741     *
742     * For shared reset controls a driver cannot expect the hw's registers and
743     * internal state to be reset, but must be prepared for this to happen.
744     + *
745     + * If rstc is NULL it is an optional reset and the function will just
746     + * return 0.
747     */
748     int reset_control_assert(struct reset_control *rstc)
749     {
750     - if (WARN_ON(IS_ERR_OR_NULL(rstc)))
751     + if (!rstc)
752     + return 0;
753     +
754     + if (WARN_ON(IS_ERR(rstc)))
755     return -EINVAL;
756    
757     if (!rstc->rcdev->ops->assert)
758     @@ -185,10 +196,16 @@ EXPORT_SYMBOL_GPL(reset_control_assert);
759     * @rstc: reset controller
760     *
761     * After calling this function, the reset is guaranteed to be deasserted.
762     + *
763     + * If rstc is NULL it is an optional reset and the function will just
764     + * return 0.
765     */
766     int reset_control_deassert(struct reset_control *rstc)
767     {
768     - if (WARN_ON(IS_ERR_OR_NULL(rstc)))
769     + if (!rstc)
770     + return 0;
771     +
772     + if (WARN_ON(IS_ERR(rstc)))
773     return -EINVAL;
774    
775     if (!rstc->rcdev->ops->deassert)
776     @@ -206,12 +223,15 @@ EXPORT_SYMBOL_GPL(reset_control_deassert);
777     /**
778     * reset_control_status - returns a negative errno if not supported, a
779     * positive value if the reset line is asserted, or zero if the reset
780     - * line is not asserted.
781     + * line is not asserted or if the desc is NULL (optional reset).
782     * @rstc: reset controller
783     */
784     int reset_control_status(struct reset_control *rstc)
785     {
786     - if (WARN_ON(IS_ERR_OR_NULL(rstc)))
787     + if (!rstc)
788     + return 0;
789     +
790     + if (WARN_ON(IS_ERR(rstc)))
791     return -EINVAL;
792    
793     if (rstc->rcdev->ops->status)
794     @@ -221,7 +241,7 @@ int reset_control_status(struct reset_control *rstc)
795     }
796     EXPORT_SYMBOL_GPL(reset_control_status);
797    
798     -static struct reset_control *__reset_control_get(
799     +static struct reset_control *__reset_control_get_internal(
800     struct reset_controller_dev *rcdev,
801     unsigned int index, int shared)
802     {
803     @@ -254,7 +274,7 @@ static struct reset_control *__reset_control_get(
804     return rstc;
805     }
806    
807     -static void __reset_control_put(struct reset_control *rstc)
808     +static void __reset_control_put_internal(struct reset_control *rstc)
809     {
810     lockdep_assert_held(&reset_list_mutex);
811    
812     @@ -268,7 +288,8 @@ static void __reset_control_put(struct reset_control *rstc)
813     }
814    
815     struct reset_control *__of_reset_control_get(struct device_node *node,
816     - const char *id, int index, int shared)
817     + const char *id, int index, bool shared,
818     + bool optional)
819     {
820     struct reset_control *rstc;
821     struct reset_controller_dev *r, *rcdev;
822     @@ -282,14 +303,18 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
823     if (id) {
824     index = of_property_match_string(node,
825     "reset-names", id);
826     + if (index == -EILSEQ)
827     + return ERR_PTR(index);
828     if (index < 0)
829     - return ERR_PTR(-ENOENT);
830     + return optional ? NULL : ERR_PTR(-ENOENT);
831     }
832    
833     ret = of_parse_phandle_with_args(node, "resets", "#reset-cells",
834     index, &args);
835     - if (ret)
836     + if (ret == -EINVAL)
837     return ERR_PTR(ret);
838     + if (ret)
839     + return optional ? NULL : ERR_PTR(ret);
840    
841     mutex_lock(&reset_list_mutex);
842     rcdev = NULL;
843     @@ -318,7 +343,7 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
844     }
845    
846     /* reset_list_mutex also protects the rcdev's reset_control list */
847     - rstc = __reset_control_get(rcdev, rstc_id, shared);
848     + rstc = __reset_control_get_internal(rcdev, rstc_id, shared);
849    
850     mutex_unlock(&reset_list_mutex);
851    
852     @@ -326,6 +351,17 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
853     }
854     EXPORT_SYMBOL_GPL(__of_reset_control_get);
855    
856     +struct reset_control *__reset_control_get(struct device *dev, const char *id,
857     + int index, bool shared, bool optional)
858     +{
859     + if (dev->of_node)
860     + return __of_reset_control_get(dev->of_node, id, index, shared,
861     + optional);
862     +
863     + return optional ? NULL : ERR_PTR(-EINVAL);
864     +}
865     +EXPORT_SYMBOL_GPL(__reset_control_get);
866     +
867     /**
868     * reset_control_put - free the reset controller
869     * @rstc: reset controller
870     @@ -333,11 +369,11 @@ EXPORT_SYMBOL_GPL(__of_reset_control_get);
871    
872     void reset_control_put(struct reset_control *rstc)
873     {
874     - if (IS_ERR(rstc))
875     + if (IS_ERR_OR_NULL(rstc))
876     return;
877    
878     mutex_lock(&reset_list_mutex);
879     - __reset_control_put(rstc);
880     + __reset_control_put_internal(rstc);
881     mutex_unlock(&reset_list_mutex);
882     }
883     EXPORT_SYMBOL_GPL(reset_control_put);
884     @@ -348,7 +384,8 @@ static void devm_reset_control_release(struct device *dev, void *res)
885     }
886    
887     struct reset_control *__devm_reset_control_get(struct device *dev,
888     - const char *id, int index, int shared)
889     + const char *id, int index, bool shared,
890     + bool optional)
891     {
892     struct reset_control **ptr, *rstc;
893    
894     @@ -357,8 +394,7 @@ struct reset_control *__devm_reset_control_get(struct device *dev,
895     if (!ptr)
896     return ERR_PTR(-ENOMEM);
897    
898     - rstc = __of_reset_control_get(dev ? dev->of_node : NULL,
899     - id, index, shared);
900     + rstc = __reset_control_get(dev, id, index, shared, optional);
901     if (!IS_ERR(rstc)) {
902     *ptr = rstc;
903     devres_add(dev, ptr);
904     @@ -374,17 +410,18 @@ EXPORT_SYMBOL_GPL(__devm_reset_control_get);
905     * device_reset - find reset controller associated with the device
906     * and perform reset
907     * @dev: device to be reset by the controller
908     + * @optional: whether it is optional to reset the device
909     *
910     - * Convenience wrapper for reset_control_get() and reset_control_reset().
911     + * Convenience wrapper for __reset_control_get() and reset_control_reset().
912     * This is useful for the common case of devices with single, dedicated reset
913     * lines.
914     */
915     -int device_reset(struct device *dev)
916     +int __device_reset(struct device *dev, bool optional)
917     {
918     struct reset_control *rstc;
919     int ret;
920    
921     - rstc = reset_control_get(dev, NULL);
922     + rstc = __reset_control_get(dev, NULL, 0, 0, optional);
923     if (IS_ERR(rstc))
924     return PTR_ERR(rstc);
925    
926     @@ -394,4 +431,4 @@ int device_reset(struct device *dev)
927    
928     return ret;
929     }
930     -EXPORT_SYMBOL_GPL(device_reset);
931     +EXPORT_SYMBOL_GPL(__device_reset);
932     diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
933     index b8dadc9cc993..d3b00a475aeb 100644
934     --- a/drivers/scsi/bfa/bfa_fcbuild.c
935     +++ b/drivers/scsi/bfa/bfa_fcbuild.c
936     @@ -1250,8 +1250,8 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
937     memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
938    
939     rspnid->dap = s_id;
940     - rspnid->spn_len = (u8) strlen((char *)name);
941     - strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len);
942     + strlcpy(rspnid->spn, name, sizeof(rspnid->spn));
943     + rspnid->spn_len = (u8) strlen(rspnid->spn);
944    
945     return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s);
946     }
947     @@ -1271,8 +1271,8 @@ fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
948     memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s));
949    
950     rsnn_nn->node_name = node_name;
951     - rsnn_nn->snn_len = (u8) strlen((char *)name);
952     - strncpy((char *)rsnn_nn->snn, (char *)name, rsnn_nn->snn_len);
953     + strlcpy(rsnn_nn->snn, name, sizeof(rsnn_nn->snn));
954     + rsnn_nn->snn_len = (u8) strlen(rsnn_nn->snn);
955    
956     return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s);
957     }
958     diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
959     index 1e7e139d71ea..f602de047087 100644
960     --- a/drivers/scsi/bfa/bfa_fcs.c
961     +++ b/drivers/scsi/bfa/bfa_fcs.c
962     @@ -832,23 +832,23 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
963     bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
964    
965     /* Model name/number */
966     - strncpy((char *)&port_cfg->sym_name, model,
967     - BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
968     - strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
969     - sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
970     + strlcpy(port_cfg->sym_name.symname, model,
971     + BFA_SYMNAME_MAXLEN);
972     + strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
973     + BFA_SYMNAME_MAXLEN);
974    
975     /* Driver Version */
976     - strncat((char *)&port_cfg->sym_name, (char *)driver_info->version,
977     - BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
978     - strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
979     - sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
980     + strlcat(port_cfg->sym_name.symname, driver_info->version,
981     + BFA_SYMNAME_MAXLEN);
982     + strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
983     + BFA_SYMNAME_MAXLEN);
984    
985     /* Host machine name */
986     - strncat((char *)&port_cfg->sym_name,
987     - (char *)driver_info->host_machine_name,
988     - BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
989     - strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
990     - sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
991     + strlcat(port_cfg->sym_name.symname,
992     + driver_info->host_machine_name,
993     + BFA_SYMNAME_MAXLEN);
994     + strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
995     + BFA_SYMNAME_MAXLEN);
996    
997     /*
998     * Host OS Info :
999     @@ -856,24 +856,24 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
1000     * OS name string and instead copy the entire OS info string (64 bytes).
1001     */
1002     if (driver_info->host_os_patch[0] == '\0') {
1003     - strncat((char *)&port_cfg->sym_name,
1004     - (char *)driver_info->host_os_name,
1005     - BFA_FCS_OS_STR_LEN);
1006     - strncat((char *)&port_cfg->sym_name,
1007     + strlcat(port_cfg->sym_name.symname,
1008     + driver_info->host_os_name,
1009     + BFA_SYMNAME_MAXLEN);
1010     + strlcat(port_cfg->sym_name.symname,
1011     BFA_FCS_PORT_SYMBNAME_SEPARATOR,
1012     - sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
1013     + BFA_SYMNAME_MAXLEN);
1014     } else {
1015     - strncat((char *)&port_cfg->sym_name,
1016     - (char *)driver_info->host_os_name,
1017     - BFA_FCS_PORT_SYMBNAME_OSINFO_SZ);
1018     - strncat((char *)&port_cfg->sym_name,
1019     + strlcat(port_cfg->sym_name.symname,
1020     + driver_info->host_os_name,
1021     + BFA_SYMNAME_MAXLEN);
1022     + strlcat(port_cfg->sym_name.symname,
1023     BFA_FCS_PORT_SYMBNAME_SEPARATOR,
1024     - sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
1025     + BFA_SYMNAME_MAXLEN);
1026    
1027     /* Append host OS Patch Info */
1028     - strncat((char *)&port_cfg->sym_name,
1029     - (char *)driver_info->host_os_patch,
1030     - BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ);
1031     + strlcat(port_cfg->sym_name.symname,
1032     + driver_info->host_os_patch,
1033     + BFA_SYMNAME_MAXLEN);
1034     }
1035    
1036     /* null terminate */
1037     @@ -893,26 +893,26 @@ bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric)
1038     bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
1039    
1040     /* Model name/number */
1041     - strncpy((char *)&port_cfg->node_sym_name, model,
1042     - BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
1043     - strncat((char *)&port_cfg->node_sym_name,
1044     + strlcpy(port_cfg->node_sym_name.symname, model,
1045     + BFA_SYMNAME_MAXLEN);
1046     + strlcat(port_cfg->node_sym_name.symname,
1047     BFA_FCS_PORT_SYMBNAME_SEPARATOR,
1048     - sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
1049     + BFA_SYMNAME_MAXLEN);
1050    
1051     /* Driver Version */
1052     - strncat((char *)&port_cfg->node_sym_name, (char *)driver_info->version,
1053     - BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
1054     - strncat((char *)&port_cfg->node_sym_name,
1055     + strlcat(port_cfg->node_sym_name.symname, (char *)driver_info->version,
1056     + BFA_SYMNAME_MAXLEN);
1057     + strlcat(port_cfg->node_sym_name.symname,
1058     BFA_FCS_PORT_SYMBNAME_SEPARATOR,
1059     - sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
1060     + BFA_SYMNAME_MAXLEN);
1061    
1062     /* Host machine name */
1063     - strncat((char *)&port_cfg->node_sym_name,
1064     - (char *)driver_info->host_machine_name,
1065     - BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
1066     - strncat((char *)&port_cfg->node_sym_name,
1067     + strlcat(port_cfg->node_sym_name.symname,
1068     + driver_info->host_machine_name,
1069     + BFA_SYMNAME_MAXLEN);
1070     + strlcat(port_cfg->node_sym_name.symname,
1071     BFA_FCS_PORT_SYMBNAME_SEPARATOR,
1072     - sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
1073     + BFA_SYMNAME_MAXLEN);
1074    
1075     /* null terminate */
1076     port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
1077     diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
1078     index 4ddda72f60e6..eb87949b00c1 100644
1079     --- a/drivers/scsi/bfa/bfa_fcs_lport.c
1080     +++ b/drivers/scsi/bfa/bfa_fcs_lport.c
1081     @@ -2631,10 +2631,10 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
1082     bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc,
1083     hba_attr->fw_version);
1084    
1085     - strncpy(hba_attr->driver_version, (char *)driver_info->version,
1086     + strlcpy(hba_attr->driver_version, (char *)driver_info->version,
1087     sizeof(hba_attr->driver_version));
1088    
1089     - strncpy(hba_attr->os_name, driver_info->host_os_name,
1090     + strlcpy(hba_attr->os_name, driver_info->host_os_name,
1091     sizeof(hba_attr->os_name));
1092    
1093     /*
1094     @@ -2642,23 +2642,23 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
1095     * to the os name along with a separator
1096     */
1097     if (driver_info->host_os_patch[0] != '\0') {
1098     - strncat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
1099     - sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
1100     - strncat(hba_attr->os_name, driver_info->host_os_patch,
1101     - sizeof(driver_info->host_os_patch));
1102     + strlcat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
1103     + sizeof(hba_attr->os_name));
1104     + strlcat(hba_attr->os_name, driver_info->host_os_patch,
1105     + sizeof(hba_attr->os_name));
1106     }
1107    
1108     /* Retrieve the max frame size from the port attr */
1109     bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
1110     hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size;
1111    
1112     - strncpy(hba_attr->node_sym_name.symname,
1113     + strlcpy(hba_attr->node_sym_name.symname,
1114     port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN);
1115     strcpy(hba_attr->vendor_info, "QLogic");
1116     hba_attr->num_ports =
1117     cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc));
1118     hba_attr->fabric_name = port->fabric->lps->pr_nwwn;
1119     - strncpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
1120     + strlcpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
1121    
1122     }
1123    
1124     @@ -2725,20 +2725,20 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
1125     /*
1126     * OS device Name
1127     */
1128     - strncpy(port_attr->os_device_name, (char *)driver_info->os_device_name,
1129     + strlcpy(port_attr->os_device_name, driver_info->os_device_name,
1130     sizeof(port_attr->os_device_name));
1131    
1132     /*
1133     * Host name
1134     */
1135     - strncpy(port_attr->host_name, (char *)driver_info->host_machine_name,
1136     + strlcpy(port_attr->host_name, driver_info->host_machine_name,
1137     sizeof(port_attr->host_name));
1138    
1139     port_attr->node_name = bfa_fcs_lport_get_nwwn(port);
1140     port_attr->port_name = bfa_fcs_lport_get_pwwn(port);
1141    
1142     - strncpy(port_attr->port_sym_name.symname,
1143     - (char *)&bfa_fcs_lport_get_psym_name(port), BFA_SYMNAME_MAXLEN);
1144     + strlcpy(port_attr->port_sym_name.symname,
1145     + bfa_fcs_lport_get_psym_name(port).symname, BFA_SYMNAME_MAXLEN);
1146     bfa_fcs_lport_get_attr(port, &lport_attr);
1147     port_attr->port_type = cpu_to_be32(lport_attr.port_type);
1148     port_attr->scos = pport_attr.cos_supported;
1149     @@ -3218,7 +3218,7 @@ bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
1150     rsp_str[gmal_entry->len-1] = 0;
1151    
1152     /* copy IP Address to fabric */
1153     - strncpy(bfa_fcs_lport_get_fabric_ipaddr(port),
1154     + strlcpy(bfa_fcs_lport_get_fabric_ipaddr(port),
1155     gmal_entry->ip_addr,
1156     BFA_FCS_FABRIC_IPADDR_SZ);
1157     break;
1158     @@ -4656,21 +4656,13 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1159     * to that of the base port.
1160     */
1161    
1162     - strncpy((char *)psymbl,
1163     - (char *) &
1164     - (bfa_fcs_lport_get_psym_name
1165     + strlcpy(symbl,
1166     + (char *)&(bfa_fcs_lport_get_psym_name
1167     (bfa_fcs_get_base_port(port->fcs))),
1168     - strlen((char *) &
1169     - bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
1170     - (port->fcs))));
1171     -
1172     - /* Ensure we have a null terminating string. */
1173     - ((char *)psymbl)[strlen((char *) &
1174     - bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
1175     - (port->fcs)))] = 0;
1176     - strncat((char *)psymbl,
1177     - (char *) &(bfa_fcs_lport_get_psym_name(port)),
1178     - strlen((char *) &bfa_fcs_lport_get_psym_name(port)));
1179     + sizeof(symbl));
1180     +
1181     + strlcat(symbl, (char *)&(bfa_fcs_lport_get_psym_name(port)),
1182     + sizeof(symbl));
1183     } else {
1184     psymbl = (u8 *) &(bfa_fcs_lport_get_psym_name(port));
1185     }
1186     @@ -5162,7 +5154,6 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
1187     struct fchs_s fchs;
1188     struct bfa_fcxp_s *fcxp;
1189     u8 symbl[256];
1190     - u8 *psymbl = &symbl[0];
1191     int len;
1192    
1193     /* Avoid sending RSPN in the following states. */
1194     @@ -5192,22 +5183,17 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
1195     * For Vports, we append the vport's port symbolic name
1196     * to that of the base port.
1197     */
1198     - strncpy((char *)psymbl, (char *)&(bfa_fcs_lport_get_psym_name
1199     + strlcpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name
1200     (bfa_fcs_get_base_port(port->fcs))),
1201     - strlen((char *)&bfa_fcs_lport_get_psym_name(
1202     - bfa_fcs_get_base_port(port->fcs))));
1203     -
1204     - /* Ensure we have a null terminating string. */
1205     - ((char *)psymbl)[strlen((char *)&bfa_fcs_lport_get_psym_name(
1206     - bfa_fcs_get_base_port(port->fcs)))] = 0;
1207     + sizeof(symbl));
1208    
1209     - strncat((char *)psymbl,
1210     + strlcat(symbl,
1211     (char *)&(bfa_fcs_lport_get_psym_name(port)),
1212     - strlen((char *)&bfa_fcs_lport_get_psym_name(port)));
1213     + sizeof(symbl));
1214     }
1215    
1216     len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1217     - bfa_fcs_lport_get_fcid(port), 0, psymbl);
1218     + bfa_fcs_lport_get_fcid(port), 0, symbl);
1219    
1220     bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1221     FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
1222     diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
1223     index a1ada4a31c97..16750416d3c0 100644
1224     --- a/drivers/scsi/bfa/bfa_ioc.c
1225     +++ b/drivers/scsi/bfa/bfa_ioc.c
1226     @@ -2803,7 +2803,7 @@ void
1227     bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
1228     {
1229     memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
1230     - strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
1231     + strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
1232     }
1233    
1234     void
1235     diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
1236     index 12de292175ef..225883d2aeef 100644
1237     --- a/drivers/scsi/bfa/bfa_svc.c
1238     +++ b/drivers/scsi/bfa/bfa_svc.c
1239     @@ -366,8 +366,8 @@ bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
1240     lp.eid = event;
1241     lp.log_type = BFA_PL_LOG_TYPE_STRING;
1242     lp.misc = misc;
1243     - strncpy(lp.log_entry.string_log, log_str,
1244     - BFA_PL_STRING_LOG_SZ - 1);
1245     + strlcpy(lp.log_entry.string_log, log_str,
1246     + BFA_PL_STRING_LOG_SZ);
1247     lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
1248     bfa_plog_add(plog, &lp);
1249     }
1250     diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
1251     index e70410beb83a..389f8ef0b095 100644
1252     --- a/drivers/scsi/bfa/bfad.c
1253     +++ b/drivers/scsi/bfa/bfad.c
1254     @@ -983,20 +983,20 @@ bfad_start_ops(struct bfad_s *bfad) {
1255    
1256     /* Fill the driver_info info to fcs*/
1257     memset(&driver_info, 0, sizeof(driver_info));
1258     - strncpy(driver_info.version, BFAD_DRIVER_VERSION,
1259     - sizeof(driver_info.version) - 1);
1260     + strlcpy(driver_info.version, BFAD_DRIVER_VERSION,
1261     + sizeof(driver_info.version));
1262     if (host_name)
1263     - strncpy(driver_info.host_machine_name, host_name,
1264     - sizeof(driver_info.host_machine_name) - 1);
1265     + strlcpy(driver_info.host_machine_name, host_name,
1266     + sizeof(driver_info.host_machine_name));
1267     if (os_name)
1268     - strncpy(driver_info.host_os_name, os_name,
1269     - sizeof(driver_info.host_os_name) - 1);
1270     + strlcpy(driver_info.host_os_name, os_name,
1271     + sizeof(driver_info.host_os_name));
1272     if (os_patch)
1273     - strncpy(driver_info.host_os_patch, os_patch,
1274     - sizeof(driver_info.host_os_patch) - 1);
1275     + strlcpy(driver_info.host_os_patch, os_patch,
1276     + sizeof(driver_info.host_os_patch));
1277    
1278     - strncpy(driver_info.os_device_name, bfad->pci_name,
1279     - sizeof(driver_info.os_device_name) - 1);
1280     + strlcpy(driver_info.os_device_name, bfad->pci_name,
1281     + sizeof(driver_info.os_device_name));
1282    
1283     /* FCS driver info init */
1284     spin_lock_irqsave(&bfad->bfad_lock, flags);
1285     diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
1286     index 13db3b7bc873..d0a504af5b4f 100644
1287     --- a/drivers/scsi/bfa/bfad_attr.c
1288     +++ b/drivers/scsi/bfa/bfad_attr.c
1289     @@ -843,7 +843,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
1290     char symname[BFA_SYMNAME_MAXLEN];
1291    
1292     bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
1293     - strncpy(symname, port_attr.port_cfg.sym_name.symname,
1294     + strlcpy(symname, port_attr.port_cfg.sym_name.symname,
1295     BFA_SYMNAME_MAXLEN);
1296     return snprintf(buf, PAGE_SIZE, "%s\n", symname);
1297     }
1298     diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
1299     index d1ad0208dfe7..a3bd23685824 100644
1300     --- a/drivers/scsi/bfa/bfad_bsg.c
1301     +++ b/drivers/scsi/bfa/bfad_bsg.c
1302     @@ -127,7 +127,7 @@ bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
1303    
1304     /* fill in driver attr info */
1305     strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
1306     - strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
1307     + strlcpy(iocmd->ioc_attr.driver_attr.driver_ver,
1308     BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
1309     strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
1310     iocmd->ioc_attr.adapter_attr.fw_ver);
1311     @@ -315,9 +315,9 @@ bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
1312     iocmd->attr.port_type = port_attr.port_type;
1313     iocmd->attr.loopback = port_attr.loopback;
1314     iocmd->attr.authfail = port_attr.authfail;
1315     - strncpy(iocmd->attr.port_symname.symname,
1316     + strlcpy(iocmd->attr.port_symname.symname,
1317     port_attr.port_cfg.sym_name.symname,
1318     - sizeof(port_attr.port_cfg.sym_name.symname));
1319     + sizeof(iocmd->attr.port_symname.symname));
1320    
1321     iocmd->status = BFA_STATUS_OK;
1322     return 0;
1323     diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
1324     index c00b2ff72b55..be5ee2d37815 100644
1325     --- a/drivers/scsi/csiostor/csio_lnode.c
1326     +++ b/drivers/scsi/csiostor/csio_lnode.c
1327     @@ -238,14 +238,23 @@ csio_osname(uint8_t *buf, size_t buf_len)
1328     }
1329    
1330     static inline void
1331     -csio_append_attrib(uint8_t **ptr, uint16_t type, uint8_t *val, uint16_t len)
1332     +csio_append_attrib(uint8_t **ptr, uint16_t type, void *val, size_t val_len)
1333     {
1334     + uint16_t len;
1335     struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr;
1336     +
1337     + if (WARN_ON(val_len > U16_MAX))
1338     + return;
1339     +
1340     + len = val_len;
1341     +
1342     ae->type = htons(type);
1343     len += 4; /* includes attribute type and length */
1344     len = (len + 3) & ~3; /* should be multiple of 4 bytes */
1345     ae->len = htons(len);
1346     - memcpy(ae->value, val, len);
1347     + memcpy(ae->value, val, val_len);
1348     + if (len > val_len)
1349     + memset(ae->value + val_len, 0, len - val_len);
1350     *ptr += len;
1351     }
1352    
1353     @@ -335,7 +344,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
1354     numattrs++;
1355     val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
1356     csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
1357     - (uint8_t *)&val,
1358     + &val,
1359     FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
1360     numattrs++;
1361    
1362     @@ -346,23 +355,22 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
1363     else
1364     val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
1365     csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
1366     - (uint8_t *)&val,
1367     - FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
1368     + &val, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
1369     numattrs++;
1370    
1371     mfs = ln->ln_sparm.csp.sp_bb_data;
1372     csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
1373     - (uint8_t *)&mfs, FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);
1374     + &mfs, sizeof(mfs));
1375     numattrs++;
1376    
1377     strcpy(buf, "csiostor");
1378     csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf,
1379     - (uint16_t)strlen(buf));
1380     + strlen(buf));
1381     numattrs++;
1382    
1383     if (!csio_hostname(buf, sizeof(buf))) {
1384     csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME,
1385     - buf, (uint16_t)strlen(buf));
1386     + buf, strlen(buf));
1387     numattrs++;
1388     }
1389     attrib_blk->numattrs = htonl(numattrs);
1390     @@ -444,33 +452,32 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
1391    
1392     strcpy(buf, "Chelsio Communications");
1393     csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,
1394     - (uint16_t)strlen(buf));
1395     + strlen(buf));
1396     numattrs++;
1397     csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,
1398     - hw->vpd.sn, (uint16_t)sizeof(hw->vpd.sn));
1399     + hw->vpd.sn, sizeof(hw->vpd.sn));
1400     numattrs++;
1401     csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,
1402     - (uint16_t)sizeof(hw->vpd.id));
1403     + sizeof(hw->vpd.id));
1404     numattrs++;
1405     csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
1406     - hw->model_desc, (uint16_t)strlen(hw->model_desc));
1407     + hw->model_desc, strlen(hw->model_desc));
1408     numattrs++;
1409     csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,
1410     - hw->hw_ver, (uint16_t)sizeof(hw->hw_ver));
1411     + hw->hw_ver, sizeof(hw->hw_ver));
1412     numattrs++;
1413     csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
1414     - hw->fwrev_str, (uint16_t)strlen(hw->fwrev_str));
1415     + hw->fwrev_str, strlen(hw->fwrev_str));
1416     numattrs++;
1417    
1418     if (!csio_osname(buf, sizeof(buf))) {
1419     csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,
1420     - buf, (uint16_t)strlen(buf));
1421     + buf, strlen(buf));
1422     numattrs++;
1423     }
1424    
1425     csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
1426     - (uint8_t *)&maxpayload,
1427     - FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
1428     + &maxpayload, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
1429     len = (uint32_t)(pld - (uint8_t *)cmd);
1430     numattrs++;
1431     attrib_blk->numattrs = htonl(numattrs);
1432     @@ -1794,6 +1801,8 @@ csio_ln_mgmt_submit_req(struct csio_ioreq *io_req,
1433     struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
1434     int rv;
1435    
1436     + BUG_ON(pld_len > pld->len);
1437     +
1438     io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */
1439     io_req->fw_handle = (uintptr_t) (io_req);
1440     io_req->eq_idx = mgmtm->eq_idx;
1441     diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
1442     index 43d4b30cbf65..282ea00d0f87 100644
1443     --- a/drivers/scsi/scsi_devinfo.c
1444     +++ b/drivers/scsi/scsi_devinfo.c
1445     @@ -33,7 +33,6 @@ struct scsi_dev_info_list_table {
1446     };
1447    
1448    
1449     -static const char spaces[] = " "; /* 16 of them */
1450     static unsigned scsi_default_dev_flags;
1451     static LIST_HEAD(scsi_dev_info_list);
1452     static char scsi_dev_flags[256];
1453     @@ -298,20 +297,13 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
1454     size_t from_length;
1455    
1456     from_length = strlen(from);
1457     - strncpy(to, from, min(to_length, from_length));
1458     - if (from_length < to_length) {
1459     - if (compatible) {
1460     - /*
1461     - * NUL terminate the string if it is short.
1462     - */
1463     - to[from_length] = '\0';
1464     - } else {
1465     - /*
1466     - * space pad the string if it is short.
1467     - */
1468     - strncpy(&to[from_length], spaces,
1469     - to_length - from_length);
1470     - }
1471     + /* this zero-pads the destination */
1472     + strncpy(to, from, to_length);
1473     + if (from_length < to_length && !compatible) {
1474     + /*
1475     + * space pad the string if it is short.
1476     + */
1477     + memset(&to[from_length], ' ', to_length - from_length);
1478     }
1479     if (from_length > to_length)
1480     printk(KERN_WARNING "%s: %s string '%s' is too long\n",
1481     diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
1482     index 9e63bdf2afe7..4e233f3e7215 100644
1483     --- a/drivers/staging/rts5208/sd.c
1484     +++ b/drivers/staging/rts5208/sd.c
1485     @@ -4110,12 +4110,6 @@ RTY_SEND_CMD:
1486     rtsx_trace(chip);
1487     return STATUS_FAIL;
1488     }
1489     -
1490     - } else if (rsp_type == SD_RSP_TYPE_R0) {
1491     - if ((ptr[3] & 0x1E) != 0x03) {
1492     - rtsx_trace(chip);
1493     - return STATUS_FAIL;
1494     - }
1495     }
1496     }
1497     }
1498     diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
1499     index 2db68dfe497d..c448225ef5ca 100644
1500     --- a/drivers/tty/serial/kgdboc.c
1501     +++ b/drivers/tty/serial/kgdboc.c
1502     @@ -131,24 +131,6 @@ static void kgdboc_unregister_kbd(void)
1503     #define kgdboc_restore_input()
1504     #endif /* ! CONFIG_KDB_KEYBOARD */
1505    
1506     -static int kgdboc_option_setup(char *opt)
1507     -{
1508     - if (!opt) {
1509     - pr_err("kgdboc: config string not provided\n");
1510     - return -EINVAL;
1511     - }
1512     -
1513     - if (strlen(opt) >= MAX_CONFIG_LEN) {
1514     - printk(KERN_ERR "kgdboc: config string too long\n");
1515     - return -ENOSPC;
1516     - }
1517     - strcpy(config, opt);
1518     -
1519     - return 0;
1520     -}
1521     -
1522     -__setup("kgdboc=", kgdboc_option_setup);
1523     -
1524     static void cleanup_kgdboc(void)
1525     {
1526     if (kgdb_unregister_nmi_console())
1527     @@ -162,15 +144,13 @@ static int configure_kgdboc(void)
1528     {
1529     struct tty_driver *p;
1530     int tty_line = 0;
1531     - int err;
1532     + int err = -ENODEV;
1533     char *cptr = config;
1534     struct console *cons;
1535    
1536     - err = kgdboc_option_setup(config);
1537     - if (err || !strlen(config) || isspace(config[0]))
1538     + if (!strlen(config) || isspace(config[0]))
1539     goto noconfig;
1540    
1541     - err = -ENODEV;
1542     kgdboc_io_ops.is_console = 0;
1543     kgdb_tty_driver = NULL;
1544    
1545     @@ -318,6 +298,25 @@ static struct kgdb_io kgdboc_io_ops = {
1546     };
1547    
1548     #ifdef CONFIG_KGDB_SERIAL_CONSOLE
1549     +static int kgdboc_option_setup(char *opt)
1550     +{
1551     + if (!opt) {
1552     + pr_err("config string not provided\n");
1553     + return -EINVAL;
1554     + }
1555     +
1556     + if (strlen(opt) >= MAX_CONFIG_LEN) {
1557     + pr_err("config string too long\n");
1558     + return -ENOSPC;
1559     + }
1560     + strcpy(config, opt);
1561     +
1562     + return 0;
1563     +}
1564     +
1565     +__setup("kgdboc=", kgdboc_option_setup);
1566     +
1567     +
1568     /* This is only available if kgdboc is a built in for early debugging */
1569     static int __init kgdboc_early_init(char *opt)
1570     {
1571     diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
1572     index ff4d6cac7ac0..ab89fa3b4118 100644
1573     --- a/drivers/usb/gadget/udc/dummy_hcd.c
1574     +++ b/drivers/usb/gadget/udc/dummy_hcd.c
1575     @@ -379,11 +379,10 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
1576     USB_PORT_STAT_CONNECTION) == 0)
1577     dum_hcd->port_status |=
1578     (USB_PORT_STAT_C_CONNECTION << 16);
1579     - if ((dum_hcd->port_status &
1580     - USB_PORT_STAT_ENABLE) == 1 &&
1581     - (dum_hcd->port_status &
1582     - USB_SS_PORT_LS_U0) == 1 &&
1583     - dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
1584     + if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) &&
1585     + (dum_hcd->port_status &
1586     + USB_PORT_STAT_LINK_STATE) == USB_SS_PORT_LS_U0 &&
1587     + dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
1588     dum_hcd->active = 1;
1589     }
1590     } else {
1591     diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
1592     index 128ce17a80b0..076ccfb44c28 100644
1593     --- a/fs/btrfs/Makefile
1594     +++ b/fs/btrfs/Makefile
1595     @@ -9,7 +9,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
1596     export.o tree-log.o free-space-cache.o zlib.o lzo.o \
1597     compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
1598     reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
1599     - uuid-tree.o props.o hash.o free-space-tree.o
1600     + uuid-tree.o props.o hash.o free-space-tree.o tree-checker.o
1601    
1602     btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
1603     btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
1604     diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
1605     index 86245b884fce..a423c36bcd72 100644
1606     --- a/fs/btrfs/ctree.h
1607     +++ b/fs/btrfs/ctree.h
1608     @@ -1415,7 +1415,7 @@ do { \
1609     #define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31)
1610    
1611     struct btrfs_map_token {
1612     - struct extent_buffer *eb;
1613     + const struct extent_buffer *eb;
1614     char *kaddr;
1615     unsigned long offset;
1616     };
1617     @@ -1449,18 +1449,19 @@ static inline void btrfs_init_map_token (struct btrfs_map_token *token)
1618     sizeof(((type *)0)->member)))
1619    
1620     #define DECLARE_BTRFS_SETGET_BITS(bits) \
1621     -u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
1622     - unsigned long off, \
1623     - struct btrfs_map_token *token); \
1624     -void btrfs_set_token_##bits(struct extent_buffer *eb, void *ptr, \
1625     +u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \
1626     + const void *ptr, unsigned long off, \
1627     + struct btrfs_map_token *token); \
1628     +void btrfs_set_token_##bits(struct extent_buffer *eb, const void *ptr, \
1629     unsigned long off, u##bits val, \
1630     struct btrfs_map_token *token); \
1631     -static inline u##bits btrfs_get_##bits(struct extent_buffer *eb, void *ptr, \
1632     +static inline u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
1633     + const void *ptr, \
1634     unsigned long off) \
1635     { \
1636     return btrfs_get_token_##bits(eb, ptr, off, NULL); \
1637     } \
1638     -static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \
1639     +static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr,\
1640     unsigned long off, u##bits val) \
1641     { \
1642     btrfs_set_token_##bits(eb, ptr, off, val, NULL); \
1643     @@ -1472,7 +1473,8 @@ DECLARE_BTRFS_SETGET_BITS(32)
1644     DECLARE_BTRFS_SETGET_BITS(64)
1645    
1646     #define BTRFS_SETGET_FUNCS(name, type, member, bits) \
1647     -static inline u##bits btrfs_##name(struct extent_buffer *eb, type *s) \
1648     +static inline u##bits btrfs_##name(const struct extent_buffer *eb, \
1649     + const type *s) \
1650     { \
1651     BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
1652     return btrfs_get_##bits(eb, s, offsetof(type, member)); \
1653     @@ -1483,7 +1485,8 @@ static inline void btrfs_set_##name(struct extent_buffer *eb, type *s, \
1654     BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
1655     btrfs_set_##bits(eb, s, offsetof(type, member), val); \
1656     } \
1657     -static inline u##bits btrfs_token_##name(struct extent_buffer *eb, type *s, \
1658     +static inline u##bits btrfs_token_##name(const struct extent_buffer *eb,\
1659     + const type *s, \
1660     struct btrfs_map_token *token) \
1661     { \
1662     BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
1663     @@ -1498,9 +1501,9 @@ static inline void btrfs_set_token_##name(struct extent_buffer *eb, \
1664     }
1665    
1666     #define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \
1667     -static inline u##bits btrfs_##name(struct extent_buffer *eb) \
1668     +static inline u##bits btrfs_##name(const struct extent_buffer *eb) \
1669     { \
1670     - type *p = page_address(eb->pages[0]); \
1671     + const type *p = page_address(eb->pages[0]); \
1672     u##bits res = le##bits##_to_cpu(p->member); \
1673     return res; \
1674     } \
1675     @@ -1512,7 +1515,7 @@ static inline void btrfs_set_##name(struct extent_buffer *eb, \
1676     }
1677    
1678     #define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \
1679     -static inline u##bits btrfs_##name(type *s) \
1680     +static inline u##bits btrfs_##name(const type *s) \
1681     { \
1682     return le##bits##_to_cpu(s->member); \
1683     } \
1684     @@ -1818,7 +1821,7 @@ static inline unsigned long btrfs_node_key_ptr_offset(int nr)
1685     sizeof(struct btrfs_key_ptr) * nr;
1686     }
1687    
1688     -void btrfs_node_key(struct extent_buffer *eb,
1689     +void btrfs_node_key(const struct extent_buffer *eb,
1690     struct btrfs_disk_key *disk_key, int nr);
1691    
1692     static inline void btrfs_set_node_key(struct extent_buffer *eb,
1693     @@ -1847,28 +1850,28 @@ static inline struct btrfs_item *btrfs_item_nr(int nr)
1694     return (struct btrfs_item *)btrfs_item_nr_offset(nr);
1695     }
1696    
1697     -static inline u32 btrfs_item_end(struct extent_buffer *eb,
1698     +static inline u32 btrfs_item_end(const struct extent_buffer *eb,
1699     struct btrfs_item *item)
1700     {
1701     return btrfs_item_offset(eb, item) + btrfs_item_size(eb, item);
1702     }
1703    
1704     -static inline u32 btrfs_item_end_nr(struct extent_buffer *eb, int nr)
1705     +static inline u32 btrfs_item_end_nr(const struct extent_buffer *eb, int nr)
1706     {
1707     return btrfs_item_end(eb, btrfs_item_nr(nr));
1708     }
1709    
1710     -static inline u32 btrfs_item_offset_nr(struct extent_buffer *eb, int nr)
1711     +static inline u32 btrfs_item_offset_nr(const struct extent_buffer *eb, int nr)
1712     {
1713     return btrfs_item_offset(eb, btrfs_item_nr(nr));
1714     }
1715    
1716     -static inline u32 btrfs_item_size_nr(struct extent_buffer *eb, int nr)
1717     +static inline u32 btrfs_item_size_nr(const struct extent_buffer *eb, int nr)
1718     {
1719     return btrfs_item_size(eb, btrfs_item_nr(nr));
1720     }
1721    
1722     -static inline void btrfs_item_key(struct extent_buffer *eb,
1723     +static inline void btrfs_item_key(const struct extent_buffer *eb,
1724     struct btrfs_disk_key *disk_key, int nr)
1725     {
1726     struct btrfs_item *item = btrfs_item_nr(nr);
1727     @@ -1904,8 +1907,8 @@ BTRFS_SETGET_STACK_FUNCS(stack_dir_name_len, struct btrfs_dir_item,
1728     BTRFS_SETGET_STACK_FUNCS(stack_dir_transid, struct btrfs_dir_item,
1729     transid, 64);
1730    
1731     -static inline void btrfs_dir_item_key(struct extent_buffer *eb,
1732     - struct btrfs_dir_item *item,
1733     +static inline void btrfs_dir_item_key(const struct extent_buffer *eb,
1734     + const struct btrfs_dir_item *item,
1735     struct btrfs_disk_key *key)
1736     {
1737     read_eb_member(eb, item, struct btrfs_dir_item, location, key);
1738     @@ -1913,7 +1916,7 @@ static inline void btrfs_dir_item_key(struct extent_buffer *eb,
1739    
1740     static inline void btrfs_set_dir_item_key(struct extent_buffer *eb,
1741     struct btrfs_dir_item *item,
1742     - struct btrfs_disk_key *key)
1743     + const struct btrfs_disk_key *key)
1744     {
1745     write_eb_member(eb, item, struct btrfs_dir_item, location, key);
1746     }
1747     @@ -1925,8 +1928,8 @@ BTRFS_SETGET_FUNCS(free_space_bitmaps, struct btrfs_free_space_header,
1748     BTRFS_SETGET_FUNCS(free_space_generation, struct btrfs_free_space_header,
1749     generation, 64);
1750    
1751     -static inline void btrfs_free_space_key(struct extent_buffer *eb,
1752     - struct btrfs_free_space_header *h,
1753     +static inline void btrfs_free_space_key(const struct extent_buffer *eb,
1754     + const struct btrfs_free_space_header *h,
1755     struct btrfs_disk_key *key)
1756     {
1757     read_eb_member(eb, h, struct btrfs_free_space_header, location, key);
1758     @@ -1934,7 +1937,7 @@ static inline void btrfs_free_space_key(struct extent_buffer *eb,
1759    
1760     static inline void btrfs_set_free_space_key(struct extent_buffer *eb,
1761     struct btrfs_free_space_header *h,
1762     - struct btrfs_disk_key *key)
1763     + const struct btrfs_disk_key *key)
1764     {
1765     write_eb_member(eb, h, struct btrfs_free_space_header, location, key);
1766     }
1767     @@ -1961,25 +1964,25 @@ static inline void btrfs_cpu_key_to_disk(struct btrfs_disk_key *disk,
1768     disk->objectid = cpu_to_le64(cpu->objectid);
1769     }
1770    
1771     -static inline void btrfs_node_key_to_cpu(struct extent_buffer *eb,
1772     - struct btrfs_key *key, int nr)
1773     +static inline void btrfs_node_key_to_cpu(const struct extent_buffer *eb,
1774     + struct btrfs_key *key, int nr)
1775     {
1776     struct btrfs_disk_key disk_key;
1777     btrfs_node_key(eb, &disk_key, nr);
1778     btrfs_disk_key_to_cpu(key, &disk_key);
1779     }
1780    
1781     -static inline void btrfs_item_key_to_cpu(struct extent_buffer *eb,
1782     - struct btrfs_key *key, int nr)
1783     +static inline void btrfs_item_key_to_cpu(const struct extent_buffer *eb,
1784     + struct btrfs_key *key, int nr)
1785     {
1786     struct btrfs_disk_key disk_key;
1787     btrfs_item_key(eb, &disk_key, nr);
1788     btrfs_disk_key_to_cpu(key, &disk_key);
1789     }
1790    
1791     -static inline void btrfs_dir_item_key_to_cpu(struct extent_buffer *eb,
1792     - struct btrfs_dir_item *item,
1793     - struct btrfs_key *key)
1794     +static inline void btrfs_dir_item_key_to_cpu(const struct extent_buffer *eb,
1795     + const struct btrfs_dir_item *item,
1796     + struct btrfs_key *key)
1797     {
1798     struct btrfs_disk_key disk_key;
1799     btrfs_dir_item_key(eb, item, &disk_key);
1800     @@ -2012,7 +2015,7 @@ BTRFS_SETGET_STACK_FUNCS(stack_header_nritems, struct btrfs_header,
1801     nritems, 32);
1802     BTRFS_SETGET_STACK_FUNCS(stack_header_bytenr, struct btrfs_header, bytenr, 64);
1803    
1804     -static inline int btrfs_header_flag(struct extent_buffer *eb, u64 flag)
1805     +static inline int btrfs_header_flag(const struct extent_buffer *eb, u64 flag)
1806     {
1807     return (btrfs_header_flags(eb) & flag) == flag;
1808     }
1809     @@ -2031,7 +2034,7 @@ static inline int btrfs_clear_header_flag(struct extent_buffer *eb, u64 flag)
1810     return (flags & flag) == flag;
1811     }
1812    
1813     -static inline int btrfs_header_backref_rev(struct extent_buffer *eb)
1814     +static inline int btrfs_header_backref_rev(const struct extent_buffer *eb)
1815     {
1816     u64 flags = btrfs_header_flags(eb);
1817     return flags >> BTRFS_BACKREF_REV_SHIFT;
1818     @@ -2051,12 +2054,12 @@ static inline unsigned long btrfs_header_fsid(void)
1819     return offsetof(struct btrfs_header, fsid);
1820     }
1821    
1822     -static inline unsigned long btrfs_header_chunk_tree_uuid(struct extent_buffer *eb)
1823     +static inline unsigned long btrfs_header_chunk_tree_uuid(const struct extent_buffer *eb)
1824     {
1825     return offsetof(struct btrfs_header, chunk_tree_uuid);
1826     }
1827    
1828     -static inline int btrfs_is_leaf(struct extent_buffer *eb)
1829     +static inline int btrfs_is_leaf(const struct extent_buffer *eb)
1830     {
1831     return btrfs_header_level(eb) == 0;
1832     }
1833     @@ -2090,12 +2093,12 @@ BTRFS_SETGET_STACK_FUNCS(root_stransid, struct btrfs_root_item,
1834     BTRFS_SETGET_STACK_FUNCS(root_rtransid, struct btrfs_root_item,
1835     rtransid, 64);
1836    
1837     -static inline bool btrfs_root_readonly(struct btrfs_root *root)
1838     +static inline bool btrfs_root_readonly(const struct btrfs_root *root)
1839     {
1840     return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
1841     }
1842    
1843     -static inline bool btrfs_root_dead(struct btrfs_root *root)
1844     +static inline bool btrfs_root_dead(const struct btrfs_root *root)
1845     {
1846     return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
1847     }
1848     @@ -2152,51 +2155,51 @@ BTRFS_SETGET_STACK_FUNCS(backup_num_devices, struct btrfs_root_backup,
1849     /* struct btrfs_balance_item */
1850     BTRFS_SETGET_FUNCS(balance_flags, struct btrfs_balance_item, flags, 64);
1851    
1852     -static inline void btrfs_balance_data(struct extent_buffer *eb,
1853     - struct btrfs_balance_item *bi,
1854     +static inline void btrfs_balance_data(const struct extent_buffer *eb,
1855     + const struct btrfs_balance_item *bi,
1856     struct btrfs_disk_balance_args *ba)
1857     {
1858     read_eb_member(eb, bi, struct btrfs_balance_item, data, ba);
1859     }
1860    
1861     static inline void btrfs_set_balance_data(struct extent_buffer *eb,
1862     - struct btrfs_balance_item *bi,
1863     - struct btrfs_disk_balance_args *ba)
1864     + struct btrfs_balance_item *bi,
1865     + const struct btrfs_disk_balance_args *ba)
1866     {
1867     write_eb_member(eb, bi, struct btrfs_balance_item, data, ba);
1868     }
1869    
1870     -static inline void btrfs_balance_meta(struct extent_buffer *eb,
1871     - struct btrfs_balance_item *bi,
1872     +static inline void btrfs_balance_meta(const struct extent_buffer *eb,
1873     + const struct btrfs_balance_item *bi,
1874     struct btrfs_disk_balance_args *ba)
1875     {
1876     read_eb_member(eb, bi, struct btrfs_balance_item, meta, ba);
1877     }
1878    
1879     static inline void btrfs_set_balance_meta(struct extent_buffer *eb,
1880     - struct btrfs_balance_item *bi,
1881     - struct btrfs_disk_balance_args *ba)
1882     + struct btrfs_balance_item *bi,
1883     + const struct btrfs_disk_balance_args *ba)
1884     {
1885     write_eb_member(eb, bi, struct btrfs_balance_item, meta, ba);
1886     }
1887    
1888     -static inline void btrfs_balance_sys(struct extent_buffer *eb,
1889     - struct btrfs_balance_item *bi,
1890     +static inline void btrfs_balance_sys(const struct extent_buffer *eb,
1891     + const struct btrfs_balance_item *bi,
1892     struct btrfs_disk_balance_args *ba)
1893     {
1894     read_eb_member(eb, bi, struct btrfs_balance_item, sys, ba);
1895     }
1896    
1897     static inline void btrfs_set_balance_sys(struct extent_buffer *eb,
1898     - struct btrfs_balance_item *bi,
1899     - struct btrfs_disk_balance_args *ba)
1900     + struct btrfs_balance_item *bi,
1901     + const struct btrfs_disk_balance_args *ba)
1902     {
1903     write_eb_member(eb, bi, struct btrfs_balance_item, sys, ba);
1904     }
1905    
1906     static inline void
1907     btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
1908     - struct btrfs_disk_balance_args *disk)
1909     + const struct btrfs_disk_balance_args *disk)
1910     {
1911     memset(cpu, 0, sizeof(*cpu));
1912    
1913     @@ -2216,7 +2219,7 @@ btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
1914    
1915     static inline void
1916     btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk,
1917     - struct btrfs_balance_args *cpu)
1918     + const struct btrfs_balance_args *cpu)
1919     {
1920     memset(disk, 0, sizeof(*disk));
1921    
1922     @@ -2284,7 +2287,7 @@ BTRFS_SETGET_STACK_FUNCS(super_magic, struct btrfs_super_block, magic, 64);
1923     BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block,
1924     uuid_tree_generation, 64);
1925    
1926     -static inline int btrfs_super_csum_size(struct btrfs_super_block *s)
1927     +static inline int btrfs_super_csum_size(const struct btrfs_super_block *s)
1928     {
1929     u16 t = btrfs_super_csum_type(s);
1930     /*
1931     @@ -2303,8 +2306,8 @@ static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
1932     * this returns the address of the start of the last item,
1933     * which is the stop of the leaf data stack
1934     */
1935     -static inline unsigned int leaf_data_end(struct btrfs_root *root,
1936     - struct extent_buffer *leaf)
1937     +static inline unsigned int leaf_data_end(const struct btrfs_root *root,
1938     + const struct extent_buffer *leaf)
1939     {
1940     u32 nr = btrfs_header_nritems(leaf);
1941    
1942     @@ -2329,7 +2332,7 @@ BTRFS_SETGET_STACK_FUNCS(stack_file_extent_compression,
1943     struct btrfs_file_extent_item, compression, 8);
1944    
1945     static inline unsigned long
1946     -btrfs_file_extent_inline_start(struct btrfs_file_extent_item *e)
1947     +btrfs_file_extent_inline_start(const struct btrfs_file_extent_item *e)
1948     {
1949     return (unsigned long)e + BTRFS_FILE_EXTENT_INLINE_DATA_START;
1950     }
1951     @@ -2363,8 +2366,9 @@ BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item,
1952     * size of any extent headers. If a file is compressed on disk, this is
1953     * the compressed size
1954     */
1955     -static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb,
1956     - struct btrfs_item *e)
1957     +static inline u32 btrfs_file_extent_inline_item_len(
1958     + const struct extent_buffer *eb,
1959     + struct btrfs_item *e)
1960     {
1961     return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START;
1962     }
1963     @@ -2372,9 +2376,9 @@ static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb,
1964     /* this returns the number of file bytes represented by the inline item.
1965     * If an item is compressed, this is the uncompressed size
1966     */
1967     -static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb,
1968     - int slot,
1969     - struct btrfs_file_extent_item *fi)
1970     +static inline u32 btrfs_file_extent_inline_len(const struct extent_buffer *eb,
1971     + int slot,
1972     + const struct btrfs_file_extent_item *fi)
1973     {
1974     struct btrfs_map_token token;
1975    
1976     @@ -2396,8 +2400,8 @@ static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb,
1977    
1978    
1979     /* btrfs_dev_stats_item */
1980     -static inline u64 btrfs_dev_stats_value(struct extent_buffer *eb,
1981     - struct btrfs_dev_stats_item *ptr,
1982     +static inline u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
1983     + const struct btrfs_dev_stats_item *ptr,
1984     int index)
1985     {
1986     u64 val;
1987     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
1988     index 57d375c68e46..77b32415d9f2 100644
1989     --- a/fs/btrfs/disk-io.c
1990     +++ b/fs/btrfs/disk-io.c
1991     @@ -50,6 +50,7 @@
1992     #include "sysfs.h"
1993     #include "qgroup.h"
1994     #include "compression.h"
1995     +#include "tree-checker.h"
1996    
1997     #ifdef CONFIG_X86
1998     #include <asm/cpufeature.h>
1999     @@ -452,9 +453,9 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
2000     int mirror_num = 0;
2001     int failed_mirror = 0;
2002    
2003     - clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
2004     io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
2005     while (1) {
2006     + clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
2007     ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
2008     btree_get_extent, mirror_num);
2009     if (!ret) {
2010     @@ -465,14 +466,6 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
2011     ret = -EIO;
2012     }
2013    
2014     - /*
2015     - * This buffer's crc is fine, but its contents are corrupted, so
2016     - * there is no reason to read the other copies, they won't be
2017     - * any less wrong.
2018     - */
2019     - if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
2020     - break;
2021     -
2022     num_copies = btrfs_num_copies(root->fs_info,
2023     eb->start, eb->len);
2024     if (num_copies == 1)
2025     @@ -546,145 +539,6 @@ static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
2026     return ret;
2027     }
2028    
2029     -#define CORRUPT(reason, eb, root, slot) \
2030     - btrfs_crit(root->fs_info, "corrupt %s, %s: block=%llu," \
2031     - " root=%llu, slot=%d", \
2032     - btrfs_header_level(eb) == 0 ? "leaf" : "node",\
2033     - reason, btrfs_header_bytenr(eb), root->objectid, slot)
2034     -
2035     -static noinline int check_leaf(struct btrfs_root *root,
2036     - struct extent_buffer *leaf)
2037     -{
2038     - struct btrfs_key key;
2039     - struct btrfs_key leaf_key;
2040     - u32 nritems = btrfs_header_nritems(leaf);
2041     - int slot;
2042     -
2043     - /*
2044     - * Extent buffers from a relocation tree have a owner field that
2045     - * corresponds to the subvolume tree they are based on. So just from an
2046     - * extent buffer alone we can not find out what is the id of the
2047     - * corresponding subvolume tree, so we can not figure out if the extent
2048     - * buffer corresponds to the root of the relocation tree or not. So skip
2049     - * this check for relocation trees.
2050     - */
2051     - if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
2052     - struct btrfs_root *check_root;
2053     -
2054     - key.objectid = btrfs_header_owner(leaf);
2055     - key.type = BTRFS_ROOT_ITEM_KEY;
2056     - key.offset = (u64)-1;
2057     -
2058     - check_root = btrfs_get_fs_root(root->fs_info, &key, false);
2059     - /*
2060     - * The only reason we also check NULL here is that during
2061     - * open_ctree() some roots has not yet been set up.
2062     - */
2063     - if (!IS_ERR_OR_NULL(check_root)) {
2064     - struct extent_buffer *eb;
2065     -
2066     - eb = btrfs_root_node(check_root);
2067     - /* if leaf is the root, then it's fine */
2068     - if (leaf != eb) {
2069     - CORRUPT("non-root leaf's nritems is 0",
2070     - leaf, check_root, 0);
2071     - free_extent_buffer(eb);
2072     - return -EIO;
2073     - }
2074     - free_extent_buffer(eb);
2075     - }
2076     - return 0;
2077     - }
2078     -
2079     - if (nritems == 0)
2080     - return 0;
2081     -
2082     - /* Check the 0 item */
2083     - if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
2084     - BTRFS_LEAF_DATA_SIZE(root)) {
2085     - CORRUPT("invalid item offset size pair", leaf, root, 0);
2086     - return -EIO;
2087     - }
2088     -
2089     - /*
2090     - * Check to make sure each items keys are in the correct order and their
2091     - * offsets make sense. We only have to loop through nritems-1 because
2092     - * we check the current slot against the next slot, which verifies the
2093     - * next slot's offset+size makes sense and that the current's slot
2094     - * offset is correct.
2095     - */
2096     - for (slot = 0; slot < nritems - 1; slot++) {
2097     - btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
2098     - btrfs_item_key_to_cpu(leaf, &key, slot + 1);
2099     -
2100     - /* Make sure the keys are in the right order */
2101     - if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
2102     - CORRUPT("bad key order", leaf, root, slot);
2103     - return -EIO;
2104     - }
2105     -
2106     - /*
2107     - * Make sure the offset and ends are right, remember that the
2108     - * item data starts at the end of the leaf and grows towards the
2109     - * front.
2110     - */
2111     - if (btrfs_item_offset_nr(leaf, slot) !=
2112     - btrfs_item_end_nr(leaf, slot + 1)) {
2113     - CORRUPT("slot offset bad", leaf, root, slot);
2114     - return -EIO;
2115     - }
2116     -
2117     - /*
2118     - * Check to make sure that we don't point outside of the leaf,
2119     - * just in case all the items are consistent to each other, but
2120     - * all point outside of the leaf.
2121     - */
2122     - if (btrfs_item_end_nr(leaf, slot) >
2123     - BTRFS_LEAF_DATA_SIZE(root)) {
2124     - CORRUPT("slot end outside of leaf", leaf, root, slot);
2125     - return -EIO;
2126     - }
2127     - }
2128     -
2129     - return 0;
2130     -}
2131     -
2132     -static int check_node(struct btrfs_root *root, struct extent_buffer *node)
2133     -{
2134     - unsigned long nr = btrfs_header_nritems(node);
2135     - struct btrfs_key key, next_key;
2136     - int slot;
2137     - u64 bytenr;
2138     - int ret = 0;
2139     -
2140     - if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root)) {
2141     - btrfs_crit(root->fs_info,
2142     - "corrupt node: block %llu root %llu nritems %lu",
2143     - node->start, root->objectid, nr);
2144     - return -EIO;
2145     - }
2146     -
2147     - for (slot = 0; slot < nr - 1; slot++) {
2148     - bytenr = btrfs_node_blockptr(node, slot);
2149     - btrfs_node_key_to_cpu(node, &key, slot);
2150     - btrfs_node_key_to_cpu(node, &next_key, slot + 1);
2151     -
2152     - if (!bytenr) {
2153     - CORRUPT("invalid item slot", node, root, slot);
2154     - ret = -EIO;
2155     - goto out;
2156     - }
2157     -
2158     - if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
2159     - CORRUPT("bad key order", node, root, slot);
2160     - ret = -EIO;
2161     - goto out;
2162     - }
2163     - }
2164     -out:
2165     - return ret;
2166     -}
2167     -
2168     static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
2169     u64 phy_offset, struct page *page,
2170     u64 start, u64 end, int mirror)
2171     @@ -750,12 +604,12 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
2172     * that we don't try and read the other copies of this block, just
2173     * return -EIO.
2174     */
2175     - if (found_level == 0 && check_leaf(root, eb)) {
2176     + if (found_level == 0 && btrfs_check_leaf_full(root, eb)) {
2177     set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
2178     ret = -EIO;
2179     }
2180    
2181     - if (found_level > 0 && check_node(root, eb))
2182     + if (found_level > 0 && btrfs_check_node(root, eb))
2183     ret = -EIO;
2184    
2185     if (!ret)
2186     @@ -4086,7 +3940,13 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
2187     buf->len,
2188     root->fs_info->dirty_metadata_batch);
2189     #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2190     - if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
2191     + /*
2192     + * Since btrfs_mark_buffer_dirty() can be called with item pointer set
2193     + * but item data not updated.
2194     + * So here we should only check item pointers, not item data.
2195     + */
2196     + if (btrfs_header_level(buf) == 0 &&
2197     + btrfs_check_leaf_relaxed(root, buf)) {
2198     btrfs_print_leaf(root, buf);
2199     ASSERT(0);
2200     }
2201     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2202     index a775307f3b6b..7938c48c72ff 100644
2203     --- a/fs/btrfs/extent-tree.c
2204     +++ b/fs/btrfs/extent-tree.c
2205     @@ -9896,6 +9896,8 @@ static int find_first_block_group(struct btrfs_root *root,
2206     int ret = 0;
2207     struct btrfs_key found_key;
2208     struct extent_buffer *leaf;
2209     + struct btrfs_block_group_item bg;
2210     + u64 flags;
2211     int slot;
2212    
2213     ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
2214     @@ -9930,8 +9932,32 @@ static int find_first_block_group(struct btrfs_root *root,
2215     "logical %llu len %llu found bg but no related chunk",
2216     found_key.objectid, found_key.offset);
2217     ret = -ENOENT;
2218     + } else if (em->start != found_key.objectid ||
2219     + em->len != found_key.offset) {
2220     + btrfs_err(root->fs_info,
2221     + "block group %llu len %llu mismatch with chunk %llu len %llu",
2222     + found_key.objectid, found_key.offset,
2223     + em->start, em->len);
2224     + ret = -EUCLEAN;
2225     } else {
2226     - ret = 0;
2227     + read_extent_buffer(leaf, &bg,
2228     + btrfs_item_ptr_offset(leaf, slot),
2229     + sizeof(bg));
2230     + flags = btrfs_block_group_flags(&bg) &
2231     + BTRFS_BLOCK_GROUP_TYPE_MASK;
2232     +
2233     + if (flags != (em->map_lookup->type &
2234     + BTRFS_BLOCK_GROUP_TYPE_MASK)) {
2235     + btrfs_err(root->fs_info,
2236     +"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
2237     + found_key.objectid,
2238     + found_key.offset, flags,
2239     + (BTRFS_BLOCK_GROUP_TYPE_MASK &
2240     + em->map_lookup->type));
2241     + ret = -EUCLEAN;
2242     + } else {
2243     + ret = 0;
2244     + }
2245     }
2246     free_extent_map(em);
2247     goto out;
2248     @@ -10159,6 +10185,62 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
2249     return cache;
2250     }
2251    
2252     +
2253     +/*
2254     + * Iterate all chunks and verify that each of them has the corresponding block
2255     + * group
2256     + */
2257     +static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
2258     +{
2259     + struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
2260     + struct extent_map *em;
2261     + struct btrfs_block_group_cache *bg;
2262     + u64 start = 0;
2263     + int ret = 0;
2264     +
2265     + while (1) {
2266     + read_lock(&map_tree->map_tree.lock);
2267     + /*
2268     + * lookup_extent_mapping will return the first extent map
2269     + * intersecting the range, so setting @len to 1 is enough to
2270     + * get the first chunk.
2271     + */
2272     + em = lookup_extent_mapping(&map_tree->map_tree, start, 1);
2273     + read_unlock(&map_tree->map_tree.lock);
2274     + if (!em)
2275     + break;
2276     +
2277     + bg = btrfs_lookup_block_group(fs_info, em->start);
2278     + if (!bg) {
2279     + btrfs_err(fs_info,
2280     + "chunk start=%llu len=%llu doesn't have corresponding block group",
2281     + em->start, em->len);
2282     + ret = -EUCLEAN;
2283     + free_extent_map(em);
2284     + break;
2285     + }
2286     + if (bg->key.objectid != em->start ||
2287     + bg->key.offset != em->len ||
2288     + (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
2289     + (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
2290     + btrfs_err(fs_info,
2291     +"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
2292     + em->start, em->len,
2293     + em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
2294     + bg->key.objectid, bg->key.offset,
2295     + bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
2296     + ret = -EUCLEAN;
2297     + free_extent_map(em);
2298     + btrfs_put_block_group(bg);
2299     + break;
2300     + }
2301     + start = em->start + em->len;
2302     + free_extent_map(em);
2303     + btrfs_put_block_group(bg);
2304     + }
2305     + return ret;
2306     +}
2307     +
2308     int btrfs_read_block_groups(struct btrfs_root *root)
2309     {
2310     struct btrfs_path *path;
2311     @@ -10343,7 +10425,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
2312     }
2313    
2314     init_global_block_rsv(info);
2315     - ret = 0;
2316     + ret = check_chunk_block_group_mappings(info);
2317     error:
2318     btrfs_free_path(path);
2319     return ret;
2320     diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
2321     index 5feaef9bcbda..793d4d571d8d 100644
2322     --- a/fs/btrfs/extent_io.c
2323     +++ b/fs/btrfs/extent_io.c
2324     @@ -5442,9 +5442,8 @@ unlock_exit:
2325     return ret;
2326     }
2327    
2328     -void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2329     - unsigned long start,
2330     - unsigned long len)
2331     +void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
2332     + unsigned long start, unsigned long len)
2333     {
2334     size_t cur;
2335     size_t offset;
2336     @@ -5473,9 +5472,9 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2337     }
2338     }
2339    
2340     -int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
2341     - unsigned long start,
2342     - unsigned long len)
2343     +int read_extent_buffer_to_user(const struct extent_buffer *eb,
2344     + void __user *dstv,
2345     + unsigned long start, unsigned long len)
2346     {
2347     size_t cur;
2348     size_t offset;
2349     @@ -5515,10 +5514,10 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
2350     * return 1 if the item spans two pages.
2351     * return -EINVAL otherwise.
2352     */
2353     -int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2354     - unsigned long min_len, char **map,
2355     - unsigned long *map_start,
2356     - unsigned long *map_len)
2357     +int map_private_extent_buffer(const struct extent_buffer *eb,
2358     + unsigned long start, unsigned long min_len,
2359     + char **map, unsigned long *map_start,
2360     + unsigned long *map_len)
2361     {
2362     size_t offset = start & (PAGE_SIZE - 1);
2363     char *kaddr;
2364     @@ -5552,9 +5551,8 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2365     return 0;
2366     }
2367    
2368     -int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2369     - unsigned long start,
2370     - unsigned long len)
2371     +int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
2372     + unsigned long start, unsigned long len)
2373     {
2374     size_t cur;
2375     size_t offset;
2376     diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
2377     index ab31d145227e..9ecdc9584df7 100644
2378     --- a/fs/btrfs/extent_io.h
2379     +++ b/fs/btrfs/extent_io.h
2380     @@ -396,14 +396,13 @@ static inline void extent_buffer_get(struct extent_buffer *eb)
2381     atomic_inc(&eb->refs);
2382     }
2383    
2384     -int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2385     - unsigned long start,
2386     - unsigned long len);
2387     -void read_extent_buffer(struct extent_buffer *eb, void *dst,
2388     +int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
2389     + unsigned long start, unsigned long len);
2390     +void read_extent_buffer(const struct extent_buffer *eb, void *dst,
2391     unsigned long start,
2392     unsigned long len);
2393     -int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dst,
2394     - unsigned long start,
2395     +int read_extent_buffer_to_user(const struct extent_buffer *eb,
2396     + void __user *dst, unsigned long start,
2397     unsigned long len);
2398     void write_extent_buffer(struct extent_buffer *eb, const void *src,
2399     unsigned long start, unsigned long len);
2400     @@ -428,10 +427,10 @@ void set_extent_buffer_uptodate(struct extent_buffer *eb);
2401     void clear_extent_buffer_uptodate(struct extent_buffer *eb);
2402     int extent_buffer_uptodate(struct extent_buffer *eb);
2403     int extent_buffer_under_io(struct extent_buffer *eb);
2404     -int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
2405     - unsigned long min_len, char **map,
2406     - unsigned long *map_start,
2407     - unsigned long *map_len);
2408     +int map_private_extent_buffer(const struct extent_buffer *eb,
2409     + unsigned long offset, unsigned long min_len,
2410     + char **map, unsigned long *map_start,
2411     + unsigned long *map_len);
2412     void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
2413     void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
2414     void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
2415     diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
2416     index 5ca0dbb9074d..69a3c11af9d4 100644
2417     --- a/fs/btrfs/free-space-cache.c
2418     +++ b/fs/btrfs/free-space-cache.c
2419     @@ -2464,6 +2464,7 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
2420     struct rb_node *n;
2421     int count = 0;
2422    
2423     + spin_lock(&ctl->tree_lock);
2424     for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
2425     info = rb_entry(n, struct btrfs_free_space, offset_index);
2426     if (info->bytes >= bytes && !block_group->ro)
2427     @@ -2473,6 +2474,7 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
2428     info->offset, info->bytes,
2429     (info->bitmap) ? "yes" : "no");
2430     }
2431     + spin_unlock(&ctl->tree_lock);
2432     btrfs_info(block_group->fs_info, "block group has cluster?: %s",
2433     list_empty(&block_group->cluster_list) ? "no" : "yes");
2434     btrfs_info(block_group->fs_info,
2435     diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
2436     index 875c757e73e2..5e2b92d83617 100644
2437     --- a/fs/btrfs/struct-funcs.c
2438     +++ b/fs/btrfs/struct-funcs.c
2439     @@ -50,8 +50,8 @@ static inline void put_unaligned_le8(u8 val, void *p)
2440     */
2441    
2442     #define DEFINE_BTRFS_SETGET_BITS(bits) \
2443     -u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
2444     - unsigned long off, \
2445     +u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \
2446     + const void *ptr, unsigned long off, \
2447     struct btrfs_map_token *token) \
2448     { \
2449     unsigned long part_offset = (unsigned long)ptr; \
2450     @@ -90,7 +90,8 @@ u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
2451     return res; \
2452     } \
2453     void btrfs_set_token_##bits(struct extent_buffer *eb, \
2454     - void *ptr, unsigned long off, u##bits val, \
2455     + const void *ptr, unsigned long off, \
2456     + u##bits val, \
2457     struct btrfs_map_token *token) \
2458     { \
2459     unsigned long part_offset = (unsigned long)ptr; \
2460     @@ -133,7 +134,7 @@ DEFINE_BTRFS_SETGET_BITS(16)
2461     DEFINE_BTRFS_SETGET_BITS(32)
2462     DEFINE_BTRFS_SETGET_BITS(64)
2463    
2464     -void btrfs_node_key(struct extent_buffer *eb,
2465     +void btrfs_node_key(const struct extent_buffer *eb,
2466     struct btrfs_disk_key *disk_key, int nr)
2467     {
2468     unsigned long ptr = btrfs_node_key_ptr_offset(nr);
2469     diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
2470     new file mode 100644
2471     index 000000000000..7b69ba78e600
2472     --- /dev/null
2473     +++ b/fs/btrfs/tree-checker.c
2474     @@ -0,0 +1,649 @@
2475     +/*
2476     + * Copyright (C) Qu Wenruo 2017. All rights reserved.
2477     + *
2478     + * This program is free software; you can redistribute it and/or
2479     + * modify it under the terms of the GNU General Public
2480     + * License v2 as published by the Free Software Foundation.
2481     + *
2482     + * This program is distributed in the hope that it will be useful,
2483     + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2484     + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
2485     + * General Public License for more details.
2486     + *
2487     + * You should have received a copy of the GNU General Public
2488     + * License along with this program.
2489     + */
2490     +
2491     +/*
2492     + * The module is used to catch unexpected/corrupted tree block data.
2493     + * Such behavior can be caused either by a fuzzed image or bugs.
2494     + *
2495     + * The objective is to do leaf/node validation checks when tree block is read
2496     + * from disk, and check *every* possible member, so other code won't
2497     + * need to checking them again.
2498     + *
2499     + * Due to the potential and unwanted damage, every checker needs to be
2500     + * carefully reviewed otherwise so it does not prevent mount of valid images.
2501     + */
2502     +
2503     +#include "ctree.h"
2504     +#include "tree-checker.h"
2505     +#include "disk-io.h"
2506     +#include "compression.h"
2507     +#include "hash.h"
2508     +#include "volumes.h"
2509     +
2510     +#define CORRUPT(reason, eb, root, slot) \
2511     + btrfs_crit(root->fs_info, \
2512     + "corrupt %s, %s: block=%llu, root=%llu, slot=%d", \
2513     + btrfs_header_level(eb) == 0 ? "leaf" : "node", \
2514     + reason, btrfs_header_bytenr(eb), root->objectid, slot)
2515     +
2516     +/*
2517     + * Error message should follow the following format:
2518     + * corrupt <type>: <identifier>, <reason>[, <bad_value>]
2519     + *
2520     + * @type: leaf or node
2521     + * @identifier: the necessary info to locate the leaf/node.
2522     + * It's recommened to decode key.objecitd/offset if it's
2523     + * meaningful.
2524     + * @reason: describe the error
2525     + * @bad_value: optional, it's recommened to output bad value and its
2526     + * expected value (range).
2527     + *
2528     + * Since comma is used to separate the components, only space is allowed
2529     + * inside each component.
2530     + */
2531     +
2532     +/*
2533     + * Append generic "corrupt leaf/node root=%llu block=%llu slot=%d: " to @fmt.
2534     + * Allows callers to customize the output.
2535     + */
2536     +__printf(4, 5)
2537     +static void generic_err(const struct btrfs_root *root,
2538     + const struct extent_buffer *eb, int slot,
2539     + const char *fmt, ...)
2540     +{
2541     + struct va_format vaf;
2542     + va_list args;
2543     +
2544     + va_start(args, fmt);
2545     +
2546     + vaf.fmt = fmt;
2547     + vaf.va = &args;
2548     +
2549     + btrfs_crit(root->fs_info,
2550     + "corrupt %s: root=%llu block=%llu slot=%d, %pV",
2551     + btrfs_header_level(eb) == 0 ? "leaf" : "node",
2552     + root->objectid, btrfs_header_bytenr(eb), slot, &vaf);
2553     + va_end(args);
2554     +}
2555     +
2556     +static int check_extent_data_item(struct btrfs_root *root,
2557     + struct extent_buffer *leaf,
2558     + struct btrfs_key *key, int slot)
2559     +{
2560     + struct btrfs_file_extent_item *fi;
2561     + u32 sectorsize = root->sectorsize;
2562     + u32 item_size = btrfs_item_size_nr(leaf, slot);
2563     +
2564     + if (!IS_ALIGNED(key->offset, sectorsize)) {
2565     + CORRUPT("unaligned key offset for file extent",
2566     + leaf, root, slot);
2567     + return -EUCLEAN;
2568     + }
2569     +
2570     + fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2571     +
2572     + if (btrfs_file_extent_type(leaf, fi) > BTRFS_FILE_EXTENT_TYPES) {
2573     + CORRUPT("invalid file extent type", leaf, root, slot);
2574     + return -EUCLEAN;
2575     + }
2576     +
2577     + /*
2578     + * Support for new compression/encrption must introduce incompat flag,
2579     + * and must be caught in open_ctree().
2580     + */
2581     + if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) {
2582     + CORRUPT("invalid file extent compression", leaf, root, slot);
2583     + return -EUCLEAN;
2584     + }
2585     + if (btrfs_file_extent_encryption(leaf, fi)) {
2586     + CORRUPT("invalid file extent encryption", leaf, root, slot);
2587     + return -EUCLEAN;
2588     + }
2589     + if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
2590     + /* Inline extent must have 0 as key offset */
2591     + if (key->offset) {
2592     + CORRUPT("inline extent has non-zero key offset",
2593     + leaf, root, slot);
2594     + return -EUCLEAN;
2595     + }
2596     +
2597     + /* Compressed inline extent has no on-disk size, skip it */
2598     + if (btrfs_file_extent_compression(leaf, fi) !=
2599     + BTRFS_COMPRESS_NONE)
2600     + return 0;
2601     +
2602     + /* Uncompressed inline extent size must match item size */
2603     + if (item_size != BTRFS_FILE_EXTENT_INLINE_DATA_START +
2604     + btrfs_file_extent_ram_bytes(leaf, fi)) {
2605     + CORRUPT("plaintext inline extent has invalid size",
2606     + leaf, root, slot);
2607     + return -EUCLEAN;
2608     + }
2609     + return 0;
2610     + }
2611     +
2612     + /* Regular or preallocated extent has fixed item size */
2613     + if (item_size != sizeof(*fi)) {
2614     + CORRUPT(
2615     + "regluar or preallocated extent data item size is invalid",
2616     + leaf, root, slot);
2617     + return -EUCLEAN;
2618     + }
2619     + if (!IS_ALIGNED(btrfs_file_extent_ram_bytes(leaf, fi), sectorsize) ||
2620     + !IS_ALIGNED(btrfs_file_extent_disk_bytenr(leaf, fi), sectorsize) ||
2621     + !IS_ALIGNED(btrfs_file_extent_disk_num_bytes(leaf, fi), sectorsize) ||
2622     + !IS_ALIGNED(btrfs_file_extent_offset(leaf, fi), sectorsize) ||
2623     + !IS_ALIGNED(btrfs_file_extent_num_bytes(leaf, fi), sectorsize)) {
2624     + CORRUPT(
2625     + "regular or preallocated extent data item has unaligned value",
2626     + leaf, root, slot);
2627     + return -EUCLEAN;
2628     + }
2629     +
2630     + return 0;
2631     +}
2632     +
2633     +static int check_csum_item(struct btrfs_root *root, struct extent_buffer *leaf,
2634     + struct btrfs_key *key, int slot)
2635     +{
2636     + u32 sectorsize = root->sectorsize;
2637     + u32 csumsize = btrfs_super_csum_size(root->fs_info->super_copy);
2638     +
2639     + if (key->objectid != BTRFS_EXTENT_CSUM_OBJECTID) {
2640     + CORRUPT("invalid objectid for csum item", leaf, root, slot);
2641     + return -EUCLEAN;
2642     + }
2643     + if (!IS_ALIGNED(key->offset, sectorsize)) {
2644     + CORRUPT("unaligned key offset for csum item", leaf, root, slot);
2645     + return -EUCLEAN;
2646     + }
2647     + if (!IS_ALIGNED(btrfs_item_size_nr(leaf, slot), csumsize)) {
2648     + CORRUPT("unaligned csum item size", leaf, root, slot);
2649     + return -EUCLEAN;
2650     + }
2651     + return 0;
2652     +}
2653     +
2654     +/*
2655     + * Customized reported for dir_item, only important new info is key->objectid,
2656     + * which represents inode number
2657     + */
2658     +__printf(4, 5)
2659     +static void dir_item_err(const struct btrfs_root *root,
2660     + const struct extent_buffer *eb, int slot,
2661     + const char *fmt, ...)
2662     +{
2663     + struct btrfs_key key;
2664     + struct va_format vaf;
2665     + va_list args;
2666     +
2667     + btrfs_item_key_to_cpu(eb, &key, slot);
2668     + va_start(args, fmt);
2669     +
2670     + vaf.fmt = fmt;
2671     + vaf.va = &args;
2672     +
2673     + btrfs_crit(root->fs_info,
2674     + "corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
2675     + btrfs_header_level(eb) == 0 ? "leaf" : "node", root->objectid,
2676     + btrfs_header_bytenr(eb), slot, key.objectid, &vaf);
2677     + va_end(args);
2678     +}
2679     +
2680     +static int check_dir_item(struct btrfs_root *root,
2681     + struct extent_buffer *leaf,
2682     + struct btrfs_key *key, int slot)
2683     +{
2684     + struct btrfs_dir_item *di;
2685     + u32 item_size = btrfs_item_size_nr(leaf, slot);
2686     + u32 cur = 0;
2687     +
2688     + di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
2689     + while (cur < item_size) {
2690     + u32 name_len;
2691     + u32 data_len;
2692     + u32 max_name_len;
2693     + u32 total_size;
2694     + u32 name_hash;
2695     + u8 dir_type;
2696     +
2697     + /* header itself should not cross item boundary */
2698     + if (cur + sizeof(*di) > item_size) {
2699     + dir_item_err(root, leaf, slot,
2700     + "dir item header crosses item boundary, have %zu boundary %u",
2701     + cur + sizeof(*di), item_size);
2702     + return -EUCLEAN;
2703     + }
2704     +
2705     + /* dir type check */
2706     + dir_type = btrfs_dir_type(leaf, di);
2707     + if (dir_type >= BTRFS_FT_MAX) {
2708     + dir_item_err(root, leaf, slot,
2709     + "invalid dir item type, have %u expect [0, %u)",
2710     + dir_type, BTRFS_FT_MAX);
2711     + return -EUCLEAN;
2712     + }
2713     +
2714     + if (key->type == BTRFS_XATTR_ITEM_KEY &&
2715     + dir_type != BTRFS_FT_XATTR) {
2716     + dir_item_err(root, leaf, slot,
2717     + "invalid dir item type for XATTR key, have %u expect %u",
2718     + dir_type, BTRFS_FT_XATTR);
2719     + return -EUCLEAN;
2720     + }
2721     + if (dir_type == BTRFS_FT_XATTR &&
2722     + key->type != BTRFS_XATTR_ITEM_KEY) {
2723     + dir_item_err(root, leaf, slot,
2724     + "xattr dir type found for non-XATTR key");
2725     + return -EUCLEAN;
2726     + }
2727     + if (dir_type == BTRFS_FT_XATTR)
2728     + max_name_len = XATTR_NAME_MAX;
2729     + else
2730     + max_name_len = BTRFS_NAME_LEN;
2731     +
2732     + /* Name/data length check */
2733     + name_len = btrfs_dir_name_len(leaf, di);
2734     + data_len = btrfs_dir_data_len(leaf, di);
2735     + if (name_len > max_name_len) {
2736     + dir_item_err(root, leaf, slot,
2737     + "dir item name len too long, have %u max %u",
2738     + name_len, max_name_len);
2739     + return -EUCLEAN;
2740     + }
2741     + if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root)) {
2742     + dir_item_err(root, leaf, slot,
2743     + "dir item name and data len too long, have %u max %u",
2744     + name_len + data_len,
2745     + BTRFS_MAX_XATTR_SIZE(root));
2746     + return -EUCLEAN;
2747     + }
2748     +
2749     + if (data_len && dir_type != BTRFS_FT_XATTR) {
2750     + dir_item_err(root, leaf, slot,
2751     + "dir item with invalid data len, have %u expect 0",
2752     + data_len);
2753     + return -EUCLEAN;
2754     + }
2755     +
2756     + total_size = sizeof(*di) + name_len + data_len;
2757     +
2758     + /* header and name/data should not cross item boundary */
2759     + if (cur + total_size > item_size) {
2760     + dir_item_err(root, leaf, slot,
2761     + "dir item data crosses item boundary, have %u boundary %u",
2762     + cur + total_size, item_size);
2763     + return -EUCLEAN;
2764     + }
2765     +
2766     + /*
2767     + * Special check for XATTR/DIR_ITEM, as key->offset is name
2768     + * hash, should match its name
2769     + */
2770     + if (key->type == BTRFS_DIR_ITEM_KEY ||
2771     + key->type == BTRFS_XATTR_ITEM_KEY) {
2772     + char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)];
2773     +
2774     + read_extent_buffer(leaf, namebuf,
2775     + (unsigned long)(di + 1), name_len);
2776     + name_hash = btrfs_name_hash(namebuf, name_len);
2777     + if (key->offset != name_hash) {
2778     + dir_item_err(root, leaf, slot,
2779     + "name hash mismatch with key, have 0x%016x expect 0x%016llx",
2780     + name_hash, key->offset);
2781     + return -EUCLEAN;
2782     + }
2783     + }
2784     + cur += total_size;
2785     + di = (struct btrfs_dir_item *)((void *)di + total_size);
2786     + }
2787     + return 0;
2788     +}
2789     +
2790     +__printf(4, 5)
2791     +__cold
2792     +static void block_group_err(const struct btrfs_fs_info *fs_info,
2793     + const struct extent_buffer *eb, int slot,
2794     + const char *fmt, ...)
2795     +{
2796     + struct btrfs_key key;
2797     + struct va_format vaf;
2798     + va_list args;
2799     +
2800     + btrfs_item_key_to_cpu(eb, &key, slot);
2801     + va_start(args, fmt);
2802     +
2803     + vaf.fmt = fmt;
2804     + vaf.va = &args;
2805     +
2806     + btrfs_crit(fs_info,
2807     + "corrupt %s: root=%llu block=%llu slot=%d bg_start=%llu bg_len=%llu, %pV",
2808     + btrfs_header_level(eb) == 0 ? "leaf" : "node",
2809     + btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
2810     + key.objectid, key.offset, &vaf);
2811     + va_end(args);
2812     +}
2813     +
2814     +static int check_block_group_item(struct btrfs_fs_info *fs_info,
2815     + struct extent_buffer *leaf,
2816     + struct btrfs_key *key, int slot)
2817     +{
2818     + struct btrfs_block_group_item bgi;
2819     + u32 item_size = btrfs_item_size_nr(leaf, slot);
2820     + u64 flags;
2821     + u64 type;
2822     +
2823     + /*
2824     + * Here we don't really care about alignment since extent allocator can
2825     + * handle it. We care more about the size, as if one block group is
2826     + * larger than maximum size, it's must be some obvious corruption.
2827     + */
2828     + if (key->offset > BTRFS_MAX_DATA_CHUNK_SIZE || key->offset == 0) {
2829     + block_group_err(fs_info, leaf, slot,
2830     + "invalid block group size, have %llu expect (0, %llu]",
2831     + key->offset, BTRFS_MAX_DATA_CHUNK_SIZE);
2832     + return -EUCLEAN;
2833     + }
2834     +
2835     + if (item_size != sizeof(bgi)) {
2836     + block_group_err(fs_info, leaf, slot,
2837     + "invalid item size, have %u expect %zu",
2838     + item_size, sizeof(bgi));
2839     + return -EUCLEAN;
2840     + }
2841     +
2842     + read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
2843     + sizeof(bgi));
2844     + if (btrfs_block_group_chunk_objectid(&bgi) !=
2845     + BTRFS_FIRST_CHUNK_TREE_OBJECTID) {
2846     + block_group_err(fs_info, leaf, slot,
2847     + "invalid block group chunk objectid, have %llu expect %llu",
2848     + btrfs_block_group_chunk_objectid(&bgi),
2849     + BTRFS_FIRST_CHUNK_TREE_OBJECTID);
2850     + return -EUCLEAN;
2851     + }
2852     +
2853     + if (btrfs_block_group_used(&bgi) > key->offset) {
2854     + block_group_err(fs_info, leaf, slot,
2855     + "invalid block group used, have %llu expect [0, %llu)",
2856     + btrfs_block_group_used(&bgi), key->offset);
2857     + return -EUCLEAN;
2858     + }
2859     +
2860     + flags = btrfs_block_group_flags(&bgi);
2861     + if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) {
2862     + block_group_err(fs_info, leaf, slot,
2863     +"invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
2864     + flags & BTRFS_BLOCK_GROUP_PROFILE_MASK,
2865     + hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK));
2866     + return -EUCLEAN;
2867     + }
2868     +
2869     + type = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
2870     + if (type != BTRFS_BLOCK_GROUP_DATA &&
2871     + type != BTRFS_BLOCK_GROUP_METADATA &&
2872     + type != BTRFS_BLOCK_GROUP_SYSTEM &&
2873     + type != (BTRFS_BLOCK_GROUP_METADATA |
2874     + BTRFS_BLOCK_GROUP_DATA)) {
2875     + block_group_err(fs_info, leaf, slot,
2876     +"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
2877     + type, hweight64(type),
2878     + BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
2879     + BTRFS_BLOCK_GROUP_SYSTEM,
2880     + BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA);
2881     + return -EUCLEAN;
2882     + }
2883     + return 0;
2884     +}
2885     +
2886     +/*
2887     + * Common point to switch the item-specific validation.
2888     + */
2889     +static int check_leaf_item(struct btrfs_root *root,
2890     + struct extent_buffer *leaf,
2891     + struct btrfs_key *key, int slot)
2892     +{
2893     + int ret = 0;
2894     +
2895     + switch (key->type) {
2896     + case BTRFS_EXTENT_DATA_KEY:
2897     + ret = check_extent_data_item(root, leaf, key, slot);
2898     + break;
2899     + case BTRFS_EXTENT_CSUM_KEY:
2900     + ret = check_csum_item(root, leaf, key, slot);
2901     + break;
2902     + case BTRFS_DIR_ITEM_KEY:
2903     + case BTRFS_DIR_INDEX_KEY:
2904     + case BTRFS_XATTR_ITEM_KEY:
2905     + ret = check_dir_item(root, leaf, key, slot);
2906     + break;
2907     + case BTRFS_BLOCK_GROUP_ITEM_KEY:
2908     + ret = check_block_group_item(root->fs_info, leaf, key, slot);
2909     + break;
2910     + }
2911     + return ret;
2912     +}
2913     +
2914     +static int check_leaf(struct btrfs_root *root, struct extent_buffer *leaf,
2915     + bool check_item_data)
2916     +{
2917     + struct btrfs_fs_info *fs_info = root->fs_info;
2918     + /* No valid key type is 0, so all key should be larger than this key */
2919     + struct btrfs_key prev_key = {0, 0, 0};
2920     + struct btrfs_key key;
2921     + u32 nritems = btrfs_header_nritems(leaf);
2922     + int slot;
2923     +
2924     + if (btrfs_header_level(leaf) != 0) {
2925     + generic_err(root, leaf, 0,
2926     + "invalid level for leaf, have %d expect 0",
2927     + btrfs_header_level(leaf));
2928     + return -EUCLEAN;
2929     + }
2930     +
2931     + /*
2932     + * Extent buffers from a relocation tree have a owner field that
2933     + * corresponds to the subvolume tree they are based on. So just from an
2934     + * extent buffer alone we can not find out what is the id of the
2935     + * corresponding subvolume tree, so we can not figure out if the extent
2936     + * buffer corresponds to the root of the relocation tree or not. So
2937     + * skip this check for relocation trees.
2938     + */
2939     + if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
2940     + u64 owner = btrfs_header_owner(leaf);
2941     + struct btrfs_root *check_root;
2942     +
2943     + /* These trees must never be empty */
2944     + if (owner == BTRFS_ROOT_TREE_OBJECTID ||
2945     + owner == BTRFS_CHUNK_TREE_OBJECTID ||
2946     + owner == BTRFS_EXTENT_TREE_OBJECTID ||
2947     + owner == BTRFS_DEV_TREE_OBJECTID ||
2948     + owner == BTRFS_FS_TREE_OBJECTID ||
2949     + owner == BTRFS_DATA_RELOC_TREE_OBJECTID) {
2950     + generic_err(root, leaf, 0,
2951     + "invalid root, root %llu must never be empty",
2952     + owner);
2953     + return -EUCLEAN;
2954     + }
2955     + key.objectid = owner;
2956     + key.type = BTRFS_ROOT_ITEM_KEY;
2957     + key.offset = (u64)-1;
2958     +
2959     + check_root = btrfs_get_fs_root(fs_info, &key, false);
2960     + /*
2961     + * The only reason we also check NULL here is that during
2962     + * open_ctree() some roots has not yet been set up.
2963     + */
2964     + if (!IS_ERR_OR_NULL(check_root)) {
2965     + struct extent_buffer *eb;
2966     +
2967     + eb = btrfs_root_node(check_root);
2968     + /* if leaf is the root, then it's fine */
2969     + if (leaf != eb) {
2970     + CORRUPT("non-root leaf's nritems is 0",
2971     + leaf, check_root, 0);
2972     + free_extent_buffer(eb);
2973     + return -EUCLEAN;
2974     + }
2975     + free_extent_buffer(eb);
2976     + }
2977     + return 0;
2978     + }
2979     +
2980     + if (nritems == 0)
2981     + return 0;
2982     +
2983     + /*
2984     + * Check the following things to make sure this is a good leaf, and
2985     + * leaf users won't need to bother with similar sanity checks:
2986     + *
2987     + * 1) key ordering
2988     + * 2) item offset and size
2989     + * No overlap, no hole, all inside the leaf.
2990     + * 3) item content
2991     + * If possible, do comprehensive sanity check.
2992     + * NOTE: All checks must only rely on the item data itself.
2993     + */
2994     + for (slot = 0; slot < nritems; slot++) {
2995     + u32 item_end_expected;
2996     + int ret;
2997     +
2998     + btrfs_item_key_to_cpu(leaf, &key, slot);
2999     +
3000     + /* Make sure the keys are in the right order */
3001     + if (btrfs_comp_cpu_keys(&prev_key, &key) >= 0) {
3002     + CORRUPT("bad key order", leaf, root, slot);
3003     + return -EUCLEAN;
3004     + }
3005     +
3006     + /*
3007     + * Make sure the offset and ends are right, remember that the
3008     + * item data starts at the end of the leaf and grows towards the
3009     + * front.
3010     + */
3011     + if (slot == 0)
3012     + item_end_expected = BTRFS_LEAF_DATA_SIZE(root);
3013     + else
3014     + item_end_expected = btrfs_item_offset_nr(leaf,
3015     + slot - 1);
3016     + if (btrfs_item_end_nr(leaf, slot) != item_end_expected) {
3017     + CORRUPT("slot offset bad", leaf, root, slot);
3018     + return -EUCLEAN;
3019     + }
3020     +
3021     + /*
3022     + * Check to make sure that we don't point outside of the leaf,
3023     + * just in case all the items are consistent to each other, but
3024     + * all point outside of the leaf.
3025     + */
3026     + if (btrfs_item_end_nr(leaf, slot) >
3027     + BTRFS_LEAF_DATA_SIZE(root)) {
3028     + CORRUPT("slot end outside of leaf", leaf, root, slot);
3029     + return -EUCLEAN;
3030     + }
3031     +
3032     + /* Also check if the item pointer overlaps with btrfs item. */
3033     + if (btrfs_item_nr_offset(slot) + sizeof(struct btrfs_item) >
3034     + btrfs_item_ptr_offset(leaf, slot)) {
3035     + CORRUPT("slot overlap with its data", leaf, root, slot);
3036     + return -EUCLEAN;
3037     + }
3038     +
3039     + if (check_item_data) {
3040     + /*
3041     + * Check if the item size and content meet other
3042     + * criteria
3043     + */
3044     + ret = check_leaf_item(root, leaf, &key, slot);
3045     + if (ret < 0)
3046     + return ret;
3047     + }
3048     +
3049     + prev_key.objectid = key.objectid;
3050     + prev_key.type = key.type;
3051     + prev_key.offset = key.offset;
3052     + }
3053     +
3054     + return 0;
3055     +}
3056     +
3057     +int btrfs_check_leaf_full(struct btrfs_root *root, struct extent_buffer *leaf)
3058     +{
3059     + return check_leaf(root, leaf, true);
3060     +}
3061     +
3062     +int btrfs_check_leaf_relaxed(struct btrfs_root *root,
3063     + struct extent_buffer *leaf)
3064     +{
3065     + return check_leaf(root, leaf, false);
3066     +}
3067     +
3068     +int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node)
3069     +{
3070     + unsigned long nr = btrfs_header_nritems(node);
3071     + struct btrfs_key key, next_key;
3072     + int slot;
3073     + int level = btrfs_header_level(node);
3074     + u64 bytenr;
3075     + int ret = 0;
3076     +
3077     + if (level <= 0 || level >= BTRFS_MAX_LEVEL) {
3078     + generic_err(root, node, 0,
3079     + "invalid level for node, have %d expect [1, %d]",
3080     + level, BTRFS_MAX_LEVEL - 1);
3081     + return -EUCLEAN;
3082     + }
3083     + if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root)) {
3084     + btrfs_crit(root->fs_info,
3085     +"corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%u]",
3086     + root->objectid, node->start,
3087     + nr == 0 ? "small" : "large", nr,
3088     + BTRFS_NODEPTRS_PER_BLOCK(root));
3089     + return -EUCLEAN;
3090     + }
3091     +
3092     + for (slot = 0; slot < nr - 1; slot++) {
3093     + bytenr = btrfs_node_blockptr(node, slot);
3094     + btrfs_node_key_to_cpu(node, &key, slot);
3095     + btrfs_node_key_to_cpu(node, &next_key, slot + 1);
3096     +
3097     + if (!bytenr) {
3098     + generic_err(root, node, slot,
3099     + "invalid NULL node pointer");
3100     + ret = -EUCLEAN;
3101     + goto out;
3102     + }
3103     + if (!IS_ALIGNED(bytenr, root->sectorsize)) {
3104     + generic_err(root, node, slot,
3105     + "unaligned pointer, have %llu should be aligned to %u",
3106     + bytenr, root->sectorsize);
3107     + ret = -EUCLEAN;
3108     + goto out;
3109     + }
3110     +
3111     + if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
3112     + generic_err(root, node, slot,
3113     + "bad key order, current (%llu %u %llu) next (%llu %u %llu)",
3114     + key.objectid, key.type, key.offset,
3115     + next_key.objectid, next_key.type,
3116     + next_key.offset);
3117     + ret = -EUCLEAN;
3118     + goto out;
3119     + }
3120     + }
3121     +out:
3122     + return ret;
3123     +}
3124     diff --git a/fs/btrfs/tree-checker.h b/fs/btrfs/tree-checker.h
3125     new file mode 100644
3126     index 000000000000..3d53e8d6fda0
3127     --- /dev/null
3128     +++ b/fs/btrfs/tree-checker.h
3129     @@ -0,0 +1,38 @@
3130     +/*
3131     + * Copyright (C) Qu Wenruo 2017. All rights reserved.
3132     + *
3133     + * This program is free software; you can redistribute it and/or
3134     + * modify it under the terms of the GNU General Public
3135     + * License v2 as published by the Free Software Foundation.
3136     + *
3137     + * This program is distributed in the hope that it will be useful,
3138     + * but WITHOUT ANY WARRANTY; without even the implied warranty of
3139     + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
3140     + * General Public License for more details.
3141     + *
3142     + * You should have received a copy of the GNU General Public
3143     + * License along with this program.
3144     + */
3145     +
3146     +#ifndef __BTRFS_TREE_CHECKER__
3147     +#define __BTRFS_TREE_CHECKER__
3148     +
3149     +#include "ctree.h"
3150     +#include "extent_io.h"
3151     +
3152     +/*
3153     + * Comprehensive leaf checker.
3154     + * Will check not only the item pointers, but also every possible member
3155     + * in item data.
3156     + */
3157     +int btrfs_check_leaf_full(struct btrfs_root *root, struct extent_buffer *leaf);
3158     +
3159     +/*
3160     + * Less strict leaf checker.
3161     + * Will only check item pointers, not reading item data.
3162     + */
3163     +int btrfs_check_leaf_relaxed(struct btrfs_root *root,
3164     + struct extent_buffer *leaf);
3165     +int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node);
3166     +
3167     +#endif
3168     diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
3169     index 76017e1b3c0f..5aa2749eaf42 100644
3170     --- a/fs/btrfs/volumes.c
3171     +++ b/fs/btrfs/volumes.c
3172     @@ -4656,7 +4656,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3173    
3174     if (type & BTRFS_BLOCK_GROUP_DATA) {
3175     max_stripe_size = SZ_1G;
3176     - max_chunk_size = 10 * max_stripe_size;
3177     + max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
3178     if (!devs_max)
3179     devs_max = BTRFS_MAX_DEVS(info->chunk_root);
3180     } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3181     @@ -6370,6 +6370,8 @@ static int btrfs_check_chunk_valid(struct btrfs_root *root,
3182     u16 num_stripes;
3183     u16 sub_stripes;
3184     u64 type;
3185     + u64 features;
3186     + bool mixed = false;
3187    
3188     length = btrfs_chunk_length(leaf, chunk);
3189     stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
3190     @@ -6410,6 +6412,32 @@ static int btrfs_check_chunk_valid(struct btrfs_root *root,
3191     btrfs_chunk_type(leaf, chunk));
3192     return -EIO;
3193     }
3194     +
3195     + if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
3196     + btrfs_err(root->fs_info, "missing chunk type flag: 0x%llx", type);
3197     + return -EIO;
3198     + }
3199     +
3200     + if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3201     + (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
3202     + btrfs_err(root->fs_info,
3203     + "system chunk with data or metadata type: 0x%llx", type);
3204     + return -EIO;
3205     + }
3206     +
3207     + features = btrfs_super_incompat_flags(root->fs_info->super_copy);
3208     + if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3209     + mixed = true;
3210     +
3211     + if (!mixed) {
3212     + if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
3213     + (type & BTRFS_BLOCK_GROUP_DATA)) {
3214     + btrfs_err(root->fs_info,
3215     + "mixed chunk type in non-mixed mode: 0x%llx", type);
3216     + return -EIO;
3217     + }
3218     + }
3219     +
3220     if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
3221     (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
3222     (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
3223     diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
3224     index 09ed29c67848..9c09aa29d6bd 100644
3225     --- a/fs/btrfs/volumes.h
3226     +++ b/fs/btrfs/volumes.h
3227     @@ -24,6 +24,8 @@
3228     #include <linux/btrfs.h>
3229     #include "async-thread.h"
3230    
3231     +#define BTRFS_MAX_DATA_CHUNK_SIZE (10ULL * SZ_1G)
3232     +
3233     extern struct mutex uuid_mutex;
3234    
3235     #define BTRFS_STRIPE_LEN SZ_64K
3236     diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
3237     index 3d2639c30018..6cbd0d805c9d 100644
3238     --- a/fs/ceph/mds_client.c
3239     +++ b/fs/ceph/mds_client.c
3240     @@ -3983,14 +3983,24 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
3241     return auth;
3242     }
3243    
3244     +static int add_authorizer_challenge(struct ceph_connection *con,
3245     + void *challenge_buf, int challenge_buf_len)
3246     +{
3247     + struct ceph_mds_session *s = con->private;
3248     + struct ceph_mds_client *mdsc = s->s_mdsc;
3249     + struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3250     +
3251     + return ceph_auth_add_authorizer_challenge(ac, s->s_auth.authorizer,
3252     + challenge_buf, challenge_buf_len);
3253     +}
3254    
3255     -static int verify_authorizer_reply(struct ceph_connection *con, int len)
3256     +static int verify_authorizer_reply(struct ceph_connection *con)
3257     {
3258     struct ceph_mds_session *s = con->private;
3259     struct ceph_mds_client *mdsc = s->s_mdsc;
3260     struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3261    
3262     - return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer, len);
3263     + return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer);
3264     }
3265    
3266     static int invalidate_authorizer(struct ceph_connection *con)
3267     @@ -4046,6 +4056,7 @@ static const struct ceph_connection_operations mds_con_ops = {
3268     .put = con_put,
3269     .dispatch = dispatch,
3270     .get_authorizer = get_authorizer,
3271     + .add_authorizer_challenge = add_authorizer_challenge,
3272     .verify_authorizer_reply = verify_authorizer_reply,
3273     .invalidate_authorizer = invalidate_authorizer,
3274     .peer_reset = peer_reset,
3275     diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
3276     index aee2a066a446..0b061bbf1639 100644
3277     --- a/fs/f2fs/checkpoint.c
3278     +++ b/fs/f2fs/checkpoint.c
3279     @@ -69,6 +69,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
3280     .old_blkaddr = index,
3281     .new_blkaddr = index,
3282     .encrypted_page = NULL,
3283     + .is_meta = is_meta,
3284     };
3285    
3286     if (unlikely(!is_meta))
3287     @@ -85,8 +86,10 @@ repeat:
3288     fio.page = page;
3289    
3290     if (f2fs_submit_page_bio(&fio)) {
3291     - f2fs_put_page(page, 1);
3292     - goto repeat;
3293     + memset(page_address(page), 0, PAGE_SIZE);
3294     + f2fs_stop_checkpoint(sbi, false);
3295     + f2fs_bug_on(sbi, 1);
3296     + return page;
3297     }
3298    
3299     lock_page(page);
3300     @@ -117,7 +120,8 @@ struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
3301     return __get_meta_page(sbi, index, false);
3302     }
3303    
3304     -bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
3305     +bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3306     + block_t blkaddr, int type)
3307     {
3308     switch (type) {
3309     case META_NAT:
3310     @@ -137,8 +141,20 @@ bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
3311     return false;
3312     break;
3313     case META_POR:
3314     + case DATA_GENERIC:
3315     if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
3316     - blkaddr < MAIN_BLKADDR(sbi)))
3317     + blkaddr < MAIN_BLKADDR(sbi))) {
3318     + if (type == DATA_GENERIC) {
3319     + f2fs_msg(sbi->sb, KERN_WARNING,
3320     + "access invalid blkaddr:%u", blkaddr);
3321     + WARN_ON(1);
3322     + }
3323     + return false;
3324     + }
3325     + break;
3326     + case META_GENERIC:
3327     + if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
3328     + blkaddr >= MAIN_BLKADDR(sbi)))
3329     return false;
3330     break;
3331     default:
3332     @@ -162,6 +178,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
3333     .op = REQ_OP_READ,
3334     .op_flags = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : REQ_RAHEAD,
3335     .encrypted_page = NULL,
3336     + .is_meta = (type != META_POR),
3337     };
3338     struct blk_plug plug;
3339    
3340     @@ -171,7 +188,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
3341     blk_start_plug(&plug);
3342     for (; nrpages-- > 0; blkno++) {
3343    
3344     - if (!is_valid_blkaddr(sbi, blkno, type))
3345     + if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
3346     goto out;
3347    
3348     switch (type) {
3349     @@ -706,6 +723,14 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
3350     &cp_page_1, version);
3351     if (err)
3352     return NULL;
3353     +
3354     + if (le32_to_cpu(cp_block->cp_pack_total_block_count) >
3355     + sbi->blocks_per_seg) {
3356     + f2fs_msg(sbi->sb, KERN_WARNING,
3357     + "invalid cp_pack_total_block_count:%u",
3358     + le32_to_cpu(cp_block->cp_pack_total_block_count));
3359     + goto invalid_cp;
3360     + }
3361     pre_version = *version;
3362    
3363     cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
3364     @@ -769,15 +794,15 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
3365     cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
3366     memcpy(sbi->ckpt, cp_block, blk_size);
3367    
3368     - /* Sanity checking of checkpoint */
3369     - if (sanity_check_ckpt(sbi))
3370     - goto fail_no_cp;
3371     -
3372     if (cur_page == cp1)
3373     sbi->cur_cp_pack = 1;
3374     else
3375     sbi->cur_cp_pack = 2;
3376    
3377     + /* Sanity checking of checkpoint */
3378     + if (sanity_check_ckpt(sbi))
3379     + goto free_fail_no_cp;
3380     +
3381     if (cp_blks <= 1)
3382     goto done;
3383    
3384     @@ -799,6 +824,9 @@ done:
3385     f2fs_put_page(cp2, 1);
3386     return 0;
3387    
3388     +free_fail_no_cp:
3389     + f2fs_put_page(cp1, 1);
3390     + f2fs_put_page(cp2, 1);
3391     fail_no_cp:
3392     kfree(sbi->ckpt);
3393     return -EINVAL;
3394     diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
3395     index ae354ac67da1..9041805096e0 100644
3396     --- a/fs/f2fs/data.c
3397     +++ b/fs/f2fs/data.c
3398     @@ -240,6 +240,10 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
3399     struct page *page = fio->encrypted_page ?
3400     fio->encrypted_page : fio->page;
3401    
3402     + if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
3403     + __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
3404     + return -EFAULT;
3405     +
3406     trace_f2fs_submit_page_bio(page, fio);
3407     f2fs_trace_ios(fio, 0);
3408    
3409     @@ -266,9 +270,9 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
3410    
3411     io = is_read ? &sbi->read_io : &sbi->write_io[btype];
3412    
3413     - if (fio->old_blkaddr != NEW_ADDR)
3414     - verify_block_addr(sbi, fio->old_blkaddr);
3415     - verify_block_addr(sbi, fio->new_blkaddr);
3416     + if (__is_valid_data_blkaddr(fio->old_blkaddr))
3417     + verify_block_addr(fio, fio->old_blkaddr);
3418     + verify_block_addr(fio, fio->new_blkaddr);
3419    
3420     down_write(&io->io_rwsem);
3421    
3422     @@ -722,7 +726,13 @@ next_dnode:
3423     next_block:
3424     blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
3425    
3426     - if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
3427     + if (__is_valid_data_blkaddr(blkaddr) &&
3428     + !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
3429     + err = -EFAULT;
3430     + goto sync_out;
3431     + }
3432     +
3433     + if (!is_valid_data_blkaddr(sbi, blkaddr)) {
3434     if (create) {
3435     if (unlikely(f2fs_cp_error(sbi))) {
3436     err = -EIO;
3437     @@ -985,6 +995,9 @@ static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
3438     struct block_device *bdev = sbi->sb->s_bdev;
3439     struct bio *bio;
3440    
3441     + if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
3442     + return ERR_PTR(-EFAULT);
3443     +
3444     if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
3445     ctx = fscrypt_get_ctx(inode, GFP_NOFS);
3446     if (IS_ERR(ctx))
3447     @@ -1084,6 +1097,10 @@ got_it:
3448     SetPageUptodate(page);
3449     goto confused;
3450     }
3451     +
3452     + if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
3453     + DATA_GENERIC))
3454     + goto set_error_page;
3455     } else {
3456     zero_user_segment(page, 0, PAGE_SIZE);
3457     if (!PageUptodate(page))
3458     @@ -1212,11 +1229,17 @@ retry_encrypt:
3459    
3460     set_page_writeback(page);
3461    
3462     + if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
3463     + !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
3464     + DATA_GENERIC)) {
3465     + err = -EFAULT;
3466     + goto out_writepage;
3467     + }
3468     /*
3469     * If current allocation needs SSR,
3470     * it had better in-place writes for updated data.
3471     */
3472     - if (unlikely(fio->old_blkaddr != NEW_ADDR &&
3473     + if (unlikely(is_valid_data_blkaddr(fio->sbi, fio->old_blkaddr) &&
3474     !is_cold_data(page) &&
3475     !IS_ATOMIC_WRITTEN_PAGE(page) &&
3476     need_inplace_update(inode))) {
3477     diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
3478     index 88e111ab068b..9c380885b0fc 100644
3479     --- a/fs/f2fs/f2fs.h
3480     +++ b/fs/f2fs/f2fs.h
3481     @@ -145,7 +145,7 @@ struct cp_control {
3482     };
3483    
3484     /*
3485     - * For CP/NAT/SIT/SSA readahead
3486     + * indicate meta/data type
3487     */
3488     enum {
3489     META_CP,
3490     @@ -153,6 +153,8 @@ enum {
3491     META_SIT,
3492     META_SSA,
3493     META_POR,
3494     + DATA_GENERIC,
3495     + META_GENERIC,
3496     };
3497    
3498     /* for the list of ino */
3499     @@ -694,6 +696,7 @@ struct f2fs_io_info {
3500     block_t old_blkaddr; /* old block address before Cow */
3501     struct page *page; /* page to be written */
3502     struct page *encrypted_page; /* encrypted page */
3503     + bool is_meta; /* indicate borrow meta inode mapping or not */
3504     };
3505    
3506     #define is_read_io(rw) (rw == READ)
3507     @@ -1929,6 +1932,39 @@ static inline void *f2fs_kvzalloc(size_t size, gfp_t flags)
3508     (pgofs - ADDRS_PER_INODE(inode) + ADDRS_PER_BLOCK) / \
3509     ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode))
3510    
3511     +#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO(fio->type) == META && \
3512     + (!is_read_io(fio->op) || fio->is_meta))
3513     +
3514     +bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3515     + block_t blkaddr, int type);
3516     +void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
3517     +static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
3518     + block_t blkaddr, int type)
3519     +{
3520     + if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
3521     + f2fs_msg(sbi->sb, KERN_ERR,
3522     + "invalid blkaddr: %u, type: %d, run fsck to fix.",
3523     + blkaddr, type);
3524     + f2fs_bug_on(sbi, 1);
3525     + }
3526     +}
3527     +
3528     +static inline bool __is_valid_data_blkaddr(block_t blkaddr)
3529     +{
3530     + if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
3531     + return false;
3532     + return true;
3533     +}
3534     +
3535     +static inline bool is_valid_data_blkaddr(struct f2fs_sb_info *sbi,
3536     + block_t blkaddr)
3537     +{
3538     + if (!__is_valid_data_blkaddr(blkaddr))
3539     + return false;
3540     + verify_blkaddr(sbi, blkaddr, DATA_GENERIC);
3541     + return true;
3542     +}
3543     +
3544     /*
3545     * file.c
3546     */
3547     @@ -2114,7 +2150,8 @@ void f2fs_stop_checkpoint(struct f2fs_sb_info *, bool);
3548     struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
3549     struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
3550     struct page *get_tmp_page(struct f2fs_sb_info *, pgoff_t);
3551     -bool is_valid_blkaddr(struct f2fs_sb_info *, block_t, int);
3552     +bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3553     + block_t blkaddr, int type);
3554     int ra_meta_pages(struct f2fs_sb_info *, block_t, int, int, bool);
3555     void ra_meta_pages_cond(struct f2fs_sb_info *, pgoff_t);
3556     long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
3557     diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
3558     index 594e6e20d6dd..b768f495603e 100644
3559     --- a/fs/f2fs/file.c
3560     +++ b/fs/f2fs/file.c
3561     @@ -310,13 +310,13 @@ static pgoff_t __get_first_dirty_index(struct address_space *mapping,
3562     return pgofs;
3563     }
3564    
3565     -static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
3566     - int whence)
3567     +static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
3568     + pgoff_t dirty, pgoff_t pgofs, int whence)
3569     {
3570     switch (whence) {
3571     case SEEK_DATA:
3572     if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
3573     - (blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
3574     + is_valid_data_blkaddr(sbi, blkaddr))
3575     return true;
3576     break;
3577     case SEEK_HOLE:
3578     @@ -378,7 +378,15 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
3579     block_t blkaddr;
3580     blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
3581    
3582     - if (__found_offset(blkaddr, dirty, pgofs, whence)) {
3583     + if (__is_valid_data_blkaddr(blkaddr) &&
3584     + !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
3585     + blkaddr, DATA_GENERIC)) {
3586     + f2fs_put_dnode(&dn);
3587     + goto fail;
3588     + }
3589     +
3590     + if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
3591     + pgofs, whence)) {
3592     f2fs_put_dnode(&dn);
3593     goto found;
3594     }
3595     @@ -481,6 +489,11 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
3596    
3597     dn->data_blkaddr = NULL_ADDR;
3598     set_data_blkaddr(dn);
3599     +
3600     + if (__is_valid_data_blkaddr(blkaddr) &&
3601     + !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
3602     + continue;
3603     +
3604     invalidate_blocks(sbi, blkaddr);
3605     if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
3606     clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
3607     diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
3608     index d7369895a78a..1de02c31756b 100644
3609     --- a/fs/f2fs/inode.c
3610     +++ b/fs/f2fs/inode.c
3611     @@ -59,13 +59,16 @@ static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
3612     }
3613     }
3614    
3615     -static bool __written_first_block(struct f2fs_inode *ri)
3616     +static int __written_first_block(struct f2fs_sb_info *sbi,
3617     + struct f2fs_inode *ri)
3618     {
3619     block_t addr = le32_to_cpu(ri->i_addr[0]);
3620    
3621     - if (addr != NEW_ADDR && addr != NULL_ADDR)
3622     - return true;
3623     - return false;
3624     + if (!__is_valid_data_blkaddr(addr))
3625     + return 1;
3626     + if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
3627     + return -EFAULT;
3628     + return 0;
3629     }
3630    
3631     static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
3632     @@ -103,12 +106,57 @@ static void __recover_inline_status(struct inode *inode, struct page *ipage)
3633     return;
3634     }
3635    
3636     +static bool sanity_check_inode(struct inode *inode, struct page *node_page)
3637     +{
3638     + struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3639     + unsigned long long iblocks;
3640     +
3641     + iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
3642     + if (!iblocks) {
3643     + set_sbi_flag(sbi, SBI_NEED_FSCK);
3644     + f2fs_msg(sbi->sb, KERN_WARNING,
3645     + "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, "
3646     + "run fsck to fix.",
3647     + __func__, inode->i_ino, iblocks);
3648     + return false;
3649     + }
3650     +
3651     + if (ino_of_node(node_page) != nid_of_node(node_page)) {
3652     + set_sbi_flag(sbi, SBI_NEED_FSCK);
3653     + f2fs_msg(sbi->sb, KERN_WARNING,
3654     + "%s: corrupted inode footer i_ino=%lx, ino,nid: "
3655     + "[%u, %u] run fsck to fix.",
3656     + __func__, inode->i_ino,
3657     + ino_of_node(node_page), nid_of_node(node_page));
3658     + return false;
3659     + }
3660     +
3661     + if (F2FS_I(inode)->extent_tree) {
3662     + struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;
3663     +
3664     + if (ei->len &&
3665     + (!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC) ||
3666     + !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
3667     + DATA_GENERIC))) {
3668     + set_sbi_flag(sbi, SBI_NEED_FSCK);
3669     + f2fs_msg(sbi->sb, KERN_WARNING,
3670     + "%s: inode (ino=%lx) extent info [%u, %u, %u] "
3671     + "is incorrect, run fsck to fix",
3672     + __func__, inode->i_ino,
3673     + ei->blk, ei->fofs, ei->len);
3674     + return false;
3675     + }
3676     + }
3677     + return true;
3678     +}
3679     +
3680     static int do_read_inode(struct inode *inode)
3681     {
3682     struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3683     struct f2fs_inode_info *fi = F2FS_I(inode);
3684     struct page *node_page;
3685     struct f2fs_inode *ri;
3686     + int err;
3687    
3688     /* Check if ino is within scope */
3689     if (check_nid_range(sbi, inode->i_ino)) {
3690     @@ -152,6 +200,11 @@ static int do_read_inode(struct inode *inode)
3691    
3692     get_inline_info(inode, ri);
3693    
3694     + if (!sanity_check_inode(inode, node_page)) {
3695     + f2fs_put_page(node_page, 1);
3696     + return -EINVAL;
3697     + }
3698     +
3699     /* check data exist */
3700     if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
3701     __recover_inline_status(inode, node_page);
3702     @@ -159,7 +212,12 @@ static int do_read_inode(struct inode *inode)
3703     /* get rdev by using inline_info */
3704     __get_inode_rdev(inode, ri);
3705    
3706     - if (__written_first_block(ri))
3707     + err = __written_first_block(sbi, ri);
3708     + if (err < 0) {
3709     + f2fs_put_page(node_page, 1);
3710     + return err;
3711     + }
3712     + if (!err)
3713     set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
3714    
3715     if (!need_inode_block_update(sbi, inode->i_ino))
3716     diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
3717     index addff6a3b176..f4fe54047fb7 100644
3718     --- a/fs/f2fs/node.c
3719     +++ b/fs/f2fs/node.c
3720     @@ -304,8 +304,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
3721     new_blkaddr == NULL_ADDR);
3722     f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
3723     new_blkaddr == NEW_ADDR);
3724     - f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
3725     - nat_get_blkaddr(e) != NULL_ADDR &&
3726     + f2fs_bug_on(sbi, is_valid_data_blkaddr(sbi, nat_get_blkaddr(e)) &&
3727     new_blkaddr == NEW_ADDR);
3728    
3729     /* increment version no as node is removed */
3730     @@ -320,7 +319,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
3731    
3732     /* change address */
3733     nat_set_blkaddr(e, new_blkaddr);
3734     - if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
3735     + if (!is_valid_data_blkaddr(sbi, new_blkaddr))
3736     set_nat_flag(e, IS_CHECKPOINTED, false);
3737     __set_nat_cache_dirty(nm_i, e);
3738    
3739     @@ -1606,6 +1605,12 @@ static int f2fs_write_node_page(struct page *page,
3740     return 0;
3741     }
3742    
3743     + if (__is_valid_data_blkaddr(ni.blk_addr) &&
3744     + !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC)) {
3745     + up_read(&sbi->node_write);
3746     + goto redirty_out;
3747     + }
3748     +
3749     set_page_writeback(page);
3750     fio.old_blkaddr = ni.blk_addr;
3751     write_node_page(nid, &fio);
3752     @@ -1704,8 +1709,9 @@ static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
3753     static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
3754     {
3755     struct f2fs_nm_info *nm_i = NM_I(sbi);
3756     - struct free_nid *i;
3757     + struct free_nid *i, *e;
3758     struct nat_entry *ne;
3759     + int err = -EINVAL;
3760    
3761     if (!available_free_memory(sbi, FREE_NIDS))
3762     return -1;
3763     @@ -1714,35 +1720,58 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
3764     if (unlikely(nid == 0))
3765     return 0;
3766    
3767     - if (build) {
3768     - /* do not add allocated nids */
3769     - ne = __lookup_nat_cache(nm_i, nid);
3770     - if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
3771     - nat_get_blkaddr(ne) != NULL_ADDR))
3772     - return 0;
3773     - }
3774     -
3775     i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
3776     i->nid = nid;
3777     i->state = NID_NEW;
3778    
3779     - if (radix_tree_preload(GFP_NOFS)) {
3780     - kmem_cache_free(free_nid_slab, i);
3781     - return 0;
3782     - }
3783     + if (radix_tree_preload(GFP_NOFS))
3784     + goto err;
3785    
3786     spin_lock(&nm_i->free_nid_list_lock);
3787     - if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
3788     - spin_unlock(&nm_i->free_nid_list_lock);
3789     - radix_tree_preload_end();
3790     - kmem_cache_free(free_nid_slab, i);
3791     - return 0;
3792     +
3793     + if (build) {
3794     + /*
3795     + * Thread A Thread B
3796     + * - f2fs_create
3797     + * - f2fs_new_inode
3798     + * - alloc_nid
3799     + * - __insert_nid_to_list(ALLOC_NID_LIST)
3800     + * - f2fs_balance_fs_bg
3801     + * - build_free_nids
3802     + * - __build_free_nids
3803     + * - scan_nat_page
3804     + * - add_free_nid
3805     + * - __lookup_nat_cache
3806     + * - f2fs_add_link
3807     + * - init_inode_metadata
3808     + * - new_inode_page
3809     + * - new_node_page
3810     + * - set_node_addr
3811     + * - alloc_nid_done
3812     + * - __remove_nid_from_list(ALLOC_NID_LIST)
3813     + * - __insert_nid_to_list(FREE_NID_LIST)
3814     + */
3815     + ne = __lookup_nat_cache(nm_i, nid);
3816     + if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
3817     + nat_get_blkaddr(ne) != NULL_ADDR))
3818     + goto err_out;
3819     +
3820     + e = __lookup_free_nid_list(nm_i, nid);
3821     + if (e)
3822     + goto err_out;
3823     }
3824     + if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i))
3825     + goto err_out;
3826     + err = 0;
3827     list_add_tail(&i->list, &nm_i->free_nid_list);
3828     nm_i->fcnt++;
3829     +err_out:
3830     spin_unlock(&nm_i->free_nid_list_lock);
3831     radix_tree_preload_end();
3832     - return 1;
3833     +err:
3834     + if (err)
3835     + kmem_cache_free(free_nid_slab, i);
3836     + return !err;
3837     }
3838    
3839     static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
3840     diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
3841     index 98c1a63a4614..ab4cbb4be423 100644
3842     --- a/fs/f2fs/recovery.c
3843     +++ b/fs/f2fs/recovery.c
3844     @@ -236,7 +236,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
3845     while (1) {
3846     struct fsync_inode_entry *entry;
3847    
3848     - if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
3849     + if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
3850     return 0;
3851    
3852     page = get_tmp_page(sbi, blkaddr);
3853     @@ -468,7 +468,7 @@ retry_dn:
3854     }
3855    
3856     /* dest is valid block, try to recover from src to dest */
3857     - if (is_valid_blkaddr(sbi, dest, META_POR)) {
3858     + if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
3859    
3860     if (src == NULL_ADDR) {
3861     err = reserve_new_block(&dn);
3862     @@ -527,7 +527,7 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
3863     while (1) {
3864     struct fsync_inode_entry *entry;
3865    
3866     - if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
3867     + if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
3868     break;
3869    
3870     ra_meta_pages_cond(sbi, blkaddr);
3871     diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
3872     index 35d48ef0573c..2fb99a081de8 100644
3873     --- a/fs/f2fs/segment.c
3874     +++ b/fs/f2fs/segment.c
3875     @@ -493,6 +493,9 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
3876     init_waitqueue_head(&fcc->flush_wait_queue);
3877     init_llist_head(&fcc->issue_list);
3878     SM_I(sbi)->cmd_control_info = fcc;
3879     + if (!test_opt(sbi, FLUSH_MERGE))
3880     + return err;
3881     +
3882     fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
3883     "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
3884     if (IS_ERR(fcc->f2fs_issue_flush)) {
3885     @@ -941,7 +944,7 @@ bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
3886     struct seg_entry *se;
3887     bool is_cp = false;
3888    
3889     - if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
3890     + if (!is_valid_data_blkaddr(sbi, blkaddr))
3891     return true;
3892    
3893     mutex_lock(&sit_i->sentry_lock);
3894     @@ -1665,7 +1668,7 @@ void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
3895     {
3896     struct page *cpage;
3897    
3898     - if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
3899     + if (!is_valid_data_blkaddr(sbi, blkaddr))
3900     return;
3901    
3902     cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
3903     @@ -2319,7 +2322,7 @@ static int build_curseg(struct f2fs_sb_info *sbi)
3904     return restore_curseg_summaries(sbi);
3905     }
3906    
3907     -static void build_sit_entries(struct f2fs_sb_info *sbi)
3908     +static int build_sit_entries(struct f2fs_sb_info *sbi)
3909     {
3910     struct sit_info *sit_i = SIT_I(sbi);
3911     struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
3912     @@ -2330,6 +2333,7 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
3913     unsigned int i, start, end;
3914     unsigned int readed, start_blk = 0;
3915     int nrpages = MAX_BIO_BLOCKS(sbi) * 8;
3916     + int err = 0;
3917    
3918     do {
3919     readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT, true);
3920     @@ -2347,7 +2351,9 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
3921     sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
3922     f2fs_put_page(page, 1);
3923    
3924     - check_block_count(sbi, start, &sit);
3925     + err = check_block_count(sbi, start, &sit);
3926     + if (err)
3927     + return err;
3928     seg_info_from_raw_sit(se, &sit);
3929    
3930     /* build discard map only one time */
3931     @@ -2370,12 +2376,23 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
3932     unsigned int old_valid_blocks;
3933    
3934     start = le32_to_cpu(segno_in_journal(journal, i));
3935     + if (start >= MAIN_SEGS(sbi)) {
3936     + f2fs_msg(sbi->sb, KERN_ERR,
3937     + "Wrong journal entry on segno %u",
3938     + start);
3939     + set_sbi_flag(sbi, SBI_NEED_FSCK);
3940     + err = -EINVAL;
3941     + break;
3942     + }
3943     +
3944     se = &sit_i->sentries[start];
3945     sit = sit_in_journal(journal, i);
3946    
3947     old_valid_blocks = se->valid_blocks;
3948    
3949     - check_block_count(sbi, start, &sit);
3950     + err = check_block_count(sbi, start, &sit);
3951     + if (err)
3952     + break;
3953     seg_info_from_raw_sit(se, &sit);
3954    
3955     if (f2fs_discard_en(sbi)) {
3956     @@ -2390,6 +2407,7 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
3957     se->valid_blocks - old_valid_blocks;
3958     }
3959     up_read(&curseg->journal_rwsem);
3960     + return err;
3961     }
3962    
3963     static void init_free_segmap(struct f2fs_sb_info *sbi)
3964     @@ -2539,7 +2557,7 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
3965    
3966     INIT_LIST_HEAD(&sm_info->sit_entry_set);
3967    
3968     - if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
3969     + if (!f2fs_readonly(sbi->sb)) {
3970     err = create_flush_cmd_control(sbi);
3971     if (err)
3972     return err;
3973     @@ -2556,7 +2574,9 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
3974     return err;
3975    
3976     /* reinit free segmap based on SIT */
3977     - build_sit_entries(sbi);
3978     + err = build_sit_entries(sbi);
3979     + if (err)
3980     + return err;
3981    
3982     init_free_segmap(sbi);
3983     err = build_dirty_segmap(sbi);
3984     diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
3985     index 3d9b9e98c4c2..893723978f5e 100644
3986     --- a/fs/f2fs/segment.h
3987     +++ b/fs/f2fs/segment.h
3988     @@ -18,6 +18,8 @@
3989     #define DEF_RECLAIM_PREFREE_SEGMENTS 5 /* 5% over total segments */
3990     #define DEF_MAX_RECLAIM_PREFREE_SEGMENTS 4096 /* 8GB in maximum */
3991    
3992     +#define F2FS_MIN_SEGMENTS 9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
3993     +
3994     /* L: Logical segment # in volume, R: Relative segment # in main area */
3995     #define GET_L2R_SEGNO(free_i, segno) (segno - free_i->start_segno)
3996     #define GET_R2L_SEGNO(free_i, segno) (segno + free_i->start_segno)
3997     @@ -47,13 +49,19 @@
3998     (secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
3999     sbi->segs_per_sec)) \
4000    
4001     -#define MAIN_BLKADDR(sbi) (SM_I(sbi)->main_blkaddr)
4002     -#define SEG0_BLKADDR(sbi) (SM_I(sbi)->seg0_blkaddr)
4003     +#define MAIN_BLKADDR(sbi) \
4004     + (SM_I(sbi) ? SM_I(sbi)->main_blkaddr : \
4005     + le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
4006     +#define SEG0_BLKADDR(sbi) \
4007     + (SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr : \
4008     + le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))
4009    
4010     #define MAIN_SEGS(sbi) (SM_I(sbi)->main_segments)
4011     #define MAIN_SECS(sbi) (sbi->total_sections)
4012    
4013     -#define TOTAL_SEGS(sbi) (SM_I(sbi)->segment_count)
4014     +#define TOTAL_SEGS(sbi) \
4015     + (SM_I(sbi) ? SM_I(sbi)->segment_count : \
4016     + le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
4017     #define TOTAL_BLKS(sbi) (TOTAL_SEGS(sbi) << sbi->log_blocks_per_seg)
4018    
4019     #define MAX_BLKADDR(sbi) (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
4020     @@ -73,7 +81,7 @@
4021     (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (sbi->blocks_per_seg - 1))
4022    
4023     #define GET_SEGNO(sbi, blk_addr) \
4024     - (((blk_addr == NULL_ADDR) || (blk_addr == NEW_ADDR)) ? \
4025     + ((!is_valid_data_blkaddr(sbi, blk_addr)) ? \
4026     NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
4027     GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
4028     #define GET_SECNO(sbi, segno) \
4029     @@ -589,16 +597,20 @@ static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
4030     f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
4031     }
4032    
4033     -static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
4034     +static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr)
4035     {
4036     - BUG_ON(blk_addr < SEG0_BLKADDR(sbi)
4037     - || blk_addr >= MAX_BLKADDR(sbi));
4038     + struct f2fs_sb_info *sbi = fio->sbi;
4039     +
4040     + if (__is_meta_io(fio))
4041     + verify_blkaddr(sbi, blk_addr, META_GENERIC);
4042     + else
4043     + verify_blkaddr(sbi, blk_addr, DATA_GENERIC);
4044     }
4045    
4046     /*
4047     * Summary block is always treated as an invalid block
4048     */
4049     -static inline void check_block_count(struct f2fs_sb_info *sbi,
4050     +static inline int check_block_count(struct f2fs_sb_info *sbi,
4051     int segno, struct f2fs_sit_entry *raw_sit)
4052     {
4053     #ifdef CONFIG_F2FS_CHECK_FS
4054     @@ -620,11 +632,25 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
4055     cur_pos = next_pos;
4056     is_valid = !is_valid;
4057     } while (cur_pos < sbi->blocks_per_seg);
4058     - BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
4059     +
4060     + if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
4061     + f2fs_msg(sbi->sb, KERN_ERR,
4062     + "Mismatch valid blocks %d vs. %d",
4063     + GET_SIT_VBLOCKS(raw_sit), valid_blocks);
4064     + set_sbi_flag(sbi, SBI_NEED_FSCK);
4065     + return -EINVAL;
4066     + }
4067     #endif
4068     /* check segment usage, and check boundary of a given segment number */
4069     - f2fs_bug_on(sbi, GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
4070     - || segno > TOTAL_SEGS(sbi) - 1);
4071     + if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
4072     + || segno > TOTAL_SEGS(sbi) - 1)) {
4073     + f2fs_msg(sbi->sb, KERN_ERR,
4074     + "Wrong valid blocks %d or segno %u",
4075     + GET_SIT_VBLOCKS(raw_sit), segno);
4076     + set_sbi_flag(sbi, SBI_NEED_FSCK);
4077     + return -EINVAL;
4078     + }
4079     + return 0;
4080     }
4081    
4082     static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
4083     diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
4084     index 91bf72334722..c8f408d8a582 100644
4085     --- a/fs/f2fs/super.c
4086     +++ b/fs/f2fs/super.c
4087     @@ -1337,6 +1337,8 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
4088     static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
4089     struct buffer_head *bh)
4090     {
4091     + block_t segment_count, segs_per_sec, secs_per_zone;
4092     + block_t total_sections, blocks_per_seg;
4093     struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
4094     (bh->b_data + F2FS_SUPER_OFFSET);
4095     struct super_block *sb = sbi->sb;
4096     @@ -1393,6 +1395,68 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
4097     return 1;
4098     }
4099    
4100     + segment_count = le32_to_cpu(raw_super->segment_count);
4101     + segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
4102     + secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
4103     + total_sections = le32_to_cpu(raw_super->section_count);
4104     +
4105     + /* blocks_per_seg should be 512, given the above check */
4106     + blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);
4107     +
4108     + if (segment_count > F2FS_MAX_SEGMENT ||
4109     + segment_count < F2FS_MIN_SEGMENTS) {
4110     + f2fs_msg(sb, KERN_INFO,
4111     + "Invalid segment count (%u)",
4112     + segment_count);
4113     + return 1;
4114     + }
4115     +
4116     + if (total_sections > segment_count ||
4117     + total_sections < F2FS_MIN_SEGMENTS ||
4118     + segs_per_sec > segment_count || !segs_per_sec) {
4119     + f2fs_msg(sb, KERN_INFO,
4120     + "Invalid segment/section count (%u, %u x %u)",
4121     + segment_count, total_sections, segs_per_sec);
4122     + return 1;
4123     + }
4124     +
4125     + if ((segment_count / segs_per_sec) < total_sections) {
4126     + f2fs_msg(sb, KERN_INFO,
4127     + "Small segment_count (%u < %u * %u)",
4128     + segment_count, segs_per_sec, total_sections);
4129     + return 1;
4130     + }
4131     +
4132     + if (segment_count > (le32_to_cpu(raw_super->block_count) >> 9)) {
4133     + f2fs_msg(sb, KERN_INFO,
4134     + "Wrong segment_count / block_count (%u > %u)",
4135     + segment_count, le32_to_cpu(raw_super->block_count));
4136     + return 1;
4137     + }
4138     +
4139     + if (secs_per_zone > total_sections || !secs_per_zone) {
4140     + f2fs_msg(sb, KERN_INFO,
4141     + "Wrong secs_per_zone / total_sections (%u, %u)",
4142     + secs_per_zone, total_sections);
4143     + return 1;
4144     + }
4145     + if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION) {
4146     + f2fs_msg(sb, KERN_INFO,
4147     + "Corrupted extension count (%u > %u)",
4148     + le32_to_cpu(raw_super->extension_count),
4149     + F2FS_MAX_EXTENSION);
4150     + return 1;
4151     + }
4152     +
4153     + if (le32_to_cpu(raw_super->cp_payload) >
4154     + (blocks_per_seg - F2FS_CP_PACKS)) {
4155     + f2fs_msg(sb, KERN_INFO,
4156     + "Insane cp_payload (%u > %u)",
4157     + le32_to_cpu(raw_super->cp_payload),
4158     + blocks_per_seg - F2FS_CP_PACKS);
4159     + return 1;
4160     + }
4161     +
4162     /* check reserved ino info */
4163     if (le32_to_cpu(raw_super->node_ino) != 1 ||
4164     le32_to_cpu(raw_super->meta_ino) != 2 ||
4165     @@ -1405,13 +1469,6 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
4166     return 1;
4167     }
4168    
4169     - if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
4170     - f2fs_msg(sb, KERN_INFO,
4171     - "Invalid segment count (%u)",
4172     - le32_to_cpu(raw_super->segment_count));
4173     - return 1;
4174     - }
4175     -
4176     /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
4177     if (sanity_check_area_boundary(sbi, bh))
4178     return 1;
4179     @@ -1424,10 +1481,14 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
4180     unsigned int total, fsmeta;
4181     struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4182     struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
4183     + unsigned int ovp_segments, reserved_segments;
4184     unsigned int main_segs, blocks_per_seg;
4185     unsigned int sit_segs, nat_segs;
4186     unsigned int sit_bitmap_size, nat_bitmap_size;
4187     unsigned int log_blocks_per_seg;
4188     + unsigned int segment_count_main;
4189     + unsigned int cp_pack_start_sum, cp_payload;
4190     + block_t user_block_count;
4191     int i;
4192    
4193     total = le32_to_cpu(raw_super->segment_count);
4194     @@ -1442,6 +1503,26 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
4195     if (unlikely(fsmeta >= total))
4196     return 1;
4197    
4198     + ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
4199     + reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
4200     +
4201     + if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
4202     + ovp_segments == 0 || reserved_segments == 0)) {
4203     + f2fs_msg(sbi->sb, KERN_ERR,
4204     + "Wrong layout: check mkfs.f2fs version");
4205     + return 1;
4206     + }
4207     +
4208     + user_block_count = le64_to_cpu(ckpt->user_block_count);
4209     + segment_count_main = le32_to_cpu(raw_super->segment_count_main);
4210     + log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
4211     + if (!user_block_count || user_block_count >=
4212     + segment_count_main << log_blocks_per_seg) {
4213     + f2fs_msg(sbi->sb, KERN_ERR,
4214     + "Wrong user_block_count: %u", user_block_count);
4215     + return 1;
4216     + }
4217     +
4218     main_segs = le32_to_cpu(raw_super->segment_count_main);
4219     blocks_per_seg = sbi->blocks_per_seg;
4220    
4221     @@ -1458,7 +1539,6 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
4222    
4223     sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
4224     nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
4225     - log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
4226    
4227     if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
4228     nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
4229     @@ -1468,6 +1548,17 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
4230     return 1;
4231     }
4232    
4233     + cp_pack_start_sum = __start_sum_addr(sbi);
4234     + cp_payload = __cp_payload(sbi);
4235     + if (cp_pack_start_sum < cp_payload + 1 ||
4236     + cp_pack_start_sum > blocks_per_seg - 1 -
4237     + NR_CURSEG_TYPE) {
4238     + f2fs_msg(sbi->sb, KERN_ERR,
4239     + "Wrong cp_pack_start_sum: %u",
4240     + cp_pack_start_sum);
4241     + return 1;
4242     + }
4243     +
4244     if (unlikely(f2fs_cp_error(sbi))) {
4245     f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
4246     return 1;
4247     diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
4248     index 2c2f182cde03..f53c139c312e 100644
4249     --- a/fs/hugetlbfs/inode.c
4250     +++ b/fs/hugetlbfs/inode.c
4251     @@ -118,6 +118,16 @@ static void huge_pagevec_release(struct pagevec *pvec)
4252     pagevec_reinit(pvec);
4253     }
4254    
4255     +/*
4256     + * Mask used when checking the page offset value passed in via system
4257     + * calls. This value will be converted to a loff_t which is signed.
4258     + * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
4259     + * value. The extra bit (- 1 in the shift value) is to take the sign
4260     + * bit into account.
4261     + */
4262     +#define PGOFF_LOFFT_MAX \
4263     + (((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
4264     +
4265     static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
4266     {
4267     struct inode *inode = file_inode(file);
4268     @@ -136,17 +146,31 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
4269     vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
4270     vma->vm_ops = &hugetlb_vm_ops;
4271    
4272     + /*
4273     + * page based offset in vm_pgoff could be sufficiently large to
4274     + * overflow a loff_t when converted to byte offset. This can
4275     + * only happen on architectures where sizeof(loff_t) ==
4276     + * sizeof(unsigned long). So, only check in those instances.
4277     + */
4278     + if (sizeof(unsigned long) == sizeof(loff_t)) {
4279     + if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
4280     + return -EINVAL;
4281     + }
4282     +
4283     + /* must be huge page aligned */
4284     if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
4285     return -EINVAL;
4286    
4287     vma_len = (loff_t)(vma->vm_end - vma->vm_start);
4288     + len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
4289     + /* check for overflow */
4290     + if (len < vma_len)
4291     + return -EINVAL;
4292    
4293     inode_lock(inode);
4294     file_accessed(file);
4295    
4296     ret = -ENOMEM;
4297     - len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
4298     -
4299     if (hugetlb_reserve_pages(inode,
4300     vma->vm_pgoff >> huge_page_order(h),
4301     len >> huge_page_shift(h), vma,
4302     @@ -155,7 +179,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
4303    
4304     ret = 0;
4305     if (vma->vm_flags & VM_WRITE && inode->i_size < len)
4306     - inode->i_size = len;
4307     + i_size_write(inode, len);
4308     out:
4309     inode_unlock(inode);
4310    
4311     diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
4312     index 9b43ca02b7ab..80317b04c84a 100644
4313     --- a/fs/kernfs/symlink.c
4314     +++ b/fs/kernfs/symlink.c
4315     @@ -88,7 +88,7 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
4316     int slen = strlen(kn->name);
4317    
4318     len -= slen;
4319     - strncpy(s + len, kn->name, slen);
4320     + memcpy(s + len, kn->name, slen);
4321     if (len)
4322     s[--len] = '/';
4323    
4324     diff --git a/fs/udf/super.c b/fs/udf/super.c
4325     index 12467ad608cd..03369a89600e 100644
4326     --- a/fs/udf/super.c
4327     +++ b/fs/udf/super.c
4328     @@ -929,16 +929,20 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
4329     }
4330    
4331     ret = udf_dstrCS0toUTF8(outstr, 31, pvoldesc->volIdent, 32);
4332     - if (ret < 0)
4333     - goto out_bh;
4334     -
4335     - strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
4336     + if (ret < 0) {
4337     + strcpy(UDF_SB(sb)->s_volume_ident, "InvalidName");
4338     + pr_warn("incorrect volume identification, setting to "
4339     + "'InvalidName'\n");
4340     + } else {
4341     + strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
4342     + }
4343     udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
4344    
4345     ret = udf_dstrCS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128);
4346     - if (ret < 0)
4347     + if (ret < 0) {
4348     + ret = 0;
4349     goto out_bh;
4350     -
4351     + }
4352     outstr[ret] = 0;
4353     udf_debug("volSetIdent[] = '%s'\n", outstr);
4354    
4355     diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
4356     index 3a3be23689b3..61a1738895b7 100644
4357     --- a/fs/udf/unicode.c
4358     +++ b/fs/udf/unicode.c
4359     @@ -341,6 +341,11 @@ try_again:
4360     return u_len;
4361     }
4362    
4363     +/*
4364     + * Convert CS0 dstring to output charset. Warning: This function may truncate
4365     + * input string if it is too long as it is used for informational strings only
4366     + * and it is better to truncate the string than to refuse mounting a media.
4367     + */
4368     int udf_dstrCS0toUTF8(uint8_t *utf_o, int o_len,
4369     const uint8_t *ocu_i, int i_len)
4370     {
4371     @@ -349,9 +354,12 @@ int udf_dstrCS0toUTF8(uint8_t *utf_o, int o_len,
4372     if (i_len > 0) {
4373     s_len = ocu_i[i_len - 1];
4374     if (s_len >= i_len) {
4375     - pr_err("incorrect dstring lengths (%d/%d)\n",
4376     - s_len, i_len);
4377     - return -EINVAL;
4378     + pr_warn("incorrect dstring lengths (%d/%d),"
4379     + " truncating\n", s_len, i_len);
4380     + s_len = i_len - 1;
4381     + /* 2-byte encoding? Need to round properly... */
4382     + if (ocu_i[0] == 16)
4383     + s_len -= (s_len - 1) & 2;
4384     }
4385     }
4386    
4387     diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
4388     index 6622d46ddec3..9687208c676f 100644
4389     --- a/fs/xfs/libxfs/xfs_attr.c
4390     +++ b/fs/xfs/libxfs/xfs_attr.c
4391     @@ -487,7 +487,14 @@ xfs_attr_shortform_addname(xfs_da_args_t *args)
4392     if (args->flags & ATTR_CREATE)
4393     return retval;
4394     retval = xfs_attr_shortform_remove(args);
4395     - ASSERT(retval == 0);
4396     + if (retval)
4397     + return retval;
4398     + /*
4399     + * Since we have removed the old attr, clear ATTR_REPLACE so
4400     + * that the leaf format add routine won't trip over the attr
4401     + * not being around.
4402     + */
4403     + args->flags &= ~ATTR_REPLACE;
4404     }
4405    
4406     if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX ||
4407     diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
4408     index 070fc49e39e2..5031defe59c5 100644
4409     --- a/include/linux/bpf_verifier.h
4410     +++ b/include/linux/bpf_verifier.h
4411     @@ -71,6 +71,7 @@ struct bpf_insn_aux_data {
4412     enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
4413     struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
4414     };
4415     + int sanitize_stack_off; /* stack slot to be cleared */
4416     bool seen; /* this insn was processed by the verifier */
4417     };
4418    
4419     diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
4420     index 374bb1c4ef52..035f26a04364 100644
4421     --- a/include/linux/ceph/auth.h
4422     +++ b/include/linux/ceph/auth.h
4423     @@ -63,8 +63,12 @@ struct ceph_auth_client_ops {
4424     /* ensure that an existing authorizer is up to date */
4425     int (*update_authorizer)(struct ceph_auth_client *ac, int peer_type,
4426     struct ceph_auth_handshake *auth);
4427     + int (*add_authorizer_challenge)(struct ceph_auth_client *ac,
4428     + struct ceph_authorizer *a,
4429     + void *challenge_buf,
4430     + int challenge_buf_len);
4431     int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
4432     - struct ceph_authorizer *a, size_t len);
4433     + struct ceph_authorizer *a);
4434     void (*invalidate_authorizer)(struct ceph_auth_client *ac,
4435     int peer_type);
4436    
4437     @@ -117,9 +121,12 @@ void ceph_auth_destroy_authorizer(struct ceph_authorizer *a);
4438     extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
4439     int peer_type,
4440     struct ceph_auth_handshake *a);
4441     +int ceph_auth_add_authorizer_challenge(struct ceph_auth_client *ac,
4442     + struct ceph_authorizer *a,
4443     + void *challenge_buf,
4444     + int challenge_buf_len);
4445     extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
4446     - struct ceph_authorizer *a,
4447     - size_t len);
4448     + struct ceph_authorizer *a);
4449     extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac,
4450     int peer_type);
4451    
4452     diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
4453     index ae2f66833762..cf765db39c95 100644
4454     --- a/include/linux/ceph/ceph_features.h
4455     +++ b/include/linux/ceph/ceph_features.h
4456     @@ -76,6 +76,7 @@
4457     // duplicated since it was introduced at the same time as CEPH_FEATURE_CRUSH_TUNABLES5
4458     #define CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING (1ULL<<58) /* New, v7 encoding */
4459     #define CEPH_FEATURE_FS_FILE_LAYOUT_V2 (1ULL<<58) /* file_layout_t */
4460     +#define CEPH_FEATURE_CEPHX_V2 (1ULL<<61) // *do not share this bit*
4461    
4462     /*
4463     * The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature
4464     @@ -124,7 +125,8 @@ static inline u64 ceph_sanitize_features(u64 features)
4465     CEPH_FEATURE_MSGR_KEEPALIVE2 | \
4466     CEPH_FEATURE_CRUSH_V4 | \
4467     CEPH_FEATURE_CRUSH_TUNABLES5 | \
4468     - CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING)
4469     + CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
4470     + CEPH_FEATURE_CEPHX_V2)
4471    
4472     #define CEPH_FEATURES_REQUIRED_DEFAULT \
4473     (CEPH_FEATURE_NOSRCADDR | \
4474     diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
4475     index 8dbd7879fdc6..5e1c9c80d536 100644
4476     --- a/include/linux/ceph/messenger.h
4477     +++ b/include/linux/ceph/messenger.h
4478     @@ -30,7 +30,10 @@ struct ceph_connection_operations {
4479     struct ceph_auth_handshake *(*get_authorizer) (
4480     struct ceph_connection *con,
4481     int *proto, int force_new);
4482     - int (*verify_authorizer_reply) (struct ceph_connection *con, int len);
4483     + int (*add_authorizer_challenge)(struct ceph_connection *con,
4484     + void *challenge_buf,
4485     + int challenge_buf_len);
4486     + int (*verify_authorizer_reply) (struct ceph_connection *con);
4487     int (*invalidate_authorizer)(struct ceph_connection *con);
4488    
4489     /* there was some error on the socket (disconnect, whatever) */
4490     @@ -200,9 +203,8 @@ struct ceph_connection {
4491     attempt for this connection, client */
4492     u32 peer_global_seq; /* peer's global seq for this connection */
4493    
4494     + struct ceph_auth_handshake *auth;
4495     int auth_retry; /* true if we need a newer authorizer */
4496     - void *auth_reply_buf; /* where to put the authorizer reply */
4497     - int auth_reply_buf_len;
4498    
4499     struct mutex mutex;
4500    
4501     diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
4502     index 0fe2656ac415..063f9d7f1b74 100644
4503     --- a/include/linux/ceph/msgr.h
4504     +++ b/include/linux/ceph/msgr.h
4505     @@ -90,7 +90,7 @@ struct ceph_entity_inst {
4506     #define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */
4507     #define CEPH_MSGR_TAG_KEEPALIVE2 14 /* keepalive2 byte + ceph_timespec */
4508     #define CEPH_MSGR_TAG_KEEPALIVE2_ACK 15 /* keepalive2 reply */
4509     -
4510     +#define CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER 16 /* cephx v2 doing server challenge */
4511    
4512     /*
4513     * connection negotiation
4514     diff --git a/include/linux/reset.h b/include/linux/reset.h
4515     index 5daff15722d3..7e99690dbc81 100644
4516     --- a/include/linux/reset.h
4517     +++ b/include/linux/reset.h
4518     @@ -13,76 +13,82 @@ int reset_control_deassert(struct reset_control *rstc);
4519     int reset_control_status(struct reset_control *rstc);
4520    
4521     struct reset_control *__of_reset_control_get(struct device_node *node,
4522     - const char *id, int index, int shared);
4523     + const char *id, int index, bool shared,
4524     + bool optional);
4525     +struct reset_control *__reset_control_get(struct device *dev, const char *id,
4526     + int index, bool shared,
4527     + bool optional);
4528     void reset_control_put(struct reset_control *rstc);
4529     +int __device_reset(struct device *dev, bool optional);
4530     struct reset_control *__devm_reset_control_get(struct device *dev,
4531     - const char *id, int index, int shared);
4532     -
4533     -int __must_check device_reset(struct device *dev);
4534     -
4535     -static inline int device_reset_optional(struct device *dev)
4536     -{
4537     - return device_reset(dev);
4538     -}
4539     + const char *id, int index, bool shared,
4540     + bool optional);
4541    
4542     #else
4543    
4544     static inline int reset_control_reset(struct reset_control *rstc)
4545     {
4546     - WARN_ON(1);
4547     return 0;
4548     }
4549    
4550     static inline int reset_control_assert(struct reset_control *rstc)
4551     {
4552     - WARN_ON(1);
4553     return 0;
4554     }
4555    
4556     static inline int reset_control_deassert(struct reset_control *rstc)
4557     {
4558     - WARN_ON(1);
4559     return 0;
4560     }
4561    
4562     static inline int reset_control_status(struct reset_control *rstc)
4563     {
4564     - WARN_ON(1);
4565     return 0;
4566     }
4567    
4568     static inline void reset_control_put(struct reset_control *rstc)
4569     {
4570     - WARN_ON(1);
4571     }
4572    
4573     -static inline int __must_check device_reset(struct device *dev)
4574     +static inline int __device_reset(struct device *dev, bool optional)
4575     {
4576     - WARN_ON(1);
4577     - return -ENOTSUPP;
4578     + return optional ? 0 : -ENOTSUPP;
4579     }
4580    
4581     -static inline int device_reset_optional(struct device *dev)
4582     +static inline struct reset_control *__of_reset_control_get(
4583     + struct device_node *node,
4584     + const char *id, int index, bool shared,
4585     + bool optional)
4586     {
4587     - return -ENOTSUPP;
4588     + return optional ? NULL : ERR_PTR(-ENOTSUPP);
4589     }
4590    
4591     -static inline struct reset_control *__of_reset_control_get(
4592     - struct device_node *node,
4593     - const char *id, int index, int shared)
4594     +static inline struct reset_control *__reset_control_get(
4595     + struct device *dev, const char *id,
4596     + int index, bool shared, bool optional)
4597     {
4598     - return ERR_PTR(-ENOTSUPP);
4599     + return optional ? NULL : ERR_PTR(-ENOTSUPP);
4600     }
4601    
4602     static inline struct reset_control *__devm_reset_control_get(
4603     - struct device *dev,
4604     - const char *id, int index, int shared)
4605     + struct device *dev, const char *id,
4606     + int index, bool shared, bool optional)
4607     {
4608     - return ERR_PTR(-ENOTSUPP);
4609     + return optional ? NULL : ERR_PTR(-ENOTSUPP);
4610     }
4611    
4612     #endif /* CONFIG_RESET_CONTROLLER */
4613    
4614     +static inline int __must_check device_reset(struct device *dev)
4615     +{
4616     + return __device_reset(dev, false);
4617     +}
4618     +
4619     +static inline int device_reset_optional(struct device *dev)
4620     +{
4621     + return __device_reset(dev, true);
4622     +}
4623     +
4624     /**
4625     * reset_control_get_exclusive - Lookup and obtain an exclusive reference
4626     * to a reset controller.
4627     @@ -101,10 +107,7 @@ static inline struct reset_control *__devm_reset_control_get(
4628     static inline struct reset_control *
4629     __must_check reset_control_get_exclusive(struct device *dev, const char *id)
4630     {
4631     -#ifndef CONFIG_RESET_CONTROLLER
4632     - WARN_ON(1);
4633     -#endif
4634     - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
4635     + return __reset_control_get(dev, id, 0, false, false);
4636     }
4637    
4638     /**
4639     @@ -132,19 +135,19 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
4640     static inline struct reset_control *reset_control_get_shared(
4641     struct device *dev, const char *id)
4642     {
4643     - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1);
4644     + return __reset_control_get(dev, id, 0, true, false);
4645     }
4646    
4647     static inline struct reset_control *reset_control_get_optional_exclusive(
4648     struct device *dev, const char *id)
4649     {
4650     - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
4651     + return __reset_control_get(dev, id, 0, false, true);
4652     }
4653    
4654     static inline struct reset_control *reset_control_get_optional_shared(
4655     struct device *dev, const char *id)
4656     {
4657     - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1);
4658     + return __reset_control_get(dev, id, 0, true, true);
4659     }
4660    
4661     /**
4662     @@ -160,7 +163,7 @@ static inline struct reset_control *reset_control_get_optional_shared(
4663     static inline struct reset_control *of_reset_control_get_exclusive(
4664     struct device_node *node, const char *id)
4665     {
4666     - return __of_reset_control_get(node, id, 0, 0);
4667     + return __of_reset_control_get(node, id, 0, false, false);
4668     }
4669    
4670     /**
4671     @@ -185,7 +188,7 @@ static inline struct reset_control *of_reset_control_get_exclusive(
4672     static inline struct reset_control *of_reset_control_get_shared(
4673     struct device_node *node, const char *id)
4674     {
4675     - return __of_reset_control_get(node, id, 0, 1);
4676     + return __of_reset_control_get(node, id, 0, true, false);
4677     }
4678    
4679     /**
4680     @@ -202,7 +205,7 @@ static inline struct reset_control *of_reset_control_get_shared(
4681     static inline struct reset_control *of_reset_control_get_exclusive_by_index(
4682     struct device_node *node, int index)
4683     {
4684     - return __of_reset_control_get(node, NULL, index, 0);
4685     + return __of_reset_control_get(node, NULL, index, false, false);
4686     }
4687    
4688     /**
4689     @@ -230,7 +233,7 @@ static inline struct reset_control *of_reset_control_get_exclusive_by_index(
4690     static inline struct reset_control *of_reset_control_get_shared_by_index(
4691     struct device_node *node, int index)
4692     {
4693     - return __of_reset_control_get(node, NULL, index, 1);
4694     + return __of_reset_control_get(node, NULL, index, true, false);
4695     }
4696    
4697     /**
4698     @@ -249,10 +252,7 @@ static inline struct reset_control *
4699     __must_check devm_reset_control_get_exclusive(struct device *dev,
4700     const char *id)
4701     {
4702     -#ifndef CONFIG_RESET_CONTROLLER
4703     - WARN_ON(1);
4704     -#endif
4705     - return __devm_reset_control_get(dev, id, 0, 0);
4706     + return __devm_reset_control_get(dev, id, 0, false, false);
4707     }
4708    
4709     /**
4710     @@ -267,19 +267,19 @@ __must_check devm_reset_control_get_exclusive(struct device *dev,
4711     static inline struct reset_control *devm_reset_control_get_shared(
4712     struct device *dev, const char *id)
4713     {
4714     - return __devm_reset_control_get(dev, id, 0, 1);
4715     + return __devm_reset_control_get(dev, id, 0, true, false);
4716     }
4717    
4718     static inline struct reset_control *devm_reset_control_get_optional_exclusive(
4719     struct device *dev, const char *id)
4720     {
4721     - return __devm_reset_control_get(dev, id, 0, 0);
4722     + return __devm_reset_control_get(dev, id, 0, false, true);
4723     }
4724    
4725     static inline struct reset_control *devm_reset_control_get_optional_shared(
4726     struct device *dev, const char *id)
4727     {
4728     - return __devm_reset_control_get(dev, id, 0, 1);
4729     + return __devm_reset_control_get(dev, id, 0, true, true);
4730     }
4731    
4732     /**
4733     @@ -297,7 +297,7 @@ static inline struct reset_control *devm_reset_control_get_optional_shared(
4734     static inline struct reset_control *
4735     devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
4736     {
4737     - return __devm_reset_control_get(dev, NULL, index, 0);
4738     + return __devm_reset_control_get(dev, NULL, index, false, false);
4739     }
4740    
4741     /**
4742     @@ -313,7 +313,7 @@ devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
4743     static inline struct reset_control *
4744     devm_reset_control_get_shared_by_index(struct device *dev, int index)
4745     {
4746     - return __devm_reset_control_get(dev, NULL, index, 1);
4747     + return __devm_reset_control_get(dev, NULL, index, true, false);
4748     }
4749    
4750     /*
4751     diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
4752     index c794c9af6c0f..a1ded2a1bf1d 100644
4753     --- a/include/uapi/linux/btrfs_tree.h
4754     +++ b/include/uapi/linux/btrfs_tree.h
4755     @@ -730,6 +730,7 @@ struct btrfs_balance_item {
4756     #define BTRFS_FILE_EXTENT_INLINE 0
4757     #define BTRFS_FILE_EXTENT_REG 1
4758     #define BTRFS_FILE_EXTENT_PREALLOC 2
4759     +#define BTRFS_FILE_EXTENT_TYPES 2
4760    
4761     struct btrfs_file_extent_item {
4762     /*
4763     diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
4764     index dafa2708ce9e..1438b7396cb4 100644
4765     --- a/kernel/bpf/verifier.c
4766     +++ b/kernel/bpf/verifier.c
4767     @@ -540,10 +540,11 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
4768     /* check_stack_read/write functions track spill/fill of registers,
4769     * stack boundary and alignment are checked in check_mem_access()
4770     */
4771     -static int check_stack_write(struct bpf_verifier_state *state, int off,
4772     - int size, int value_regno)
4773     +static int check_stack_write(struct bpf_verifier_env *env,
4774     + struct bpf_verifier_state *state, int off,
4775     + int size, int value_regno, int insn_idx)
4776     {
4777     - int i;
4778     + int i, spi = (MAX_BPF_STACK + off) / BPF_REG_SIZE;
4779     /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
4780     * so it's aligned access and [off, off + size) are within stack limits
4781     */
4782     @@ -558,15 +559,37 @@ static int check_stack_write(struct bpf_verifier_state *state, int off,
4783     }
4784    
4785     /* save register state */
4786     - state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
4787     - state->regs[value_regno];
4788     -
4789     - for (i = 0; i < BPF_REG_SIZE; i++)
4790     + state->spilled_regs[spi] = state->regs[value_regno];
4791     +
4792     + for (i = 0; i < BPF_REG_SIZE; i++) {
4793     + if (state->stack_slot_type[MAX_BPF_STACK + off + i] == STACK_MISC &&
4794     + !env->allow_ptr_leaks) {
4795     + int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
4796     + int soff = (-spi - 1) * BPF_REG_SIZE;
4797     +
4798     + /* detected reuse of integer stack slot with a pointer
4799     + * which means either llvm is reusing stack slot or
4800     + * an attacker is trying to exploit CVE-2018-3639
4801     + * (speculative store bypass)
4802     + * Have to sanitize that slot with preemptive
4803     + * store of zero.
4804     + */
4805     + if (*poff && *poff != soff) {
4806     + /* disallow programs where single insn stores
4807     + * into two different stack slots, since verifier
4808     + * cannot sanitize them
4809     + */
4810     + verbose("insn %d cannot access two stack slots fp%d and fp%d",
4811     + insn_idx, *poff, soff);
4812     + return -EINVAL;
4813     + }
4814     + *poff = soff;
4815     + }
4816     state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
4817     + }
4818     } else {
4819     /* regular write of data into stack */
4820     - state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
4821     - (struct bpf_reg_state) {};
4822     + state->spilled_regs[spi] = (struct bpf_reg_state) {};
4823    
4824     for (i = 0; i < size; i++)
4825     state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
4826     @@ -747,7 +770,7 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
4827     * if t==write && value_regno==-1, some unknown value is stored into memory
4828     * if t==read && value_regno==-1, don't care what we read from memory
4829     */
4830     -static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
4831     +static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
4832     int bpf_size, enum bpf_access_type t,
4833     int value_regno)
4834     {
4835     @@ -843,7 +866,8 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
4836     verbose("attempt to corrupt spilled pointer on stack\n");
4837     return -EACCES;
4838     }
4839     - err = check_stack_write(state, off, size, value_regno);
4840     + err = check_stack_write(env, state, off, size,
4841     + value_regno, insn_idx);
4842     } else {
4843     err = check_stack_read(state, off, size, value_regno);
4844     }
4845     @@ -877,7 +901,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
4846     return err;
4847     }
4848    
4849     -static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
4850     +static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
4851     {
4852     struct bpf_reg_state *regs = env->cur_state.regs;
4853     int err;
4854     @@ -910,13 +934,13 @@ static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
4855     }
4856    
4857     /* check whether atomic_add can read the memory */
4858     - err = check_mem_access(env, insn->dst_reg, insn->off,
4859     + err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4860     BPF_SIZE(insn->code), BPF_READ, -1);
4861     if (err)
4862     return err;
4863    
4864     /* check whether atomic_add can write into the same memory */
4865     - return check_mem_access(env, insn->dst_reg, insn->off,
4866     + return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4867     BPF_SIZE(insn->code), BPF_WRITE, -1);
4868     }
4869    
4870     @@ -1272,7 +1296,7 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
4871     * is inferred from register state.
4872     */
4873     for (i = 0; i < meta.access_size; i++) {
4874     - err = check_mem_access(env, meta.regno, i, BPF_B, BPF_WRITE, -1);
4875     + err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
4876     if (err)
4877     return err;
4878     }
4879     @@ -2938,7 +2962,7 @@ static int do_check(struct bpf_verifier_env *env)
4880     /* check that memory (src_reg + off) is readable,
4881     * the state of dst_reg will be updated by this func
4882     */
4883     - err = check_mem_access(env, insn->src_reg, insn->off,
4884     + err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
4885     BPF_SIZE(insn->code), BPF_READ,
4886     insn->dst_reg);
4887     if (err)
4888     @@ -2978,7 +3002,7 @@ static int do_check(struct bpf_verifier_env *env)
4889     enum bpf_reg_type *prev_dst_type, dst_reg_type;
4890    
4891     if (BPF_MODE(insn->code) == BPF_XADD) {
4892     - err = check_xadd(env, insn);
4893     + err = check_xadd(env, insn_idx, insn);
4894     if (err)
4895     return err;
4896     insn_idx++;
4897     @@ -2997,7 +3021,7 @@ static int do_check(struct bpf_verifier_env *env)
4898     dst_reg_type = regs[insn->dst_reg].type;
4899    
4900     /* check that memory (dst_reg + off) is writeable */
4901     - err = check_mem_access(env, insn->dst_reg, insn->off,
4902     + err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4903     BPF_SIZE(insn->code), BPF_WRITE,
4904     insn->src_reg);
4905     if (err)
4906     @@ -3032,7 +3056,7 @@ static int do_check(struct bpf_verifier_env *env)
4907     }
4908    
4909     /* check that memory (dst_reg + off) is writeable */
4910     - err = check_mem_access(env, insn->dst_reg, insn->off,
4911     + err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4912     BPF_SIZE(insn->code), BPF_WRITE,
4913     -1);
4914     if (err)
4915     @@ -3369,6 +3393,34 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
4916     else
4917     continue;
4918    
4919     + if (type == BPF_WRITE &&
4920     + env->insn_aux_data[i + delta].sanitize_stack_off) {
4921     + struct bpf_insn patch[] = {
4922     + /* Sanitize suspicious stack slot with zero.
4923     + * There are no memory dependencies for this store,
4924     + * since it's only using frame pointer and immediate
4925     + * constant of zero
4926     + */
4927     + BPF_ST_MEM(BPF_DW, BPF_REG_FP,
4928     + env->insn_aux_data[i + delta].sanitize_stack_off,
4929     + 0),
4930     + /* the original STX instruction will immediately
4931     + * overwrite the same stack slot with appropriate value
4932     + */
4933     + *insn,
4934     + };
4935     +
4936     + cnt = ARRAY_SIZE(patch);
4937     + new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
4938     + if (!new_prog)
4939     + return -ENOMEM;
4940     +
4941     + delta += cnt - 1;
4942     + env->prog = new_prog;
4943     + insn = new_prog->insnsi + i + delta;
4944     + continue;
4945     + }
4946     +
4947     if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
4948     continue;
4949    
4950     diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
4951     index 2aed4a33521b..61cd704a21c8 100644
4952     --- a/kernel/debug/kdb/kdb_support.c
4953     +++ b/kernel/debug/kdb/kdb_support.c
4954     @@ -129,13 +129,13 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
4955     }
4956     if (i >= ARRAY_SIZE(kdb_name_table)) {
4957     debug_kfree(kdb_name_table[0]);
4958     - memcpy(kdb_name_table, kdb_name_table+1,
4959     + memmove(kdb_name_table, kdb_name_table+1,
4960     sizeof(kdb_name_table[0]) *
4961     (ARRAY_SIZE(kdb_name_table)-1));
4962     } else {
4963     debug_kfree(knt1);
4964     knt1 = kdb_name_table[i];
4965     - memcpy(kdb_name_table+i, kdb_name_table+i+1,
4966     + memmove(kdb_name_table+i, kdb_name_table+i+1,
4967     sizeof(kdb_name_table[0]) *
4968     (ARRAY_SIZE(kdb_name_table)-i-1));
4969     }
4970     diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
4971     index a1de021dccba..fbfab5722254 100644
4972     --- a/kernel/events/uprobes.c
4973     +++ b/kernel/events/uprobes.c
4974     @@ -608,7 +608,7 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
4975     BUG_ON((uprobe->offset & ~PAGE_MASK) +
4976     UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
4977    
4978     - smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
4979     + smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
4980     set_bit(UPROBE_COPY_INSN, &uprobe->flags);
4981    
4982     out:
4983     @@ -1902,10 +1902,18 @@ static void handle_swbp(struct pt_regs *regs)
4984     * After we hit the bp, _unregister + _register can install the
4985     * new and not-yet-analyzed uprobe at the same address, restart.
4986     */
4987     - smp_rmb(); /* pairs with wmb() in install_breakpoint() */
4988     if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
4989     goto out;
4990    
4991     + /*
4992     + * Pairs with the smp_wmb() in prepare_uprobe().
4993     + *
4994     + * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
4995     + * we must also see the stores to &uprobe->arch performed by the
4996     + * prepare_uprobe() call.
4997     + */
4998     + smp_rmb();
4999     +
5000     /* Tracing handlers use ->utask to communicate with fetch methods */
5001     if (!get_utask())
5002     goto out;
5003     diff --git a/lib/kobject.c b/lib/kobject.c
5004     index b733a83e5294..f58c7f2b229c 100644
5005     --- a/lib/kobject.c
5006     +++ b/lib/kobject.c
5007     @@ -127,7 +127,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
5008     int cur = strlen(kobject_name(parent));
5009     /* back up enough to print this name with '/' */
5010     length -= cur;
5011     - strncpy(path + length, kobject_name(parent), cur);
5012     + memcpy(path + length, kobject_name(parent), cur);
5013     *(path + --length) = '/';
5014     }
5015    
5016     diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c
5017     index 3f415d8101f3..1c3c513add77 100644
5018     --- a/lib/test_hexdump.c
5019     +++ b/lib/test_hexdump.c
5020     @@ -81,7 +81,7 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize,
5021     const char *q = *result++;
5022     size_t amount = strlen(q);
5023    
5024     - strncpy(p, q, amount);
5025     + memcpy(p, q, amount);
5026     p += amount;
5027    
5028     *p++ = ' ';
5029     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
5030     index 5e3a4db36310..3e50fcfe6ad8 100644
5031     --- a/mm/hugetlb.c
5032     +++ b/mm/hugetlb.c
5033     @@ -4170,6 +4170,12 @@ int hugetlb_reserve_pages(struct inode *inode,
5034     struct resv_map *resv_map;
5035     long gbl_reserve;
5036    
5037     + /* This should never happen */
5038     + if (from > to) {
5039     + VM_WARN(1, "%s called with a negative range\n", __func__);
5040     + return -EINVAL;
5041     + }
5042     +
5043     /*
5044     * Only apply hugepage reservation if asked. At fault time, an
5045     * attempt will be made for VM_NORESERVE to allocate a page
5046     @@ -4259,7 +4265,9 @@ int hugetlb_reserve_pages(struct inode *inode,
5047     return 0;
5048     out_err:
5049     if (!vma || vma->vm_flags & VM_MAYSHARE)
5050     - region_abort(resv_map, from, to);
5051     + /* Don't call region_abort if region_chg failed */
5052     + if (chg >= 0)
5053     + region_abort(resv_map, from, to);
5054     if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
5055     kref_put(&resv_map->refs, resv_map_release);
5056     return ret;
5057     diff --git a/mm/truncate.c b/mm/truncate.c
5058     index 9c809e7d73c3..befdc6f575d2 100644
5059     --- a/mm/truncate.c
5060     +++ b/mm/truncate.c
5061     @@ -443,9 +443,13 @@ void truncate_inode_pages_final(struct address_space *mapping)
5062     */
5063     spin_lock_irq(&mapping->tree_lock);
5064     spin_unlock_irq(&mapping->tree_lock);
5065     -
5066     - truncate_inode_pages(mapping, 0);
5067     }
5068     +
5069     + /*
5070     + * Cleancache needs notification even if there are no pages or shadow
5071     + * entries.
5072     + */
5073     + truncate_inode_pages(mapping, 0);
5074     }
5075     EXPORT_SYMBOL(truncate_inode_pages_final);
5076    
5077     diff --git a/net/ceph/auth.c b/net/ceph/auth.c
5078     index c822b3ae1bd3..8e79dca81748 100644
5079     --- a/net/ceph/auth.c
5080     +++ b/net/ceph/auth.c
5081     @@ -314,14 +314,30 @@ int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
5082     }
5083     EXPORT_SYMBOL(ceph_auth_update_authorizer);
5084    
5085     +int ceph_auth_add_authorizer_challenge(struct ceph_auth_client *ac,
5086     + struct ceph_authorizer *a,
5087     + void *challenge_buf,
5088     + int challenge_buf_len)
5089     +{
5090     + int ret = 0;
5091     +
5092     + mutex_lock(&ac->mutex);
5093     + if (ac->ops && ac->ops->add_authorizer_challenge)
5094     + ret = ac->ops->add_authorizer_challenge(ac, a, challenge_buf,
5095     + challenge_buf_len);
5096     + mutex_unlock(&ac->mutex);
5097     + return ret;
5098     +}
5099     +EXPORT_SYMBOL(ceph_auth_add_authorizer_challenge);
5100     +
5101     int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
5102     - struct ceph_authorizer *a, size_t len)
5103     + struct ceph_authorizer *a)
5104     {
5105     int ret = 0;
5106    
5107     mutex_lock(&ac->mutex);
5108     if (ac->ops && ac->ops->verify_authorizer_reply)
5109     - ret = ac->ops->verify_authorizer_reply(ac, a, len);
5110     + ret = ac->ops->verify_authorizer_reply(ac, a);
5111     mutex_unlock(&ac->mutex);
5112     return ret;
5113     }
5114     diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
5115     index b216131915e7..29e23b5cb2ed 100644
5116     --- a/net/ceph/auth_x.c
5117     +++ b/net/ceph/auth_x.c
5118     @@ -8,6 +8,7 @@
5119    
5120     #include <linux/ceph/decode.h>
5121     #include <linux/ceph/auth.h>
5122     +#include <linux/ceph/ceph_features.h>
5123     #include <linux/ceph/libceph.h>
5124     #include <linux/ceph/messenger.h>
5125    
5126     @@ -69,25 +70,40 @@ static int ceph_x_encrypt(struct ceph_crypto_key *secret, void *buf,
5127     return sizeof(u32) + ciphertext_len;
5128     }
5129    
5130     +static int __ceph_x_decrypt(struct ceph_crypto_key *secret, void *p,
5131     + int ciphertext_len)
5132     +{
5133     + struct ceph_x_encrypt_header *hdr = p;
5134     + int plaintext_len;
5135     + int ret;
5136     +
5137     + ret = ceph_crypt(secret, false, p, ciphertext_len, ciphertext_len,
5138     + &plaintext_len);
5139     + if (ret)
5140     + return ret;
5141     +
5142     + if (le64_to_cpu(hdr->magic) != CEPHX_ENC_MAGIC) {
5143     + pr_err("%s bad magic\n", __func__);
5144     + return -EINVAL;
5145     + }
5146     +
5147     + return plaintext_len - sizeof(*hdr);
5148     +}
5149     +
5150     static int ceph_x_decrypt(struct ceph_crypto_key *secret, void **p, void *end)
5151     {
5152     - struct ceph_x_encrypt_header *hdr = *p + sizeof(u32);
5153     - int ciphertext_len, plaintext_len;
5154     + int ciphertext_len;
5155     int ret;
5156    
5157     ceph_decode_32_safe(p, end, ciphertext_len, e_inval);
5158     ceph_decode_need(p, end, ciphertext_len, e_inval);
5159    
5160     - ret = ceph_crypt(secret, false, *p, end - *p, ciphertext_len,
5161     - &plaintext_len);
5162     - if (ret)
5163     + ret = __ceph_x_decrypt(secret, *p, ciphertext_len);
5164     + if (ret < 0)
5165     return ret;
5166    
5167     - if (hdr->struct_v != 1 || le64_to_cpu(hdr->magic) != CEPHX_ENC_MAGIC)
5168     - return -EPERM;
5169     -
5170     *p += ciphertext_len;
5171     - return plaintext_len - sizeof(struct ceph_x_encrypt_header);
5172     + return ret;
5173    
5174     e_inval:
5175     return -EINVAL;
5176     @@ -271,6 +287,51 @@ bad:
5177     return -EINVAL;
5178     }
5179    
5180     +/*
5181     + * Encode and encrypt the second part (ceph_x_authorize_b) of the
5182     + * authorizer. The first part (ceph_x_authorize_a) should already be
5183     + * encoded.
5184     + */
5185     +static int encrypt_authorizer(struct ceph_x_authorizer *au,
5186     + u64 *server_challenge)
5187     +{
5188     + struct ceph_x_authorize_a *msg_a;
5189     + struct ceph_x_authorize_b *msg_b;
5190     + void *p, *end;
5191     + int ret;
5192     +
5193     + msg_a = au->buf->vec.iov_base;
5194     + WARN_ON(msg_a->ticket_blob.secret_id != cpu_to_le64(au->secret_id));
5195     + p = (void *)(msg_a + 1) + le32_to_cpu(msg_a->ticket_blob.blob_len);
5196     + end = au->buf->vec.iov_base + au->buf->vec.iov_len;
5197     +
5198     + msg_b = p + ceph_x_encrypt_offset();
5199     + msg_b->struct_v = 2;
5200     + msg_b->nonce = cpu_to_le64(au->nonce);
5201     + if (server_challenge) {
5202     + msg_b->have_challenge = 1;
5203     + msg_b->server_challenge_plus_one =
5204     + cpu_to_le64(*server_challenge + 1);
5205     + } else {
5206     + msg_b->have_challenge = 0;
5207     + msg_b->server_challenge_plus_one = 0;
5208     + }
5209     +
5210     + ret = ceph_x_encrypt(&au->session_key, p, end - p, sizeof(*msg_b));
5211     + if (ret < 0)
5212     + return ret;
5213     +
5214     + p += ret;
5215     + if (server_challenge) {
5216     + WARN_ON(p != end);
5217     + } else {
5218     + WARN_ON(p > end);
5219     + au->buf->vec.iov_len = p - au->buf->vec.iov_base;
5220     + }
5221     +
5222     + return 0;
5223     +}
5224     +
5225     static void ceph_x_authorizer_cleanup(struct ceph_x_authorizer *au)
5226     {
5227     ceph_crypto_key_destroy(&au->session_key);
5228     @@ -287,7 +348,6 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
5229     int maxlen;
5230     struct ceph_x_authorize_a *msg_a;
5231     struct ceph_x_authorize_b *msg_b;
5232     - void *p, *end;
5233     int ret;
5234     int ticket_blob_len =
5235     (th->ticket_blob ? th->ticket_blob->vec.iov_len : 0);
5236     @@ -331,21 +391,13 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
5237     dout(" th %p secret_id %lld %lld\n", th, th->secret_id,
5238     le64_to_cpu(msg_a->ticket_blob.secret_id));
5239    
5240     - p = msg_a + 1;
5241     - p += ticket_blob_len;
5242     - end = au->buf->vec.iov_base + au->buf->vec.iov_len;
5243     -
5244     - msg_b = p + ceph_x_encrypt_offset();
5245     - msg_b->struct_v = 1;
5246     get_random_bytes(&au->nonce, sizeof(au->nonce));
5247     - msg_b->nonce = cpu_to_le64(au->nonce);
5248     - ret = ceph_x_encrypt(&au->session_key, p, end - p, sizeof(*msg_b));
5249     - if (ret < 0)
5250     + ret = encrypt_authorizer(au, NULL);
5251     + if (ret) {
5252     + pr_err("failed to encrypt authorizer: %d", ret);
5253     goto out_au;
5254     + }
5255    
5256     - p += ret;
5257     - WARN_ON(p > end);
5258     - au->buf->vec.iov_len = p - au->buf->vec.iov_base;
5259     dout(" built authorizer nonce %llx len %d\n", au->nonce,
5260     (int)au->buf->vec.iov_len);
5261     return 0;
5262     @@ -622,8 +674,56 @@ static int ceph_x_update_authorizer(
5263     return 0;
5264     }
5265    
5266     +static int decrypt_authorize_challenge(struct ceph_x_authorizer *au,
5267     + void *challenge_buf,
5268     + int challenge_buf_len,
5269     + u64 *server_challenge)
5270     +{
5271     + struct ceph_x_authorize_challenge *ch =
5272     + challenge_buf + sizeof(struct ceph_x_encrypt_header);
5273     + int ret;
5274     +
5275     + /* no leading len */
5276     + ret = __ceph_x_decrypt(&au->session_key, challenge_buf,
5277     + challenge_buf_len);
5278     + if (ret < 0)
5279     + return ret;
5280     + if (ret < sizeof(*ch)) {
5281     + pr_err("bad size %d for ceph_x_authorize_challenge\n", ret);
5282     + return -EINVAL;
5283     + }
5284     +
5285     + *server_challenge = le64_to_cpu(ch->server_challenge);
5286     + return 0;
5287     +}
5288     +
5289     +static int ceph_x_add_authorizer_challenge(struct ceph_auth_client *ac,
5290     + struct ceph_authorizer *a,
5291     + void *challenge_buf,
5292     + int challenge_buf_len)
5293     +{
5294     + struct ceph_x_authorizer *au = (void *)a;
5295     + u64 server_challenge;
5296     + int ret;
5297     +
5298     + ret = decrypt_authorize_challenge(au, challenge_buf, challenge_buf_len,
5299     + &server_challenge);
5300     + if (ret) {
5301     + pr_err("failed to decrypt authorize challenge: %d", ret);
5302     + return ret;
5303     + }
5304     +
5305     + ret = encrypt_authorizer(au, &server_challenge);
5306     + if (ret) {
5307     + pr_err("failed to encrypt authorizer w/ challenge: %d", ret);
5308     + return ret;
5309     + }
5310     +
5311     + return 0;
5312     +}
5313     +
5314     static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
5315     - struct ceph_authorizer *a, size_t len)
5316     + struct ceph_authorizer *a)
5317     {
5318     struct ceph_x_authorizer *au = (void *)a;
5319     void *p = au->enc_buf;
5320     @@ -633,8 +733,10 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
5321     ret = ceph_x_decrypt(&au->session_key, &p, p + CEPHX_AU_ENC_BUF_LEN);
5322     if (ret < 0)
5323     return ret;
5324     - if (ret != sizeof(*reply))
5325     - return -EPERM;
5326     + if (ret < sizeof(*reply)) {
5327     + pr_err("bad size %d for ceph_x_authorize_reply\n", ret);
5328     + return -EINVAL;
5329     + }
5330    
5331     if (au->nonce + 1 != le64_to_cpu(reply->nonce_plus_one))
5332     ret = -EPERM;
5333     @@ -700,26 +802,64 @@ static int calc_signature(struct ceph_x_authorizer *au, struct ceph_msg *msg,
5334     __le64 *psig)
5335     {
5336     void *enc_buf = au->enc_buf;
5337     - struct {
5338     - __le32 len;
5339     - __le32 header_crc;
5340     - __le32 front_crc;
5341     - __le32 middle_crc;
5342     - __le32 data_crc;
5343     - } __packed *sigblock = enc_buf + ceph_x_encrypt_offset();
5344     int ret;
5345    
5346     - sigblock->len = cpu_to_le32(4*sizeof(u32));
5347     - sigblock->header_crc = msg->hdr.crc;
5348     - sigblock->front_crc = msg->footer.front_crc;
5349     - sigblock->middle_crc = msg->footer.middle_crc;
5350     - sigblock->data_crc = msg->footer.data_crc;
5351     - ret = ceph_x_encrypt(&au->session_key, enc_buf, CEPHX_AU_ENC_BUF_LEN,
5352     - sizeof(*sigblock));
5353     - if (ret < 0)
5354     - return ret;
5355     + if (msg->con->peer_features & CEPH_FEATURE_CEPHX_V2) {
5356     + struct {
5357     + __le32 len;
5358     + __le32 header_crc;
5359     + __le32 front_crc;
5360     + __le32 middle_crc;
5361     + __le32 data_crc;
5362     + } __packed *sigblock = enc_buf + ceph_x_encrypt_offset();
5363     +
5364     + sigblock->len = cpu_to_le32(4*sizeof(u32));
5365     + sigblock->header_crc = msg->hdr.crc;
5366     + sigblock->front_crc = msg->footer.front_crc;
5367     + sigblock->middle_crc = msg->footer.middle_crc;
5368     + sigblock->data_crc = msg->footer.data_crc;
5369     +
5370     + ret = ceph_x_encrypt(&au->session_key, enc_buf,
5371     + CEPHX_AU_ENC_BUF_LEN, sizeof(*sigblock));
5372     + if (ret < 0)
5373     + return ret;
5374     +
5375     + *psig = *(__le64 *)(enc_buf + sizeof(u32));
5376     + } else {
5377     + struct {
5378     + __le32 header_crc;
5379     + __le32 front_crc;
5380     + __le32 front_len;
5381     + __le32 middle_crc;
5382     + __le32 middle_len;
5383     + __le32 data_crc;
5384     + __le32 data_len;
5385     + __le32 seq_lower_word;
5386     + } __packed *sigblock = enc_buf;
5387     + struct {
5388     + __le64 a, b, c, d;
5389     + } __packed *penc = enc_buf;
5390     + int ciphertext_len;
5391     +
5392     + sigblock->header_crc = msg->hdr.crc;
5393     + sigblock->front_crc = msg->footer.front_crc;
5394     + sigblock->front_len = msg->hdr.front_len;
5395     + sigblock->middle_crc = msg->footer.middle_crc;
5396     + sigblock->middle_len = msg->hdr.middle_len;
5397     + sigblock->data_crc = msg->footer.data_crc;
5398     + sigblock->data_len = msg->hdr.data_len;
5399     + sigblock->seq_lower_word = *(__le32 *)&msg->hdr.seq;
5400     +
5401     + /* no leading len, no ceph_x_encrypt_header */
5402     + ret = ceph_crypt(&au->session_key, true, enc_buf,
5403     + CEPHX_AU_ENC_BUF_LEN, sizeof(*sigblock),
5404     + &ciphertext_len);
5405     + if (ret)
5406     + return ret;
5407     +
5408     + *psig = penc->a ^ penc->b ^ penc->c ^ penc->d;
5409     + }
5410    
5411     - *psig = *(__le64 *)(enc_buf + sizeof(u32));
5412     return 0;
5413     }
5414    
5415     @@ -774,6 +914,7 @@ static const struct ceph_auth_client_ops ceph_x_ops = {
5416     .handle_reply = ceph_x_handle_reply,
5417     .create_authorizer = ceph_x_create_authorizer,
5418     .update_authorizer = ceph_x_update_authorizer,
5419     + .add_authorizer_challenge = ceph_x_add_authorizer_challenge,
5420     .verify_authorizer_reply = ceph_x_verify_authorizer_reply,
5421     .invalidate_authorizer = ceph_x_invalidate_authorizer,
5422     .reset = ceph_x_reset,
5423     diff --git a/net/ceph/auth_x_protocol.h b/net/ceph/auth_x_protocol.h
5424     index 671d30576c4f..a7cd203aacc2 100644
5425     --- a/net/ceph/auth_x_protocol.h
5426     +++ b/net/ceph/auth_x_protocol.h
5427     @@ -69,6 +69,13 @@ struct ceph_x_authorize_a {
5428     struct ceph_x_authorize_b {
5429     __u8 struct_v;
5430     __le64 nonce;
5431     + __u8 have_challenge;
5432     + __le64 server_challenge_plus_one;
5433     +} __attribute__ ((packed));
5434     +
5435     +struct ceph_x_authorize_challenge {
5436     + __u8 struct_v;
5437     + __le64 server_challenge;
5438     } __attribute__ ((packed));
5439    
5440     struct ceph_x_authorize_reply {
5441     diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
5442     index 68acf94fae72..5a8075d9f2e7 100644
5443     --- a/net/ceph/messenger.c
5444     +++ b/net/ceph/messenger.c
5445     @@ -1394,30 +1394,26 @@ static void prepare_write_keepalive(struct ceph_connection *con)
5446     * Connection negotiation.
5447     */
5448    
5449     -static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
5450     - int *auth_proto)
5451     +static int get_connect_authorizer(struct ceph_connection *con)
5452     {
5453     struct ceph_auth_handshake *auth;
5454     + int auth_proto;
5455    
5456     if (!con->ops->get_authorizer) {
5457     + con->auth = NULL;
5458     con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
5459     con->out_connect.authorizer_len = 0;
5460     - return NULL;
5461     + return 0;
5462     }
5463    
5464     - /* Can't hold the mutex while getting authorizer */
5465     - mutex_unlock(&con->mutex);
5466     - auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
5467     - mutex_lock(&con->mutex);
5468     -
5469     + auth = con->ops->get_authorizer(con, &auth_proto, con->auth_retry);
5470     if (IS_ERR(auth))
5471     - return auth;
5472     - if (con->state != CON_STATE_NEGOTIATING)
5473     - return ERR_PTR(-EAGAIN);
5474     + return PTR_ERR(auth);
5475    
5476     - con->auth_reply_buf = auth->authorizer_reply_buf;
5477     - con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
5478     - return auth;
5479     + con->auth = auth;
5480     + con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
5481     + con->out_connect.authorizer_len = cpu_to_le32(auth->authorizer_buf_len);
5482     + return 0;
5483     }
5484    
5485     /*
5486     @@ -1433,12 +1429,22 @@ static void prepare_write_banner(struct ceph_connection *con)
5487     con_flag_set(con, CON_FLAG_WRITE_PENDING);
5488     }
5489    
5490     +static void __prepare_write_connect(struct ceph_connection *con)
5491     +{
5492     + con_out_kvec_add(con, sizeof(con->out_connect), &con->out_connect);
5493     + if (con->auth)
5494     + con_out_kvec_add(con, con->auth->authorizer_buf_len,
5495     + con->auth->authorizer_buf);
5496     +
5497     + con->out_more = 0;
5498     + con_flag_set(con, CON_FLAG_WRITE_PENDING);
5499     +}
5500     +
5501     static int prepare_write_connect(struct ceph_connection *con)
5502     {
5503     unsigned int global_seq = get_global_seq(con->msgr, 0);
5504     int proto;
5505     - int auth_proto;
5506     - struct ceph_auth_handshake *auth;
5507     + int ret;
5508    
5509     switch (con->peer_name.type) {
5510     case CEPH_ENTITY_TYPE_MON:
5511     @@ -1465,24 +1471,11 @@ static int prepare_write_connect(struct ceph_connection *con)
5512     con->out_connect.protocol_version = cpu_to_le32(proto);
5513     con->out_connect.flags = 0;
5514    
5515     - auth_proto = CEPH_AUTH_UNKNOWN;
5516     - auth = get_connect_authorizer(con, &auth_proto);
5517     - if (IS_ERR(auth))
5518     - return PTR_ERR(auth);
5519     -
5520     - con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
5521     - con->out_connect.authorizer_len = auth ?
5522     - cpu_to_le32(auth->authorizer_buf_len) : 0;
5523     -
5524     - con_out_kvec_add(con, sizeof (con->out_connect),
5525     - &con->out_connect);
5526     - if (auth && auth->authorizer_buf_len)
5527     - con_out_kvec_add(con, auth->authorizer_buf_len,
5528     - auth->authorizer_buf);
5529     -
5530     - con->out_more = 0;
5531     - con_flag_set(con, CON_FLAG_WRITE_PENDING);
5532     + ret = get_connect_authorizer(con);
5533     + if (ret)
5534     + return ret;
5535    
5536     + __prepare_write_connect(con);
5537     return 0;
5538     }
5539    
5540     @@ -1743,11 +1736,21 @@ static int read_partial_connect(struct ceph_connection *con)
5541     if (ret <= 0)
5542     goto out;
5543    
5544     - size = le32_to_cpu(con->in_reply.authorizer_len);
5545     - end += size;
5546     - ret = read_partial(con, end, size, con->auth_reply_buf);
5547     - if (ret <= 0)
5548     - goto out;
5549     + if (con->auth) {
5550     + size = le32_to_cpu(con->in_reply.authorizer_len);
5551     + if (size > con->auth->authorizer_reply_buf_len) {
5552     + pr_err("authorizer reply too big: %d > %zu\n", size,
5553     + con->auth->authorizer_reply_buf_len);
5554     + ret = -EINVAL;
5555     + goto out;
5556     + }
5557     +
5558     + end += size;
5559     + ret = read_partial(con, end, size,
5560     + con->auth->authorizer_reply_buf);
5561     + if (ret <= 0)
5562     + goto out;
5563     + }
5564    
5565     dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
5566     con, (int)con->in_reply.tag,
5567     @@ -1755,7 +1758,6 @@ static int read_partial_connect(struct ceph_connection *con)
5568     le32_to_cpu(con->in_reply.global_seq));
5569     out:
5570     return ret;
5571     -
5572     }
5573    
5574     /*
5575     @@ -2039,13 +2041,28 @@ static int process_connect(struct ceph_connection *con)
5576    
5577     dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
5578    
5579     - if (con->auth_reply_buf) {
5580     + if (con->auth) {
5581     /*
5582     * Any connection that defines ->get_authorizer()
5583     - * should also define ->verify_authorizer_reply().
5584     + * should also define ->add_authorizer_challenge() and
5585     + * ->verify_authorizer_reply().
5586     + *
5587     * See get_connect_authorizer().
5588     */
5589     - ret = con->ops->verify_authorizer_reply(con, 0);
5590     + if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) {
5591     + ret = con->ops->add_authorizer_challenge(
5592     + con, con->auth->authorizer_reply_buf,
5593     + le32_to_cpu(con->in_reply.authorizer_len));
5594     + if (ret < 0)
5595     + return ret;
5596     +
5597     + con_out_kvec_reset(con);
5598     + __prepare_write_connect(con);
5599     + prepare_read_connect(con);
5600     + return 0;
5601     + }
5602     +
5603     + ret = con->ops->verify_authorizer_reply(con);
5604     if (ret < 0) {
5605     con->error_msg = "bad authorize reply";
5606     return ret;
5607     diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
5608     index 0ffeb60cfe67..70ccb0716fc5 100644
5609     --- a/net/ceph/osd_client.c
5610     +++ b/net/ceph/osd_client.c
5611     @@ -4478,14 +4478,24 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
5612     return auth;
5613     }
5614    
5615     +static int add_authorizer_challenge(struct ceph_connection *con,
5616     + void *challenge_buf, int challenge_buf_len)
5617     +{
5618     + struct ceph_osd *o = con->private;
5619     + struct ceph_osd_client *osdc = o->o_osdc;
5620     + struct ceph_auth_client *ac = osdc->client->monc.auth;
5621     +
5622     + return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer,
5623     + challenge_buf, challenge_buf_len);
5624     +}
5625    
5626     -static int verify_authorizer_reply(struct ceph_connection *con, int len)
5627     +static int verify_authorizer_reply(struct ceph_connection *con)
5628     {
5629     struct ceph_osd *o = con->private;
5630     struct ceph_osd_client *osdc = o->o_osdc;
5631     struct ceph_auth_client *ac = osdc->client->monc.auth;
5632    
5633     - return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len);
5634     + return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
5635     }
5636    
5637     static int invalidate_authorizer(struct ceph_connection *con)
5638     @@ -4519,6 +4529,7 @@ static const struct ceph_connection_operations osd_con_ops = {
5639     .put = put_osd_con,
5640     .dispatch = dispatch,
5641     .get_authorizer = get_authorizer,
5642     + .add_authorizer_challenge = add_authorizer_challenge,
5643     .verify_authorizer_reply = verify_authorizer_reply,
5644     .invalidate_authorizer = invalidate_authorizer,
5645     .alloc_msg = alloc_msg,
5646     diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
5647     index d8d99c21a9c1..e6ee6acac80c 100644
5648     --- a/net/ipv4/ip_tunnel.c
5649     +++ b/net/ipv4/ip_tunnel.c
5650     @@ -261,8 +261,8 @@ static struct net_device *__ip_tunnel_create(struct net *net,
5651     } else {
5652     if (strlen(ops->kind) > (IFNAMSIZ - 3))
5653     goto failed;
5654     - strlcpy(name, ops->kind, IFNAMSIZ);
5655     - strncat(name, "%d", 2);
5656     + strcpy(name, ops->kind);
5657     + strcat(name, "%d");
5658     }
5659    
5660     ASSERT_RTNL();
5661     diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
5662     index c2646446e157..d62affeb2a38 100644
5663     --- a/net/tipc/subscr.c
5664     +++ b/net/tipc/subscr.c
5665     @@ -389,7 +389,7 @@ int tipc_topsrv_start(struct net *net)
5666     topsrv->tipc_conn_new = tipc_subscrb_connect_cb;
5667     topsrv->tipc_conn_release = tipc_subscrb_release_cb;
5668    
5669     - strncpy(topsrv->name, name, strlen(name) + 1);
5670     + strscpy(topsrv->name, name, sizeof(topsrv->name));
5671     tn->topsrv = topsrv;
5672     atomic_set(&tn->subscription_count, 0);
5673    
5674     diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
5675     index fb3522fd8702..d08b6fbdfa85 100644
5676     --- a/scripts/Makefile.extrawarn
5677     +++ b/scripts/Makefile.extrawarn
5678     @@ -10,6 +10,8 @@
5679     # are not supported by all versions of the compiler
5680     # ==========================================================================
5681    
5682     +KBUILD_CFLAGS += $(call cc-disable-warning, packed-not-aligned)
5683     +
5684     ifeq ("$(origin W)", "command line")
5685     export KBUILD_ENABLE_EXTRA_GCC_CHECKS := $(W)
5686     endif
5687     @@ -25,6 +27,7 @@ warning-1 += -Wold-style-definition
5688     warning-1 += $(call cc-option, -Wmissing-include-dirs)
5689     warning-1 += $(call cc-option, -Wunused-but-set-variable)
5690     warning-1 += $(call cc-option, -Wunused-const-variable)
5691     +warning-1 += $(call cc-option, -Wpacked-not-aligned)
5692     warning-1 += $(call cc-disable-warning, missing-field-initializers)
5693     warning-1 += $(call cc-disable-warning, sign-compare)
5694    
5695     diff --git a/scripts/unifdef.c b/scripts/unifdef.c
5696     index 7493c0ee51cc..db00e3e30a59 100644
5697     --- a/scripts/unifdef.c
5698     +++ b/scripts/unifdef.c
5699     @@ -395,7 +395,7 @@ usage(void)
5700     * When we have processed a group that starts off with a known-false
5701     * #if/#elif sequence (which has therefore been deleted) followed by a
5702     * #elif that we don't understand and therefore must keep, we edit the
5703     - * latter into a #if to keep the nesting correct. We use strncpy() to
5704     + * latter into a #if to keep the nesting correct. We use memcpy() to
5705     * overwrite the 4 byte token "elif" with "if " without a '\0' byte.
5706     *
5707     * When we find a true #elif in a group, the following block will
5708     @@ -450,7 +450,7 @@ static void Idrop (void) { Fdrop(); ignoreon(); }
5709     static void Itrue (void) { Ftrue(); ignoreon(); }
5710     static void Ifalse(void) { Ffalse(); ignoreon(); }
5711     /* modify this line */
5712     -static void Mpass (void) { strncpy(keyword, "if ", 4); Pelif(); }
5713     +static void Mpass (void) { memcpy(keyword, "if ", 4); Pelif(); }
5714     static void Mtrue (void) { keywordedit("else"); state(IS_TRUE_MIDDLE); }
5715     static void Melif (void) { keywordedit("endif"); state(IS_FALSE_TRAILER); }
5716     static void Melse (void) { keywordedit("endif"); state(IS_FALSE_ELSE); }
5717     diff --git a/sound/pci/trident/trident.c b/sound/pci/trident/trident.c
5718     index cedf13b64803..2f18b1cdc2cd 100644
5719     --- a/sound/pci/trident/trident.c
5720     +++ b/sound/pci/trident/trident.c
5721     @@ -123,7 +123,7 @@ static int snd_trident_probe(struct pci_dev *pci,
5722     } else {
5723     strcpy(card->shortname, "Trident ");
5724     }
5725     - strcat(card->shortname, card->driver);
5726     + strcat(card->shortname, str);
5727     sprintf(card->longname, "%s PCI Audio at 0x%lx, irq %d",
5728     card->shortname, trident->port, trident->irq);
5729