Contents of /trunk/kernel-alx/patches-3.18/0118-3.18.19-all-fixes.patch
Parent Directory | Revision Log
Revision 2689 -
(show annotations)
(download)
Mon Aug 31 12:19:32 2015 UTC (9 years ago) by niro
File size: 58410 byte(s)
-linux-3.18.19
1 | diff --git a/Makefile b/Makefile |
2 | index 35faaf8fb651..eab97c3d462d 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 3 |
7 | PATCHLEVEL = 18 |
8 | -SUBLEVEL = 18 |
9 | +SUBLEVEL = 19 |
10 | EXTRAVERSION = |
11 | NAME = Diseased Newt |
12 | |
13 | diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S |
14 | index 01dcb0e752d9..d66d608f7ce7 100644 |
15 | --- a/arch/arm/kvm/interrupts.S |
16 | +++ b/arch/arm/kvm/interrupts.S |
17 | @@ -159,13 +159,9 @@ __kvm_vcpu_return: |
18 | @ Don't trap coprocessor accesses for host kernel |
19 | set_hstr vmexit |
20 | set_hdcr vmexit |
21 | - set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)) |
22 | + set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore |
23 | |
24 | #ifdef CONFIG_VFPv3 |
25 | - @ Save floating point registers we if let guest use them. |
26 | - tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11)) |
27 | - bne after_vfp_restore |
28 | - |
29 | @ Switch VFP/NEON hardware state to the host's |
30 | add r7, vcpu, #VCPU_VFP_GUEST |
31 | store_vfp_state r7 |
32 | @@ -177,6 +173,8 @@ after_vfp_restore: |
33 | @ Restore FPEXC_EN which we clobbered on entry |
34 | pop {r2} |
35 | VFPFMXR FPEXC, r2 |
36 | +#else |
37 | +after_vfp_restore: |
38 | #endif |
39 | |
40 | @ Reset Hyp-role |
41 | @@ -472,7 +470,7 @@ switch_to_guest_vfp: |
42 | push {r3-r7} |
43 | |
44 | @ NEON/VFP used. Turn on VFP access. |
45 | - set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11)) |
46 | + set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11)) |
47 | |
48 | @ Switch VFP/NEON hardware state to the guest's |
49 | add r7, r0, #VCPU_VFP_HOST |
50 | diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S |
51 | index 14d488388480..f6f14812d106 100644 |
52 | --- a/arch/arm/kvm/interrupts_head.S |
53 | +++ b/arch/arm/kvm/interrupts_head.S |
54 | @@ -599,8 +599,13 @@ ARM_BE8(rev r6, r6 ) |
55 | .endm |
56 | |
57 | /* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return |
58 | - * (hardware reset value is 0). Keep previous value in r2. */ |
59 | -.macro set_hcptr operation, mask |
60 | + * (hardware reset value is 0). Keep previous value in r2. |
61 | + * An ISB is emited on vmexit/vmtrap, but executed on vmexit only if |
62 | + * VFP wasn't already enabled (always executed on vmtrap). |
63 | + * If a label is specified with vmexit, it is branched to if VFP wasn't |
64 | + * enabled. |
65 | + */ |
66 | +.macro set_hcptr operation, mask, label = none |
67 | mrc p15, 4, r2, c1, c1, 2 |
68 | ldr r3, =\mask |
69 | .if \operation == vmentry |
70 | @@ -609,6 +614,17 @@ ARM_BE8(rev r6, r6 ) |
71 | bic r3, r2, r3 @ Don't trap defined coproc-accesses |
72 | .endif |
73 | mcr p15, 4, r3, c1, c1, 2 |
74 | + .if \operation != vmentry |
75 | + .if \operation == vmexit |
76 | + tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11)) |
77 | + beq 1f |
78 | + .endif |
79 | + isb |
80 | + .if \label != none |
81 | + b \label |
82 | + .endif |
83 | +1: |
84 | + .endif |
85 | .endm |
86 | |
87 | /* Configures the HDCR (Hyp Debug Configuration Register) on entry/return |
88 | diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c |
89 | index 2daef619d053..5474a76803f0 100644 |
90 | --- a/arch/arm/mach-imx/clk-imx6q.c |
91 | +++ b/arch/arm/mach-imx/clk-imx6q.c |
92 | @@ -439,7 +439,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) |
93 | clk[IMX6QDL_CLK_GPMI_IO] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28); |
94 | clk[IMX6QDL_CLK_GPMI_APB] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30); |
95 | clk[IMX6QDL_CLK_ROM] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0); |
96 | - clk[IMX6QDL_CLK_SATA] = imx_clk_gate2("sata", "ipg", base + 0x7c, 4); |
97 | + clk[IMX6QDL_CLK_SATA] = imx_clk_gate2("sata", "ahb", base + 0x7c, 4); |
98 | clk[IMX6QDL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6); |
99 | clk[IMX6QDL_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12); |
100 | clk[IMX6QDL_CLK_SPDIF] = imx_clk_gate2("spdif", "spdif_podf", base + 0x7c, 14); |
101 | diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h |
102 | index 7fd3e27e3ccc..8afb863f5a9e 100644 |
103 | --- a/arch/arm64/include/asm/kvm_arm.h |
104 | +++ b/arch/arm64/include/asm/kvm_arm.h |
105 | @@ -18,6 +18,7 @@ |
106 | #ifndef __ARM64_KVM_ARM_H__ |
107 | #define __ARM64_KVM_ARM_H__ |
108 | |
109 | +#include <asm/memory.h> |
110 | #include <asm/types.h> |
111 | |
112 | /* Hyp Configuration Register (HCR) bits */ |
113 | @@ -160,9 +161,9 @@ |
114 | #endif |
115 | |
116 | #define VTTBR_BADDR_SHIFT (VTTBR_X - 1) |
117 | -#define VTTBR_BADDR_MASK (((1LLU << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) |
118 | -#define VTTBR_VMID_SHIFT (48LLU) |
119 | -#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT) |
120 | +#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) |
121 | +#define VTTBR_VMID_SHIFT (UL(48)) |
122 | +#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT) |
123 | |
124 | /* Hyp System Trap Register */ |
125 | #define HSTR_EL2_TTEE (1 << 16) |
126 | @@ -185,13 +186,13 @@ |
127 | |
128 | /* Exception Syndrome Register (ESR) bits */ |
129 | #define ESR_EL2_EC_SHIFT (26) |
130 | -#define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT) |
131 | -#define ESR_EL2_IL (1U << 25) |
132 | +#define ESR_EL2_EC (UL(0x3f) << ESR_EL2_EC_SHIFT) |
133 | +#define ESR_EL2_IL (UL(1) << 25) |
134 | #define ESR_EL2_ISS (ESR_EL2_IL - 1) |
135 | #define ESR_EL2_ISV_SHIFT (24) |
136 | -#define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT) |
137 | +#define ESR_EL2_ISV (UL(1) << ESR_EL2_ISV_SHIFT) |
138 | #define ESR_EL2_SAS_SHIFT (22) |
139 | -#define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT) |
140 | +#define ESR_EL2_SAS (UL(3) << ESR_EL2_SAS_SHIFT) |
141 | #define ESR_EL2_SSE (1 << 21) |
142 | #define ESR_EL2_SRT_SHIFT (16) |
143 | #define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT) |
144 | @@ -205,16 +206,16 @@ |
145 | #define ESR_EL2_FSC_TYPE (0x3c) |
146 | |
147 | #define ESR_EL2_CV_SHIFT (24) |
148 | -#define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT) |
149 | +#define ESR_EL2_CV (UL(1) << ESR_EL2_CV_SHIFT) |
150 | #define ESR_EL2_COND_SHIFT (20) |
151 | -#define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT) |
152 | +#define ESR_EL2_COND (UL(0xf) << ESR_EL2_COND_SHIFT) |
153 | |
154 | |
155 | #define FSC_FAULT (0x04) |
156 | #define FSC_PERM (0x0c) |
157 | |
158 | /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ |
159 | -#define HPFAR_MASK (~0xFUL) |
160 | +#define HPFAR_MASK (~UL(0xf)) |
161 | |
162 | #define ESR_EL2_EC_UNKNOWN (0x00) |
163 | #define ESR_EL2_EC_WFI (0x01) |
164 | diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig |
165 | index 3635fff7b32d..c9148e268512 100644 |
166 | --- a/arch/x86/Kconfig |
167 | +++ b/arch/x86/Kconfig |
168 | @@ -173,7 +173,7 @@ config SBUS |
169 | |
170 | config NEED_DMA_MAP_STATE |
171 | def_bool y |
172 | - depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG |
173 | + depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG || SWIOTLB |
174 | |
175 | config NEED_SG_DMA_LENGTH |
176 | def_bool y |
177 | diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h |
178 | index 6c0709ff2f38..306d152336cd 100644 |
179 | --- a/arch/x86/include/asm/kvm_host.h |
180 | +++ b/arch/x86/include/asm/kvm_host.h |
181 | @@ -571,7 +571,7 @@ struct kvm_arch { |
182 | struct kvm_pic *vpic; |
183 | struct kvm_ioapic *vioapic; |
184 | struct kvm_pit *vpit; |
185 | - int vapics_in_nmi_mode; |
186 | + atomic_t vapics_in_nmi_mode; |
187 | struct mutex apic_map_lock; |
188 | struct kvm_apic_map *apic_map; |
189 | |
190 | diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c |
191 | index ec9df6f9cd47..5e109a31f62b 100644 |
192 | --- a/arch/x86/kernel/cpu/microcode/intel_early.c |
193 | +++ b/arch/x86/kernel/cpu/microcode/intel_early.c |
194 | @@ -321,7 +321,7 @@ get_matching_model_microcode(int cpu, unsigned long start, |
195 | unsigned int mc_saved_count = mc_saved_data->mc_saved_count; |
196 | int i; |
197 | |
198 | - while (leftover) { |
199 | + while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) { |
200 | mc_header = (struct microcode_header_intel *)ucode_ptr; |
201 | |
202 | mc_size = get_totalsize(mc_header); |
203 | diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c |
204 | index 498b6d967138..df11583a9041 100644 |
205 | --- a/arch/x86/kernel/cpu/perf_event_intel.c |
206 | +++ b/arch/x86/kernel/cpu/perf_event_intel.c |
207 | @@ -2604,13 +2604,13 @@ __init int intel_pmu_init(void) |
208 | * counter, so do not extend mask to generic counters |
209 | */ |
210 | for_each_event_constraint(c, x86_pmu.event_constraints) { |
211 | - if (c->cmask != FIXED_EVENT_FLAGS |
212 | - || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) { |
213 | - continue; |
214 | + if (c->cmask == FIXED_EVENT_FLAGS |
215 | + && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) { |
216 | + c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1; |
217 | } |
218 | - |
219 | - c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1; |
220 | - c->weight += x86_pmu.num_counters; |
221 | + c->idxmsk64 &= |
222 | + ~(~0UL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed)); |
223 | + c->weight = hweight64(c->idxmsk64); |
224 | } |
225 | } |
226 | |
227 | diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S |
228 | index 344b63f18d14..3dddb89ba320 100644 |
229 | --- a/arch/x86/kernel/entry_32.S |
230 | +++ b/arch/x86/kernel/entry_32.S |
231 | @@ -982,6 +982,9 @@ ENTRY(xen_hypervisor_callback) |
232 | ENTRY(xen_do_upcall) |
233 | 1: mov %esp, %eax |
234 | call xen_evtchn_do_upcall |
235 | +#ifndef CONFIG_PREEMPT |
236 | + call xen_maybe_preempt_hcall |
237 | +#endif |
238 | jmp ret_from_intr |
239 | CFI_ENDPROC |
240 | ENDPROC(xen_hypervisor_callback) |
241 | diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S |
242 | index f1dc27f457f1..e36d9815ef56 100644 |
243 | --- a/arch/x86/kernel/entry_64.S |
244 | +++ b/arch/x86/kernel/entry_64.S |
245 | @@ -1173,6 +1173,9 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) |
246 | popq %rsp |
247 | CFI_DEF_CFA_REGISTER rsp |
248 | decl PER_CPU_VAR(irq_count) |
249 | +#ifndef CONFIG_PREEMPT |
250 | + call xen_maybe_preempt_hcall |
251 | +#endif |
252 | jmp error_exit |
253 | CFI_ENDPROC |
254 | END(xen_do_hypervisor_callback) |
255 | diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S |
256 | index 30a2aa3782fa..e7be5290fe1f 100644 |
257 | --- a/arch/x86/kernel/head_32.S |
258 | +++ b/arch/x86/kernel/head_32.S |
259 | @@ -61,9 +61,16 @@ |
260 | #define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD) |
261 | #endif |
262 | |
263 | -/* Number of possible pages in the lowmem region */ |
264 | -LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) |
265 | - |
266 | +/* |
267 | + * Number of possible pages in the lowmem region. |
268 | + * |
269 | + * We shift 2 by 31 instead of 1 by 32 to the left in order to avoid a |
270 | + * gas warning about overflowing shift count when gas has been compiled |
271 | + * with only a host target support using a 32-bit type for internal |
272 | + * representation. |
273 | + */ |
274 | +LOWMEM_PAGES = (((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT) |
275 | + |
276 | /* Enough space to fit pagetables for the low memory linear map */ |
277 | MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT |
278 | |
279 | diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c |
280 | index 93d2c04c6f8f..f2e281cf8c19 100644 |
281 | --- a/arch/x86/kernel/kprobes/core.c |
282 | +++ b/arch/x86/kernel/kprobes/core.c |
283 | @@ -330,13 +330,16 @@ int __copy_instruction(u8 *dest, u8 *src) |
284 | { |
285 | struct insn insn; |
286 | kprobe_opcode_t buf[MAX_INSN_SIZE]; |
287 | + int length; |
288 | |
289 | kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src)); |
290 | insn_get_length(&insn); |
291 | + length = insn.length; |
292 | + |
293 | /* Another subsystem puts a breakpoint, failed to recover */ |
294 | if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) |
295 | return 0; |
296 | - memcpy(dest, insn.kaddr, insn.length); |
297 | + memcpy(dest, insn.kaddr, length); |
298 | |
299 | #ifdef CONFIG_X86_64 |
300 | if (insn_rip_relative(&insn)) { |
301 | @@ -366,7 +369,7 @@ int __copy_instruction(u8 *dest, u8 *src) |
302 | *(s32 *) disp = (s32) newdisp; |
303 | } |
304 | #endif |
305 | - return insn.length; |
306 | + return length; |
307 | } |
308 | |
309 | static int arch_copy_kprobe(struct kprobe *p) |
310 | diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c |
311 | index 298781d4cfb4..1406ffde3e35 100644 |
312 | --- a/arch/x86/kvm/i8254.c |
313 | +++ b/arch/x86/kvm/i8254.c |
314 | @@ -305,7 +305,7 @@ static void pit_do_work(struct kthread_work *work) |
315 | * LVT0 to NMI delivery. Other PIC interrupts are just sent to |
316 | * VCPU0, and only if its LVT0 is in EXTINT mode. |
317 | */ |
318 | - if (kvm->arch.vapics_in_nmi_mode > 0) |
319 | + if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0) |
320 | kvm_for_each_vcpu(i, vcpu, kvm) |
321 | kvm_apic_nmi_wd_deliver(vcpu); |
322 | } |
323 | diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c |
324 | index b8345dd41b25..de8e50040124 100644 |
325 | --- a/arch/x86/kvm/lapic.c |
326 | +++ b/arch/x86/kvm/lapic.c |
327 | @@ -1112,10 +1112,10 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val) |
328 | if (!nmi_wd_enabled) { |
329 | apic_debug("Receive NMI setting on APIC_LVT0 " |
330 | "for cpu %d\n", apic->vcpu->vcpu_id); |
331 | - apic->vcpu->kvm->arch.vapics_in_nmi_mode++; |
332 | + atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); |
333 | } |
334 | } else if (nmi_wd_enabled) |
335 | - apic->vcpu->kvm->arch.vapics_in_nmi_mode--; |
336 | + atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); |
337 | } |
338 | |
339 | static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) |
340 | @@ -1687,6 +1687,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu, |
341 | |
342 | apic_update_ppr(apic); |
343 | hrtimer_cancel(&apic->lapic_timer.timer); |
344 | + apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0)); |
345 | update_divide_count(apic); |
346 | start_apic_timer(apic); |
347 | apic->irr_pending = true; |
348 | diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c |
349 | index 170e7d49ba65..b83bff87408f 100644 |
350 | --- a/arch/x86/kvm/svm.c |
351 | +++ b/arch/x86/kvm/svm.c |
352 | @@ -511,8 +511,10 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) |
353 | { |
354 | struct vcpu_svm *svm = to_svm(vcpu); |
355 | |
356 | - if (svm->vmcb->control.next_rip != 0) |
357 | + if (svm->vmcb->control.next_rip != 0) { |
358 | + WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS)); |
359 | svm->next_rip = svm->vmcb->control.next_rip; |
360 | + } |
361 | |
362 | if (!svm->next_rip) { |
363 | if (emulate_instruction(vcpu, EMULTYPE_SKIP) != |
364 | @@ -4306,7 +4308,9 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu, |
365 | break; |
366 | } |
367 | |
368 | - vmcb->control.next_rip = info->next_rip; |
369 | + /* TODO: Advertise NRIPS to guest hypervisor unconditionally */ |
370 | + if (static_cpu_has(X86_FEATURE_NRIPS)) |
371 | + vmcb->control.next_rip = info->next_rip; |
372 | vmcb->control.exit_code = icpt_info.exit_code; |
373 | vmexit = nested_svm_exit_handled(svm); |
374 | |
375 | diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c |
376 | index a46c4af2ac98..15697c630139 100644 |
377 | --- a/drivers/edac/sb_edac.c |
378 | +++ b/drivers/edac/sb_edac.c |
379 | @@ -910,7 +910,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci) |
380 | u32 reg; |
381 | u64 limit, prv = 0; |
382 | u64 tmp_mb; |
383 | - u32 mb, kb; |
384 | + u32 gb, mb; |
385 | u32 rir_way; |
386 | |
387 | /* |
388 | @@ -920,15 +920,17 @@ static void get_memory_layout(const struct mem_ctl_info *mci) |
389 | pvt->tolm = pvt->info.get_tolm(pvt); |
390 | tmp_mb = (1 + pvt->tolm) >> 20; |
391 | |
392 | - mb = div_u64_rem(tmp_mb, 1000, &kb); |
393 | - edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm); |
394 | + gb = div_u64_rem(tmp_mb, 1024, &mb); |
395 | + edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", |
396 | + gb, (mb*1000)/1024, (u64)pvt->tolm); |
397 | |
398 | /* Address range is already 45:25 */ |
399 | pvt->tohm = pvt->info.get_tohm(pvt); |
400 | tmp_mb = (1 + pvt->tohm) >> 20; |
401 | |
402 | - mb = div_u64_rem(tmp_mb, 1000, &kb); |
403 | - edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm); |
404 | + gb = div_u64_rem(tmp_mb, 1024, &mb); |
405 | + edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", |
406 | + gb, (mb*1000)/1024, (u64)pvt->tohm); |
407 | |
408 | /* |
409 | * Step 2) Get SAD range and SAD Interleave list |
410 | @@ -950,11 +952,11 @@ static void get_memory_layout(const struct mem_ctl_info *mci) |
411 | break; |
412 | |
413 | tmp_mb = (limit + 1) >> 20; |
414 | - mb = div_u64_rem(tmp_mb, 1000, &kb); |
415 | + gb = div_u64_rem(tmp_mb, 1024, &mb); |
416 | edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n", |
417 | n_sads, |
418 | get_dram_attr(reg), |
419 | - mb, kb, |
420 | + gb, (mb*1000)/1024, |
421 | ((u64)tmp_mb) << 20L, |
422 | INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]", |
423 | reg); |
424 | @@ -985,9 +987,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci) |
425 | break; |
426 | tmp_mb = (limit + 1) >> 20; |
427 | |
428 | - mb = div_u64_rem(tmp_mb, 1000, &kb); |
429 | + gb = div_u64_rem(tmp_mb, 1024, &mb); |
430 | edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n", |
431 | - n_tads, mb, kb, |
432 | + n_tads, gb, (mb*1000)/1024, |
433 | ((u64)tmp_mb) << 20L, |
434 | (u32)TAD_SOCK(reg), |
435 | (u32)TAD_CH(reg), |
436 | @@ -1010,10 +1012,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci) |
437 | tad_ch_nilv_offset[j], |
438 | ®); |
439 | tmp_mb = TAD_OFFSET(reg) >> 20; |
440 | - mb = div_u64_rem(tmp_mb, 1000, &kb); |
441 | + gb = div_u64_rem(tmp_mb, 1024, &mb); |
442 | edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n", |
443 | i, j, |
444 | - mb, kb, |
445 | + gb, (mb*1000)/1024, |
446 | ((u64)tmp_mb) << 20L, |
447 | reg); |
448 | } |
449 | @@ -1035,10 +1037,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci) |
450 | |
451 | tmp_mb = pvt->info.rir_limit(reg) >> 20; |
452 | rir_way = 1 << RIR_WAY(reg); |
453 | - mb = div_u64_rem(tmp_mb, 1000, &kb); |
454 | + gb = div_u64_rem(tmp_mb, 1024, &mb); |
455 | edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n", |
456 | i, j, |
457 | - mb, kb, |
458 | + gb, (mb*1000)/1024, |
459 | ((u64)tmp_mb) << 20L, |
460 | rir_way, |
461 | reg); |
462 | @@ -1049,10 +1051,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci) |
463 | ®); |
464 | tmp_mb = RIR_OFFSET(reg) << 6; |
465 | |
466 | - mb = div_u64_rem(tmp_mb, 1000, &kb); |
467 | + gb = div_u64_rem(tmp_mb, 1024, &mb); |
468 | edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n", |
469 | i, j, k, |
470 | - mb, kb, |
471 | + gb, (mb*1000)/1024, |
472 | ((u64)tmp_mb) << 20L, |
473 | (u32)RIR_RNK_TGT(reg), |
474 | reg); |
475 | @@ -1090,7 +1092,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci, |
476 | u8 ch_way, sck_way, pkg, sad_ha = 0; |
477 | u32 tad_offset; |
478 | u32 rir_way; |
479 | - u32 mb, kb; |
480 | + u32 mb, gb; |
481 | u64 ch_addr, offset, limit = 0, prv = 0; |
482 | |
483 | |
484 | @@ -1359,10 +1361,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci, |
485 | continue; |
486 | |
487 | limit = pvt->info.rir_limit(reg); |
488 | - mb = div_u64_rem(limit >> 20, 1000, &kb); |
489 | + gb = div_u64_rem(limit >> 20, 1024, &mb); |
490 | edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n", |
491 | n_rir, |
492 | - mb, kb, |
493 | + gb, (mb*1000)/1024, |
494 | limit, |
495 | 1 << RIR_WAY(reg)); |
496 | if (ch_addr <= limit) |
497 | diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c |
498 | index f599357e8392..826ef3df2dc7 100644 |
499 | --- a/drivers/input/mouse/synaptics.c |
500 | +++ b/drivers/input/mouse/synaptics.c |
501 | @@ -149,6 +149,7 @@ static const struct min_max_quirk min_max_pnpid_table[] = { |
502 | }, |
503 | { |
504 | (const char * const []){"LEN2000", NULL}, |
505 | + {ANY_BOARD_ID, ANY_BOARD_ID}, |
506 | 1024, 5113, 2021, 4832 |
507 | }, |
508 | { |
509 | diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c |
510 | index 505a9adac2d5..fab0ea1a46d1 100644 |
511 | --- a/drivers/iommu/amd_iommu.c |
512 | +++ b/drivers/iommu/amd_iommu.c |
513 | @@ -1870,9 +1870,15 @@ static void free_pt_##LVL (unsigned long __pt) \ |
514 | pt = (u64 *)__pt; \ |
515 | \ |
516 | for (i = 0; i < 512; ++i) { \ |
517 | + /* PTE present? */ \ |
518 | if (!IOMMU_PTE_PRESENT(pt[i])) \ |
519 | continue; \ |
520 | \ |
521 | + /* Large PTE? */ \ |
522 | + if (PM_PTE_LEVEL(pt[i]) == 0 || \ |
523 | + PM_PTE_LEVEL(pt[i]) == 7) \ |
524 | + continue; \ |
525 | + \ |
526 | p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \ |
527 | FN(p); \ |
528 | } \ |
529 | diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c |
530 | index 573b53b38af4..bb686e15102c 100644 |
531 | --- a/drivers/net/can/dev.c |
532 | +++ b/drivers/net/can/dev.c |
533 | @@ -360,6 +360,9 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) |
534 | struct can_frame *cf = (struct can_frame *)skb->data; |
535 | u8 dlc = cf->can_dlc; |
536 | |
537 | + if (!(skb->tstamp.tv64)) |
538 | + __net_timestamp(skb); |
539 | + |
540 | netif_rx(priv->echo_skb[idx]); |
541 | priv->echo_skb[idx] = NULL; |
542 | |
543 | @@ -496,6 +499,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) |
544 | if (unlikely(!skb)) |
545 | return NULL; |
546 | |
547 | + __net_timestamp(skb); |
548 | skb->protocol = htons(ETH_P_CAN); |
549 | skb->pkt_type = PACKET_BROADCAST; |
550 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
551 | @@ -524,6 +528,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev, |
552 | if (unlikely(!skb)) |
553 | return NULL; |
554 | |
555 | + __net_timestamp(skb); |
556 | skb->protocol = htons(ETH_P_CANFD); |
557 | skb->pkt_type = PACKET_BROADCAST; |
558 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
559 | diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c |
560 | index acb5b92ace92..cb6b4723af4a 100644 |
561 | --- a/drivers/net/can/slcan.c |
562 | +++ b/drivers/net/can/slcan.c |
563 | @@ -210,6 +210,7 @@ static void slc_bump(struct slcan *sl) |
564 | if (!skb) |
565 | return; |
566 | |
567 | + __net_timestamp(skb); |
568 | skb->dev = sl->dev; |
569 | skb->protocol = htons(ETH_P_CAN); |
570 | skb->pkt_type = PACKET_BROADCAST; |
571 | diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c |
572 | index 4e94057ef5cf..30e4627a0c01 100644 |
573 | --- a/drivers/net/can/vcan.c |
574 | +++ b/drivers/net/can/vcan.c |
575 | @@ -81,6 +81,9 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev) |
576 | skb->dev = dev; |
577 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
578 | |
579 | + if (!(skb->tstamp.tv64)) |
580 | + __net_timestamp(skb); |
581 | + |
582 | netif_rx_ni(skb); |
583 | } |
584 | |
585 | diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c |
586 | index bda52f18e967..397e6b8b9656 100644 |
587 | --- a/drivers/s390/kvm/virtio_ccw.c |
588 | +++ b/drivers/s390/kvm/virtio_ccw.c |
589 | @@ -64,6 +64,7 @@ struct virtio_ccw_device { |
590 | bool is_thinint; |
591 | bool going_away; |
592 | bool device_lost; |
593 | + unsigned int config_ready; |
594 | void *airq_info; |
595 | }; |
596 | |
597 | @@ -758,8 +759,11 @@ static void virtio_ccw_get_config(struct virtio_device *vdev, |
598 | if (ret) |
599 | goto out_free; |
600 | |
601 | - memcpy(vcdev->config, config_area, sizeof(vcdev->config)); |
602 | - memcpy(buf, &vcdev->config[offset], len); |
603 | + memcpy(vcdev->config, config_area, offset + len); |
604 | + if (buf) |
605 | + memcpy(buf, &vcdev->config[offset], len); |
606 | + if (vcdev->config_ready < offset + len) |
607 | + vcdev->config_ready = offset + len; |
608 | |
609 | out_free: |
610 | kfree(config_area); |
611 | @@ -782,6 +786,9 @@ static void virtio_ccw_set_config(struct virtio_device *vdev, |
612 | if (!config_area) |
613 | goto out_free; |
614 | |
615 | + /* Make sure we don't overwrite fields. */ |
616 | + if (vcdev->config_ready < offset) |
617 | + virtio_ccw_get_config(vdev, 0, NULL, offset); |
618 | memcpy(&vcdev->config[offset], buf, len); |
619 | /* Write the config area to the host. */ |
620 | memcpy(config_area, vcdev->config, sizeof(vcdev->config)); |
621 | diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c |
622 | index 11300f7b49cb..daaed7c79e4f 100644 |
623 | --- a/drivers/tty/serial/atmel_serial.c |
624 | +++ b/drivers/tty/serial/atmel_serial.c |
625 | @@ -311,8 +311,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf) |
626 | if (rs485conf->flags & SER_RS485_ENABLED) { |
627 | dev_dbg(port->dev, "Setting UART to RS485\n"); |
628 | atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; |
629 | - if ((rs485conf->delay_rts_after_send) > 0) |
630 | - UART_PUT_TTGR(port, rs485conf->delay_rts_after_send); |
631 | + UART_PUT_TTGR(port, rs485conf->delay_rts_after_send); |
632 | mode |= ATMEL_US_USMODE_RS485; |
633 | } else { |
634 | dev_dbg(port->dev, "Setting UART to RS232\n"); |
635 | @@ -2016,9 +2015,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, |
636 | mode &= ~ATMEL_US_USMODE; |
637 | |
638 | if (atmel_port->rs485.flags & SER_RS485_ENABLED) { |
639 | - if ((atmel_port->rs485.delay_rts_after_send) > 0) |
640 | - UART_PUT_TTGR(port, |
641 | - atmel_port->rs485.delay_rts_after_send); |
642 | + UART_PUT_TTGR(port, atmel_port->rs485.delay_rts_after_send); |
643 | mode |= ATMEL_US_USMODE_RS485; |
644 | } |
645 | |
646 | diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c |
647 | index 63314ede7ba6..ab9b7ac63407 100644 |
648 | --- a/drivers/usb/gadget/function/f_fs.c |
649 | +++ b/drivers/usb/gadget/function/f_fs.c |
650 | @@ -3400,6 +3400,7 @@ done: |
651 | static void ffs_closed(struct ffs_data *ffs) |
652 | { |
653 | struct ffs_dev *ffs_obj; |
654 | + struct f_fs_opts *opts; |
655 | |
656 | ENTER(); |
657 | ffs_dev_lock(); |
658 | @@ -3413,8 +3414,13 @@ static void ffs_closed(struct ffs_data *ffs) |
659 | if (ffs_obj->ffs_closed_callback) |
660 | ffs_obj->ffs_closed_callback(ffs); |
661 | |
662 | - if (!ffs_obj->opts || ffs_obj->opts->no_configfs |
663 | - || !ffs_obj->opts->func_inst.group.cg_item.ci_parent) |
664 | + if (ffs_obj->opts) |
665 | + opts = ffs_obj->opts; |
666 | + else |
667 | + goto done; |
668 | + |
669 | + if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent |
670 | + || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount)) |
671 | goto done; |
672 | |
673 | unregister_gadget_item(ffs_obj->opts-> |
674 | diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile |
675 | index 2140398a2a8c..2ccd3592d41f 100644 |
676 | --- a/drivers/xen/Makefile |
677 | +++ b/drivers/xen/Makefile |
678 | @@ -2,7 +2,7 @@ ifeq ($(filter y, $(CONFIG_ARM) $(CONFIG_ARM64)),) |
679 | obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o |
680 | endif |
681 | obj-$(CONFIG_X86) += fallback.o |
682 | -obj-y += grant-table.o features.o balloon.o manage.o |
683 | +obj-y += grant-table.o features.o balloon.o manage.o preempt.o |
684 | obj-y += events/ |
685 | obj-y += xenbus/ |
686 | |
687 | diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c |
688 | new file mode 100644 |
689 | index 000000000000..a1800c150839 |
690 | --- /dev/null |
691 | +++ b/drivers/xen/preempt.c |
692 | @@ -0,0 +1,44 @@ |
693 | +/* |
694 | + * Preemptible hypercalls |
695 | + * |
696 | + * Copyright (C) 2014 Citrix Systems R&D ltd. |
697 | + * |
698 | + * This source code is free software; you can redistribute it and/or |
699 | + * modify it under the terms of the GNU General Public License as |
700 | + * published by the Free Software Foundation; either version 2 of the |
701 | + * License, or (at your option) any later version. |
702 | + */ |
703 | + |
704 | +#include <linux/sched.h> |
705 | +#include <xen/xen-ops.h> |
706 | + |
707 | +#ifndef CONFIG_PREEMPT |
708 | + |
709 | +/* |
710 | + * Some hypercalls issued by the toolstack can take many 10s of |
711 | + * seconds. Allow tasks running hypercalls via the privcmd driver to |
712 | + * be voluntarily preempted even if full kernel preemption is |
713 | + * disabled. |
714 | + * |
715 | + * Such preemptible hypercalls are bracketed by |
716 | + * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end() |
717 | + * calls. |
718 | + */ |
719 | + |
720 | +DEFINE_PER_CPU(bool, xen_in_preemptible_hcall); |
721 | +EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall); |
722 | + |
723 | +asmlinkage __visible void xen_maybe_preempt_hcall(void) |
724 | +{ |
725 | + if (unlikely(__this_cpu_read(xen_in_preemptible_hcall) |
726 | + && should_resched())) { |
727 | + /* |
728 | + * Clear flag as we may be rescheduled on a different |
729 | + * cpu. |
730 | + */ |
731 | + __this_cpu_write(xen_in_preemptible_hcall, false); |
732 | + _cond_resched(); |
733 | + __this_cpu_write(xen_in_preemptible_hcall, true); |
734 | + } |
735 | +} |
736 | +#endif /* CONFIG_PREEMPT */ |
737 | diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c |
738 | index 569a13b9e856..59ac71c4a043 100644 |
739 | --- a/drivers/xen/privcmd.c |
740 | +++ b/drivers/xen/privcmd.c |
741 | @@ -56,10 +56,12 @@ static long privcmd_ioctl_hypercall(void __user *udata) |
742 | if (copy_from_user(&hypercall, udata, sizeof(hypercall))) |
743 | return -EFAULT; |
744 | |
745 | + xen_preemptible_hcall_begin(); |
746 | ret = privcmd_call(hypercall.op, |
747 | hypercall.arg[0], hypercall.arg[1], |
748 | hypercall.arg[2], hypercall.arg[3], |
749 | hypercall.arg[4]); |
750 | + xen_preemptible_hcall_end(); |
751 | |
752 | return ret; |
753 | } |
754 | diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c |
755 | index c81ce0c7c1a9..f54511dd287e 100644 |
756 | --- a/fs/btrfs/ctree.c |
757 | +++ b/fs/btrfs/ctree.c |
758 | @@ -2920,7 +2920,7 @@ done: |
759 | */ |
760 | if (!p->leave_spinning) |
761 | btrfs_set_path_blocking(p); |
762 | - if (ret < 0) |
763 | + if (ret < 0 && !p->skip_release_on_error) |
764 | btrfs_release_path(p); |
765 | return ret; |
766 | } |
767 | diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h |
768 | index fe69edda11fb..ba5aec76e6f8 100644 |
769 | --- a/fs/btrfs/ctree.h |
770 | +++ b/fs/btrfs/ctree.h |
771 | @@ -607,6 +607,7 @@ struct btrfs_path { |
772 | unsigned int leave_spinning:1; |
773 | unsigned int search_commit_root:1; |
774 | unsigned int need_commit_sem:1; |
775 | + unsigned int skip_release_on_error:1; |
776 | }; |
777 | |
778 | /* |
779 | @@ -3686,6 +3687,10 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, |
780 | int verify_dir_item(struct btrfs_root *root, |
781 | struct extent_buffer *leaf, |
782 | struct btrfs_dir_item *dir_item); |
783 | +struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, |
784 | + struct btrfs_path *path, |
785 | + const char *name, |
786 | + int name_len); |
787 | |
788 | /* orphan.c */ |
789 | int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans, |
790 | diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c |
791 | index fc8df866e919..1752625fb4dd 100644 |
792 | --- a/fs/btrfs/dir-item.c |
793 | +++ b/fs/btrfs/dir-item.c |
794 | @@ -21,10 +21,6 @@ |
795 | #include "hash.h" |
796 | #include "transaction.h" |
797 | |
798 | -static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, |
799 | - struct btrfs_path *path, |
800 | - const char *name, int name_len); |
801 | - |
802 | /* |
803 | * insert a name into a directory, doing overflow properly if there is a hash |
804 | * collision. data_size indicates how big the item inserted should be. On |
805 | @@ -383,9 +379,9 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, |
806 | * this walks through all the entries in a dir item and finds one |
807 | * for a specific name. |
808 | */ |
809 | -static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, |
810 | - struct btrfs_path *path, |
811 | - const char *name, int name_len) |
812 | +struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, |
813 | + struct btrfs_path *path, |
814 | + const char *name, int name_len) |
815 | { |
816 | struct btrfs_dir_item *dir_item; |
817 | unsigned long name_ptr; |
818 | diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c |
819 | index 00eacd83ce3d..01bad724b5f7 100644 |
820 | --- a/fs/btrfs/xattr.c |
821 | +++ b/fs/btrfs/xattr.c |
822 | @@ -29,6 +29,7 @@ |
823 | #include "xattr.h" |
824 | #include "disk-io.h" |
825 | #include "props.h" |
826 | +#include "locking.h" |
827 | |
828 | |
829 | ssize_t __btrfs_getxattr(struct inode *inode, const char *name, |
830 | @@ -91,7 +92,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans, |
831 | struct inode *inode, const char *name, |
832 | const void *value, size_t size, int flags) |
833 | { |
834 | - struct btrfs_dir_item *di; |
835 | + struct btrfs_dir_item *di = NULL; |
836 | struct btrfs_root *root = BTRFS_I(inode)->root; |
837 | struct btrfs_path *path; |
838 | size_t name_len = strlen(name); |
839 | @@ -103,84 +104,119 @@ static int do_setxattr(struct btrfs_trans_handle *trans, |
840 | path = btrfs_alloc_path(); |
841 | if (!path) |
842 | return -ENOMEM; |
843 | + path->skip_release_on_error = 1; |
844 | + |
845 | + if (!value) { |
846 | + di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), |
847 | + name, name_len, -1); |
848 | + if (!di && (flags & XATTR_REPLACE)) |
849 | + ret = -ENODATA; |
850 | + else if (di) |
851 | + ret = btrfs_delete_one_dir_name(trans, root, path, di); |
852 | + goto out; |
853 | + } |
854 | |
855 | + /* |
856 | + * For a replace we can't just do the insert blindly. |
857 | + * Do a lookup first (read-only btrfs_search_slot), and return if xattr |
858 | + * doesn't exist. If it exists, fall down below to the insert/replace |
859 | + * path - we can't race with a concurrent xattr delete, because the VFS |
860 | + * locks the inode's i_mutex before calling setxattr or removexattr. |
861 | + */ |
862 | if (flags & XATTR_REPLACE) { |
863 | - di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name, |
864 | - name_len, -1); |
865 | - if (IS_ERR(di)) { |
866 | - ret = PTR_ERR(di); |
867 | - goto out; |
868 | - } else if (!di) { |
869 | + ASSERT(mutex_is_locked(&inode->i_mutex)); |
870 | + di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), |
871 | + name, name_len, 0); |
872 | + if (!di) { |
873 | ret = -ENODATA; |
874 | goto out; |
875 | } |
876 | - ret = btrfs_delete_one_dir_name(trans, root, path, di); |
877 | - if (ret) |
878 | - goto out; |
879 | btrfs_release_path(path); |
880 | + di = NULL; |
881 | + } |
882 | |
883 | + ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode), |
884 | + name, name_len, value, size); |
885 | + if (ret == -EOVERFLOW) { |
886 | /* |
887 | - * remove the attribute |
888 | + * We have an existing item in a leaf, split_leaf couldn't |
889 | + * expand it. That item might have or not a dir_item that |
890 | + * matches our target xattr, so lets check. |
891 | */ |
892 | - if (!value) |
893 | - goto out; |
894 | - } else { |
895 | - di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), |
896 | - name, name_len, 0); |
897 | - if (IS_ERR(di)) { |
898 | - ret = PTR_ERR(di); |
899 | + ret = 0; |
900 | + btrfs_assert_tree_locked(path->nodes[0]); |
901 | + di = btrfs_match_dir_item_name(root, path, name, name_len); |
902 | + if (!di && !(flags & XATTR_REPLACE)) { |
903 | + ret = -ENOSPC; |
904 | goto out; |
905 | } |
906 | - if (!di && !value) |
907 | - goto out; |
908 | - btrfs_release_path(path); |
909 | + } else if (ret == -EEXIST) { |
910 | + ret = 0; |
911 | + di = btrfs_match_dir_item_name(root, path, name, name_len); |
912 | + ASSERT(di); /* logic error */ |
913 | + } else if (ret) { |
914 | + goto out; |
915 | } |
916 | |
917 | -again: |
918 | - ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode), |
919 | - name, name_len, value, size); |
920 | - /* |
921 | - * If we're setting an xattr to a new value but the new value is say |
922 | - * exactly BTRFS_MAX_XATTR_SIZE, we could end up with EOVERFLOW getting |
923 | - * back from split_leaf. This is because it thinks we'll be extending |
924 | - * the existing item size, but we're asking for enough space to add the |
925 | - * item itself. So if we get EOVERFLOW just set ret to EEXIST and let |
926 | - * the rest of the function figure it out. |
927 | - */ |
928 | - if (ret == -EOVERFLOW) |
929 | + if (di && (flags & XATTR_CREATE)) { |
930 | ret = -EEXIST; |
931 | + goto out; |
932 | + } |
933 | |
934 | - if (ret == -EEXIST) { |
935 | - if (flags & XATTR_CREATE) |
936 | - goto out; |
937 | + if (di) { |
938 | /* |
939 | - * We can't use the path we already have since we won't have the |
940 | - * proper locking for a delete, so release the path and |
941 | - * re-lookup to delete the thing. |
942 | + * We're doing a replace, and it must be atomic, that is, at |
943 | + * any point in time we have either the old or the new xattr |
944 | + * value in the tree. We don't want readers (getxattr and |
945 | + * listxattrs) to miss a value, this is specially important |
946 | + * for ACLs. |
947 | */ |
948 | - btrfs_release_path(path); |
949 | - di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), |
950 | - name, name_len, -1); |
951 | - if (IS_ERR(di)) { |
952 | - ret = PTR_ERR(di); |
953 | - goto out; |
954 | - } else if (!di) { |
955 | - /* Shouldn't happen but just in case... */ |
956 | - btrfs_release_path(path); |
957 | - goto again; |
958 | + const int slot = path->slots[0]; |
959 | + struct extent_buffer *leaf = path->nodes[0]; |
960 | + const u16 old_data_len = btrfs_dir_data_len(leaf, di); |
961 | + const u32 item_size = btrfs_item_size_nr(leaf, slot); |
962 | + const u32 data_size = sizeof(*di) + name_len + size; |
963 | + struct btrfs_item *item; |
964 | + unsigned long data_ptr; |
965 | + char *ptr; |
966 | + |
967 | + if (size > old_data_len) { |
968 | + if (btrfs_leaf_free_space(root, leaf) < |
969 | + (size - old_data_len)) { |
970 | + ret = -ENOSPC; |
971 | + goto out; |
972 | + } |
973 | } |
974 | |
975 | - ret = btrfs_delete_one_dir_name(trans, root, path, di); |
976 | - if (ret) |
977 | - goto out; |
978 | + if (old_data_len + name_len + sizeof(*di) == item_size) { |
979 | + /* No other xattrs packed in the same leaf item. */ |
980 | + if (size > old_data_len) |
981 | + btrfs_extend_item(root, path, |
982 | + size - old_data_len); |
983 | + else if (size < old_data_len) |
984 | + btrfs_truncate_item(root, path, data_size, 1); |
985 | + } else { |
986 | + /* There are other xattrs packed in the same item. */ |
987 | + ret = btrfs_delete_one_dir_name(trans, root, path, di); |
988 | + if (ret) |
989 | + goto out; |
990 | + btrfs_extend_item(root, path, data_size); |
991 | + } |
992 | |
993 | + item = btrfs_item_nr(slot); |
994 | + ptr = btrfs_item_ptr(leaf, slot, char); |
995 | + ptr += btrfs_item_size(leaf, item) - data_size; |
996 | + di = (struct btrfs_dir_item *)ptr; |
997 | + btrfs_set_dir_data_len(leaf, di, size); |
998 | + data_ptr = ((unsigned long)(di + 1)) + name_len; |
999 | + write_extent_buffer(leaf, value, data_ptr, size); |
1000 | + btrfs_mark_buffer_dirty(leaf); |
1001 | + } else { |
1002 | /* |
1003 | - * We have a value to set, so go back and try to insert it now. |
1004 | + * Insert, and we had space for the xattr, so path->slots[0] is |
1005 | + * where our xattr dir_item is and btrfs_insert_xattr_item() |
1006 | + * filled it. |
1007 | */ |
1008 | - if (value) { |
1009 | - btrfs_release_path(path); |
1010 | - goto again; |
1011 | - } |
1012 | } |
1013 | out: |
1014 | btrfs_free_path(path); |
1015 | diff --git a/fs/dcache.c b/fs/dcache.c |
1016 | index e0750b8f51aa..d0539a4a1ab1 100644 |
1017 | --- a/fs/dcache.c |
1018 | +++ b/fs/dcache.c |
1019 | @@ -2893,17 +2893,6 @@ restart: |
1020 | vfsmnt = &mnt->mnt; |
1021 | continue; |
1022 | } |
1023 | - /* |
1024 | - * Filesystems needing to implement special "root names" |
1025 | - * should do so with ->d_dname() |
1026 | - */ |
1027 | - if (IS_ROOT(dentry) && |
1028 | - (dentry->d_name.len != 1 || |
1029 | - dentry->d_name.name[0] != '/')) { |
1030 | - WARN(1, "Root dentry has weird name <%.*s>\n", |
1031 | - (int) dentry->d_name.len, |
1032 | - dentry->d_name.name); |
1033 | - } |
1034 | if (!error) |
1035 | error = is_mounted(vfsmnt) ? 1 : 2; |
1036 | break; |
1037 | diff --git a/fs/inode.c b/fs/inode.c |
1038 | index 26753ba7b6d6..56d1d2b4bf31 100644 |
1039 | --- a/fs/inode.c |
1040 | +++ b/fs/inode.c |
1041 | @@ -1631,8 +1631,8 @@ int file_remove_suid(struct file *file) |
1042 | error = security_inode_killpriv(dentry); |
1043 | if (!error && killsuid) |
1044 | error = __remove_suid(dentry, killsuid); |
1045 | - if (!error && (inode->i_sb->s_flags & MS_NOSEC)) |
1046 | - inode->i_flags |= S_NOSEC; |
1047 | + if (!error) |
1048 | + inode_has_no_xattr(inode); |
1049 | |
1050 | return error; |
1051 | } |
1052 | diff --git a/fs/namespace.c b/fs/namespace.c |
1053 | index a19d05c4ebe5..da23ad8a2c85 100644 |
1054 | --- a/fs/namespace.c |
1055 | +++ b/fs/namespace.c |
1056 | @@ -2297,6 +2297,8 @@ unlock: |
1057 | return err; |
1058 | } |
1059 | |
1060 | +static bool fs_fully_visible(struct file_system_type *fs_type, int *new_mnt_flags); |
1061 | + |
1062 | /* |
1063 | * create a new mount for userspace and request it to be added into the |
1064 | * namespace's tree |
1065 | @@ -2328,6 +2330,10 @@ static int do_new_mount(struct path *path, const char *fstype, int flags, |
1066 | flags |= MS_NODEV; |
1067 | mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV; |
1068 | } |
1069 | + if (type->fs_flags & FS_USERNS_VISIBLE) { |
1070 | + if (!fs_fully_visible(type, &mnt_flags)) |
1071 | + return -EPERM; |
1072 | + } |
1073 | } |
1074 | |
1075 | mnt = vfs_kern_mount(type, flags, name, data); |
1076 | @@ -3125,9 +3131,10 @@ bool current_chrooted(void) |
1077 | return chrooted; |
1078 | } |
1079 | |
1080 | -bool fs_fully_visible(struct file_system_type *type) |
1081 | +static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags) |
1082 | { |
1083 | struct mnt_namespace *ns = current->nsproxy->mnt_ns; |
1084 | + int new_flags = *new_mnt_flags; |
1085 | struct mount *mnt; |
1086 | bool visible = false; |
1087 | |
1088 | @@ -3146,16 +3153,37 @@ bool fs_fully_visible(struct file_system_type *type) |
1089 | if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root) |
1090 | continue; |
1091 | |
1092 | - /* This mount is not fully visible if there are any child mounts |
1093 | - * that cover anything except for empty directories. |
1094 | + /* Verify the mount flags are equal to or more permissive |
1095 | + * than the proposed new mount. |
1096 | + */ |
1097 | + if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) && |
1098 | + !(new_flags & MNT_READONLY)) |
1099 | + continue; |
1100 | + if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) && |
1101 | + !(new_flags & MNT_NODEV)) |
1102 | + continue; |
1103 | + if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) && |
1104 | + ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK))) |
1105 | + continue; |
1106 | + |
1107 | + /* This mount is not fully visible if there are any |
1108 | + * locked child mounts that cover anything except for |
1109 | + * empty directories. |
1110 | */ |
1111 | list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { |
1112 | struct inode *inode = child->mnt_mountpoint->d_inode; |
1113 | + /* Only worry about locked mounts */ |
1114 | + if (!(mnt->mnt.mnt_flags & MNT_LOCKED)) |
1115 | + continue; |
1116 | if (!S_ISDIR(inode->i_mode)) |
1117 | goto next; |
1118 | if (inode->i_nlink > 2) |
1119 | goto next; |
1120 | } |
1121 | + /* Preserve the locked attributes */ |
1122 | + *new_mnt_flags |= mnt->mnt.mnt_flags & (MNT_LOCK_READONLY | \ |
1123 | + MNT_LOCK_NODEV | \ |
1124 | + MNT_LOCK_ATIME); |
1125 | visible = true; |
1126 | goto found; |
1127 | next: ; |
1128 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
1129 | index b13edc0865f7..c9ff4a176a25 100644 |
1130 | --- a/fs/nfs/nfs4proc.c |
1131 | +++ b/fs/nfs/nfs4proc.c |
1132 | @@ -5356,7 +5356,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, |
1133 | atomic_inc(&lsp->ls_count); |
1134 | /* Ensure we don't close file until we're done freeing locks! */ |
1135 | p->ctx = get_nfs_open_context(ctx); |
1136 | - get_file(fl->fl_file); |
1137 | memcpy(&p->fl, fl, sizeof(p->fl)); |
1138 | p->server = NFS_SERVER(inode); |
1139 | return p; |
1140 | @@ -5368,7 +5367,6 @@ static void nfs4_locku_release_calldata(void *data) |
1141 | nfs_free_seqid(calldata->arg.seqid); |
1142 | nfs4_put_lock_state(calldata->lsp); |
1143 | put_nfs_open_context(calldata->ctx); |
1144 | - fput(calldata->fl.fl_file); |
1145 | kfree(calldata); |
1146 | } |
1147 | |
1148 | diff --git a/fs/proc/root.c b/fs/proc/root.c |
1149 | index 094e44d4a6be..9e772f1a5386 100644 |
1150 | --- a/fs/proc/root.c |
1151 | +++ b/fs/proc/root.c |
1152 | @@ -112,9 +112,6 @@ static struct dentry *proc_mount(struct file_system_type *fs_type, |
1153 | ns = task_active_pid_ns(current); |
1154 | options = data; |
1155 | |
1156 | - if (!capable(CAP_SYS_ADMIN) && !fs_fully_visible(fs_type)) |
1157 | - return ERR_PTR(-EPERM); |
1158 | - |
1159 | /* Does the mounter have privilege over the pid namespace? */ |
1160 | if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) |
1161 | return ERR_PTR(-EPERM); |
1162 | @@ -159,7 +156,7 @@ static struct file_system_type proc_fs_type = { |
1163 | .name = "proc", |
1164 | .mount = proc_mount, |
1165 | .kill_sb = proc_kill_sb, |
1166 | - .fs_flags = FS_USERNS_MOUNT, |
1167 | + .fs_flags = FS_USERNS_VISIBLE | FS_USERNS_MOUNT, |
1168 | }; |
1169 | |
1170 | void __init proc_root_init(void) |
1171 | diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c |
1172 | index 8a49486bf30c..1c6ac6fcee9f 100644 |
1173 | --- a/fs/sysfs/mount.c |
1174 | +++ b/fs/sysfs/mount.c |
1175 | @@ -31,9 +31,6 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type, |
1176 | bool new_sb; |
1177 | |
1178 | if (!(flags & MS_KERNMOUNT)) { |
1179 | - if (!capable(CAP_SYS_ADMIN) && !fs_fully_visible(fs_type)) |
1180 | - return ERR_PTR(-EPERM); |
1181 | - |
1182 | if (!kobj_ns_current_may_mount(KOBJ_NS_TYPE_NET)) |
1183 | return ERR_PTR(-EPERM); |
1184 | } |
1185 | @@ -58,7 +55,7 @@ static struct file_system_type sysfs_fs_type = { |
1186 | .name = "sysfs", |
1187 | .mount = sysfs_mount, |
1188 | .kill_sb = sysfs_kill_sb, |
1189 | - .fs_flags = FS_USERNS_MOUNT, |
1190 | + .fs_flags = FS_USERNS_VISIBLE | FS_USERNS_MOUNT, |
1191 | }; |
1192 | |
1193 | int __init sysfs_init(void) |
1194 | diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c |
1195 | index 2c1036080d52..a7106eda5024 100644 |
1196 | --- a/fs/ufs/balloc.c |
1197 | +++ b/fs/ufs/balloc.c |
1198 | @@ -51,8 +51,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count) |
1199 | |
1200 | if (ufs_fragnum(fragment) + count > uspi->s_fpg) |
1201 | ufs_error (sb, "ufs_free_fragments", "internal error"); |
1202 | - |
1203 | - lock_ufs(sb); |
1204 | + |
1205 | + mutex_lock(&UFS_SB(sb)->s_lock); |
1206 | |
1207 | cgno = ufs_dtog(uspi, fragment); |
1208 | bit = ufs_dtogd(uspi, fragment); |
1209 | @@ -115,13 +115,13 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count) |
1210 | if (sb->s_flags & MS_SYNCHRONOUS) |
1211 | ubh_sync_block(UCPI_UBH(ucpi)); |
1212 | ufs_mark_sb_dirty(sb); |
1213 | - |
1214 | - unlock_ufs(sb); |
1215 | + |
1216 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1217 | UFSD("EXIT\n"); |
1218 | return; |
1219 | |
1220 | failed: |
1221 | - unlock_ufs(sb); |
1222 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1223 | UFSD("EXIT (FAILED)\n"); |
1224 | return; |
1225 | } |
1226 | @@ -151,7 +151,7 @@ void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count) |
1227 | goto failed; |
1228 | } |
1229 | |
1230 | - lock_ufs(sb); |
1231 | + mutex_lock(&UFS_SB(sb)->s_lock); |
1232 | |
1233 | do_more: |
1234 | overflow = 0; |
1235 | @@ -211,12 +211,12 @@ do_more: |
1236 | } |
1237 | |
1238 | ufs_mark_sb_dirty(sb); |
1239 | - unlock_ufs(sb); |
1240 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1241 | UFSD("EXIT\n"); |
1242 | return; |
1243 | |
1244 | failed_unlock: |
1245 | - unlock_ufs(sb); |
1246 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1247 | failed: |
1248 | UFSD("EXIT (FAILED)\n"); |
1249 | return; |
1250 | @@ -357,7 +357,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, |
1251 | usb1 = ubh_get_usb_first(uspi); |
1252 | *err = -ENOSPC; |
1253 | |
1254 | - lock_ufs(sb); |
1255 | + mutex_lock(&UFS_SB(sb)->s_lock); |
1256 | tmp = ufs_data_ptr_to_cpu(sb, p); |
1257 | |
1258 | if (count + ufs_fragnum(fragment) > uspi->s_fpb) { |
1259 | @@ -378,19 +378,19 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, |
1260 | "fragment %llu, tmp %llu\n", |
1261 | (unsigned long long)fragment, |
1262 | (unsigned long long)tmp); |
1263 | - unlock_ufs(sb); |
1264 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1265 | return INVBLOCK; |
1266 | } |
1267 | if (fragment < UFS_I(inode)->i_lastfrag) { |
1268 | UFSD("EXIT (ALREADY ALLOCATED)\n"); |
1269 | - unlock_ufs(sb); |
1270 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1271 | return 0; |
1272 | } |
1273 | } |
1274 | else { |
1275 | if (tmp) { |
1276 | UFSD("EXIT (ALREADY ALLOCATED)\n"); |
1277 | - unlock_ufs(sb); |
1278 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1279 | return 0; |
1280 | } |
1281 | } |
1282 | @@ -399,7 +399,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, |
1283 | * There is not enough space for user on the device |
1284 | */ |
1285 | if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) { |
1286 | - unlock_ufs(sb); |
1287 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1288 | UFSD("EXIT (FAILED)\n"); |
1289 | return 0; |
1290 | } |
1291 | @@ -424,7 +424,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, |
1292 | ufs_clear_frags(inode, result + oldcount, |
1293 | newcount - oldcount, locked_page != NULL); |
1294 | } |
1295 | - unlock_ufs(sb); |
1296 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1297 | UFSD("EXIT, result %llu\n", (unsigned long long)result); |
1298 | return result; |
1299 | } |
1300 | @@ -439,7 +439,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, |
1301 | fragment + count); |
1302 | ufs_clear_frags(inode, result + oldcount, newcount - oldcount, |
1303 | locked_page != NULL); |
1304 | - unlock_ufs(sb); |
1305 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1306 | UFSD("EXIT, result %llu\n", (unsigned long long)result); |
1307 | return result; |
1308 | } |
1309 | @@ -477,7 +477,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, |
1310 | *err = 0; |
1311 | UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag, |
1312 | fragment + count); |
1313 | - unlock_ufs(sb); |
1314 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1315 | if (newcount < request) |
1316 | ufs_free_fragments (inode, result + newcount, request - newcount); |
1317 | ufs_free_fragments (inode, tmp, oldcount); |
1318 | @@ -485,7 +485,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, |
1319 | return result; |
1320 | } |
1321 | |
1322 | - unlock_ufs(sb); |
1323 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1324 | UFSD("EXIT (FAILED)\n"); |
1325 | return 0; |
1326 | } |
1327 | diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c |
1328 | index 7caa01652888..fd0203ce1f7f 100644 |
1329 | --- a/fs/ufs/ialloc.c |
1330 | +++ b/fs/ufs/ialloc.c |
1331 | @@ -69,11 +69,11 @@ void ufs_free_inode (struct inode * inode) |
1332 | |
1333 | ino = inode->i_ino; |
1334 | |
1335 | - lock_ufs(sb); |
1336 | + mutex_lock(&UFS_SB(sb)->s_lock); |
1337 | |
1338 | if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) { |
1339 | ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino); |
1340 | - unlock_ufs(sb); |
1341 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1342 | return; |
1343 | } |
1344 | |
1345 | @@ -81,7 +81,7 @@ void ufs_free_inode (struct inode * inode) |
1346 | bit = ufs_inotocgoff (ino); |
1347 | ucpi = ufs_load_cylinder (sb, cg); |
1348 | if (!ucpi) { |
1349 | - unlock_ufs(sb); |
1350 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1351 | return; |
1352 | } |
1353 | ucg = ubh_get_ucg(UCPI_UBH(ucpi)); |
1354 | @@ -115,7 +115,7 @@ void ufs_free_inode (struct inode * inode) |
1355 | ubh_sync_block(UCPI_UBH(ucpi)); |
1356 | |
1357 | ufs_mark_sb_dirty(sb); |
1358 | - unlock_ufs(sb); |
1359 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1360 | UFSD("EXIT\n"); |
1361 | } |
1362 | |
1363 | @@ -193,7 +193,7 @@ struct inode *ufs_new_inode(struct inode *dir, umode_t mode) |
1364 | sbi = UFS_SB(sb); |
1365 | uspi = sbi->s_uspi; |
1366 | |
1367 | - lock_ufs(sb); |
1368 | + mutex_lock(&sbi->s_lock); |
1369 | |
1370 | /* |
1371 | * Try to place the inode in its parent directory |
1372 | @@ -331,21 +331,21 @@ cg_found: |
1373 | sync_dirty_buffer(bh); |
1374 | brelse(bh); |
1375 | } |
1376 | - unlock_ufs(sb); |
1377 | + mutex_unlock(&sbi->s_lock); |
1378 | |
1379 | UFSD("allocating inode %lu\n", inode->i_ino); |
1380 | UFSD("EXIT\n"); |
1381 | return inode; |
1382 | |
1383 | fail_remove_inode: |
1384 | - unlock_ufs(sb); |
1385 | + mutex_unlock(&sbi->s_lock); |
1386 | clear_nlink(inode); |
1387 | unlock_new_inode(inode); |
1388 | iput(inode); |
1389 | UFSD("EXIT (FAILED): err %d\n", err); |
1390 | return ERR_PTR(err); |
1391 | failed: |
1392 | - unlock_ufs(sb); |
1393 | + mutex_unlock(&sbi->s_lock); |
1394 | make_bad_inode(inode); |
1395 | iput (inode); |
1396 | UFSD("EXIT (FAILED): err %d\n", err); |
1397 | diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c |
1398 | index be7d42c7d938..2d93ab07da8a 100644 |
1399 | --- a/fs/ufs/inode.c |
1400 | +++ b/fs/ufs/inode.c |
1401 | @@ -902,6 +902,9 @@ void ufs_evict_inode(struct inode * inode) |
1402 | invalidate_inode_buffers(inode); |
1403 | clear_inode(inode); |
1404 | |
1405 | - if (want_delete) |
1406 | + if (want_delete) { |
1407 | + lock_ufs(inode->i_sb); |
1408 | ufs_free_inode(inode); |
1409 | + unlock_ufs(inode->i_sb); |
1410 | + } |
1411 | } |
1412 | diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c |
1413 | index fd65deb4b5f0..e8ee2985b068 100644 |
1414 | --- a/fs/ufs/namei.c |
1415 | +++ b/fs/ufs/namei.c |
1416 | @@ -128,12 +128,12 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry, |
1417 | if (l > sb->s_blocksize) |
1418 | goto out_notlocked; |
1419 | |
1420 | + lock_ufs(dir->i_sb); |
1421 | inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO); |
1422 | err = PTR_ERR(inode); |
1423 | if (IS_ERR(inode)) |
1424 | - goto out_notlocked; |
1425 | + goto out; |
1426 | |
1427 | - lock_ufs(dir->i_sb); |
1428 | if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) { |
1429 | /* slow symlink */ |
1430 | inode->i_op = &ufs_symlink_inode_operations; |
1431 | @@ -174,7 +174,12 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir, |
1432 | inode_inc_link_count(inode); |
1433 | ihold(inode); |
1434 | |
1435 | - error = ufs_add_nondir(dentry, inode); |
1436 | + error = ufs_add_link(dentry, inode); |
1437 | + if (error) { |
1438 | + inode_dec_link_count(inode); |
1439 | + iput(inode); |
1440 | + } else |
1441 | + d_instantiate(dentry, inode); |
1442 | unlock_ufs(dir->i_sb); |
1443 | return error; |
1444 | } |
1445 | @@ -184,9 +189,13 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) |
1446 | struct inode * inode; |
1447 | int err; |
1448 | |
1449 | + lock_ufs(dir->i_sb); |
1450 | + inode_inc_link_count(dir); |
1451 | + |
1452 | inode = ufs_new_inode(dir, S_IFDIR|mode); |
1453 | + err = PTR_ERR(inode); |
1454 | if (IS_ERR(inode)) |
1455 | - return PTR_ERR(inode); |
1456 | + goto out_dir; |
1457 | |
1458 | inode->i_op = &ufs_dir_inode_operations; |
1459 | inode->i_fop = &ufs_dir_operations; |
1460 | @@ -194,9 +203,6 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) |
1461 | |
1462 | inode_inc_link_count(inode); |
1463 | |
1464 | - lock_ufs(dir->i_sb); |
1465 | - inode_inc_link_count(dir); |
1466 | - |
1467 | err = ufs_make_empty(inode, dir); |
1468 | if (err) |
1469 | goto out_fail; |
1470 | @@ -206,6 +212,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) |
1471 | goto out_fail; |
1472 | unlock_ufs(dir->i_sb); |
1473 | |
1474 | + unlock_new_inode(inode); |
1475 | d_instantiate(dentry, inode); |
1476 | out: |
1477 | return err; |
1478 | @@ -215,6 +222,7 @@ out_fail: |
1479 | inode_dec_link_count(inode); |
1480 | unlock_new_inode(inode); |
1481 | iput (inode); |
1482 | +out_dir: |
1483 | inode_dec_link_count(dir); |
1484 | unlock_ufs(dir->i_sb); |
1485 | goto out; |
1486 | diff --git a/fs/ufs/super.c b/fs/ufs/super.c |
1487 | index da73801301d5..ce02dff5572f 100644 |
1488 | --- a/fs/ufs/super.c |
1489 | +++ b/fs/ufs/super.c |
1490 | @@ -698,6 +698,7 @@ static int ufs_sync_fs(struct super_block *sb, int wait) |
1491 | unsigned flags; |
1492 | |
1493 | lock_ufs(sb); |
1494 | + mutex_lock(&UFS_SB(sb)->s_lock); |
1495 | |
1496 | UFSD("ENTER\n"); |
1497 | |
1498 | @@ -715,6 +716,7 @@ static int ufs_sync_fs(struct super_block *sb, int wait) |
1499 | ufs_put_cstotal(sb); |
1500 | |
1501 | UFSD("EXIT\n"); |
1502 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1503 | unlock_ufs(sb); |
1504 | |
1505 | return 0; |
1506 | @@ -803,6 +805,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent) |
1507 | UFSD("flag %u\n", (int)(sb->s_flags & MS_RDONLY)); |
1508 | |
1509 | mutex_init(&sbi->mutex); |
1510 | + mutex_init(&sbi->s_lock); |
1511 | spin_lock_init(&sbi->work_lock); |
1512 | INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs); |
1513 | /* |
1514 | @@ -1281,6 +1284,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data) |
1515 | |
1516 | sync_filesystem(sb); |
1517 | lock_ufs(sb); |
1518 | + mutex_lock(&UFS_SB(sb)->s_lock); |
1519 | uspi = UFS_SB(sb)->s_uspi; |
1520 | flags = UFS_SB(sb)->s_flags; |
1521 | usb1 = ubh_get_usb_first(uspi); |
1522 | @@ -1294,6 +1298,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data) |
1523 | new_mount_opt = 0; |
1524 | ufs_set_opt (new_mount_opt, ONERROR_LOCK); |
1525 | if (!ufs_parse_options (data, &new_mount_opt)) { |
1526 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1527 | unlock_ufs(sb); |
1528 | return -EINVAL; |
1529 | } |
1530 | @@ -1301,12 +1306,14 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data) |
1531 | new_mount_opt |= ufstype; |
1532 | } else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) { |
1533 | pr_err("ufstype can't be changed during remount\n"); |
1534 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1535 | unlock_ufs(sb); |
1536 | return -EINVAL; |
1537 | } |
1538 | |
1539 | if ((*mount_flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) { |
1540 | UFS_SB(sb)->s_mount_opt = new_mount_opt; |
1541 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1542 | unlock_ufs(sb); |
1543 | return 0; |
1544 | } |
1545 | @@ -1330,6 +1337,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data) |
1546 | */ |
1547 | #ifndef CONFIG_UFS_FS_WRITE |
1548 | pr_err("ufs was compiled with read-only support, can't be mounted as read-write\n"); |
1549 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1550 | unlock_ufs(sb); |
1551 | return -EINVAL; |
1552 | #else |
1553 | @@ -1339,11 +1347,13 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data) |
1554 | ufstype != UFS_MOUNT_UFSTYPE_SUNx86 && |
1555 | ufstype != UFS_MOUNT_UFSTYPE_UFS2) { |
1556 | pr_err("this ufstype is read-only supported\n"); |
1557 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1558 | unlock_ufs(sb); |
1559 | return -EINVAL; |
1560 | } |
1561 | if (!ufs_read_cylinder_structures(sb)) { |
1562 | pr_err("failed during remounting\n"); |
1563 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1564 | unlock_ufs(sb); |
1565 | return -EPERM; |
1566 | } |
1567 | @@ -1351,6 +1361,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data) |
1568 | #endif |
1569 | } |
1570 | UFS_SB(sb)->s_mount_opt = new_mount_opt; |
1571 | + mutex_unlock(&UFS_SB(sb)->s_lock); |
1572 | unlock_ufs(sb); |
1573 | return 0; |
1574 | } |
1575 | diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h |
1576 | index 2a07396d5f9e..cf6368d42d4a 100644 |
1577 | --- a/fs/ufs/ufs.h |
1578 | +++ b/fs/ufs/ufs.h |
1579 | @@ -30,6 +30,7 @@ struct ufs_sb_info { |
1580 | int work_queued; /* non-zero if the delayed work is queued */ |
1581 | struct delayed_work sync_work; /* FS sync delayed work */ |
1582 | spinlock_t work_lock; /* protects sync_work and work_queued */ |
1583 | + struct mutex s_lock; |
1584 | }; |
1585 | |
1586 | struct ufs_inode_info { |
1587 | diff --git a/include/linux/fs.h b/include/linux/fs.h |
1588 | index 9ab779e8a63c..84d672914bd8 100644 |
1589 | --- a/include/linux/fs.h |
1590 | +++ b/include/linux/fs.h |
1591 | @@ -1791,6 +1791,7 @@ struct file_system_type { |
1592 | #define FS_HAS_SUBTYPE 4 |
1593 | #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ |
1594 | #define FS_USERNS_DEV_MOUNT 16 /* A userns mount does not imply MNT_NODEV */ |
1595 | +#define FS_USERNS_VISIBLE 32 /* FS must already be visible */ |
1596 | #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */ |
1597 | struct dentry *(*mount) (struct file_system_type *, int, |
1598 | const char *, void *); |
1599 | @@ -1878,7 +1879,6 @@ extern int vfs_ustat(dev_t, struct kstatfs *); |
1600 | extern int freeze_super(struct super_block *super); |
1601 | extern int thaw_super(struct super_block *super); |
1602 | extern bool our_mnt(struct vfsmount *mnt); |
1603 | -extern bool fs_fully_visible(struct file_system_type *); |
1604 | |
1605 | extern int current_umask(void); |
1606 | |
1607 | diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h |
1608 | index 7491ee5d8164..83338210ee04 100644 |
1609 | --- a/include/xen/xen-ops.h |
1610 | +++ b/include/xen/xen-ops.h |
1611 | @@ -46,4 +46,30 @@ static inline efi_system_table_t __init *xen_efi_probe(void) |
1612 | } |
1613 | #endif |
1614 | |
1615 | +#ifdef CONFIG_PREEMPT |
1616 | + |
1617 | +static inline void xen_preemptible_hcall_begin(void) |
1618 | +{ |
1619 | +} |
1620 | + |
1621 | +static inline void xen_preemptible_hcall_end(void) |
1622 | +{ |
1623 | +} |
1624 | + |
1625 | +#else |
1626 | + |
1627 | +DECLARE_PER_CPU(bool, xen_in_preemptible_hcall); |
1628 | + |
1629 | +static inline void xen_preemptible_hcall_begin(void) |
1630 | +{ |
1631 | + __this_cpu_write(xen_in_preemptible_hcall, true); |
1632 | +} |
1633 | + |
1634 | +static inline void xen_preemptible_hcall_end(void) |
1635 | +{ |
1636 | + __this_cpu_write(xen_in_preemptible_hcall, false); |
1637 | +} |
1638 | + |
1639 | +#endif /* CONFIG_PREEMPT */ |
1640 | + |
1641 | #endif /* INCLUDE_XEN_OPS_H */ |
1642 | diff --git a/kernel/events/core.c b/kernel/events/core.c |
1643 | index e631dacdb165..cb86038cad47 100644 |
1644 | --- a/kernel/events/core.c |
1645 | +++ b/kernel/events/core.c |
1646 | @@ -4057,20 +4057,20 @@ static void ring_buffer_attach(struct perf_event *event, |
1647 | WARN_ON_ONCE(event->rcu_pending); |
1648 | |
1649 | old_rb = event->rb; |
1650 | - event->rcu_batches = get_state_synchronize_rcu(); |
1651 | - event->rcu_pending = 1; |
1652 | - |
1653 | spin_lock_irqsave(&old_rb->event_lock, flags); |
1654 | list_del_rcu(&event->rb_entry); |
1655 | spin_unlock_irqrestore(&old_rb->event_lock, flags); |
1656 | - } |
1657 | |
1658 | - if (event->rcu_pending && rb) { |
1659 | - cond_synchronize_rcu(event->rcu_batches); |
1660 | - event->rcu_pending = 0; |
1661 | + event->rcu_batches = get_state_synchronize_rcu(); |
1662 | + event->rcu_pending = 1; |
1663 | } |
1664 | |
1665 | if (rb) { |
1666 | + if (event->rcu_pending) { |
1667 | + cond_synchronize_rcu(event->rcu_batches); |
1668 | + event->rcu_pending = 0; |
1669 | + } |
1670 | + |
1671 | spin_lock_irqsave(&rb->event_lock, flags); |
1672 | list_add_rcu(&event->rb_entry, &rb->event_list); |
1673 | spin_unlock_irqrestore(&rb->event_lock, flags); |
1674 | diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c |
1675 | index bcb9145a7913..51afcb76d32b 100644 |
1676 | --- a/kernel/trace/trace_events_filter.c |
1677 | +++ b/kernel/trace/trace_events_filter.c |
1678 | @@ -1369,19 +1369,24 @@ static int check_preds(struct filter_parse_state *ps) |
1679 | { |
1680 | int n_normal_preds = 0, n_logical_preds = 0; |
1681 | struct postfix_elt *elt; |
1682 | + int cnt = 0; |
1683 | |
1684 | list_for_each_entry(elt, &ps->postfix, list) { |
1685 | - if (elt->op == OP_NONE) |
1686 | + if (elt->op == OP_NONE) { |
1687 | + cnt++; |
1688 | continue; |
1689 | + } |
1690 | |
1691 | + cnt--; |
1692 | if (elt->op == OP_AND || elt->op == OP_OR) { |
1693 | n_logical_preds++; |
1694 | continue; |
1695 | } |
1696 | n_normal_preds++; |
1697 | + WARN_ON_ONCE(cnt < 0); |
1698 | } |
1699 | |
1700 | - if (!n_normal_preds || n_logical_preds >= n_normal_preds) { |
1701 | + if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) { |
1702 | parse_error(ps, FILT_ERR_INVALID_FILTER, 0); |
1703 | return -EINVAL; |
1704 | } |
1705 | diff --git a/net/can/af_can.c b/net/can/af_can.c |
1706 | index d6030d6949df..9a3244941a5c 100644 |
1707 | --- a/net/can/af_can.c |
1708 | +++ b/net/can/af_can.c |
1709 | @@ -313,8 +313,12 @@ int can_send(struct sk_buff *skb, int loop) |
1710 | return err; |
1711 | } |
1712 | |
1713 | - if (newskb) |
1714 | + if (newskb) { |
1715 | + if (!(newskb->tstamp.tv64)) |
1716 | + __net_timestamp(newskb); |
1717 | + |
1718 | netif_rx_ni(newskb); |
1719 | + } |
1720 | |
1721 | /* update statistics */ |
1722 | can_stats.tx_frames++; |
1723 | diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c |
1724 | index 24d3242f0e01..c522f7a00eab 100644 |
1725 | --- a/net/core/rtnetlink.c |
1726 | +++ b/net/core/rtnetlink.c |
1727 | @@ -2229,9 +2229,6 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, |
1728 | int err = -ENOBUFS; |
1729 | size_t if_info_size; |
1730 | |
1731 | - if (dev->reg_state != NETREG_REGISTERED) |
1732 | - return; |
1733 | - |
1734 | skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags); |
1735 | if (skb == NULL) |
1736 | goto errout; |
1737 | diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c |
1738 | index 71b574c7bde9..9fe2baa01fbe 100644 |
1739 | --- a/net/netfilter/nf_tables_api.c |
1740 | +++ b/net/netfilter/nf_tables_api.c |
1741 | @@ -1221,7 +1221,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, |
1742 | |
1743 | if (nla[NFTA_CHAIN_POLICY]) { |
1744 | if ((chain != NULL && |
1745 | - !(chain->flags & NFT_BASE_CHAIN)) || |
1746 | + !(chain->flags & NFT_BASE_CHAIN))) |
1747 | + return -EOPNOTSUPP; |
1748 | + |
1749 | + if (chain == NULL && |
1750 | nla[NFTA_CHAIN_HOOK] == NULL) |
1751 | return -EOPNOTSUPP; |
1752 | |
1753 | diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c |
1754 | index 9e287cb56a04..54330fb5efaf 100644 |
1755 | --- a/net/netfilter/nfnetlink_cthelper.c |
1756 | +++ b/net/netfilter/nfnetlink_cthelper.c |
1757 | @@ -77,6 +77,9 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple, |
1758 | if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM]) |
1759 | return -EINVAL; |
1760 | |
1761 | + /* Not all fields are initialized so first zero the tuple */ |
1762 | + memset(tuple, 0, sizeof(struct nf_conntrack_tuple)); |
1763 | + |
1764 | tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM])); |
1765 | tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]); |
1766 | |
1767 | @@ -86,7 +89,7 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple, |
1768 | static int |
1769 | nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct) |
1770 | { |
1771 | - const struct nf_conn_help *help = nfct_help(ct); |
1772 | + struct nf_conn_help *help = nfct_help(ct); |
1773 | |
1774 | if (attr == NULL) |
1775 | return -EINVAL; |
1776 | @@ -94,7 +97,7 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct) |
1777 | if (help->helper->data_len == 0) |
1778 | return -EINVAL; |
1779 | |
1780 | - memcpy(&help->data, nla_data(attr), help->helper->data_len); |
1781 | + memcpy(help->data, nla_data(attr), help->helper->data_len); |
1782 | return 0; |
1783 | } |
1784 | |
1785 | diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c |
1786 | index 265e190f2218..e22a2961cc39 100644 |
1787 | --- a/net/netfilter/nft_compat.c |
1788 | +++ b/net/netfilter/nft_compat.c |
1789 | @@ -97,6 +97,9 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, |
1790 | entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; |
1791 | break; |
1792 | case AF_INET6: |
1793 | + if (proto) |
1794 | + entry->e6.ipv6.flags |= IP6T_F_PROTO; |
1795 | + |
1796 | entry->e6.ipv6.proto = proto; |
1797 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; |
1798 | break; |
1799 | @@ -304,6 +307,9 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, |
1800 | entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; |
1801 | break; |
1802 | case AF_INET6: |
1803 | + if (proto) |
1804 | + entry->e6.ipv6.flags |= IP6T_F_PROTO; |
1805 | + |
1806 | entry->e6.ipv6.proto = proto; |
1807 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; |
1808 | break; |
1809 | diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c |
1810 | index c603b20356ad..b1e455b47b82 100644 |
1811 | --- a/security/selinux/hooks.c |
1812 | +++ b/security/selinux/hooks.c |
1813 | @@ -405,7 +405,8 @@ static int selinux_is_sblabel_mnt(struct super_block *sb) |
1814 | |
1815 | if (sbsec->behavior == SECURITY_FS_USE_XATTR || |
1816 | sbsec->behavior == SECURITY_FS_USE_TRANS || |
1817 | - sbsec->behavior == SECURITY_FS_USE_TASK) |
1818 | + sbsec->behavior == SECURITY_FS_USE_TASK || |
1819 | + sbsec->behavior == SECURITY_FS_USE_NATIVE) |
1820 | return 1; |
1821 | |
1822 | /* Special handling for sysfs. Is genfs but also has setxattr handler*/ |