Magellan Linux

Annotation of /trunk/kernel-alx/patches-3.10/0182-3.10.83-all-fixes.patch



Revision 2670
Tue Jul 21 16:20:25 2015 UTC by niro
File size: 57570 bytes
-linux-3.10.83
1 niro 2670 diff --git a/Makefile b/Makefile
2     index 5e3e665a10b7..21529dbcc11d 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 3
7     PATCHLEVEL = 10
8     -SUBLEVEL = 82
9     +SUBLEVEL = 83
10     EXTRAVERSION =
11     NAME = TOSSUG Baby Fish
12    
13     diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
14     index e2b5da031f96..8d4f5dc56910 100644
15     --- a/arch/arm/mach-dove/common.c
16     +++ b/arch/arm/mach-dove/common.c
17     @@ -226,7 +226,7 @@ void __init dove_init_early(void)
18     orion_time_set_base(TIMER_VIRT_BASE);
19     mvebu_mbus_init("marvell,dove-mbus",
20     BRIDGE_WINS_BASE, BRIDGE_WINS_SZ,
21     - DOVE_MC_WINS_BASE, DOVE_MC_WINS_SZ);
22     + DOVE_MC_WINS_BASE, DOVE_MC_WINS_SZ, 0);
23     }
24    
25     static int __init dove_find_tclk(void)
26     diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
27     index 2acaded8025d..ed00c9e3bfc6 100644
28     --- a/arch/arm/mach-imx/clk-imx6q.c
29     +++ b/arch/arm/mach-imx/clk-imx6q.c
30     @@ -515,7 +515,7 @@ int __init mx6q_clocks_init(void)
31     clk[gpmi_io] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28);
32     clk[gpmi_apb] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
33     clk[rom] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
34     - clk[sata] = imx_clk_gate2("sata", "ipg", base + 0x7c, 4);
35     + clk[sata] = imx_clk_gate2("sata", "ahb", base + 0x7c, 4);
36     clk[sdma] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
37     clk[spba] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
38     clk[ssi1_ipg] = imx_clk_gate2("ssi1_ipg", "ipg", base + 0x7c, 18);
39     diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
40     index f38922897563..4f6831ea88c5 100644
41     --- a/arch/arm/mach-kirkwood/common.c
42     +++ b/arch/arm/mach-kirkwood/common.c
43     @@ -530,7 +530,7 @@ void __init kirkwood_init_early(void)
44    
45     mvebu_mbus_init("marvell,kirkwood-mbus",
46     BRIDGE_WINS_BASE, BRIDGE_WINS_SZ,
47     - DDR_WINDOW_CPU_BASE, DDR_WINDOW_CPU_SZ);
48     + DDR_WINDOW_CPU_BASE, DDR_WINDOW_CPU_SZ, 0);
49     }
50    
51     int kirkwood_tclk;
52     diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
53     index 749a7f8c4992..4722c98dc1bb 100644
54     --- a/arch/arm/mach-mv78xx0/common.c
55     +++ b/arch/arm/mach-mv78xx0/common.c
56     @@ -337,11 +337,11 @@ void __init mv78xx0_init_early(void)
57     if (mv78xx0_core_index() == 0)
58     mvebu_mbus_init("marvell,mv78xx0-mbus",
59     BRIDGE_WINS_CPU0_BASE, BRIDGE_WINS_SZ,
60     - DDR_WINDOW_CPU0_BASE, DDR_WINDOW_CPU_SZ);
61     + DDR_WINDOW_CPU0_BASE, DDR_WINDOW_CPU_SZ, 0);
62     else
63     mvebu_mbus_init("marvell,mv78xx0-mbus",
64     BRIDGE_WINS_CPU1_BASE, BRIDGE_WINS_SZ,
65     - DDR_WINDOW_CPU1_BASE, DDR_WINDOW_CPU_SZ);
66     + DDR_WINDOW_CPU1_BASE, DDR_WINDOW_CPU_SZ, 0);
67     }
68    
69     void __init_refok mv78xx0_timer_init(void)
70     diff --git a/arch/arm/mach-mvebu/armada-370-xp.c b/arch/arm/mach-mvebu/armada-370-xp.c
71     index 1c48890bb72b..4377c3484a62 100644
72     --- a/arch/arm/mach-mvebu/armada-370-xp.c
73     +++ b/arch/arm/mach-mvebu/armada-370-xp.c
74     @@ -66,7 +66,8 @@ void __init armada_370_xp_init_early(void)
75     ARMADA_370_XP_MBUS_WINS_BASE,
76     ARMADA_370_XP_MBUS_WINS_SIZE,
77     ARMADA_370_XP_SDRAM_WINS_BASE,
78     - ARMADA_370_XP_SDRAM_WINS_SIZE);
79     + ARMADA_370_XP_SDRAM_WINS_SIZE,
80     + coherency_available());
81    
82     #ifdef CONFIG_CACHE_L2X0
83     l2x0_of_init(0, ~0UL);
84     diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
85     index 3ee701f1d38e..ea26ebb5bb5a 100644
86     --- a/arch/arm/mach-mvebu/coherency.c
87     +++ b/arch/arm/mach-mvebu/coherency.c
88     @@ -137,6 +137,20 @@ static struct notifier_block mvebu_hwcc_platform_nb = {
89     .notifier_call = mvebu_hwcc_platform_notifier,
90     };
91    
92     +/*
93     + * Keep track of whether we have IO hardware coherency enabled or not.
94     + * On Armada 370's we will not be using it for example. We need to make
95     + * that available [through coherency_available()] so the mbus controller
96     + * doesn't enable the IO coherency bit in the attribute bits of the
97     + * chip selects.
98     + */
99     +static int coherency_enabled;
100     +
101     +int coherency_available(void)
102     +{
103     + return coherency_enabled;
104     +}
105     +
106     int __init coherency_init(void)
107     {
108     struct device_node *np;
109     @@ -170,6 +184,7 @@ int __init coherency_init(void)
110     coherency_base = of_iomap(np, 0);
111     coherency_cpu_base = of_iomap(np, 1);
112     set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
113     + coherency_enabled = 1;
114     bus_register_notifier(&platform_bus_type,
115     &mvebu_hwcc_platform_nb);
116     }
117     diff --git a/arch/arm/mach-mvebu/coherency.h b/arch/arm/mach-mvebu/coherency.h
118     index 2f428137f6fe..1501a4e5eea0 100644
119     --- a/arch/arm/mach-mvebu/coherency.h
120     +++ b/arch/arm/mach-mvebu/coherency.h
121     @@ -19,6 +19,7 @@ int coherency_get_cpu_count(void);
122     #endif
123    
124     int set_cpu_coherent(int cpu_id, int smp_group_id);
125     +int coherency_available(void);
126     int coherency_init(void);
127    
128     #endif /* __MACH_370_XP_COHERENCY_H */
129     diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
130     index f8a6db9239bf..048773926ad4 100644
131     --- a/arch/arm/mach-orion5x/common.c
132     +++ b/arch/arm/mach-orion5x/common.c
133     @@ -213,7 +213,7 @@ void __init orion5x_init_early(void)
134     mbus_soc_name = NULL;
135     mvebu_mbus_init(mbus_soc_name, ORION5X_BRIDGE_WINS_BASE,
136     ORION5X_BRIDGE_WINS_SZ,
137     - ORION5X_DDR_WINS_BASE, ORION5X_DDR_WINS_SZ);
138     + ORION5X_DDR_WINS_BASE, ORION5X_DDR_WINS_SZ, 0);
139     }
140    
141     void orion5x_setup_wins(void)
142     diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
143     index 4e5b80d883c8..105ae30a176b 100644
144     --- a/arch/x86/Kconfig
145     +++ b/arch/x86/Kconfig
146     @@ -154,7 +154,7 @@ config SBUS
147    
148     config NEED_DMA_MAP_STATE
149     def_bool y
150     - depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG
151     + depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG || SWIOTLB
152    
153     config NEED_SG_DMA_LENGTH
154     def_bool y
155     diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
156     index 2e9e12871c2b..a883942aee44 100644
157     --- a/arch/x86/kernel/microcode_intel_early.c
158     +++ b/arch/x86/kernel/microcode_intel_early.c
159     @@ -321,7 +321,7 @@ get_matching_model_microcode(int cpu, unsigned long start,
160     unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
161     int i;
162    
163     - while (leftover) {
164     + while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
165     mc_header = (struct microcode_header_intel *)ucode_ptr;
166    
167     mc_size = get_totalsize(mc_header);
168     diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
169     index 8bf40a243d75..224d2ef754cc 100644
170     --- a/arch/x86/kvm/svm.c
171     +++ b/arch/x86/kvm/svm.c
172     @@ -495,8 +495,10 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
173     {
174     struct vcpu_svm *svm = to_svm(vcpu);
175    
176     - if (svm->vmcb->control.next_rip != 0)
177     + if (svm->vmcb->control.next_rip != 0) {
178     + WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
179     svm->next_rip = svm->vmcb->control.next_rip;
180     + }
181    
182     if (!svm->next_rip) {
183     if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
184     @@ -4229,7 +4231,9 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
185     break;
186     }
187    
188     - vmcb->control.next_rip = info->next_rip;
189     + /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
190     + if (static_cpu_has(X86_FEATURE_NRIPS))
191     + vmcb->control.next_rip = info->next_rip;
192     vmcb->control.exit_code = icpt_info.exit_code;
193     vmexit = nested_svm_exit_handled(svm);
194    
195     diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
196     index 53666bd9193d..32b0bf32364a 100644
197     --- a/drivers/acpi/acpica/acmacros.h
198     +++ b/drivers/acpi/acpica/acmacros.h
199     @@ -63,19 +63,15 @@
200     #define ACPI_SET64(ptr, val) (*ACPI_CAST64 (ptr) = (u64) (val))
201    
202     /*
203     - * printf() format helpers
204     + * printf() format helper. This macros is a workaround for the difficulties
205     + * with emitting 64-bit integers and 64-bit pointers with the same code
206     + * for both 32-bit and 64-bit hosts.
207     */
208    
209     /* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */
210    
211     #define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i), ACPI_LODWORD(i)
212    
213     -#if ACPI_MACHINE_WIDTH == 64
214     -#define ACPI_FORMAT_NATIVE_UINT(i) ACPI_FORMAT_UINT64(i)
215     -#else
216     -#define ACPI_FORMAT_NATIVE_UINT(i) 0, (i)
217     -#endif
218     -
219     /*
220     * Macros for moving data around to/from buffers that are possibly unaligned.
221     * If the hardware supports the transfer of unaligned data, just do the store.
222     diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
223     index e9b13b92ba1e..46a37aeaedae 100644
224     --- a/drivers/acpi/acpica/dsopcode.c
225     +++ b/drivers/acpi/acpica/dsopcode.c
226     @@ -446,7 +446,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
227    
228     ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
229     obj_desc,
230     - ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
231     + ACPI_FORMAT_UINT64(obj_desc->region.address),
232     obj_desc->region.length));
233    
234     /* Now the address and length are valid for this opregion */
235     @@ -544,7 +544,7 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
236    
237     ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
238     obj_desc,
239     - ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
240     + ACPI_FORMAT_UINT64(obj_desc->region.address),
241     obj_desc->region.length));
242    
243     /* Now the address and length are valid for this opregion */
244     diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
245     index 8fab9262d98a..ad698893e829 100644
246     --- a/drivers/acpi/acpica/evregion.c
247     +++ b/drivers/acpi/acpica/evregion.c
248     @@ -276,7 +276,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
249     ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
250     "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
251     &region_obj->region.handler->address_space, handler,
252     - ACPI_FORMAT_NATIVE_UINT(address),
253     + ACPI_FORMAT_UINT64(address),
254     acpi_ut_get_region_name(region_obj->region.
255     space_id)));
256    
257     diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
258     index e5a3c249f7fa..7e6a56fe1d6e 100644
259     --- a/drivers/acpi/acpica/exdump.c
260     +++ b/drivers/acpi/acpica/exdump.c
261     @@ -621,8 +621,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
262     acpi_os_printf("\n");
263     } else {
264     acpi_os_printf(" base %8.8X%8.8X Length %X\n",
265     - ACPI_FORMAT_NATIVE_UINT(obj_desc->region.
266     - address),
267     + ACPI_FORMAT_UINT64(obj_desc->region.
268     + address),
269     obj_desc->region.length);
270     }
271     break;
272     diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
273     index c84ee956fa4c..dc210c379277 100644
274     --- a/drivers/acpi/acpica/exfldio.c
275     +++ b/drivers/acpi/acpica/exfldio.c
276     @@ -269,17 +269,15 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
277     }
278    
279     ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
280     - " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
281     + " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n",
282     acpi_ut_get_region_name(rgn_desc->region.
283     space_id),
284     rgn_desc->region.space_id,
285     obj_desc->common_field.access_byte_width,
286     obj_desc->common_field.base_byte_offset,
287     - field_datum_byte_offset, ACPI_CAST_PTR(void,
288     - (rgn_desc->
289     - region.
290     - address +
291     - region_offset))));
292     + field_datum_byte_offset,
293     + ACPI_FORMAT_UINT64(rgn_desc->region.address +
294     + region_offset)));
295    
296     /* Invoke the appropriate address_space/op_region handler */
297    
298     diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
299     index 182abaf045e1..e90c59d35a16 100644
300     --- a/drivers/acpi/acpica/exregion.c
301     +++ b/drivers/acpi/acpica/exregion.c
302     @@ -176,7 +176,7 @@ acpi_ex_system_memory_space_handler(u32 function,
303     if (!mem_info->mapped_logical_address) {
304     ACPI_ERROR((AE_INFO,
305     "Could not map memory at 0x%8.8X%8.8X, size %u",
306     - ACPI_FORMAT_NATIVE_UINT(address),
307     + ACPI_FORMAT_UINT64(address),
308     (u32) map_length));
309     mem_info->mapped_length = 0;
310     return_ACPI_STATUS(AE_NO_MEMORY);
311     @@ -197,8 +197,7 @@ acpi_ex_system_memory_space_handler(u32 function,
312    
313     ACPI_DEBUG_PRINT((ACPI_DB_INFO,
314     "System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n",
315     - bit_width, function,
316     - ACPI_FORMAT_NATIVE_UINT(address)));
317     + bit_width, function, ACPI_FORMAT_UINT64(address)));
318    
319     /*
320     * Perform the memory read or write
321     @@ -300,8 +299,7 @@ acpi_ex_system_io_space_handler(u32 function,
322    
323     ACPI_DEBUG_PRINT((ACPI_DB_INFO,
324     "System-IO (width %u) R/W %u Address=%8.8X%8.8X\n",
325     - bit_width, function,
326     - ACPI_FORMAT_NATIVE_UINT(address)));
327     + bit_width, function, ACPI_FORMAT_UINT64(address)));
328    
329     /* Decode the function parameter */
330    
331     diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
332     index eab70d58852a..fae57584a182 100644
333     --- a/drivers/acpi/acpica/hwvalid.c
334     +++ b/drivers/acpi/acpica/hwvalid.c
335     @@ -142,17 +142,17 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
336     byte_width = ACPI_DIV_8(bit_width);
337     last_address = address + byte_width - 1;
338    
339     - ACPI_DEBUG_PRINT((ACPI_DB_IO, "Address %p LastAddress %p Length %X",
340     - ACPI_CAST_PTR(void, address), ACPI_CAST_PTR(void,
341     - last_address),
342     - byte_width));
343     + ACPI_DEBUG_PRINT((ACPI_DB_IO,
344     + "Address %8.8X%8.8X LastAddress %8.8X%8.8X Length %X",
345     + ACPI_FORMAT_UINT64(address),
346     + ACPI_FORMAT_UINT64(last_address), byte_width));
347    
348     /* Maximum 16-bit address in I/O space */
349    
350     if (last_address > ACPI_UINT16_MAX) {
351     ACPI_ERROR((AE_INFO,
352     - "Illegal I/O port address/length above 64K: %p/0x%X",
353     - ACPI_CAST_PTR(void, address), byte_width));
354     + "Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X",
355     + ACPI_FORMAT_UINT64(address), byte_width));
356     return_ACPI_STATUS(AE_LIMIT);
357     }
358    
359     @@ -181,8 +181,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
360    
361     if (acpi_gbl_osi_data >= port_info->osi_dependency) {
362     ACPI_DEBUG_PRINT((ACPI_DB_IO,
363     - "Denied AML access to port 0x%p/%X (%s 0x%.4X-0x%.4X)",
364     - ACPI_CAST_PTR(void, address),
365     + "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)",
366     + ACPI_FORMAT_UINT64(address),
367     byte_width, port_info->name,
368     port_info->start,
369     port_info->end));
370     diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
371     index ce6e97326205..20ae5b9bb9f2 100644
372     --- a/drivers/acpi/acpica/nsdump.c
373     +++ b/drivers/acpi/acpica/nsdump.c
374     @@ -258,12 +258,11 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
375     switch (type) {
376     case ACPI_TYPE_PROCESSOR:
377    
378     - acpi_os_printf("ID %02X Len %02X Addr %p\n",
379     + acpi_os_printf("ID %02X Len %02X Addr %8.8X%8.8X\n",
380     obj_desc->processor.proc_id,
381     obj_desc->processor.length,
382     - ACPI_CAST_PTR(void,
383     - obj_desc->processor.
384     - address));
385     + ACPI_FORMAT_UINT64(obj_desc->processor.
386     + address));
387     break;
388    
389     case ACPI_TYPE_DEVICE:
390     @@ -334,8 +333,9 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
391     space_id));
392     if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
393     acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n",
394     - ACPI_FORMAT_NATIVE_UINT
395     - (obj_desc->region.address),
396     + ACPI_FORMAT_UINT64(obj_desc->
397     + region.
398     + address),
399     obj_desc->region.length);
400     } else {
401     acpi_os_printf
402     diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
403     index ce3d5db39a9c..5c67b2840c58 100644
404     --- a/drivers/acpi/acpica/tbutils.c
405     +++ b/drivers/acpi/acpica/tbutils.c
406     @@ -246,16 +246,12 @@ acpi_tb_print_table_header(acpi_physical_address address,
407     {
408     struct acpi_table_header local_header;
409    
410     - /*
411     - * The reason that the Address is cast to a void pointer is so that we
412     - * can use %p which will work properly on both 32-bit and 64-bit hosts.
413     - */
414     if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
415    
416     /* FACS only has signature and length fields */
417    
418     - ACPI_INFO((AE_INFO, "%4.4s %p %05X",
419     - header->signature, ACPI_CAST_PTR(void, address),
420     + ACPI_INFO((AE_INFO, "%4.4s 0x%8.8X%8.8X %05X",
421     + header->signature, ACPI_FORMAT_UINT64(address),
422     header->length));
423     } else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) {
424    
425     @@ -266,8 +262,8 @@ acpi_tb_print_table_header(acpi_physical_address address,
426     header)->oem_id, ACPI_OEM_ID_SIZE);
427     acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
428    
429     - ACPI_INFO((AE_INFO, "RSDP %p %05X (v%.2d %6.6s)",
430     - ACPI_CAST_PTR (void, address),
431     + ACPI_INFO((AE_INFO, "RSDP 0x%8.8X%8.8X %05X (v%.2d %6.6s)",
432     + ACPI_FORMAT_UINT64(address),
433     (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
434     revision >
435     0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
436     @@ -281,8 +277,8 @@ acpi_tb_print_table_header(acpi_physical_address address,
437     acpi_tb_cleanup_table_header(&local_header, header);
438    
439     ACPI_INFO((AE_INFO,
440     - "%4.4s %p %05X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
441     - local_header.signature, ACPI_CAST_PTR(void, address),
442     + "%-4.4s 0x%8.8X%8.8X %05X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)",
443     + local_header.signature, ACPI_FORMAT_UINT64(address),
444     local_header.length, local_header.revision,
445     local_header.oem_id, local_header.oem_table_id,
446     local_header.oem_revision,
447     @@ -474,8 +470,8 @@ acpi_tb_install_table(acpi_physical_address address,
448     table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
449     if (!table) {
450     ACPI_ERROR((AE_INFO,
451     - "Could not map memory for table [%s] at %p",
452     - signature, ACPI_CAST_PTR(void, address)));
453     + "Could not map memory for table [%s] at %8.8X%8.8X",
454     + signature, ACPI_FORMAT_UINT64(address)));
455     return;
456     }
457    
458     diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
459     index e0a2e2779c2e..3c7770d75773 100644
460     --- a/drivers/acpi/acpica/utaddress.c
461     +++ b/drivers/acpi/acpica/utaddress.c
462     @@ -107,10 +107,10 @@ acpi_ut_add_address_range(acpi_adr_space_type space_id,
463     acpi_gbl_address_range_list[space_id] = range_info;
464    
465     ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
466     - "\nAdded [%4.4s] address range: 0x%p-0x%p\n",
467     + "\nAdded [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
468     acpi_ut_get_node_name(range_info->region_node),
469     - ACPI_CAST_PTR(void, address),
470     - ACPI_CAST_PTR(void, range_info->end_address)));
471     + ACPI_FORMAT_UINT64(address),
472     + ACPI_FORMAT_UINT64(range_info->end_address)));
473    
474     (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
475     return_ACPI_STATUS(AE_OK);
476     @@ -160,15 +160,13 @@ acpi_ut_remove_address_range(acpi_adr_space_type space_id,
477     }
478    
479     ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
480     - "\nRemoved [%4.4s] address range: 0x%p-0x%p\n",
481     + "\nRemoved [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
482     acpi_ut_get_node_name(range_info->
483     region_node),
484     - ACPI_CAST_PTR(void,
485     - range_info->
486     - start_address),
487     - ACPI_CAST_PTR(void,
488     - range_info->
489     - end_address)));
490     + ACPI_FORMAT_UINT64(range_info->
491     + start_address),
492     + ACPI_FORMAT_UINT64(range_info->
493     + end_address)));
494    
495     ACPI_FREE(range_info);
496     return_VOID;
497     @@ -244,9 +242,9 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
498     region_node);
499    
500     ACPI_WARNING((AE_INFO,
501     - "0x%p-0x%p %s conflicts with Region %s %d",
502     - ACPI_CAST_PTR(void, address),
503     - ACPI_CAST_PTR(void, end_address),
504     + "0x%8.8X%8.8X-0x%8.8X%8.8X %s conflicts with Region %s %d",
505     + ACPI_FORMAT_UINT64(address),
506     + ACPI_FORMAT_UINT64(end_address),
507     acpi_ut_get_region_name(space_id),
508     pathname, overlap_count));
509     ACPI_FREE(pathname);
510     diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
511     index 711dcf4a0313..7c437826c2f9 100644
512     --- a/drivers/bus/mvebu-mbus.c
513     +++ b/drivers/bus/mvebu-mbus.c
514     @@ -838,7 +838,7 @@ fs_initcall(mvebu_mbus_debugfs_init);
515     int __init mvebu_mbus_init(const char *soc, phys_addr_t mbuswins_phys_base,
516     size_t mbuswins_size,
517     phys_addr_t sdramwins_phys_base,
518     - size_t sdramwins_size)
519     + size_t sdramwins_size, int is_coherent)
520     {
521     struct mvebu_mbus_state *mbus = &mbus_state;
522     const struct of_device_id *of_id;
523     @@ -865,8 +865,7 @@ int __init mvebu_mbus_init(const char *soc, phys_addr_t mbuswins_phys_base,
524     return -ENOMEM;
525     }
526    
527     - if (of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric"))
528     - mbus->hw_io_coherency = 1;
529     + mbus->hw_io_coherency = is_coherent;
530    
531     for (win = 0; win < mbus->soc->num_wins; win++)
532     mvebu_mbus_disable_window(mbus, win);
533     diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
534     index f505e4ca6d58..3bdefbfb4377 100644
535     --- a/drivers/edac/sb_edac.c
536     +++ b/drivers/edac/sb_edac.c
537     @@ -623,7 +623,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
538     u32 reg;
539     u64 limit, prv = 0;
540     u64 tmp_mb;
541     - u32 mb, kb;
542     + u32 gb, mb;
543     u32 rir_way;
544    
545     /*
546     @@ -636,8 +636,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
547     pvt->tolm = GET_TOLM(reg);
548     tmp_mb = (1 + pvt->tolm) >> 20;
549    
550     - mb = div_u64_rem(tmp_mb, 1000, &kb);
551     - edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);
552     + gb = div_u64_rem(tmp_mb, 1024, &mb);
553     + edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
554     + gb, (mb*1000)/1024, (u64)pvt->tolm);
555    
556     /* Address range is already 45:25 */
557     pci_read_config_dword(pvt->pci_sad1, TOHM,
558     @@ -645,8 +646,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
559     pvt->tohm = GET_TOHM(reg);
560     tmp_mb = (1 + pvt->tohm) >> 20;
561    
562     - mb = div_u64_rem(tmp_mb, 1000, &kb);
563     - edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm);
564     + gb = div_u64_rem(tmp_mb, 1024, &mb);
565     + edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
566     + gb, (mb*1000)/1024, (u64)pvt->tohm);
567    
568     /*
569     * Step 2) Get SAD range and SAD Interleave list
570     @@ -668,11 +670,11 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
571     break;
572    
573     tmp_mb = (limit + 1) >> 20;
574     - mb = div_u64_rem(tmp_mb, 1000, &kb);
575     + gb = div_u64_rem(tmp_mb, 1024, &mb);
576     edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
577     n_sads,
578     get_dram_attr(reg),
579     - mb, kb,
580     + gb, (mb*1000)/1024,
581     ((u64)tmp_mb) << 20L,
582     INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
583     reg);
584     @@ -702,9 +704,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
585     break;
586     tmp_mb = (limit + 1) >> 20;
587    
588     - mb = div_u64_rem(tmp_mb, 1000, &kb);
589     + gb = div_u64_rem(tmp_mb, 1024, &mb);
590     edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
591     - n_tads, mb, kb,
592     + n_tads, gb, (mb*1000)/1024,
593     ((u64)tmp_mb) << 20L,
594     (u32)TAD_SOCK(reg),
595     (u32)TAD_CH(reg),
596     @@ -727,10 +729,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
597     tad_ch_nilv_offset[j],
598     &reg);
599     tmp_mb = TAD_OFFSET(reg) >> 20;
600     - mb = div_u64_rem(tmp_mb, 1000, &kb);
601     + gb = div_u64_rem(tmp_mb, 1024, &mb);
602     edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
603     i, j,
604     - mb, kb,
605     + gb, (mb*1000)/1024,
606     ((u64)tmp_mb) << 20L,
607     reg);
608     }
609     @@ -752,10 +754,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
610    
611     tmp_mb = RIR_LIMIT(reg) >> 20;
612     rir_way = 1 << RIR_WAY(reg);
613     - mb = div_u64_rem(tmp_mb, 1000, &kb);
614     + gb = div_u64_rem(tmp_mb, 1024, &mb);
615     edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
616     i, j,
617     - mb, kb,
618     + gb, (mb*1000)/1024,
619     ((u64)tmp_mb) << 20L,
620     rir_way,
621     reg);
622     @@ -766,10 +768,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
623     &reg);
624     tmp_mb = RIR_OFFSET(reg) << 6;
625    
626     - mb = div_u64_rem(tmp_mb, 1000, &kb);
627     + gb = div_u64_rem(tmp_mb, 1024, &mb);
628     edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
629     i, j, k,
630     - mb, kb,
631     + gb, (mb*1000)/1024,
632     ((u64)tmp_mb) << 20L,
633     (u32)RIR_RNK_TGT(reg),
634     reg);
635     @@ -806,7 +808,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
636     u8 ch_way,sck_way;
637     u32 tad_offset;
638     u32 rir_way;
639     - u32 mb, kb;
640     + u32 mb, gb;
641     u64 ch_addr, offset, limit, prv = 0;
642    
643    
644     @@ -1022,10 +1024,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
645     continue;
646    
647     limit = RIR_LIMIT(reg);
648     - mb = div_u64_rem(limit >> 20, 1000, &kb);
649     + gb = div_u64_rem(limit >> 20, 1024, &mb);
650     edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
651     n_rir,
652     - mb, kb,
653     + gb, (mb*1000)/1024,
654     limit,
655     1 << RIR_WAY(reg));
656     if (ch_addr <= limit)
657     diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
658     index 62ed744bbe06..a6cdf17e27dc 100644
659     --- a/drivers/scsi/hpsa.c
660     +++ b/drivers/scsi/hpsa.c
661     @@ -3898,10 +3898,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
662    
663     /* Save the PCI command register */
664     pci_read_config_word(pdev, 4, &command_register);
665     - /* Turn the board off. This is so that later pci_restore_state()
666     - * won't turn the board on before the rest of config space is ready.
667     - */
668     - pci_disable_device(pdev);
669     pci_save_state(pdev);
670    
671     /* find the first memory BAR, so we can find the cfg table */
672     @@ -3949,11 +3945,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
673     goto unmap_cfgtable;
674    
675     pci_restore_state(pdev);
676     - rc = pci_enable_device(pdev);
677     - if (rc) {
678     - dev_warn(&pdev->dev, "failed to enable device.\n");
679     - goto unmap_cfgtable;
680     - }
681     pci_write_config_word(pdev, 4, command_register);
682    
683     /* Some devices (notably the HP Smart Array 5i Controller)
684     @@ -4448,6 +4439,23 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
685     if (!reset_devices)
686     return 0;
687    
688     + /* kdump kernel is loading, we don't know in which state is
689     + * the pci interface. The dev->enable_cnt is equal zero
690     + * so we call enable+disable, wait a while and switch it on.
691     + */
692     + rc = pci_enable_device(pdev);
693     + if (rc) {
694     + dev_warn(&pdev->dev, "Failed to enable PCI device\n");
695     + return -ENODEV;
696     + }
697     + pci_disable_device(pdev);
698     + msleep(260); /* a randomly chosen number */
699     + rc = pci_enable_device(pdev);
700     + if (rc) {
701     + dev_warn(&pdev->dev, "failed to enable device.\n");
702     + return -ENODEV;
703     + }
704     + pci_set_master(pdev);
705     /* Reset the controller with a PCI power-cycle or via doorbell */
706     rc = hpsa_kdump_hard_reset_controller(pdev);
707    
708     @@ -4456,10 +4464,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
709     * "performant mode". Or, it might be 640x, which can't reset
710     * due to concerns about shared bbwc between 6402/6404 pair.
711     */
712     - if (rc == -ENOTSUPP)
713     - return rc; /* just try to do the kdump anyhow. */
714     - if (rc)
715     - return -ENODEV;
716     + if (rc) {
717     + if (rc != -ENOTSUPP) /* just try to do the kdump anyhow. */
718     + rc = -ENODEV;
719     + goto out_disable;
720     + }
721    
722     /* Now try to get the controller to respond to a no-op */
723     dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
724     @@ -4470,7 +4479,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
725     dev_warn(&pdev->dev, "no-op failed%s\n",
726     (i < 11 ? "; re-trying" : ""));
727     }
728     - return 0;
729     +
730     +out_disable:
731     +
732     + pci_disable_device(pdev);
733     + return rc;
734     }
735    
736     static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
737     @@ -4613,6 +4626,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
738     iounmap(h->transtable);
739     if (h->cfgtable)
740     iounmap(h->cfgtable);
741     + pci_disable_device(h->pdev);
742     pci_release_regions(h->pdev);
743     kfree(h);
744     }
745     diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
746     index 7fb054ba1b60..82f14a1da542 100644
747     --- a/fs/btrfs/ctree.c
748     +++ b/fs/btrfs/ctree.c
749     @@ -2769,7 +2769,7 @@ done:
750     */
751     if (!p->leave_spinning)
752     btrfs_set_path_blocking(p);
753     - if (ret < 0)
754     + if (ret < 0 && !p->skip_release_on_error)
755     btrfs_release_path(p);
756     return ret;
757     }
758     diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
759     index d6dd49b51ba8..c19444e412be 100644
760     --- a/fs/btrfs/ctree.h
761     +++ b/fs/btrfs/ctree.h
762     @@ -586,6 +586,7 @@ struct btrfs_path {
763     unsigned int skip_locking:1;
764     unsigned int leave_spinning:1;
765     unsigned int search_commit_root:1;
766     + unsigned int skip_release_on_error:1;
767     };
768    
769     /*
770     @@ -3406,6 +3407,10 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
771     int verify_dir_item(struct btrfs_root *root,
772     struct extent_buffer *leaf,
773     struct btrfs_dir_item *dir_item);
774     +struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
775     + struct btrfs_path *path,
776     + const char *name,
777     + int name_len);
778    
779     /* orphan.c */
780     int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
781     diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
782     index 79e594e341c7..6f61b9b1526f 100644
783     --- a/fs/btrfs/dir-item.c
784     +++ b/fs/btrfs/dir-item.c
785     @@ -21,10 +21,6 @@
786     #include "hash.h"
787     #include "transaction.h"
788    
789     -static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
790     - struct btrfs_path *path,
791     - const char *name, int name_len);
792     -
793     /*
794     * insert a name into a directory, doing overflow properly if there is a hash
795     * collision. data_size indicates how big the item inserted should be. On
796     @@ -383,9 +379,9 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
797     * this walks through all the entries in a dir item and finds one
798     * for a specific name.
799     */
800     -static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
801     - struct btrfs_path *path,
802     - const char *name, int name_len)
803     +struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
804     + struct btrfs_path *path,
805     + const char *name, int name_len)
806     {
807     struct btrfs_dir_item *dir_item;
808     unsigned long name_ptr;
809     diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
810     index 05740b9789e4..9cf20d63cc99 100644
811     --- a/fs/btrfs/xattr.c
812     +++ b/fs/btrfs/xattr.c
813     @@ -27,6 +27,7 @@
814     #include "transaction.h"
815     #include "xattr.h"
816     #include "disk-io.h"
817     +#include "locking.h"
818    
819    
820     ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
821     @@ -89,7 +90,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
822     struct inode *inode, const char *name,
823     const void *value, size_t size, int flags)
824     {
825     - struct btrfs_dir_item *di;
826     + struct btrfs_dir_item *di = NULL;
827     struct btrfs_root *root = BTRFS_I(inode)->root;
828     struct btrfs_path *path;
829     size_t name_len = strlen(name);
830     @@ -101,84 +102,128 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
831     path = btrfs_alloc_path();
832     if (!path)
833     return -ENOMEM;
834     + path->skip_release_on_error = 1;
835     +
836     + if (!value) {
837     + di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),
838     + name, name_len, -1);
839     + if (!di && (flags & XATTR_REPLACE))
840     + ret = -ENODATA;
841     + else if (di)
842     + ret = btrfs_delete_one_dir_name(trans, root, path, di);
843     + goto out;
844     + }
845    
846     + /*
847     + * For a replace we can't just do the insert blindly.
848     + * Do a lookup first (read-only btrfs_search_slot), and return if xattr
849     + * doesn't exist. If it exists, fall down below to the insert/replace
850     + * path - we can't race with a concurrent xattr delete, because the VFS
851     + * locks the inode's i_mutex before calling setxattr or removexattr.
852     + */
853     if (flags & XATTR_REPLACE) {
854     - di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,
855     - name_len, -1);
856     - if (IS_ERR(di)) {
857     - ret = PTR_ERR(di);
858     - goto out;
859     - } else if (!di) {
860     + if(!mutex_is_locked(&inode->i_mutex)) {
861     + pr_err("BTRFS: assertion failed: %s, file: %s, line: %d",
862     + "mutex_is_locked(&inode->i_mutex)", __FILE__,
863     + __LINE__);
864     + BUG();
865     + }
866     + di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
867     + name, name_len, 0);
868     + if (!di) {
869     ret = -ENODATA;
870     goto out;
871     }
872     - ret = btrfs_delete_one_dir_name(trans, root, path, di);
873     - if (ret)
874     - goto out;
875     btrfs_release_path(path);
876     + di = NULL;
877     + }
878    
879     + ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
880     + name, name_len, value, size);
881     + if (ret == -EOVERFLOW) {
882     /*
883     - * remove the attribute
884     + * We have an existing item in a leaf, split_leaf couldn't
885     + * expand it. That item might have or not a dir_item that
886     + * matches our target xattr, so lets check.
887     */
888     - if (!value)
889     - goto out;
890     - } else {
891     - di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
892     - name, name_len, 0);
893     - if (IS_ERR(di)) {
894     - ret = PTR_ERR(di);
895     + ret = 0;
896     + btrfs_assert_tree_locked(path->nodes[0]);
897     + di = btrfs_match_dir_item_name(root, path, name, name_len);
898     + if (!di && !(flags & XATTR_REPLACE)) {
899     + ret = -ENOSPC;
900     goto out;
901     }
902     - if (!di && !value)
903     - goto out;
904     - btrfs_release_path(path);
905     + } else if (ret == -EEXIST) {
906     + ret = 0;
907     + di = btrfs_match_dir_item_name(root, path, name, name_len);
908     + if(!di) { /* logic error */
909     + pr_err("BTRFS: assertion failed: %s, file: %s, line: %d",
910     + "di", __FILE__, __LINE__);
911     + BUG();
912     + }
913     + } else if (ret) {
914     + goto out;
915     }
916    
917     -again:
918     - ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
919     - name, name_len, value, size);
920     - /*
921     - * If we're setting an xattr to a new value but the new value is say
922     - * exactly BTRFS_MAX_XATTR_SIZE, we could end up with EOVERFLOW getting
923     - * back from split_leaf. This is because it thinks we'll be extending
924     - * the existing item size, but we're asking for enough space to add the
925     - * item itself. So if we get EOVERFLOW just set ret to EEXIST and let
926     - * the rest of the function figure it out.
927     - */
928     - if (ret == -EOVERFLOW)
929     + if (di && (flags & XATTR_CREATE)) {
930     ret = -EEXIST;
931     + goto out;
932     + }
933    
934     - if (ret == -EEXIST) {
935     - if (flags & XATTR_CREATE)
936     - goto out;
937     + if (di) {
938     /*
939     - * We can't use the path we already have since we won't have the
940     - * proper locking for a delete, so release the path and
941     - * re-lookup to delete the thing.
942     + * We're doing a replace, and it must be atomic, that is, at
943     + * any point in time we have either the old or the new xattr
944     + * value in the tree. We don't want readers (getxattr and
945     + * listxattrs) to miss a value, this is specially important
946     + * for ACLs.
947     */
948     - btrfs_release_path(path);
949     - di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),
950     - name, name_len, -1);
951     - if (IS_ERR(di)) {
952     - ret = PTR_ERR(di);
953     - goto out;
954     - } else if (!di) {
955     - /* Shouldn't happen but just in case... */
956     - btrfs_release_path(path);
957     - goto again;
958     + const int slot = path->slots[0];
959     + struct extent_buffer *leaf = path->nodes[0];
960     + const u16 old_data_len = btrfs_dir_data_len(leaf, di);
961     + const u32 item_size = btrfs_item_size_nr(leaf, slot);
962     + const u32 data_size = sizeof(*di) + name_len + size;
963     + struct btrfs_item *item;
964     + unsigned long data_ptr;
965     + char *ptr;
966     +
967     + if (size > old_data_len) {
968     + if (btrfs_leaf_free_space(root, leaf) <
969     + (size - old_data_len)) {
970     + ret = -ENOSPC;
971     + goto out;
972     + }
973     }
974    
975     - ret = btrfs_delete_one_dir_name(trans, root, path, di);
976     - if (ret)
977     - goto out;
978     + if (old_data_len + name_len + sizeof(*di) == item_size) {
979     + /* No other xattrs packed in the same leaf item. */
980     + if (size > old_data_len)
981     + btrfs_extend_item(root, path,
982     + size - old_data_len);
983     + else if (size < old_data_len)
984     + btrfs_truncate_item(root, path, data_size, 1);
985     + } else {
986     + /* There are other xattrs packed in the same item. */
987     + ret = btrfs_delete_one_dir_name(trans, root, path, di);
988     + if (ret)
989     + goto out;
990     + btrfs_extend_item(root, path, data_size);
991     + }
992    
993     + item = btrfs_item_nr(NULL, slot);
994     + ptr = btrfs_item_ptr(leaf, slot, char);
995     + ptr += btrfs_item_size(leaf, item) - data_size;
996     + di = (struct btrfs_dir_item *)ptr;
997     + btrfs_set_dir_data_len(leaf, di, size);
998     + data_ptr = ((unsigned long)(di + 1)) + name_len;
999     + write_extent_buffer(leaf, value, data_ptr, size);
1000     + btrfs_mark_buffer_dirty(leaf);
1001     + } else {
1002     /*
1003     - * We have a value to set, so go back and try to insert it now.
1004     + * Insert, and we had space for the xattr, so path->slots[0] is
1005     + * where our xattr dir_item is and btrfs_insert_xattr_item()
1006     + * filled it.
1007     */
1008     - if (value) {
1009     - btrfs_release_path(path);
1010     - goto again;
1011     - }
1012     }
1013     out:
1014     btrfs_free_path(path);
1015     diff --git a/fs/dcache.c b/fs/dcache.c
1016     index e2800926ae05..38c4a302fab4 100644
1017     --- a/fs/dcache.c
1018     +++ b/fs/dcache.c
1019     @@ -1053,13 +1053,13 @@ ascend:
1020     /* might go back up the wrong parent if we have had a rename. */
1021     if (!locked && read_seqretry(&rename_lock, seq))
1022     goto rename_retry;
1023     - next = child->d_child.next;
1024     - while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
1025     + /* go into the first sibling still alive */
1026     + do {
1027     + next = child->d_child.next;
1028     if (next == &this_parent->d_subdirs)
1029     goto ascend;
1030     child = list_entry(next, struct dentry, d_child);
1031     - next = next->next;
1032     - }
1033     + } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
1034     rcu_read_unlock();
1035     goto resume;
1036     }
1037     @@ -2977,13 +2977,13 @@ ascend:
1038     /* might go back up the wrong parent if we have had a rename. */
1039     if (!locked && read_seqretry(&rename_lock, seq))
1040     goto rename_retry;
1041     - next = child->d_child.next;
1042     - while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
1043     + /* go into the first sibling still alive */
1044     + do {
1045     + next = child->d_child.next;
1046     if (next == &this_parent->d_subdirs)
1047     goto ascend;
1048     child = list_entry(next, struct dentry, d_child);
1049     - next = next->next;
1050     - }
1051     + } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
1052     rcu_read_unlock();
1053     goto resume;
1054     }
1055     diff --git a/fs/exec.c b/fs/exec.c
1056     index dd6aa61c8548..acbd7ac2deda 100644
1057     --- a/fs/exec.c
1058     +++ b/fs/exec.c
1059     @@ -1265,6 +1265,53 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
1060     return res;
1061     }
1062    
1063     +static void bprm_fill_uid(struct linux_binprm *bprm)
1064     +{
1065     + struct inode *inode;
1066     + unsigned int mode;
1067     + kuid_t uid;
1068     + kgid_t gid;
1069     +
1070     + /* clear any previous set[ug]id data from a previous binary */
1071     + bprm->cred->euid = current_euid();
1072     + bprm->cred->egid = current_egid();
1073     +
1074     + if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
1075     + return;
1076     +
1077     + if (current->no_new_privs)
1078     + return;
1079     +
1080     + inode = file_inode(bprm->file);
1081     + mode = ACCESS_ONCE(inode->i_mode);
1082     + if (!(mode & (S_ISUID|S_ISGID)))
1083     + return;
1084     +
1085     + /* Be careful if suid/sgid is set */
1086     + mutex_lock(&inode->i_mutex);
1087     +
1088     + /* reload atomically mode/uid/gid now that lock held */
1089     + mode = inode->i_mode;
1090     + uid = inode->i_uid;
1091     + gid = inode->i_gid;
1092     + mutex_unlock(&inode->i_mutex);
1093     +
1094     + /* We ignore suid/sgid if there are no mappings for them in the ns */
1095     + if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
1096     + !kgid_has_mapping(bprm->cred->user_ns, gid))
1097     + return;
1098     +
1099     + if (mode & S_ISUID) {
1100     + bprm->per_clear |= PER_CLEAR_ON_SETID;
1101     + bprm->cred->euid = uid;
1102     + }
1103     +
1104     + if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
1105     + bprm->per_clear |= PER_CLEAR_ON_SETID;
1106     + bprm->cred->egid = gid;
1107     + }
1108     +}
1109     +
1110     /*
1111     * Fill the binprm structure from the inode.
1112     * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
1113     @@ -1273,39 +1320,12 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
1114     */
1115     int prepare_binprm(struct linux_binprm *bprm)
1116     {
1117     - umode_t mode;
1118     - struct inode * inode = file_inode(bprm->file);
1119     int retval;
1120    
1121     - mode = inode->i_mode;
1122     if (bprm->file->f_op == NULL)
1123     return -EACCES;
1124    
1125     - /* clear any previous set[ug]id data from a previous binary */
1126     - bprm->cred->euid = current_euid();
1127     - bprm->cred->egid = current_egid();
1128     -
1129     - if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
1130     - !current->no_new_privs &&
1131     - kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
1132     - kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
1133     - /* Set-uid? */
1134     - if (mode & S_ISUID) {
1135     - bprm->per_clear |= PER_CLEAR_ON_SETID;
1136     - bprm->cred->euid = inode->i_uid;
1137     - }
1138     -
1139     - /* Set-gid? */
1140     - /*
1141     - * If setgid is set but no group execute bit then this
1142     - * is a candidate for mandatory locking, not a setgid
1143     - * executable.
1144     - */
1145     - if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
1146     - bprm->per_clear |= PER_CLEAR_ON_SETID;
1147     - bprm->cred->egid = inode->i_gid;
1148     - }
1149     - }
1150     + bprm_fill_uid(bprm);
1151    
1152     /* fill in binprm security blob */
1153     retval = security_bprm_set_creds(bprm);
1154     diff --git a/fs/file_table.c b/fs/file_table.c
1155     index 54a34be444f9..28f02a7cbba1 100644
1156     --- a/fs/file_table.c
1157     +++ b/fs/file_table.c
1158     @@ -36,8 +36,6 @@ struct files_stat_struct files_stat = {
1159     .max_files = NR_FILE
1160     };
1161    
1162     -DEFINE_STATIC_LGLOCK(files_lglock);
1163     -
1164     /* SLAB cache for file structures */
1165     static struct kmem_cache *filp_cachep __read_mostly;
1166    
1167     @@ -134,7 +132,6 @@ struct file *get_empty_filp(void)
1168     return ERR_PTR(error);
1169     }
1170    
1171     - INIT_LIST_HEAD(&f->f_u.fu_list);
1172     atomic_long_set(&f->f_count, 1);
1173     rwlock_init(&f->f_owner.lock);
1174     spin_lock_init(&f->f_lock);
1175     @@ -265,18 +262,15 @@ static void __fput(struct file *file)
1176     mntput(mnt);
1177     }
1178    
1179     -static DEFINE_SPINLOCK(delayed_fput_lock);
1180     -static LIST_HEAD(delayed_fput_list);
1181     +static LLIST_HEAD(delayed_fput_list);
1182     static void delayed_fput(struct work_struct *unused)
1183     {
1184     - LIST_HEAD(head);
1185     - spin_lock_irq(&delayed_fput_lock);
1186     - list_splice_init(&delayed_fput_list, &head);
1187     - spin_unlock_irq(&delayed_fput_lock);
1188     - while (!list_empty(&head)) {
1189     - struct file *f = list_first_entry(&head, struct file, f_u.fu_list);
1190     - list_del_init(&f->f_u.fu_list);
1191     - __fput(f);
1192     + struct llist_node *node = llist_del_all(&delayed_fput_list);
1193     + struct llist_node *next;
1194     +
1195     + for (; node; node = next) {
1196     + next = llist_next(node);
1197     + __fput(llist_entry(node, struct file, f_u.fu_llist));
1198     }
1199     }
1200    
1201     @@ -306,18 +300,15 @@ void fput(struct file *file)
1202     {
1203     if (atomic_long_dec_and_test(&file->f_count)) {
1204     struct task_struct *task = current;
1205     - unsigned long flags;
1206    
1207     - file_sb_list_del(file);
1208     if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
1209     init_task_work(&file->f_u.fu_rcuhead, ____fput);
1210     if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
1211     return;
1212     }
1213     - spin_lock_irqsave(&delayed_fput_lock, flags);
1214     - list_add(&file->f_u.fu_list, &delayed_fput_list);
1215     - schedule_work(&delayed_fput_work);
1216     - spin_unlock_irqrestore(&delayed_fput_lock, flags);
1217     +
1218     + if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
1219     + schedule_work(&delayed_fput_work);
1220     }
1221     }
1222    
1223     @@ -333,7 +324,6 @@ void __fput_sync(struct file *file)
1224     {
1225     if (atomic_long_dec_and_test(&file->f_count)) {
1226     struct task_struct *task = current;
1227     - file_sb_list_del(file);
1228     BUG_ON(!(task->flags & PF_KTHREAD));
1229     __fput(file);
1230     }
1231     @@ -345,127 +335,10 @@ void put_filp(struct file *file)
1232     {
1233     if (atomic_long_dec_and_test(&file->f_count)) {
1234     security_file_free(file);
1235     - file_sb_list_del(file);
1236     file_free(file);
1237     }
1238     }
1239    
1240     -static inline int file_list_cpu(struct file *file)
1241     -{
1242     -#ifdef CONFIG_SMP
1243     - return file->f_sb_list_cpu;
1244     -#else
1245     - return smp_processor_id();
1246     -#endif
1247     -}
1248     -
1249     -/* helper for file_sb_list_add to reduce ifdefs */
1250     -static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
1251     -{
1252     - struct list_head *list;
1253     -#ifdef CONFIG_SMP
1254     - int cpu;
1255     - cpu = smp_processor_id();
1256     - file->f_sb_list_cpu = cpu;
1257     - list = per_cpu_ptr(sb->s_files, cpu);
1258     -#else
1259     - list = &sb->s_files;
1260     -#endif
1261     - list_add(&file->f_u.fu_list, list);
1262     -}
1263     -
1264     -/**
1265     - * file_sb_list_add - add a file to the sb's file list
1266     - * @file: file to add
1267     - * @sb: sb to add it to
1268     - *
1269     - * Use this function to associate a file with the superblock of the inode it
1270     - * refers to.
1271     - */
1272     -void file_sb_list_add(struct file *file, struct super_block *sb)
1273     -{
1274     - lg_local_lock(&files_lglock);
1275     - __file_sb_list_add(file, sb);
1276     - lg_local_unlock(&files_lglock);
1277     -}
1278     -
1279     -/**
1280     - * file_sb_list_del - remove a file from the sb's file list
1281     - * @file: file to remove
1282     - * @sb: sb to remove it from
1283     - *
1284     - * Use this function to remove a file from its superblock.
1285     - */
1286     -void file_sb_list_del(struct file *file)
1287     -{
1288     - if (!list_empty(&file->f_u.fu_list)) {
1289     - lg_local_lock_cpu(&files_lglock, file_list_cpu(file));
1290     - list_del_init(&file->f_u.fu_list);
1291     - lg_local_unlock_cpu(&files_lglock, file_list_cpu(file));
1292     - }
1293     -}
1294     -
1295     -#ifdef CONFIG_SMP
1296     -
1297     -/*
1298     - * These macros iterate all files on all CPUs for a given superblock.
1299     - * files_lglock must be held globally.
1300     - */
1301     -#define do_file_list_for_each_entry(__sb, __file) \
1302     -{ \
1303     - int i; \
1304     - for_each_possible_cpu(i) { \
1305     - struct list_head *list; \
1306     - list = per_cpu_ptr((__sb)->s_files, i); \
1307     - list_for_each_entry((__file), list, f_u.fu_list)
1308     -
1309     -#define while_file_list_for_each_entry \
1310     - } \
1311     -}
1312     -
1313     -#else
1314     -
1315     -#define do_file_list_for_each_entry(__sb, __file) \
1316     -{ \
1317     - struct list_head *list; \
1318     - list = &(sb)->s_files; \
1319     - list_for_each_entry((__file), list, f_u.fu_list)
1320     -
1321     -#define while_file_list_for_each_entry \
1322     -}
1323     -
1324     -#endif
1325     -
1326     -/**
1327     - * mark_files_ro - mark all files read-only
1328     - * @sb: superblock in question
1329     - *
1330     - * All files are marked read-only. We don't care about pending
1331     - * delete files so this should be used in 'force' mode only.
1332     - */
1333     -void mark_files_ro(struct super_block *sb)
1334     -{
1335     - struct file *f;
1336     -
1337     - lg_global_lock(&files_lglock);
1338     - do_file_list_for_each_entry(sb, f) {
1339     - if (!S_ISREG(file_inode(f)->i_mode))
1340     - continue;
1341     - if (!file_count(f))
1342     - continue;
1343     - if (!(f->f_mode & FMODE_WRITE))
1344     - continue;
1345     - spin_lock(&f->f_lock);
1346     - f->f_mode &= ~FMODE_WRITE;
1347     - spin_unlock(&f->f_lock);
1348     - if (file_check_writeable(f) != 0)
1349     - continue;
1350     - __mnt_drop_write(f->f_path.mnt);
1351     - file_release_write(f);
1352     - } while_file_list_for_each_entry;
1353     - lg_global_unlock(&files_lglock);
1354     -}
1355     -
1356     void __init files_init(unsigned long mempages)
1357     {
1358     unsigned long n;
1359     @@ -481,6 +354,5 @@ void __init files_init(unsigned long mempages)
1360     n = (mempages * (PAGE_SIZE / 1024)) / 10;
1361     files_stat.max_files = max_t(unsigned long, n, NR_FILE);
1362     files_defer_init();
1363     - lg_lock_init(&files_lglock, "files_lglock");
1364     percpu_counter_init(&nr_files, 0);
1365     }
1366     diff --git a/fs/internal.h b/fs/internal.h
1367     index 68121584ae37..2ffa65a36ca0 100644
1368     --- a/fs/internal.h
1369     +++ b/fs/internal.h
1370     @@ -74,9 +74,6 @@ extern void chroot_fs_refs(const struct path *, const struct path *);
1371     /*
1372     * file_table.c
1373     */
1374     -extern void file_sb_list_add(struct file *f, struct super_block *sb);
1375     -extern void file_sb_list_del(struct file *f);
1376     -extern void mark_files_ro(struct super_block *);
1377     extern struct file *get_empty_filp(void);
1378    
1379     /*
1380     diff --git a/fs/open.c b/fs/open.c
1381     index 86092bde31f4..5f129683b7d7 100644
1382     --- a/fs/open.c
1383     +++ b/fs/open.c
1384     @@ -674,7 +674,6 @@ static int do_dentry_open(struct file *f,
1385     }
1386    
1387     f->f_mapping = inode->i_mapping;
1388     - file_sb_list_add(f, inode->i_sb);
1389    
1390     if (unlikely(f->f_mode & FMODE_PATH)) {
1391     f->f_op = &empty_fops;
1392     @@ -709,7 +708,6 @@ static int do_dentry_open(struct file *f,
1393    
1394     cleanup_all:
1395     fops_put(f->f_op);
1396     - file_sb_list_del(f);
1397     if (f->f_mode & FMODE_WRITE) {
1398     if (!special_file(inode->i_mode)) {
1399     /*
1400     diff --git a/fs/super.c b/fs/super.c
1401     index e028b508db25..97280e76179c 100644
1402     --- a/fs/super.c
1403     +++ b/fs/super.c
1404     @@ -163,19 +163,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
1405     s = NULL;
1406     goto out;
1407     }
1408     -#ifdef CONFIG_SMP
1409     - s->s_files = alloc_percpu(struct list_head);
1410     - if (!s->s_files)
1411     - goto err_out;
1412     - else {
1413     - int i;
1414     -
1415     - for_each_possible_cpu(i)
1416     - INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
1417     - }
1418     -#else
1419     - INIT_LIST_HEAD(&s->s_files);
1420     -#endif
1421     if (init_sb_writers(s, type))
1422     goto err_out;
1423     s->s_flags = flags;
1424     @@ -225,10 +212,6 @@ out:
1425     return s;
1426     err_out:
1427     security_sb_free(s);
1428     -#ifdef CONFIG_SMP
1429     - if (s->s_files)
1430     - free_percpu(s->s_files);
1431     -#endif
1432     destroy_sb_writers(s);
1433     kfree(s);
1434     s = NULL;
1435     @@ -243,9 +226,6 @@ err_out:
1436     */
1437     static inline void destroy_super(struct super_block *s)
1438     {
1439     -#ifdef CONFIG_SMP
1440     - free_percpu(s->s_files);
1441     -#endif
1442     destroy_sb_writers(s);
1443     security_sb_free(s);
1444     WARN_ON(!list_empty(&s->s_mounts));
1445     @@ -727,7 +707,8 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
1446     make sure there are no rw files opened */
1447     if (remount_ro) {
1448     if (force) {
1449     - mark_files_ro(sb);
1450     + sb->s_readonly_remount = 1;
1451     + smp_wmb();
1452     } else {
1453     retval = sb_prepare_remount_readonly(sb);
1454     if (retval)
1455     diff --git a/include/linux/fs.h b/include/linux/fs.h
1456     index d57bc5df7225..5c9dc8471da5 100644
1457     --- a/include/linux/fs.h
1458     +++ b/include/linux/fs.h
1459     @@ -10,6 +10,7 @@
1460     #include <linux/stat.h>
1461     #include <linux/cache.h>
1462     #include <linux/list.h>
1463     +#include <linux/llist.h>
1464     #include <linux/radix-tree.h>
1465     #include <linux/rbtree.h>
1466     #include <linux/init.h>
1467     @@ -761,12 +762,8 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
1468     #define FILE_MNT_WRITE_RELEASED 2
1469    
1470     struct file {
1471     - /*
1472     - * fu_list becomes invalid after file_free is called and queued via
1473     - * fu_rcuhead for RCU freeing
1474     - */
1475     union {
1476     - struct list_head fu_list;
1477     + struct llist_node fu_llist;
1478     struct rcu_head fu_rcuhead;
1479     } f_u;
1480     struct path f_path;
1481     @@ -779,9 +776,6 @@ struct file {
1482     * Must not be taken from IRQ context.
1483     */
1484     spinlock_t f_lock;
1485     -#ifdef CONFIG_SMP
1486     - int f_sb_list_cpu;
1487     -#endif
1488     atomic_long_t f_count;
1489     unsigned int f_flags;
1490     fmode_t f_mode;
1491     @@ -1257,11 +1251,6 @@ struct super_block {
1492    
1493     struct list_head s_inodes; /* all inodes */
1494     struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
1495     -#ifdef CONFIG_SMP
1496     - struct list_head __percpu *s_files;
1497     -#else
1498     - struct list_head s_files;
1499     -#endif
1500     struct list_head s_mounts; /* list of mounts; _not_ for fs use */
1501     /* s_dentry_lru, s_nr_dentry_unused protected by dcache.c lru locks */
1502     struct list_head s_dentry_lru; /* unused dentry lru */
1503     diff --git a/include/linux/mbus.h b/include/linux/mbus.h
1504     index dba482e31a13..e80b9c7ec8da 100644
1505     --- a/include/linux/mbus.h
1506     +++ b/include/linux/mbus.h
1507     @@ -67,6 +67,6 @@ int mvebu_mbus_add_window(const char *devname, phys_addr_t base,
1508     int mvebu_mbus_del_window(phys_addr_t base, size_t size);
1509     int mvebu_mbus_init(const char *soc, phys_addr_t mbus_phys_base,
1510     size_t mbus_size, phys_addr_t sdram_phys_base,
1511     - size_t sdram_size);
1512     + size_t sdram_size, int is_coherent);
1513    
1514     #endif /* __LINUX_MBUS_H */
1515     diff --git a/include/linux/sched.h b/include/linux/sched.h
1516     index 00c1d4f45072..7cf305d036db 100644
1517     --- a/include/linux/sched.h
1518     +++ b/include/linux/sched.h
1519     @@ -2203,15 +2203,15 @@ static inline bool thread_group_leader(struct task_struct *p)
1520     * all we care about is that we have a task with the appropriate
1521     * pid, we don't actually care if we have the right task.
1522     */
1523     -static inline int has_group_leader_pid(struct task_struct *p)
1524     +static inline bool has_group_leader_pid(struct task_struct *p)
1525     {
1526     - return p->pid == p->tgid;
1527     + return task_pid(p) == p->signal->leader_pid;
1528     }
1529    
1530     static inline
1531     -int same_thread_group(struct task_struct *p1, struct task_struct *p2)
1532     +bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
1533     {
1534     - return p1->tgid == p2->tgid;
1535     + return p1->signal == p2->signal;
1536     }
1537    
1538     static inline struct task_struct *next_thread(const struct task_struct *p)
1539     diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
1540     index 665e0cee59bd..5e661a979694 100644
1541     --- a/include/net/ip6_fib.h
1542     +++ b/include/net/ip6_fib.h
1543     @@ -301,7 +301,7 @@ extern void inet6_rt_notify(int event, struct rt6_info *rt,
1544     struct nl_info *info);
1545    
1546     extern void fib6_run_gc(unsigned long expires,
1547     - struct net *net);
1548     + struct net *net, bool force);
1549    
1550     extern void fib6_gc_cleanup(void);
1551    
1552     diff --git a/kernel/ptrace.c b/kernel/ptrace.c
1553     index 118323bc8529..30ab20623bca 100644
1554     --- a/kernel/ptrace.c
1555     +++ b/kernel/ptrace.c
1556     @@ -236,7 +236,7 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
1557     */
1558     int dumpable = 0;
1559     /* Don't let security modules deny introspection */
1560     - if (task == current)
1561     + if (same_thread_group(task, current))
1562     return 0;
1563     rcu_read_lock();
1564     tcred = __task_cred(task);
1565     diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
1566     index 9a459be24af7..9b5b5ddf8cd4 100644
1567     --- a/net/ipv4/xfrm4_policy.c
1568     +++ b/net/ipv4/xfrm4_policy.c
1569     @@ -235,7 +235,7 @@ static struct dst_ops xfrm4_dst_ops = {
1570     .destroy = xfrm4_dst_destroy,
1571     .ifdown = xfrm4_dst_ifdown,
1572     .local_out = __ip_local_out,
1573     - .gc_thresh = 1024,
1574     + .gc_thresh = 32768,
1575     };
1576    
1577     static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
1578     diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
1579     index ceeb9458bb60..46458ee31939 100644
1580     --- a/net/ipv6/ip6_fib.c
1581     +++ b/net/ipv6/ip6_fib.c
1582     @@ -1648,27 +1648,28 @@ static int fib6_age(struct rt6_info *rt, void *arg)
1583    
1584     static DEFINE_SPINLOCK(fib6_gc_lock);
1585    
1586     -void fib6_run_gc(unsigned long expires, struct net *net)
1587     +void fib6_run_gc(unsigned long expires, struct net *net, bool force)
1588     {
1589     - if (expires != ~0UL) {
1590     + unsigned long now;
1591     +
1592     + if (force) {
1593     spin_lock_bh(&fib6_gc_lock);
1594     - gc_args.timeout = expires ? (int)expires :
1595     - net->ipv6.sysctl.ip6_rt_gc_interval;
1596     - } else {
1597     - if (!spin_trylock_bh(&fib6_gc_lock)) {
1598     - mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
1599     - return;
1600     - }
1601     - gc_args.timeout = net->ipv6.sysctl.ip6_rt_gc_interval;
1602     + } else if (!spin_trylock_bh(&fib6_gc_lock)) {
1603     + mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
1604     + return;
1605     }
1606     + gc_args.timeout = expires ? (int)expires :
1607     + net->ipv6.sysctl.ip6_rt_gc_interval;
1608    
1609     gc_args.more = icmp6_dst_gc();
1610    
1611     fib6_clean_all(net, fib6_age, 0, NULL);
1612     + now = jiffies;
1613     + net->ipv6.ip6_rt_last_gc = now;
1614    
1615     if (gc_args.more)
1616     mod_timer(&net->ipv6.ip6_fib_timer,
1617     - round_jiffies(jiffies
1618     + round_jiffies(now
1619     + net->ipv6.sysctl.ip6_rt_gc_interval));
1620     else
1621     del_timer(&net->ipv6.ip6_fib_timer);
1622     @@ -1677,7 +1678,7 @@ void fib6_run_gc(unsigned long expires, struct net *net)
1623    
1624     static void fib6_gc_timer_cb(unsigned long arg)
1625     {
1626     - fib6_run_gc(0, (struct net *)arg);
1627     + fib6_run_gc(0, (struct net *)arg, true);
1628     }
1629    
1630     static int __net_init fib6_net_init(struct net *net)
1631     diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
1632     index 05f361338c2e..deedf7ddbc6e 100644
1633     --- a/net/ipv6/ndisc.c
1634     +++ b/net/ipv6/ndisc.c
1635     @@ -1584,7 +1584,7 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
1636     switch (event) {
1637     case NETDEV_CHANGEADDR:
1638     neigh_changeaddr(&nd_tbl, dev);
1639     - fib6_run_gc(~0UL, net);
1640     + fib6_run_gc(0, net, false);
1641     idev = in6_dev_get(dev);
1642     if (!idev)
1643     break;
1644     @@ -1594,7 +1594,7 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
1645     break;
1646     case NETDEV_DOWN:
1647     neigh_ifdown(&nd_tbl, dev);
1648     - fib6_run_gc(~0UL, net);
1649     + fib6_run_gc(0, net, false);
1650     break;
1651     case NETDEV_NOTIFY_PEERS:
1652     ndisc_send_unsol_na(dev);
1653     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
1654     index d94d224f7e68..6ebefd46f718 100644
1655     --- a/net/ipv6/route.c
1656     +++ b/net/ipv6/route.c
1657     @@ -1334,7 +1334,6 @@ static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1658    
1659     static int ip6_dst_gc(struct dst_ops *ops)
1660     {
1661     - unsigned long now = jiffies;
1662     struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1663     int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1664     int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1665     @@ -1344,13 +1343,12 @@ static int ip6_dst_gc(struct dst_ops *ops)
1666     int entries;
1667    
1668     entries = dst_entries_get_fast(ops);
1669     - if (time_after(rt_last_gc + rt_min_interval, now) &&
1670     + if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
1671     entries <= rt_max_size)
1672     goto out;
1673    
1674     net->ipv6.ip6_rt_gc_expire++;
1675     - fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
1676     - net->ipv6.ip6_rt_last_gc = now;
1677     + fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, entries > rt_max_size);
1678     entries = dst_entries_get_slow(ops);
1679     if (entries < ops->gc_thresh)
1680     net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1681     @@ -2849,7 +2847,7 @@ int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
1682     net = (struct net *)ctl->extra1;
1683     delay = net->ipv6.sysctl.flush_delay;
1684     proc_dointvec(ctl, write, buffer, lenp, ppos);
1685     - fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
1686     + fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
1687     return 0;
1688     }
1689    
1690     diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
1691     index 23ed03d786c8..1c2e0c9ba8a1 100644
1692     --- a/net/ipv6/xfrm6_policy.c
1693     +++ b/net/ipv6/xfrm6_policy.c
1694     @@ -284,7 +284,7 @@ static struct dst_ops xfrm6_dst_ops = {
1695     .destroy = xfrm6_dst_destroy,
1696     .ifdown = xfrm6_dst_ifdown,
1697     .local_out = __ip6_local_out,
1698     - .gc_thresh = 1024,
1699     + .gc_thresh = 32768,
1700     };
1701    
1702     static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
1703     diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
1704     index a191b6db657e..3b283edec027 100644
1705     --- a/net/netfilter/nfnetlink_cthelper.c
1706     +++ b/net/netfilter/nfnetlink_cthelper.c
1707     @@ -74,6 +74,9 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
1708     if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
1709     return -EINVAL;
1710    
1711     + /* Not all fields are initialized so first zero the tuple */
1712     + memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
1713     +
1714     tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));
1715     tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
1716    
1717     @@ -83,7 +86,7 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
1718     static int
1719     nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
1720     {
1721     - const struct nf_conn_help *help = nfct_help(ct);
1722     + struct nf_conn_help *help = nfct_help(ct);
1723    
1724     if (attr == NULL)
1725     return -EINVAL;
1726     @@ -91,7 +94,7 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
1727     if (help->helper->data_len == 0)
1728     return -EINVAL;
1729    
1730     - memcpy(&help->data, nla_data(attr), help->helper->data_len);
1731     + memcpy(help->data, nla_data(attr), help->helper->data_len);
1732     return 0;
1733     }
1734