Contents of /trunk/kernel-alx-legacy/patches-4.9/0356-4.9.257-all-fixes.patch
Parent Directory | Revision Log
Revision 3658 -
(show annotations)
(download)
Mon Oct 24 14:07:36 2022 UTC (23 months ago) by niro
File size: 60677 byte(s)
-linux-4.9.257
1 | diff --git a/Makefile b/Makefile |
2 | index 69af44d3dcd14..e53096154f816 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 4 |
7 | PATCHLEVEL = 9 |
8 | -SUBLEVEL = 256 |
9 | +SUBLEVEL = 257 |
10 | EXTRAVERSION = |
11 | NAME = Roaring Lionus |
12 | |
13 | @@ -841,12 +841,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init) |
14 | # change __FILE__ to the relative path from the srctree |
15 | KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) |
16 | |
17 | -# ensure -fcf-protection is disabled when using retpoline as it is |
18 | -# incompatible with -mindirect-branch=thunk-extern |
19 | -ifdef CONFIG_RETPOLINE |
20 | -KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) |
21 | -endif |
22 | - |
23 | # use the deterministic mode of AR if available |
24 | KBUILD_ARFLAGS := $(call ar-option,D) |
25 | |
26 | @@ -1141,7 +1135,7 @@ endef |
27 | |
28 | define filechk_version.h |
29 | (echo \#define LINUX_VERSION_CODE $(shell \ |
30 | - expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \ |
31 | + expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 255); \ |
32 | echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';) |
33 | endef |
34 | |
35 | diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c |
36 | index 96a3d73ef4bf4..fd6c9169fa78e 100644 |
37 | --- a/arch/arm/mach-footbridge/dc21285.c |
38 | +++ b/arch/arm/mach-footbridge/dc21285.c |
39 | @@ -69,15 +69,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where, |
40 | if (addr) |
41 | switch (size) { |
42 | case 1: |
43 | - asm("ldrb %0, [%1, %2]" |
44 | + asm volatile("ldrb %0, [%1, %2]" |
45 | : "=r" (v) : "r" (addr), "r" (where) : "cc"); |
46 | break; |
47 | case 2: |
48 | - asm("ldrh %0, [%1, %2]" |
49 | + asm volatile("ldrh %0, [%1, %2]" |
50 | : "=r" (v) : "r" (addr), "r" (where) : "cc"); |
51 | break; |
52 | case 4: |
53 | - asm("ldr %0, [%1, %2]" |
54 | + asm volatile("ldr %0, [%1, %2]" |
55 | : "=r" (v) : "r" (addr), "r" (where) : "cc"); |
56 | break; |
57 | } |
58 | @@ -103,17 +103,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where, |
59 | if (addr) |
60 | switch (size) { |
61 | case 1: |
62 | - asm("strb %0, [%1, %2]" |
63 | + asm volatile("strb %0, [%1, %2]" |
64 | : : "r" (value), "r" (addr), "r" (where) |
65 | : "cc"); |
66 | break; |
67 | case 2: |
68 | - asm("strh %0, [%1, %2]" |
69 | + asm volatile("strh %0, [%1, %2]" |
70 | : : "r" (value), "r" (addr), "r" (where) |
71 | : "cc"); |
72 | break; |
73 | case 4: |
74 | - asm("str %0, [%1, %2]" |
75 | + asm volatile("str %0, [%1, %2]" |
76 | : : "r" (value), "r" (addr), "r" (where) |
77 | : "cc"); |
78 | break; |
79 | diff --git a/arch/x86/Makefile b/arch/x86/Makefile |
80 | index 940ed27a62123..a95d414663b1e 100644 |
81 | --- a/arch/x86/Makefile |
82 | +++ b/arch/x86/Makefile |
83 | @@ -137,6 +137,9 @@ else |
84 | KBUILD_CFLAGS += -mno-red-zone |
85 | KBUILD_CFLAGS += -mcmodel=kernel |
86 | |
87 | + # Intel CET isn't enabled in the kernel |
88 | + KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) |
89 | + |
90 | # -funit-at-a-time shrinks the kernel .text considerably |
91 | # unfortunately it makes reading oopses harder. |
92 | KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time) |
93 | diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h |
94 | index f39fd349cef65..a6d034257b7bb 100644 |
95 | --- a/arch/x86/include/asm/apic.h |
96 | +++ b/arch/x86/include/asm/apic.h |
97 | @@ -176,16 +176,6 @@ static inline void lapic_update_tsc_freq(void) { } |
98 | #endif /* !CONFIG_X86_LOCAL_APIC */ |
99 | |
100 | #ifdef CONFIG_X86_X2APIC |
101 | -/* |
102 | - * Make previous memory operations globally visible before |
103 | - * sending the IPI through x2apic wrmsr. We need a serializing instruction or |
104 | - * mfence for this. |
105 | - */ |
106 | -static inline void x2apic_wrmsr_fence(void) |
107 | -{ |
108 | - asm volatile("mfence" : : : "memory"); |
109 | -} |
110 | - |
111 | static inline void native_apic_msr_write(u32 reg, u32 v) |
112 | { |
113 | if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR || |
114 | diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h |
115 | index a0f450b21d676..89b75edf24af8 100644 |
116 | --- a/arch/x86/include/asm/barrier.h |
117 | +++ b/arch/x86/include/asm/barrier.h |
118 | @@ -110,4 +110,22 @@ do { \ |
119 | |
120 | #include <asm-generic/barrier.h> |
121 | |
122 | +/* |
123 | + * Make previous memory operations globally visible before |
124 | + * a WRMSR. |
125 | + * |
126 | + * MFENCE makes writes visible, but only affects load/store |
127 | + * instructions. WRMSR is unfortunately not a load/store |
128 | + * instruction and is unaffected by MFENCE. The LFENCE ensures |
129 | + * that the WRMSR is not reordered. |
130 | + * |
131 | + * Most WRMSRs are full serializing instructions themselves and |
132 | + * do not require this barrier. This is only required for the |
133 | + * IA32_TSC_DEADLINE and X2APIC MSRs. |
134 | + */ |
135 | +static inline void weak_wrmsr_fence(void) |
136 | +{ |
137 | + asm volatile("mfence; lfence" : : : "memory"); |
138 | +} |
139 | + |
140 | #endif /* _ASM_X86_BARRIER_H */ |
141 | diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c |
142 | index 722a76b88bcc0..107a9eff587bf 100644 |
143 | --- a/arch/x86/kernel/apic/apic.c |
144 | +++ b/arch/x86/kernel/apic/apic.c |
145 | @@ -42,6 +42,7 @@ |
146 | #include <asm/x86_init.h> |
147 | #include <asm/pgalloc.h> |
148 | #include <linux/atomic.h> |
149 | +#include <asm/barrier.h> |
150 | #include <asm/mpspec.h> |
151 | #include <asm/i8259.h> |
152 | #include <asm/proto.h> |
153 | @@ -476,6 +477,9 @@ static int lapic_next_deadline(unsigned long delta, |
154 | { |
155 | u64 tsc; |
156 | |
157 | + /* This MSR is special and need a special fence: */ |
158 | + weak_wrmsr_fence(); |
159 | + |
160 | tsc = rdtsc(); |
161 | wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR)); |
162 | return 0; |
163 | diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c |
164 | index 200af5ae96626..ca64c150d1c53 100644 |
165 | --- a/arch/x86/kernel/apic/x2apic_cluster.c |
166 | +++ b/arch/x86/kernel/apic/x2apic_cluster.c |
167 | @@ -27,7 +27,8 @@ static void x2apic_send_IPI(int cpu, int vector) |
168 | { |
169 | u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu); |
170 | |
171 | - x2apic_wrmsr_fence(); |
172 | + /* x2apic MSRs are special and need a special fence: */ |
173 | + weak_wrmsr_fence(); |
174 | __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL); |
175 | } |
176 | |
177 | @@ -40,7 +41,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) |
178 | unsigned long flags; |
179 | u32 dest; |
180 | |
181 | - x2apic_wrmsr_fence(); |
182 | + /* x2apic MSRs are special and need a special fence: */ |
183 | + weak_wrmsr_fence(); |
184 | |
185 | local_irq_save(flags); |
186 | |
187 | diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c |
188 | index ff111f05a3145..8889420ea7c6f 100644 |
189 | --- a/arch/x86/kernel/apic/x2apic_phys.c |
190 | +++ b/arch/x86/kernel/apic/x2apic_phys.c |
191 | @@ -40,7 +40,8 @@ static void x2apic_send_IPI(int cpu, int vector) |
192 | { |
193 | u32 dest = per_cpu(x86_cpu_to_apicid, cpu); |
194 | |
195 | - x2apic_wrmsr_fence(); |
196 | + /* x2apic MSRs are special and need a special fence: */ |
197 | + weak_wrmsr_fence(); |
198 | __x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL); |
199 | } |
200 | |
201 | @@ -51,7 +52,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) |
202 | unsigned long this_cpu; |
203 | unsigned long flags; |
204 | |
205 | - x2apic_wrmsr_fence(); |
206 | + /* x2apic MSRs are special and need a special fence: */ |
207 | + weak_wrmsr_fence(); |
208 | |
209 | local_irq_save(flags); |
210 | |
211 | diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c |
212 | index 35e8fbca10ad5..c53c88b531639 100644 |
213 | --- a/drivers/acpi/thermal.c |
214 | +++ b/drivers/acpi/thermal.c |
215 | @@ -188,6 +188,8 @@ struct acpi_thermal { |
216 | int tz_enabled; |
217 | int kelvin_offset; |
218 | struct work_struct thermal_check_work; |
219 | + struct mutex thermal_check_lock; |
220 | + atomic_t thermal_check_count; |
221 | }; |
222 | |
223 | /* -------------------------------------------------------------------------- |
224 | @@ -513,17 +515,6 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz) |
225 | return 0; |
226 | } |
227 | |
228 | -static void acpi_thermal_check(void *data) |
229 | -{ |
230 | - struct acpi_thermal *tz = data; |
231 | - |
232 | - if (!tz->tz_enabled) |
233 | - return; |
234 | - |
235 | - thermal_zone_device_update(tz->thermal_zone, |
236 | - THERMAL_EVENT_UNSPECIFIED); |
237 | -} |
238 | - |
239 | /* sys I/F for generic thermal sysfs support */ |
240 | |
241 | static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp) |
242 | @@ -557,6 +548,8 @@ static int thermal_get_mode(struct thermal_zone_device *thermal, |
243 | return 0; |
244 | } |
245 | |
246 | +static void acpi_thermal_check_fn(struct work_struct *work); |
247 | + |
248 | static int thermal_set_mode(struct thermal_zone_device *thermal, |
249 | enum thermal_device_mode mode) |
250 | { |
251 | @@ -582,7 +575,7 @@ static int thermal_set_mode(struct thermal_zone_device *thermal, |
252 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
253 | "%s kernel ACPI thermal control\n", |
254 | tz->tz_enabled ? "Enable" : "Disable")); |
255 | - acpi_thermal_check(tz); |
256 | + acpi_thermal_check_fn(&tz->thermal_check_work); |
257 | } |
258 | return 0; |
259 | } |
260 | @@ -951,6 +944,12 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz) |
261 | Driver Interface |
262 | -------------------------------------------------------------------------- */ |
263 | |
264 | +static void acpi_queue_thermal_check(struct acpi_thermal *tz) |
265 | +{ |
266 | + if (!work_pending(&tz->thermal_check_work)) |
267 | + queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work); |
268 | +} |
269 | + |
270 | static void acpi_thermal_notify(struct acpi_device *device, u32 event) |
271 | { |
272 | struct acpi_thermal *tz = acpi_driver_data(device); |
273 | @@ -961,17 +960,17 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event) |
274 | |
275 | switch (event) { |
276 | case ACPI_THERMAL_NOTIFY_TEMPERATURE: |
277 | - acpi_thermal_check(tz); |
278 | + acpi_queue_thermal_check(tz); |
279 | break; |
280 | case ACPI_THERMAL_NOTIFY_THRESHOLDS: |
281 | acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS); |
282 | - acpi_thermal_check(tz); |
283 | + acpi_queue_thermal_check(tz); |
284 | acpi_bus_generate_netlink_event(device->pnp.device_class, |
285 | dev_name(&device->dev), event, 0); |
286 | break; |
287 | case ACPI_THERMAL_NOTIFY_DEVICES: |
288 | acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES); |
289 | - acpi_thermal_check(tz); |
290 | + acpi_queue_thermal_check(tz); |
291 | acpi_bus_generate_netlink_event(device->pnp.device_class, |
292 | dev_name(&device->dev), event, 0); |
293 | break; |
294 | @@ -1071,7 +1070,27 @@ static void acpi_thermal_check_fn(struct work_struct *work) |
295 | { |
296 | struct acpi_thermal *tz = container_of(work, struct acpi_thermal, |
297 | thermal_check_work); |
298 | - acpi_thermal_check(tz); |
299 | + |
300 | + if (!tz->tz_enabled) |
301 | + return; |
302 | + /* |
303 | + * In general, it is not sufficient to check the pending bit, because |
304 | + * subsequent instances of this function may be queued after one of them |
305 | + * has started running (e.g. if _TMP sleeps). Avoid bailing out if just |
306 | + * one of them is running, though, because it may have done the actual |
307 | + * check some time ago, so allow at least one of them to block on the |
308 | + * mutex while another one is running the update. |
309 | + */ |
310 | + if (!atomic_add_unless(&tz->thermal_check_count, -1, 1)) |
311 | + return; |
312 | + |
313 | + mutex_lock(&tz->thermal_check_lock); |
314 | + |
315 | + thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED); |
316 | + |
317 | + atomic_inc(&tz->thermal_check_count); |
318 | + |
319 | + mutex_unlock(&tz->thermal_check_lock); |
320 | } |
321 | |
322 | static int acpi_thermal_add(struct acpi_device *device) |
323 | @@ -1103,6 +1122,8 @@ static int acpi_thermal_add(struct acpi_device *device) |
324 | if (result) |
325 | goto free_memory; |
326 | |
327 | + atomic_set(&tz->thermal_check_count, 3); |
328 | + mutex_init(&tz->thermal_check_lock); |
329 | INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn); |
330 | |
331 | pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device), |
332 | @@ -1168,7 +1189,7 @@ static int acpi_thermal_resume(struct device *dev) |
333 | tz->state.active |= tz->trips.active[i].flags.enabled; |
334 | } |
335 | |
336 | - queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work); |
337 | + acpi_queue_thermal_check(tz); |
338 | |
339 | return AE_OK; |
340 | } |
341 | diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c |
342 | index 637f1347cd13d..815b69d35722c 100644 |
343 | --- a/drivers/input/joystick/xpad.c |
344 | +++ b/drivers/input/joystick/xpad.c |
345 | @@ -232,9 +232,17 @@ static const struct xpad_device { |
346 | { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, |
347 | { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, |
348 | { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, |
349 | - { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, |
350 | + { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, |
351 | + { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, |
352 | + { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE }, |
353 | { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, |
354 | { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE }, |
355 | + { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, |
356 | + { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, |
357 | + { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, |
358 | + { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, |
359 | + { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, |
360 | + { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, |
361 | { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, |
362 | { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, |
363 | { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, |
364 | @@ -313,6 +321,9 @@ static const struct xpad_device { |
365 | { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 }, |
366 | { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 }, |
367 | { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 }, |
368 | + { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE }, |
369 | + { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 }, |
370 | + { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE }, |
371 | { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, |
372 | { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, |
373 | { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, |
374 | @@ -446,8 +457,12 @@ static const struct usb_device_id xpad_table[] = { |
375 | XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ |
376 | XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ |
377 | XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */ |
378 | + XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */ |
379 | + XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */ |
380 | XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */ |
381 | XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */ |
382 | + XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */ |
383 | + XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */ |
384 | { } |
385 | }; |
386 | |
387 | diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h |
388 | index fa07be0b4500e..2317f8d3fef6f 100644 |
389 | --- a/drivers/input/serio/i8042-x86ia64io.h |
390 | +++ b/drivers/input/serio/i8042-x86ia64io.h |
391 | @@ -223,6 +223,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { |
392 | DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"), |
393 | DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), |
394 | }, |
395 | + }, |
396 | + { |
397 | .matches = { |
398 | DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"), |
399 | DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"), |
400 | diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c |
401 | index 593a4bfcba428..b2061cfa05ab4 100644 |
402 | --- a/drivers/iommu/intel-iommu.c |
403 | +++ b/drivers/iommu/intel-iommu.c |
404 | @@ -3323,6 +3323,12 @@ static int __init init_dmars(void) |
405 | |
406 | if (!ecap_pass_through(iommu->ecap)) |
407 | hw_pass_through = 0; |
408 | + |
409 | + if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) { |
410 | + pr_info("Disable batched IOTLB flush due to virtualization"); |
411 | + intel_iommu_strict = 1; |
412 | + } |
413 | + |
414 | #ifdef CONFIG_INTEL_IOMMU_SVM |
415 | if (pasid_enabled(iommu)) |
416 | intel_svm_alloc_pasid_tables(iommu); |
417 | diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c |
418 | index 934c4816d78bf..14c56cf66ddc6 100644 |
419 | --- a/drivers/mmc/core/sdio_cis.c |
420 | +++ b/drivers/mmc/core/sdio_cis.c |
421 | @@ -24,6 +24,8 @@ |
422 | #include "sdio_cis.h" |
423 | #include "sdio_ops.h" |
424 | |
425 | +#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */ |
426 | + |
427 | static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func, |
428 | const unsigned char *buf, unsigned size) |
429 | { |
430 | @@ -269,6 +271,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) |
431 | |
432 | do { |
433 | unsigned char tpl_code, tpl_link; |
434 | + unsigned long timeout = jiffies + |
435 | + msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS); |
436 | |
437 | ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code); |
438 | if (ret) |
439 | @@ -321,6 +325,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) |
440 | prev = &this->next; |
441 | |
442 | if (ret == -ENOENT) { |
443 | + if (time_after(jiffies, timeout)) |
444 | + break; |
445 | /* warn about unknown tuples */ |
446 | pr_warn_ratelimited("%s: queuing unknown" |
447 | " CIS tuple 0x%02x (%u bytes)\n", |
448 | diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c |
449 | index 84def5819d2ec..a3742a3b413cd 100644 |
450 | --- a/drivers/net/dsa/bcm_sf2.c |
451 | +++ b/drivers/net/dsa/bcm_sf2.c |
452 | @@ -515,15 +515,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) |
453 | /* Find our integrated MDIO bus node */ |
454 | dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio"); |
455 | priv->master_mii_bus = of_mdio_find_bus(dn); |
456 | - if (!priv->master_mii_bus) |
457 | + if (!priv->master_mii_bus) { |
458 | + of_node_put(dn); |
459 | return -EPROBE_DEFER; |
460 | + } |
461 | |
462 | get_device(&priv->master_mii_bus->dev); |
463 | priv->master_mii_dn = dn; |
464 | |
465 | priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev); |
466 | - if (!priv->slave_mii_bus) |
467 | + if (!priv->slave_mii_bus) { |
468 | + of_node_put(dn); |
469 | return -ENOMEM; |
470 | + } |
471 | |
472 | priv->slave_mii_bus->priv = priv; |
473 | priv->slave_mii_bus->name = "sf2 slave mii"; |
474 | diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c |
475 | index a8ff4f8a6b87d..f23559a2b2bd1 100644 |
476 | --- a/drivers/net/ethernet/ibm/ibmvnic.c |
477 | +++ b/drivers/net/ethernet/ibm/ibmvnic.c |
478 | @@ -3496,6 +3496,12 @@ static irqreturn_t ibmvnic_interrupt(int irq, void *instance) |
479 | while (!done) { |
480 | /* Pull all the valid messages off the CRQ */ |
481 | while ((crq = ibmvnic_next_crq(adapter)) != NULL) { |
482 | + /* This barrier makes sure ibmvnic_next_crq()'s |
483 | + * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded |
484 | + * before ibmvnic_handle_crq()'s |
485 | + * switch(gen_crq->first) and switch(gen_crq->cmd). |
486 | + */ |
487 | + dma_rmb(); |
488 | ibmvnic_handle_crq(crq, adapter); |
489 | crq->generic.first = 0; |
490 | } |
491 | diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c |
492 | index 04b3ac17531db..7865feb8e5e83 100644 |
493 | --- a/drivers/scsi/ibmvscsi/ibmvfc.c |
494 | +++ b/drivers/scsi/ibmvscsi/ibmvfc.c |
495 | @@ -2891,8 +2891,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev) |
496 | unsigned long flags = 0; |
497 | |
498 | spin_lock_irqsave(shost->host_lock, flags); |
499 | - if (sdev->type == TYPE_DISK) |
500 | + if (sdev->type == TYPE_DISK) { |
501 | sdev->allow_restart = 1; |
502 | + blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); |
503 | + } |
504 | spin_unlock_irqrestore(shost->host_lock, flags); |
505 | return 0; |
506 | } |
507 | diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c |
508 | index d0a86ef806522..59fd6101f188b 100644 |
509 | --- a/drivers/scsi/libfc/fc_exch.c |
510 | +++ b/drivers/scsi/libfc/fc_exch.c |
511 | @@ -1585,8 +1585,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) |
512 | rc = fc_exch_done_locked(ep); |
513 | WARN_ON(fc_seq_exch(sp) != ep); |
514 | spin_unlock_bh(&ep->ex_lock); |
515 | - if (!rc) |
516 | + if (!rc) { |
517 | fc_exch_delete(ep); |
518 | + } else { |
519 | + FC_EXCH_DBG(ep, "ep is completed already," |
520 | + "hence skip calling the resp\n"); |
521 | + goto skip_resp; |
522 | + } |
523 | } |
524 | |
525 | /* |
526 | @@ -1605,6 +1610,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) |
527 | if (!fc_invoke_resp(ep, sp, fp)) |
528 | fc_frame_free(fp); |
529 | |
530 | +skip_resp: |
531 | fc_exch_release(ep); |
532 | return; |
533 | rel: |
534 | @@ -1848,10 +1854,16 @@ static void fc_exch_reset(struct fc_exch *ep) |
535 | |
536 | fc_exch_hold(ep); |
537 | |
538 | - if (!rc) |
539 | + if (!rc) { |
540 | fc_exch_delete(ep); |
541 | + } else { |
542 | + FC_EXCH_DBG(ep, "ep is completed already," |
543 | + "hence skip calling the resp\n"); |
544 | + goto skip_resp; |
545 | + } |
546 | |
547 | fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED)); |
548 | +skip_resp: |
549 | fc_seq_set_resp(sp, NULL, ep->arg); |
550 | fc_exch_release(ep); |
551 | } |
552 | diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c |
553 | index 76701d6ce92c3..582099f4f449f 100644 |
554 | --- a/drivers/usb/class/usblp.c |
555 | +++ b/drivers/usb/class/usblp.c |
556 | @@ -1349,14 +1349,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol) |
557 | if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL) |
558 | return -EINVAL; |
559 | |
560 | - alts = usblp->protocol[protocol].alt_setting; |
561 | - if (alts < 0) |
562 | - return -EINVAL; |
563 | - r = usb_set_interface(usblp->dev, usblp->ifnum, alts); |
564 | - if (r < 0) { |
565 | - printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", |
566 | - alts, usblp->ifnum); |
567 | - return r; |
568 | + /* Don't unnecessarily set the interface if there's a single alt. */ |
569 | + if (usblp->intf->num_altsetting > 1) { |
570 | + alts = usblp->protocol[protocol].alt_setting; |
571 | + if (alts < 0) |
572 | + return -EINVAL; |
573 | + r = usb_set_interface(usblp->dev, usblp->ifnum, alts); |
574 | + if (r < 0) { |
575 | + printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", |
576 | + alts, usblp->ifnum); |
577 | + return r; |
578 | + } |
579 | } |
580 | |
581 | usblp->bidir = (usblp->protocol[protocol].epread != NULL); |
582 | diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c |
583 | index 9381a108a9851..6bde5db1490a1 100644 |
584 | --- a/drivers/usb/dwc2/gadget.c |
585 | +++ b/drivers/usb/dwc2/gadget.c |
586 | @@ -942,7 +942,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep, |
587 | static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, |
588 | u32 windex) |
589 | { |
590 | - struct dwc2_hsotg_ep *ep; |
591 | int dir = (windex & USB_DIR_IN) ? 1 : 0; |
592 | int idx = windex & 0x7F; |
593 | |
594 | @@ -952,12 +951,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, |
595 | if (idx > hsotg->num_of_eps) |
596 | return NULL; |
597 | |
598 | - ep = index_to_ep(hsotg, idx, dir); |
599 | - |
600 | - if (idx && ep->dir_in != dir) |
601 | - return NULL; |
602 | - |
603 | - return ep; |
604 | + return index_to_ep(hsotg, idx, dir); |
605 | } |
606 | |
607 | /** |
608 | diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c |
609 | index 25a2c2e485920..3396e7193dba2 100644 |
610 | --- a/drivers/usb/gadget/legacy/ether.c |
611 | +++ b/drivers/usb/gadget/legacy/ether.c |
612 | @@ -407,8 +407,10 @@ static int eth_bind(struct usb_composite_dev *cdev) |
613 | struct usb_descriptor_header *usb_desc; |
614 | |
615 | usb_desc = usb_otg_descriptor_alloc(gadget); |
616 | - if (!usb_desc) |
617 | + if (!usb_desc) { |
618 | + status = -ENOMEM; |
619 | goto fail1; |
620 | + } |
621 | usb_otg_descriptor_init(gadget, usb_desc); |
622 | otg_desc[0] = usb_desc; |
623 | otg_desc[1] = NULL; |
624 | diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c |
625 | index 6e1d65fda97e0..69dbfffde4df0 100644 |
626 | --- a/drivers/usb/host/xhci-ring.c |
627 | +++ b/drivers/usb/host/xhci-ring.c |
628 | @@ -692,11 +692,16 @@ void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring, |
629 | dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, |
630 | DMA_FROM_DEVICE); |
631 | /* for in tranfers we need to copy the data from bounce to sg */ |
632 | - len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, |
633 | - seg->bounce_len, seg->bounce_offs); |
634 | - if (len != seg->bounce_len) |
635 | - xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", |
636 | - len, seg->bounce_len); |
637 | + if (urb->num_sgs) { |
638 | + len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, |
639 | + seg->bounce_len, seg->bounce_offs); |
640 | + if (len != seg->bounce_len) |
641 | + xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", |
642 | + len, seg->bounce_len); |
643 | + } else { |
644 | + memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf, |
645 | + seg->bounce_len); |
646 | + } |
647 | seg->bounce_len = 0; |
648 | seg->bounce_offs = 0; |
649 | } |
650 | @@ -3196,12 +3201,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, |
651 | |
652 | /* create a max max_pkt sized bounce buffer pointed to by last trb */ |
653 | if (usb_urb_dir_out(urb)) { |
654 | - len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, |
655 | - seg->bounce_buf, new_buff_len, enqd_len); |
656 | - if (len != new_buff_len) |
657 | - xhci_warn(xhci, |
658 | - "WARN Wrong bounce buffer write length: %zu != %d\n", |
659 | - len, new_buff_len); |
660 | + if (urb->num_sgs) { |
661 | + len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, |
662 | + seg->bounce_buf, new_buff_len, enqd_len); |
663 | + if (len != new_buff_len) |
664 | + xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n", |
665 | + len, new_buff_len); |
666 | + } else { |
667 | + memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len); |
668 | + } |
669 | + |
670 | seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, |
671 | max_pkt, DMA_TO_DEVICE); |
672 | } else { |
673 | diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c |
674 | index abf8a3cac2651..1847074b4d819 100644 |
675 | --- a/drivers/usb/serial/cp210x.c |
676 | +++ b/drivers/usb/serial/cp210x.c |
677 | @@ -58,6 +58,7 @@ static const struct usb_device_id id_table[] = { |
678 | { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ |
679 | { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ |
680 | { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */ |
681 | + { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */ |
682 | { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */ |
683 | { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ |
684 | { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ |
685 | @@ -198,6 +199,7 @@ static const struct usb_device_id id_table[] = { |
686 | { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ |
687 | { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ |
688 | { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ |
689 | + { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */ |
690 | { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ |
691 | { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ |
692 | { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ |
693 | diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
694 | index 1998b314368e0..3c536eed07541 100644 |
695 | --- a/drivers/usb/serial/option.c |
696 | +++ b/drivers/usb/serial/option.c |
697 | @@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb); |
698 | #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084 |
699 | #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085 |
700 | #define CINTERION_PRODUCT_CLS8 0x00b0 |
701 | +#define CINTERION_PRODUCT_MV31_MBIM 0x00b3 |
702 | +#define CINTERION_PRODUCT_MV31_RMNET 0x00b7 |
703 | |
704 | /* Olivetti products */ |
705 | #define OLIVETTI_VENDOR_ID 0x0b3c |
706 | @@ -1896,6 +1898,10 @@ static const struct usb_device_id option_ids[] = { |
707 | { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, |
708 | { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ |
709 | { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, |
710 | + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff), |
711 | + .driver_info = RSVD(3)}, |
712 | + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff), |
713 | + .driver_info = RSVD(0)}, |
714 | { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), |
715 | .driver_info = RSVD(4) }, |
716 | { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120), |
717 | diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c |
718 | index 0262c8f7e7c76..09f49dab77393 100644 |
719 | --- a/fs/cifs/dir.c |
720 | +++ b/fs/cifs/dir.c |
721 | @@ -830,6 +830,7 @@ static int |
722 | cifs_d_revalidate(struct dentry *direntry, unsigned int flags) |
723 | { |
724 | struct inode *inode; |
725 | + int rc; |
726 | |
727 | if (flags & LOOKUP_RCU) |
728 | return -ECHILD; |
729 | @@ -839,8 +840,25 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags) |
730 | if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode))) |
731 | CIFS_I(inode)->time = 0; /* force reval */ |
732 | |
733 | - if (cifs_revalidate_dentry(direntry)) |
734 | - return 0; |
735 | + rc = cifs_revalidate_dentry(direntry); |
736 | + if (rc) { |
737 | + cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc); |
738 | + switch (rc) { |
739 | + case -ENOENT: |
740 | + case -ESTALE: |
741 | + /* |
742 | + * Those errors mean the dentry is invalid |
743 | + * (file was deleted or recreated) |
744 | + */ |
745 | + return 0; |
746 | + default: |
747 | + /* |
748 | + * Otherwise some unexpected error happened |
749 | + * report it as-is to VFS layer |
750 | + */ |
751 | + return rc; |
752 | + } |
753 | + } |
754 | else { |
755 | /* |
756 | * If the inode wasn't known to be a dfs entry when |
757 | diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c |
758 | index 253b03451b727..ba1909b887efb 100644 |
759 | --- a/fs/hugetlbfs/inode.c |
760 | +++ b/fs/hugetlbfs/inode.c |
761 | @@ -665,8 +665,9 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, |
762 | |
763 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); |
764 | |
765 | + set_page_huge_active(page); |
766 | /* |
767 | - * page_put due to reference from alloc_huge_page() |
768 | + * put_page() due to reference from alloc_huge_page() |
769 | * unlock_page because locked by add_to_page_cache() |
770 | */ |
771 | put_page(page); |
772 | diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h |
773 | index 698d51a0eea3f..4adf7faeaeb59 100644 |
774 | --- a/include/linux/elfcore.h |
775 | +++ b/include/linux/elfcore.h |
776 | @@ -55,6 +55,7 @@ static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregse |
777 | } |
778 | #endif |
779 | |
780 | +#if defined(CONFIG_UM) || defined(CONFIG_IA64) |
781 | /* |
782 | * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out |
783 | * extra segments containing the gate DSO contents. Dumping its |
784 | @@ -69,5 +70,26 @@ elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset); |
785 | extern int |
786 | elf_core_write_extra_data(struct coredump_params *cprm); |
787 | extern size_t elf_core_extra_data_size(void); |
788 | +#else |
789 | +static inline Elf_Half elf_core_extra_phdrs(void) |
790 | +{ |
791 | + return 0; |
792 | +} |
793 | + |
794 | +static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) |
795 | +{ |
796 | + return 1; |
797 | +} |
798 | + |
799 | +static inline int elf_core_write_extra_data(struct coredump_params *cprm) |
800 | +{ |
801 | + return 1; |
802 | +} |
803 | + |
804 | +static inline size_t elf_core_extra_data_size(void) |
805 | +{ |
806 | + return 0; |
807 | +} |
808 | +#endif |
809 | |
810 | #endif /* _LINUX_ELFCORE_H */ |
811 | diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h |
812 | index 6b8a7b654771a..4e4c35a6bfc5a 100644 |
813 | --- a/include/linux/hugetlb.h |
814 | +++ b/include/linux/hugetlb.h |
815 | @@ -502,6 +502,9 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm) |
816 | { |
817 | atomic_long_sub(l, &mm->hugetlb_usage); |
818 | } |
819 | + |
820 | +void set_page_huge_active(struct page *page); |
821 | + |
822 | #else /* CONFIG_HUGETLB_PAGE */ |
823 | struct hstate {}; |
824 | #define alloc_huge_page(v, a, r) NULL |
825 | diff --git a/kernel/Makefile b/kernel/Makefile |
826 | index 92488cf6ad913..6c4a28cf680e4 100644 |
827 | --- a/kernel/Makefile |
828 | +++ b/kernel/Makefile |
829 | @@ -90,7 +90,6 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o |
830 | obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o |
831 | obj-$(CONFIG_TRACEPOINTS) += tracepoint.o |
832 | obj-$(CONFIG_LATENCYTOP) += latencytop.o |
833 | -obj-$(CONFIG_ELFCORE) += elfcore.o |
834 | obj-$(CONFIG_FUNCTION_TRACER) += trace/ |
835 | obj-$(CONFIG_TRACING) += trace/ |
836 | obj-$(CONFIG_TRACE_CLOCK) += trace/ |
837 | diff --git a/kernel/elfcore.c b/kernel/elfcore.c |
838 | deleted file mode 100644 |
839 | index a2b29b9bdfcb2..0000000000000 |
840 | --- a/kernel/elfcore.c |
841 | +++ /dev/null |
842 | @@ -1,25 +0,0 @@ |
843 | -#include <linux/elf.h> |
844 | -#include <linux/fs.h> |
845 | -#include <linux/mm.h> |
846 | -#include <linux/binfmts.h> |
847 | -#include <linux/elfcore.h> |
848 | - |
849 | -Elf_Half __weak elf_core_extra_phdrs(void) |
850 | -{ |
851 | - return 0; |
852 | -} |
853 | - |
854 | -int __weak elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) |
855 | -{ |
856 | - return 1; |
857 | -} |
858 | - |
859 | -int __weak elf_core_write_extra_data(struct coredump_params *cprm) |
860 | -{ |
861 | - return 1; |
862 | -} |
863 | - |
864 | -size_t __weak elf_core_extra_data_size(void) |
865 | -{ |
866 | - return 0; |
867 | -} |
868 | diff --git a/kernel/futex.c b/kernel/futex.c |
869 | index 2ef8c5aef35d0..83db5787c67ef 100644 |
870 | --- a/kernel/futex.c |
871 | +++ b/kernel/futex.c |
872 | @@ -837,6 +837,29 @@ static struct futex_pi_state * alloc_pi_state(void) |
873 | return pi_state; |
874 | } |
875 | |
876 | +static void pi_state_update_owner(struct futex_pi_state *pi_state, |
877 | + struct task_struct *new_owner) |
878 | +{ |
879 | + struct task_struct *old_owner = pi_state->owner; |
880 | + |
881 | + lockdep_assert_held(&pi_state->pi_mutex.wait_lock); |
882 | + |
883 | + if (old_owner) { |
884 | + raw_spin_lock(&old_owner->pi_lock); |
885 | + WARN_ON(list_empty(&pi_state->list)); |
886 | + list_del_init(&pi_state->list); |
887 | + raw_spin_unlock(&old_owner->pi_lock); |
888 | + } |
889 | + |
890 | + if (new_owner) { |
891 | + raw_spin_lock(&new_owner->pi_lock); |
892 | + WARN_ON(!list_empty(&pi_state->list)); |
893 | + list_add(&pi_state->list, &new_owner->pi_state_list); |
894 | + pi_state->owner = new_owner; |
895 | + raw_spin_unlock(&new_owner->pi_lock); |
896 | + } |
897 | +} |
898 | + |
899 | /* |
900 | * Drops a reference to the pi_state object and frees or caches it |
901 | * when the last reference is gone. |
902 | @@ -856,11 +879,8 @@ static void put_pi_state(struct futex_pi_state *pi_state) |
903 | * and has cleaned up the pi_state already |
904 | */ |
905 | if (pi_state->owner) { |
906 | - raw_spin_lock_irq(&pi_state->owner->pi_lock); |
907 | - list_del_init(&pi_state->list); |
908 | - raw_spin_unlock_irq(&pi_state->owner->pi_lock); |
909 | - |
910 | - rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner); |
911 | + pi_state_update_owner(pi_state, NULL); |
912 | + rt_mutex_proxy_unlock(&pi_state->pi_mutex); |
913 | } |
914 | |
915 | if (current->pi_state_cache) |
916 | @@ -941,7 +961,7 @@ static void exit_pi_state_list(struct task_struct *curr) |
917 | pi_state->owner = NULL; |
918 | raw_spin_unlock_irq(&curr->pi_lock); |
919 | |
920 | - rt_mutex_unlock(&pi_state->pi_mutex); |
921 | + rt_mutex_futex_unlock(&pi_state->pi_mutex); |
922 | |
923 | spin_unlock(&hb->lock); |
924 | |
925 | @@ -997,7 +1017,8 @@ static void exit_pi_state_list(struct task_struct *curr) |
926 | * FUTEX_OWNER_DIED bit. See [4] |
927 | * |
928 | * [10] There is no transient state which leaves owner and user space |
929 | - * TID out of sync. |
930 | + * TID out of sync. Except one error case where the kernel is denied |
931 | + * write access to the user address, see fixup_pi_state_owner(). |
932 | */ |
933 | |
934 | /* |
935 | @@ -1394,12 +1415,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this, |
936 | new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); |
937 | |
938 | /* |
939 | - * It is possible that the next waiter (the one that brought |
940 | - * this owner to the kernel) timed out and is no longer |
941 | - * waiting on the lock. |
942 | + * When we interleave with futex_lock_pi() where it does |
943 | + * rt_mutex_timed_futex_lock(), we might observe @this futex_q waiter, |
944 | + * but the rt_mutex's wait_list can be empty (either still, or again, |
945 | + * depending on which side we land). |
946 | + * |
947 | + * When this happens, give up our locks and try again, giving the |
948 | + * futex_lock_pi() instance time to complete, either by waiting on the |
949 | + * rtmutex or removing itself from the futex queue. |
950 | */ |
951 | - if (!new_owner) |
952 | - new_owner = this->task; |
953 | + if (!new_owner) { |
954 | + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
955 | + return -EAGAIN; |
956 | + } |
957 | |
958 | /* |
959 | * We pass it to the next owner. The WAITERS bit is always |
960 | @@ -1425,36 +1453,24 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this, |
961 | else |
962 | ret = -EINVAL; |
963 | } |
964 | - if (ret) { |
965 | - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
966 | - return ret; |
967 | - } |
968 | - |
969 | - raw_spin_lock(&pi_state->owner->pi_lock); |
970 | - WARN_ON(list_empty(&pi_state->list)); |
971 | - list_del_init(&pi_state->list); |
972 | - raw_spin_unlock(&pi_state->owner->pi_lock); |
973 | |
974 | - raw_spin_lock(&new_owner->pi_lock); |
975 | - WARN_ON(!list_empty(&pi_state->list)); |
976 | - list_add(&pi_state->list, &new_owner->pi_state_list); |
977 | - pi_state->owner = new_owner; |
978 | - raw_spin_unlock(&new_owner->pi_lock); |
979 | + if (!ret) { |
980 | + /* |
981 | + * This is a point of no return; once we modified the uval |
982 | + * there is no going back and subsequent operations must |
983 | + * not fail. |
984 | + */ |
985 | + pi_state_update_owner(pi_state, new_owner); |
986 | + deboost = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); |
987 | + } |
988 | |
989 | raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
990 | - |
991 | - deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); |
992 | - |
993 | - /* |
994 | - * First unlock HB so the waiter does not spin on it once he got woken |
995 | - * up. Second wake up the waiter before the priority is adjusted. If we |
996 | - * deboost first (and lose our higher priority), then the task might get |
997 | - * scheduled away before the wake up can take place. |
998 | - */ |
999 | spin_unlock(&hb->lock); |
1000 | - wake_up_q(&wake_q); |
1001 | - if (deboost) |
1002 | + |
1003 | + if (deboost) { |
1004 | + wake_up_q(&wake_q); |
1005 | rt_mutex_adjust_prio(current); |
1006 | + } |
1007 | |
1008 | return 0; |
1009 | } |
1010 | @@ -2257,30 +2273,32 @@ static void unqueue_me_pi(struct futex_q *q) |
1011 | spin_unlock(q->lock_ptr); |
1012 | } |
1013 | |
1014 | -/* |
1015 | - * Fixup the pi_state owner with the new owner. |
1016 | - * |
1017 | - * Must be called with hash bucket lock held and mm->sem held for non |
1018 | - * private futexes. |
1019 | - */ |
1020 | -static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, |
1021 | - struct task_struct *newowner) |
1022 | +static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, |
1023 | + struct task_struct *argowner) |
1024 | { |
1025 | - u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; |
1026 | struct futex_pi_state *pi_state = q->pi_state; |
1027 | - struct task_struct *oldowner = pi_state->owner; |
1028 | - u32 uval, uninitialized_var(curval), newval; |
1029 | - int ret; |
1030 | + struct task_struct *oldowner, *newowner; |
1031 | + u32 uval, curval, newval, newtid; |
1032 | + int err = 0; |
1033 | + |
1034 | + oldowner = pi_state->owner; |
1035 | |
1036 | /* Owner died? */ |
1037 | if (!pi_state->owner) |
1038 | newtid |= FUTEX_OWNER_DIED; |
1039 | |
1040 | /* |
1041 | - * We are here either because we stole the rtmutex from the |
1042 | - * previous highest priority waiter or we are the highest priority |
1043 | - * waiter but failed to get the rtmutex the first time. |
1044 | - * We have to replace the newowner TID in the user space variable. |
1045 | + * We are here because either: |
1046 | + * |
1047 | + * - we stole the lock and pi_state->owner needs updating to reflect |
1048 | + * that (@argowner == current), |
1049 | + * |
1050 | + * or: |
1051 | + * |
1052 | + * - someone stole our lock and we need to fix things to point to the |
1053 | + * new owner (@argowner == NULL). |
1054 | + * |
1055 | + * Either way, we have to replace the TID in the user space variable. |
1056 | * This must be atomic as we have to preserve the owner died bit here. |
1057 | * |
1058 | * Note: We write the user space value _before_ changing the pi_state |
1059 | @@ -2294,6 +2312,39 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, |
1060 | * in lookup_pi_state. |
1061 | */ |
1062 | retry: |
1063 | + if (!argowner) { |
1064 | + if (oldowner != current) { |
1065 | + /* |
1066 | + * We raced against a concurrent self; things are |
1067 | + * already fixed up. Nothing to do. |
1068 | + */ |
1069 | + return 0; |
1070 | + } |
1071 | + |
1072 | + if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) { |
1073 | + /* We got the lock after all, nothing to fix. */ |
1074 | + return 1; |
1075 | + } |
1076 | + |
1077 | + /* |
1078 | + * Since we just failed the trylock; there must be an owner. |
1079 | + */ |
1080 | + newowner = rt_mutex_owner(&pi_state->pi_mutex); |
1081 | + BUG_ON(!newowner); |
1082 | + } else { |
1083 | + WARN_ON_ONCE(argowner != current); |
1084 | + if (oldowner == current) { |
1085 | + /* |
1086 | + * We raced against a concurrent self; things are |
1087 | + * already fixed up. Nothing to do. |
1088 | + */ |
1089 | + return 1; |
1090 | + } |
1091 | + newowner = argowner; |
1092 | + } |
1093 | + |
1094 | + newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; |
1095 | + |
1096 | if (get_futex_value_locked(&uval, uaddr)) |
1097 | goto handle_fault; |
1098 | |
1099 | @@ -2311,19 +2362,8 @@ retry: |
1100 | * We fixed up user space. Now we need to fix the pi_state |
1101 | * itself. |
1102 | */ |
1103 | - if (pi_state->owner != NULL) { |
1104 | - raw_spin_lock_irq(&pi_state->owner->pi_lock); |
1105 | - WARN_ON(list_empty(&pi_state->list)); |
1106 | - list_del_init(&pi_state->list); |
1107 | - raw_spin_unlock_irq(&pi_state->owner->pi_lock); |
1108 | - } |
1109 | - |
1110 | - pi_state->owner = newowner; |
1111 | + pi_state_update_owner(pi_state, newowner); |
1112 | |
1113 | - raw_spin_lock_irq(&newowner->pi_lock); |
1114 | - WARN_ON(!list_empty(&pi_state->list)); |
1115 | - list_add(&pi_state->list, &newowner->pi_state_list); |
1116 | - raw_spin_unlock_irq(&newowner->pi_lock); |
1117 | return 0; |
1118 | |
1119 | /* |
1120 | @@ -2339,7 +2379,7 @@ retry: |
1121 | handle_fault: |
1122 | spin_unlock(q->lock_ptr); |
1123 | |
1124 | - ret = fault_in_user_writeable(uaddr); |
1125 | + err = fault_in_user_writeable(uaddr); |
1126 | |
1127 | spin_lock(q->lock_ptr); |
1128 | |
1129 | @@ -2347,12 +2387,45 @@ handle_fault: |
1130 | * Check if someone else fixed it for us: |
1131 | */ |
1132 | if (pi_state->owner != oldowner) |
1133 | - return 0; |
1134 | + return argowner == current; |
1135 | |
1136 | - if (ret) |
1137 | - return ret; |
1138 | + /* Retry if err was -EAGAIN or the fault in succeeded */ |
1139 | + if (!err) |
1140 | + goto retry; |
1141 | |
1142 | - goto retry; |
1143 | + /* |
1144 | + * fault_in_user_writeable() failed so user state is immutable. At |
1145 | + * best we can make the kernel state consistent but user state will |
1146 | + * be most likely hosed and any subsequent unlock operation will be |
1147 | + * rejected due to PI futex rule [10]. |
1148 | + * |
1149 | + * Ensure that the rtmutex owner is also the pi_state owner despite |
1150 | + * the user space value claiming something different. There is no |
1151 | + * point in unlocking the rtmutex if current is the owner as it |
1152 | + * would need to wait until the next waiter has taken the rtmutex |
1153 | + * to guarantee consistent state. Keep it simple. Userspace asked |
1154 | + * for this wreckaged state. |
1155 | + * |
1156 | + * The rtmutex has an owner - either current or some other |
1157 | + * task. See the EAGAIN loop above. |
1158 | + */ |
1159 | + pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex)); |
1160 | + |
1161 | + return err; |
1162 | +} |
1163 | + |
1164 | +static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, |
1165 | + struct task_struct *argowner) |
1166 | +{ |
1167 | + struct futex_pi_state *pi_state = q->pi_state; |
1168 | + int ret; |
1169 | + |
1170 | + lockdep_assert_held(q->lock_ptr); |
1171 | + |
1172 | + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
1173 | + ret = __fixup_pi_state_owner(uaddr, q, argowner); |
1174 | + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
1175 | + return ret; |
1176 | } |
1177 | |
1178 | static long futex_wait_restart(struct restart_block *restart); |
1179 | @@ -2374,13 +2447,16 @@ static long futex_wait_restart(struct restart_block *restart); |
1180 | */ |
1181 | static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) |
1182 | { |
1183 | - struct task_struct *owner; |
1184 | int ret = 0; |
1185 | |
1186 | if (locked) { |
1187 | /* |
1188 | * Got the lock. We might not be the anticipated owner if we |
1189 | * did a lock-steal - fix up the PI-state in that case: |
1190 | + * |
1191 | + * Speculative pi_state->owner read (we don't hold wait_lock); |
1192 | + * since we own the lock pi_state->owner == current is the |
1193 | + * stable state, anything else needs more attention. |
1194 | */ |
1195 | if (q->pi_state->owner != current) |
1196 | ret = fixup_pi_state_owner(uaddr, q, current); |
1197 | @@ -2388,43 +2464,24 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) |
1198 | } |
1199 | |
1200 | /* |
1201 | - * Catch the rare case, where the lock was released when we were on the |
1202 | - * way back before we locked the hash bucket. |
1203 | + * If we didn't get the lock; check if anybody stole it from us. In |
1204 | + * that case, we need to fix up the uval to point to them instead of |
1205 | + * us, otherwise bad things happen. [10] |
1206 | + * |
1207 | + * Another speculative read; pi_state->owner == current is unstable |
1208 | + * but needs our attention. |
1209 | */ |
1210 | if (q->pi_state->owner == current) { |
1211 | - /* |
1212 | - * Try to get the rt_mutex now. This might fail as some other |
1213 | - * task acquired the rt_mutex after we removed ourself from the |
1214 | - * rt_mutex waiters list. |
1215 | - */ |
1216 | - if (rt_mutex_trylock(&q->pi_state->pi_mutex)) { |
1217 | - locked = 1; |
1218 | - goto out; |
1219 | - } |
1220 | - |
1221 | - /* |
1222 | - * pi_state is incorrect, some other task did a lock steal and |
1223 | - * we returned due to timeout or signal without taking the |
1224 | - * rt_mutex. Too late. |
1225 | - */ |
1226 | - raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock); |
1227 | - owner = rt_mutex_owner(&q->pi_state->pi_mutex); |
1228 | - if (!owner) |
1229 | - owner = rt_mutex_next_owner(&q->pi_state->pi_mutex); |
1230 | - raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock); |
1231 | - ret = fixup_pi_state_owner(uaddr, q, owner); |
1232 | + ret = fixup_pi_state_owner(uaddr, q, NULL); |
1233 | goto out; |
1234 | } |
1235 | |
1236 | /* |
1237 | * Paranoia check. If we did not take the lock, then we should not be |
1238 | - * the owner of the rt_mutex. |
1239 | + * the owner of the rt_mutex. Warn and establish consistent state. |
1240 | */ |
1241 | - if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) |
1242 | - printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p " |
1243 | - "pi-state %p\n", ret, |
1244 | - q->pi_state->pi_mutex.owner, |
1245 | - q->pi_state->owner); |
1246 | + if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current)) |
1247 | + return fixup_pi_state_owner(uaddr, q, current); |
1248 | |
1249 | out: |
1250 | return ret ? ret : locked; |
1251 | @@ -2721,7 +2778,7 @@ retry_private: |
1252 | if (!trylock) { |
1253 | ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to); |
1254 | } else { |
1255 | - ret = rt_mutex_trylock(&q.pi_state->pi_mutex); |
1256 | + ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex); |
1257 | /* Fixup the trylock return value: */ |
1258 | ret = ret ? 0 : -EWOULDBLOCK; |
1259 | } |
1260 | @@ -2739,13 +2796,6 @@ retry_private: |
1261 | if (res) |
1262 | ret = (res < 0) ? res : 0; |
1263 | |
1264 | - /* |
1265 | - * If fixup_owner() faulted and was unable to handle the fault, unlock |
1266 | - * it and return the fault to userspace. |
1267 | - */ |
1268 | - if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) |
1269 | - rt_mutex_unlock(&q.pi_state->pi_mutex); |
1270 | - |
1271 | /* Unqueue and drop the lock */ |
1272 | unqueue_me_pi(&q); |
1273 | |
1274 | @@ -3050,8 +3100,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, |
1275 | if (q.pi_state && (q.pi_state->owner != current)) { |
1276 | spin_lock(q.lock_ptr); |
1277 | ret = fixup_pi_state_owner(uaddr2, &q, current); |
1278 | - if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) |
1279 | - rt_mutex_unlock(&q.pi_state->pi_mutex); |
1280 | /* |
1281 | * Drop the reference to the pi state which |
1282 | * the requeue_pi() code acquired for us. |
1283 | @@ -3088,14 +3136,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, |
1284 | if (res) |
1285 | ret = (res < 0) ? res : 0; |
1286 | |
1287 | - /* |
1288 | - * If fixup_pi_state_owner() faulted and was unable to handle |
1289 | - * the fault, unlock the rt_mutex and return the fault to |
1290 | - * userspace. |
1291 | - */ |
1292 | - if (ret && rt_mutex_owner(pi_mutex) == current) |
1293 | - rt_mutex_unlock(pi_mutex); |
1294 | - |
1295 | /* Unqueue and drop the lock. */ |
1296 | unqueue_me_pi(&q); |
1297 | } |
1298 | diff --git a/kernel/kprobes.c b/kernel/kprobes.c |
1299 | index 3938e4670b89b..51867a2e537fa 100644 |
1300 | --- a/kernel/kprobes.c |
1301 | +++ b/kernel/kprobes.c |
1302 | @@ -1884,6 +1884,10 @@ int register_kretprobe(struct kretprobe *rp) |
1303 | int i; |
1304 | void *addr; |
1305 | |
1306 | + /* If only rp->kp.addr is specified, check reregistering kprobes */ |
1307 | + if (rp->kp.addr && check_kprobe_rereg(&rp->kp)) |
1308 | + return -EINVAL; |
1309 | + |
1310 | if (kretprobe_blacklist_size) { |
1311 | addr = kprobe_addr(&rp->kp); |
1312 | if (IS_ERR(addr)) |
1313 | diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c |
1314 | index 62b6cee8ea7f9..0613c4b1d0596 100644 |
1315 | --- a/kernel/locking/rtmutex-debug.c |
1316 | +++ b/kernel/locking/rtmutex-debug.c |
1317 | @@ -173,12 +173,3 @@ void debug_rt_mutex_init(struct rt_mutex *lock, const char *name) |
1318 | lock->name = name; |
1319 | } |
1320 | |
1321 | -void |
1322 | -rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task) |
1323 | -{ |
1324 | -} |
1325 | - |
1326 | -void rt_mutex_deadlock_account_unlock(struct task_struct *task) |
1327 | -{ |
1328 | -} |
1329 | - |
1330 | diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h |
1331 | index d0519c3432b67..b585af9a1b508 100644 |
1332 | --- a/kernel/locking/rtmutex-debug.h |
1333 | +++ b/kernel/locking/rtmutex-debug.h |
1334 | @@ -9,9 +9,6 @@ |
1335 | * This file contains macros used solely by rtmutex.c. Debug version. |
1336 | */ |
1337 | |
1338 | -extern void |
1339 | -rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task); |
1340 | -extern void rt_mutex_deadlock_account_unlock(struct task_struct *task); |
1341 | extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); |
1342 | extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter); |
1343 | extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name); |
1344 | diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c |
1345 | index 7615e7722258c..6ff4156b3929e 100644 |
1346 | --- a/kernel/locking/rtmutex.c |
1347 | +++ b/kernel/locking/rtmutex.c |
1348 | @@ -956,8 +956,6 @@ takeit: |
1349 | */ |
1350 | rt_mutex_set_owner(lock, task); |
1351 | |
1352 | - rt_mutex_deadlock_account_lock(lock, task); |
1353 | - |
1354 | return 1; |
1355 | } |
1356 | |
1357 | @@ -1316,6 +1314,19 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, |
1358 | return ret; |
1359 | } |
1360 | |
1361 | +static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock) |
1362 | +{ |
1363 | + int ret = try_to_take_rt_mutex(lock, current, NULL); |
1364 | + |
1365 | + /* |
1366 | + * try_to_take_rt_mutex() sets the lock waiters bit |
1367 | + * unconditionally. Clean this up. |
1368 | + */ |
1369 | + fixup_rt_mutex_waiters(lock); |
1370 | + |
1371 | + return ret; |
1372 | +} |
1373 | + |
1374 | /* |
1375 | * Slow path try-lock function: |
1376 | */ |
1377 | @@ -1338,13 +1349,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) |
1378 | */ |
1379 | raw_spin_lock_irqsave(&lock->wait_lock, flags); |
1380 | |
1381 | - ret = try_to_take_rt_mutex(lock, current, NULL); |
1382 | - |
1383 | - /* |
1384 | - * try_to_take_rt_mutex() sets the lock waiters bit |
1385 | - * unconditionally. Clean this up. |
1386 | - */ |
1387 | - fixup_rt_mutex_waiters(lock); |
1388 | + ret = __rt_mutex_slowtrylock(lock); |
1389 | |
1390 | raw_spin_unlock_irqrestore(&lock->wait_lock, flags); |
1391 | |
1392 | @@ -1365,8 +1370,6 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, |
1393 | |
1394 | debug_rt_mutex_unlock(lock); |
1395 | |
1396 | - rt_mutex_deadlock_account_unlock(current); |
1397 | - |
1398 | /* |
1399 | * We must be careful here if the fast path is enabled. If we |
1400 | * have no waiters queued we cannot set owner to NULL here |
1401 | @@ -1432,11 +1435,10 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state, |
1402 | struct hrtimer_sleeper *timeout, |
1403 | enum rtmutex_chainwalk chwalk)) |
1404 | { |
1405 | - if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { |
1406 | - rt_mutex_deadlock_account_lock(lock, current); |
1407 | + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) |
1408 | return 0; |
1409 | - } else |
1410 | - return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); |
1411 | + |
1412 | + return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); |
1413 | } |
1414 | |
1415 | static inline int |
1416 | @@ -1448,21 +1450,19 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, |
1417 | enum rtmutex_chainwalk chwalk)) |
1418 | { |
1419 | if (chwalk == RT_MUTEX_MIN_CHAINWALK && |
1420 | - likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { |
1421 | - rt_mutex_deadlock_account_lock(lock, current); |
1422 | + likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) |
1423 | return 0; |
1424 | - } else |
1425 | - return slowfn(lock, state, timeout, chwalk); |
1426 | + |
1427 | + return slowfn(lock, state, timeout, chwalk); |
1428 | } |
1429 | |
1430 | static inline int |
1431 | rt_mutex_fasttrylock(struct rt_mutex *lock, |
1432 | int (*slowfn)(struct rt_mutex *lock)) |
1433 | { |
1434 | - if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { |
1435 | - rt_mutex_deadlock_account_lock(lock, current); |
1436 | + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) |
1437 | return 1; |
1438 | - } |
1439 | + |
1440 | return slowfn(lock); |
1441 | } |
1442 | |
1443 | @@ -1472,19 +1472,18 @@ rt_mutex_fastunlock(struct rt_mutex *lock, |
1444 | struct wake_q_head *wqh)) |
1445 | { |
1446 | WAKE_Q(wake_q); |
1447 | + bool deboost; |
1448 | |
1449 | - if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) { |
1450 | - rt_mutex_deadlock_account_unlock(current); |
1451 | + if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) |
1452 | + return; |
1453 | |
1454 | - } else { |
1455 | - bool deboost = slowfn(lock, &wake_q); |
1456 | + deboost = slowfn(lock, &wake_q); |
1457 | |
1458 | - wake_up_q(&wake_q); |
1459 | + wake_up_q(&wake_q); |
1460 | |
1461 | - /* Undo pi boosting if necessary: */ |
1462 | - if (deboost) |
1463 | - rt_mutex_adjust_prio(current); |
1464 | - } |
1465 | + /* Undo pi boosting if necessary: */ |
1466 | + if (deboost) |
1467 | + rt_mutex_adjust_prio(current); |
1468 | } |
1469 | |
1470 | /** |
1471 | @@ -1519,15 +1518,28 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); |
1472 | |
1473 | /* |
1474 | * Futex variant with full deadlock detection. |
1475 | + * Futex variants must not use the fast-path, see __rt_mutex_futex_unlock(). |
1476 | */ |
1477 | -int rt_mutex_timed_futex_lock(struct rt_mutex *lock, |
1478 | +int __sched rt_mutex_timed_futex_lock(struct rt_mutex *lock, |
1479 | struct hrtimer_sleeper *timeout) |
1480 | { |
1481 | might_sleep(); |
1482 | |
1483 | - return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, |
1484 | - RT_MUTEX_FULL_CHAINWALK, |
1485 | - rt_mutex_slowlock); |
1486 | + return rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, |
1487 | + timeout, RT_MUTEX_FULL_CHAINWALK); |
1488 | +} |
1489 | + |
1490 | +/* |
1491 | + * Futex variant, must not use fastpath. |
1492 | + */ |
1493 | +int __sched rt_mutex_futex_trylock(struct rt_mutex *lock) |
1494 | +{ |
1495 | + return rt_mutex_slowtrylock(lock); |
1496 | +} |
1497 | + |
1498 | +int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) |
1499 | +{ |
1500 | + return __rt_mutex_slowtrylock(lock); |
1501 | } |
1502 | |
1503 | /** |
1504 | @@ -1586,20 +1598,38 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock) |
1505 | EXPORT_SYMBOL_GPL(rt_mutex_unlock); |
1506 | |
1507 | /** |
1508 | - * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock |
1509 | - * @lock: the rt_mutex to be unlocked |
1510 | - * |
1511 | - * Returns: true/false indicating whether priority adjustment is |
1512 | - * required or not. |
1513 | + * Futex variant, that since futex variants do not use the fast-path, can be |
1514 | + * simple and will not need to retry. |
1515 | */ |
1516 | -bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock, |
1517 | - struct wake_q_head *wqh) |
1518 | +bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, |
1519 | + struct wake_q_head *wake_q) |
1520 | { |
1521 | - if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) { |
1522 | - rt_mutex_deadlock_account_unlock(current); |
1523 | - return false; |
1524 | + lockdep_assert_held(&lock->wait_lock); |
1525 | + |
1526 | + debug_rt_mutex_unlock(lock); |
1527 | + |
1528 | + if (!rt_mutex_has_waiters(lock)) { |
1529 | + lock->owner = NULL; |
1530 | + return false; /* done */ |
1531 | + } |
1532 | + |
1533 | + mark_wakeup_next_waiter(wake_q, lock); |
1534 | + return true; /* deboost and wakeups */ |
1535 | +} |
1536 | + |
1537 | +void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) |
1538 | +{ |
1539 | + WAKE_Q(wake_q); |
1540 | + bool deboost; |
1541 | + |
1542 | + raw_spin_lock_irq(&lock->wait_lock); |
1543 | + deboost = __rt_mutex_futex_unlock(lock, &wake_q); |
1544 | + raw_spin_unlock_irq(&lock->wait_lock); |
1545 | + |
1546 | + if (deboost) { |
1547 | + wake_up_q(&wake_q); |
1548 | + rt_mutex_adjust_prio(current); |
1549 | } |
1550 | - return rt_mutex_slowunlock(lock, wqh); |
1551 | } |
1552 | |
1553 | /** |
1554 | @@ -1656,7 +1686,6 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, |
1555 | __rt_mutex_init(lock, NULL); |
1556 | debug_rt_mutex_proxy_lock(lock, proxy_owner); |
1557 | rt_mutex_set_owner(lock, proxy_owner); |
1558 | - rt_mutex_deadlock_account_lock(lock, proxy_owner); |
1559 | } |
1560 | |
1561 | /** |
1562 | @@ -1667,12 +1696,10 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, |
1563 | * No locking. Caller has to do serializing itself |
1564 | * Special API call for PI-futex support |
1565 | */ |
1566 | -void rt_mutex_proxy_unlock(struct rt_mutex *lock, |
1567 | - struct task_struct *proxy_owner) |
1568 | +void rt_mutex_proxy_unlock(struct rt_mutex *lock) |
1569 | { |
1570 | debug_rt_mutex_proxy_unlock(lock); |
1571 | rt_mutex_set_owner(lock, NULL); |
1572 | - rt_mutex_deadlock_account_unlock(proxy_owner); |
1573 | } |
1574 | |
1575 | /** |
1576 | diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h |
1577 | index c4060584c4076..6607802efa8bd 100644 |
1578 | --- a/kernel/locking/rtmutex.h |
1579 | +++ b/kernel/locking/rtmutex.h |
1580 | @@ -11,8 +11,6 @@ |
1581 | */ |
1582 | |
1583 | #define rt_mutex_deadlock_check(l) (0) |
1584 | -#define rt_mutex_deadlock_account_lock(m, t) do { } while (0) |
1585 | -#define rt_mutex_deadlock_account_unlock(l) do { } while (0) |
1586 | #define debug_rt_mutex_init_waiter(w) do { } while (0) |
1587 | #define debug_rt_mutex_free_waiter(w) do { } while (0) |
1588 | #define debug_rt_mutex_lock(l) do { } while (0) |
1589 | diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h |
1590 | index 14cbafed00142..bea5d677fe343 100644 |
1591 | --- a/kernel/locking/rtmutex_common.h |
1592 | +++ b/kernel/locking/rtmutex_common.h |
1593 | @@ -102,8 +102,7 @@ enum rtmutex_chainwalk { |
1594 | extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); |
1595 | extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, |
1596 | struct task_struct *proxy_owner); |
1597 | -extern void rt_mutex_proxy_unlock(struct rt_mutex *lock, |
1598 | - struct task_struct *proxy_owner); |
1599 | +extern void rt_mutex_proxy_unlock(struct rt_mutex *lock); |
1600 | extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock, |
1601 | struct rt_mutex_waiter *waiter, |
1602 | struct task_struct *task); |
1603 | @@ -113,8 +112,13 @@ extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, |
1604 | extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, |
1605 | struct rt_mutex_waiter *waiter); |
1606 | extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to); |
1607 | -extern bool rt_mutex_futex_unlock(struct rt_mutex *lock, |
1608 | - struct wake_q_head *wqh); |
1609 | +extern int rt_mutex_futex_trylock(struct rt_mutex *l); |
1610 | +extern int __rt_mutex_futex_trylock(struct rt_mutex *l); |
1611 | + |
1612 | +extern void rt_mutex_futex_unlock(struct rt_mutex *lock); |
1613 | +extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock, |
1614 | + struct wake_q_head *wqh); |
1615 | + |
1616 | extern void rt_mutex_adjust_prio(struct task_struct *task); |
1617 | |
1618 | #ifdef CONFIG_DEBUG_RT_MUTEXES |
1619 | diff --git a/mm/huge_memory.c b/mm/huge_memory.c |
1620 | index 0385c57a2b7af..05ca01ef97f7f 100644 |
1621 | --- a/mm/huge_memory.c |
1622 | +++ b/mm/huge_memory.c |
1623 | @@ -1753,7 +1753,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
1624 | spinlock_t *ptl; |
1625 | struct mm_struct *mm = vma->vm_mm; |
1626 | unsigned long haddr = address & HPAGE_PMD_MASK; |
1627 | - bool was_locked = false; |
1628 | + bool do_unlock_page = false; |
1629 | pmd_t _pmd; |
1630 | |
1631 | mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE); |
1632 | @@ -1766,7 +1766,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
1633 | VM_BUG_ON(freeze && !page); |
1634 | if (page) { |
1635 | VM_WARN_ON_ONCE(!PageLocked(page)); |
1636 | - was_locked = true; |
1637 | if (page != pmd_page(*pmd)) |
1638 | goto out; |
1639 | } |
1640 | @@ -1775,19 +1774,29 @@ repeat: |
1641 | if (pmd_trans_huge(*pmd)) { |
1642 | if (!page) { |
1643 | page = pmd_page(*pmd); |
1644 | - if (unlikely(!trylock_page(page))) { |
1645 | - get_page(page); |
1646 | - _pmd = *pmd; |
1647 | - spin_unlock(ptl); |
1648 | - lock_page(page); |
1649 | - spin_lock(ptl); |
1650 | - if (unlikely(!pmd_same(*pmd, _pmd))) { |
1651 | - unlock_page(page); |
1652 | + /* |
1653 | + * An anonymous page must be locked, to ensure that a |
1654 | + * concurrent reuse_swap_page() sees stable mapcount; |
1655 | + * but reuse_swap_page() is not used on shmem or file, |
1656 | + * and page lock must not be taken when zap_pmd_range() |
1657 | + * calls __split_huge_pmd() while i_mmap_lock is held. |
1658 | + */ |
1659 | + if (PageAnon(page)) { |
1660 | + if (unlikely(!trylock_page(page))) { |
1661 | + get_page(page); |
1662 | + _pmd = *pmd; |
1663 | + spin_unlock(ptl); |
1664 | + lock_page(page); |
1665 | + spin_lock(ptl); |
1666 | + if (unlikely(!pmd_same(*pmd, _pmd))) { |
1667 | + unlock_page(page); |
1668 | + put_page(page); |
1669 | + page = NULL; |
1670 | + goto repeat; |
1671 | + } |
1672 | put_page(page); |
1673 | - page = NULL; |
1674 | - goto repeat; |
1675 | } |
1676 | - put_page(page); |
1677 | + do_unlock_page = true; |
1678 | } |
1679 | } |
1680 | if (PageMlocked(page)) |
1681 | @@ -1797,7 +1806,7 @@ repeat: |
1682 | __split_huge_pmd_locked(vma, pmd, haddr, freeze); |
1683 | out: |
1684 | spin_unlock(ptl); |
1685 | - if (!was_locked && page) |
1686 | + if (do_unlock_page) |
1687 | unlock_page(page); |
1688 | mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE); |
1689 | } |
1690 | diff --git a/mm/hugetlb.c b/mm/hugetlb.c |
1691 | index 52b5e0e026d60..5a16d892c891c 100644 |
1692 | --- a/mm/hugetlb.c |
1693 | +++ b/mm/hugetlb.c |
1694 | @@ -1210,12 +1210,11 @@ struct hstate *size_to_hstate(unsigned long size) |
1695 | */ |
1696 | bool page_huge_active(struct page *page) |
1697 | { |
1698 | - VM_BUG_ON_PAGE(!PageHuge(page), page); |
1699 | - return PageHead(page) && PagePrivate(&page[1]); |
1700 | + return PageHeadHuge(page) && PagePrivate(&page[1]); |
1701 | } |
1702 | |
1703 | /* never called for tail page */ |
1704 | -static void set_page_huge_active(struct page *page) |
1705 | +void set_page_huge_active(struct page *page) |
1706 | { |
1707 | VM_BUG_ON_PAGE(!PageHeadHuge(page), page); |
1708 | SetPagePrivate(&page[1]); |
1709 | @@ -4657,9 +4656,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list) |
1710 | { |
1711 | bool ret = true; |
1712 | |
1713 | - VM_BUG_ON_PAGE(!PageHead(page), page); |
1714 | spin_lock(&hugetlb_lock); |
1715 | - if (!page_huge_active(page) || !get_page_unless_zero(page)) { |
1716 | + if (!PageHeadHuge(page) || !page_huge_active(page) || |
1717 | + !get_page_unless_zero(page)) { |
1718 | ret = false; |
1719 | goto unlock; |
1720 | } |
1721 | diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c |
1722 | index 482c94d9d958a..d1c7dcc234486 100644 |
1723 | --- a/net/lapb/lapb_out.c |
1724 | +++ b/net/lapb/lapb_out.c |
1725 | @@ -87,7 +87,8 @@ void lapb_kick(struct lapb_cb *lapb) |
1726 | skb = skb_dequeue(&lapb->write_queue); |
1727 | |
1728 | do { |
1729 | - if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { |
1730 | + skbn = skb_copy(skb, GFP_ATOMIC); |
1731 | + if (!skbn) { |
1732 | skb_queue_head(&lapb->write_queue, skb); |
1733 | break; |
1734 | } |
1735 | diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c |
1736 | index f783d1377d9a8..9f0f437a09b95 100644 |
1737 | --- a/net/mac80211/driver-ops.c |
1738 | +++ b/net/mac80211/driver-ops.c |
1739 | @@ -128,8 +128,11 @@ int drv_sta_state(struct ieee80211_local *local, |
1740 | } else if (old_state == IEEE80211_STA_AUTH && |
1741 | new_state == IEEE80211_STA_ASSOC) { |
1742 | ret = drv_sta_add(local, sdata, &sta->sta); |
1743 | - if (ret == 0) |
1744 | + if (ret == 0) { |
1745 | sta->uploaded = true; |
1746 | + if (rcu_access_pointer(sta->sta.rates)) |
1747 | + drv_sta_rate_tbl_update(local, sdata, &sta->sta); |
1748 | + } |
1749 | } else if (old_state == IEEE80211_STA_ASSOC && |
1750 | new_state == IEEE80211_STA_AUTH) { |
1751 | drv_sta_remove(local, sdata, &sta->sta); |
1752 | diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c |
1753 | index e6096dfd02105..41421609a0f02 100644 |
1754 | --- a/net/mac80211/rate.c |
1755 | +++ b/net/mac80211/rate.c |
1756 | @@ -892,7 +892,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw, |
1757 | if (old) |
1758 | kfree_rcu(old, rcu_head); |
1759 | |
1760 | - drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); |
1761 | + if (sta->uploaded) |
1762 | + drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); |
1763 | |
1764 | return 0; |
1765 | } |
1766 | diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c |
1767 | index 9be82ed02e0e5..c38d68131d02e 100644 |
1768 | --- a/net/mac80211/rx.c |
1769 | +++ b/net/mac80211/rx.c |
1770 | @@ -3802,6 +3802,8 @@ void ieee80211_check_fast_rx(struct sta_info *sta) |
1771 | |
1772 | rcu_read_lock(); |
1773 | key = rcu_dereference(sta->ptk[sta->ptk_idx]); |
1774 | + if (!key) |
1775 | + key = rcu_dereference(sdata->default_unicast_key); |
1776 | if (key) { |
1777 | switch (key->conf.cipher) { |
1778 | case WLAN_CIPHER_SUITE_TKIP: |
1779 | diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c |
1780 | index 912ed9b901ac9..45038c837eab4 100644 |
1781 | --- a/net/sched/sch_api.c |
1782 | +++ b/net/sched/sch_api.c |
1783 | @@ -393,7 +393,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, |
1784 | { |
1785 | struct qdisc_rate_table *rtab; |
1786 | |
1787 | - if (tab == NULL || r->rate == 0 || r->cell_log == 0 || |
1788 | + if (tab == NULL || r->rate == 0 || |
1789 | + r->cell_log == 0 || r->cell_log >= 32 || |
1790 | nla_len(tab) != TC_RTAB_SIZE) |
1791 | return NULL; |
1792 | |
1793 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
1794 | index 720de648510dc..b003cb07254a8 100644 |
1795 | --- a/sound/pci/hda/patch_realtek.c |
1796 | +++ b/sound/pci/hda/patch_realtek.c |
1797 | @@ -6284,7 +6284,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { |
1798 | SND_HDA_PIN_QUIRK(0x10ec0299, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, |
1799 | ALC225_STANDARD_PINS, |
1800 | {0x12, 0xb7a60130}, |
1801 | - {0x13, 0xb8a60140}, |
1802 | + {0x13, 0xb8a61140}, |
1803 | {0x17, 0x90170110}), |
1804 | {} |
1805 | }; |
1806 | diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c |
1807 | index d84c28eac262d..0ba5bb51bd93c 100644 |
1808 | --- a/tools/objtool/elf.c |
1809 | +++ b/tools/objtool/elf.c |
1810 | @@ -226,8 +226,11 @@ static int read_symbols(struct elf *elf) |
1811 | |
1812 | symtab = find_section_by_name(elf, ".symtab"); |
1813 | if (!symtab) { |
1814 | - WARN("missing symbol table"); |
1815 | - return -1; |
1816 | + /* |
1817 | + * A missing symbol table is actually possible if it's an empty |
1818 | + * .o file. This can happen for thunk_64.o. |
1819 | + */ |
1820 | + return 0; |
1821 | } |
1822 | |
1823 | symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize; |