Contents of /trunk/kernel-alx/patches-4.9/0256-4.9.157-all-fixes.patch
Parent Directory | Revision Log
Revision 3309 - (show annotations) (download)
Tue Mar 12 10:43:14 2019 UTC (5 years, 6 months ago) by niro
File size: 27802 byte(s)
-linux-4.9.157
1 | diff --git a/Makefile b/Makefile |
2 | index 956923115f7e..4eb7a17e18f1 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 4 |
7 | PATCHLEVEL = 9 |
8 | -SUBLEVEL = 156 |
9 | +SUBLEVEL = 157 |
10 | EXTRAVERSION = |
11 | NAME = Roaring Lionus |
12 | |
13 | diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c |
14 | index c1cd80ecc219..a904244264ce 100644 |
15 | --- a/arch/arm/mach-iop32x/n2100.c |
16 | +++ b/arch/arm/mach-iop32x/n2100.c |
17 | @@ -75,8 +75,7 @@ void __init n2100_map_io(void) |
18 | /* |
19 | * N2100 PCI. |
20 | */ |
21 | -static int __init |
22 | -n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
23 | +static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
24 | { |
25 | int irq; |
26 | |
27 | diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c |
28 | index b05c6d6f99d0..08d813234b2d 100644 |
29 | --- a/arch/arm/mach-tango/pm.c |
30 | +++ b/arch/arm/mach-tango/pm.c |
31 | @@ -2,6 +2,7 @@ |
32 | #include <linux/suspend.h> |
33 | #include <asm/suspend.h> |
34 | #include "smc.h" |
35 | +#include "pm.h" |
36 | |
37 | static int tango_pm_powerdown(unsigned long arg) |
38 | { |
39 | @@ -23,10 +24,7 @@ static const struct platform_suspend_ops tango_pm_ops = { |
40 | .valid = suspend_valid_only_mem, |
41 | }; |
42 | |
43 | -static int __init tango_pm_init(void) |
44 | +void __init tango_pm_init(void) |
45 | { |
46 | suspend_set_ops(&tango_pm_ops); |
47 | - return 0; |
48 | } |
49 | - |
50 | -late_initcall(tango_pm_init); |
51 | diff --git a/arch/arm/mach-tango/pm.h b/arch/arm/mach-tango/pm.h |
52 | new file mode 100644 |
53 | index 000000000000..35ea705a0ee2 |
54 | --- /dev/null |
55 | +++ b/arch/arm/mach-tango/pm.h |
56 | @@ -0,0 +1,7 @@ |
57 | +/* SPDX-License-Identifier: GPL-2.0 */ |
58 | + |
59 | +#ifdef CONFIG_SUSPEND |
60 | +void __init tango_pm_init(void); |
61 | +#else |
62 | +#define tango_pm_init NULL |
63 | +#endif |
64 | diff --git a/arch/arm/mach-tango/setup.c b/arch/arm/mach-tango/setup.c |
65 | index f14b6c7d255b..2b48e1098ea3 100644 |
66 | --- a/arch/arm/mach-tango/setup.c |
67 | +++ b/arch/arm/mach-tango/setup.c |
68 | @@ -1,6 +1,7 @@ |
69 | #include <asm/mach/arch.h> |
70 | #include <asm/hardware/cache-l2x0.h> |
71 | #include "smc.h" |
72 | +#include "pm.h" |
73 | |
74 | static void tango_l2c_write(unsigned long val, unsigned int reg) |
75 | { |
76 | @@ -14,4 +15,5 @@ DT_MACHINE_START(TANGO_DT, "Sigma Tango DT") |
77 | .dt_compat = tango_dt_compat, |
78 | .l2c_aux_mask = ~0, |
79 | .l2c_write_sec = tango_l2c_write, |
80 | + .init_late = tango_pm_init, |
81 | MACHINE_END |
82 | diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c |
83 | index 659e6d3ae335..60177a612cb1 100644 |
84 | --- a/arch/mips/kernel/mips-cm.c |
85 | +++ b/arch/mips/kernel/mips-cm.c |
86 | @@ -424,5 +424,5 @@ void mips_cm_error_report(void) |
87 | } |
88 | |
89 | /* reprime cause register */ |
90 | - write_gcr_error_cause(0); |
91 | + write_gcr_error_cause(cm_error); |
92 | } |
93 | diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c |
94 | index 308d051fc45c..7c512834a8f1 100644 |
95 | --- a/arch/mips/pci/pci-octeon.c |
96 | +++ b/arch/mips/pci/pci-octeon.c |
97 | @@ -573,6 +573,11 @@ static int __init octeon_pci_setup(void) |
98 | if (octeon_has_feature(OCTEON_FEATURE_PCIE)) |
99 | return 0; |
100 | |
101 | + if (!octeon_is_pci_host()) { |
102 | + pr_notice("Not in host mode, PCI Controller not initialized\n"); |
103 | + return 0; |
104 | + } |
105 | + |
106 | /* Point pcibios_map_irq() to the PCI version of it */ |
107 | octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq; |
108 | |
109 | @@ -584,11 +589,6 @@ static int __init octeon_pci_setup(void) |
110 | else |
111 | octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG; |
112 | |
113 | - if (!octeon_is_pci_host()) { |
114 | - pr_notice("Not in host mode, PCI Controller not initialized\n"); |
115 | - return 0; |
116 | - } |
117 | - |
118 | /* PCI I/O and PCI MEM values */ |
119 | set_io_port_base(OCTEON_PCI_IOSPACE_BASE); |
120 | ioport_resource.start = 0; |
121 | diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile |
122 | index c3dc12a8b7d9..0b845cc7fbdc 100644 |
123 | --- a/arch/mips/vdso/Makefile |
124 | +++ b/arch/mips/vdso/Makefile |
125 | @@ -116,7 +116,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE |
126 | $(call cmd,force_checksrc) |
127 | $(call if_changed_rule,cc_o_c) |
128 | |
129 | -$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32 |
130 | +$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32 |
131 | $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE |
132 | $(call if_changed_dep,cpp_lds_S) |
133 | |
134 | @@ -156,7 +156,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE |
135 | $(call cmd,force_checksrc) |
136 | $(call if_changed_rule,cc_o_c) |
137 | |
138 | -$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32 |
139 | +$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32 |
140 | $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE |
141 | $(call if_changed_dep,cpp_lds_S) |
142 | |
143 | diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c |
144 | index e14366de0e6e..97387cfbbeb5 100644 |
145 | --- a/drivers/gpu/drm/drm_modes.c |
146 | +++ b/drivers/gpu/drm/drm_modes.c |
147 | @@ -753,7 +753,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode) |
148 | if (mode->hsync) |
149 | return mode->hsync; |
150 | |
151 | - if (mode->htotal < 0) |
152 | + if (mode->htotal <= 0) |
153 | return 0; |
154 | |
155 | calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */ |
156 | diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c |
157 | index 29abd28c19b3..4b556e698f13 100644 |
158 | --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c |
159 | +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c |
160 | @@ -605,13 +605,16 @@ out_fixup: |
161 | static int vmw_dma_masks(struct vmw_private *dev_priv) |
162 | { |
163 | struct drm_device *dev = dev_priv->dev; |
164 | + int ret = 0; |
165 | |
166 | - if (intel_iommu_enabled && |
167 | + ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)); |
168 | + if (dev_priv->map_mode != vmw_dma_phys && |
169 | (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { |
170 | DRM_INFO("Restricting DMA addresses to 44 bits.\n"); |
171 | - return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); |
172 | + return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44)); |
173 | } |
174 | - return 0; |
175 | + |
176 | + return ret; |
177 | } |
178 | #else |
179 | static int vmw_dma_masks(struct vmw_private *dev_priv) |
180 | diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |
181 | index 81f5a552e32f..9fe8eda7c859 100644 |
182 | --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |
183 | +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |
184 | @@ -3769,7 +3769,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, |
185 | *p_fence = NULL; |
186 | } |
187 | |
188 | - return 0; |
189 | + return ret; |
190 | } |
191 | |
192 | /** |
193 | diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c |
194 | index 29423691c105..d7179dd3c9ef 100644 |
195 | --- a/drivers/hid/hid-debug.c |
196 | +++ b/drivers/hid/hid-debug.c |
197 | @@ -30,6 +30,7 @@ |
198 | |
199 | #include <linux/debugfs.h> |
200 | #include <linux/seq_file.h> |
201 | +#include <linux/kfifo.h> |
202 | #include <linux/sched.h> |
203 | #include <linux/export.h> |
204 | #include <linux/slab.h> |
205 | @@ -455,7 +456,7 @@ static char *resolv_usage_page(unsigned page, struct seq_file *f) { |
206 | char *buf = NULL; |
207 | |
208 | if (!f) { |
209 | - buf = kzalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC); |
210 | + buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_ATOMIC); |
211 | if (!buf) |
212 | return ERR_PTR(-ENOMEM); |
213 | } |
214 | @@ -659,17 +660,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device); |
215 | /* enqueue string to 'events' ring buffer */ |
216 | void hid_debug_event(struct hid_device *hdev, char *buf) |
217 | { |
218 | - unsigned i; |
219 | struct hid_debug_list *list; |
220 | unsigned long flags; |
221 | |
222 | spin_lock_irqsave(&hdev->debug_list_lock, flags); |
223 | - list_for_each_entry(list, &hdev->debug_list, node) { |
224 | - for (i = 0; buf[i]; i++) |
225 | - list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] = |
226 | - buf[i]; |
227 | - list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE; |
228 | - } |
229 | + list_for_each_entry(list, &hdev->debug_list, node) |
230 | + kfifo_in(&list->hid_debug_fifo, buf, strlen(buf)); |
231 | spin_unlock_irqrestore(&hdev->debug_list_lock, flags); |
232 | |
233 | wake_up_interruptible(&hdev->debug_wait); |
234 | @@ -720,8 +716,7 @@ void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 valu |
235 | hid_debug_event(hdev, buf); |
236 | |
237 | kfree(buf); |
238 | - wake_up_interruptible(&hdev->debug_wait); |
239 | - |
240 | + wake_up_interruptible(&hdev->debug_wait); |
241 | } |
242 | EXPORT_SYMBOL_GPL(hid_dump_input); |
243 | |
244 | @@ -1086,8 +1081,8 @@ static int hid_debug_events_open(struct inode *inode, struct file *file) |
245 | goto out; |
246 | } |
247 | |
248 | - if (!(list->hid_debug_buf = kzalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_KERNEL))) { |
249 | - err = -ENOMEM; |
250 | + err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL); |
251 | + if (err) { |
252 | kfree(list); |
253 | goto out; |
254 | } |
255 | @@ -1107,77 +1102,57 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, |
256 | size_t count, loff_t *ppos) |
257 | { |
258 | struct hid_debug_list *list = file->private_data; |
259 | - int ret = 0, len; |
260 | + int ret = 0, copied; |
261 | DECLARE_WAITQUEUE(wait, current); |
262 | |
263 | mutex_lock(&list->read_mutex); |
264 | - while (ret == 0) { |
265 | - if (list->head == list->tail) { |
266 | - add_wait_queue(&list->hdev->debug_wait, &wait); |
267 | - set_current_state(TASK_INTERRUPTIBLE); |
268 | - |
269 | - while (list->head == list->tail) { |
270 | - if (file->f_flags & O_NONBLOCK) { |
271 | - ret = -EAGAIN; |
272 | - break; |
273 | - } |
274 | - if (signal_pending(current)) { |
275 | - ret = -ERESTARTSYS; |
276 | - break; |
277 | - } |
278 | + if (kfifo_is_empty(&list->hid_debug_fifo)) { |
279 | + add_wait_queue(&list->hdev->debug_wait, &wait); |
280 | + set_current_state(TASK_INTERRUPTIBLE); |
281 | + |
282 | + while (kfifo_is_empty(&list->hid_debug_fifo)) { |
283 | + if (file->f_flags & O_NONBLOCK) { |
284 | + ret = -EAGAIN; |
285 | + break; |
286 | + } |
287 | |
288 | - if (!list->hdev || !list->hdev->debug) { |
289 | - ret = -EIO; |
290 | - set_current_state(TASK_RUNNING); |
291 | - goto out; |
292 | - } |
293 | + if (signal_pending(current)) { |
294 | + ret = -ERESTARTSYS; |
295 | + break; |
296 | + } |
297 | |
298 | - /* allow O_NONBLOCK from other threads */ |
299 | - mutex_unlock(&list->read_mutex); |
300 | - schedule(); |
301 | - mutex_lock(&list->read_mutex); |
302 | - set_current_state(TASK_INTERRUPTIBLE); |
303 | + /* if list->hdev is NULL we cannot remove_wait_queue(). |
304 | + * if list->hdev->debug is 0 then hid_debug_unregister() |
305 | + * was already called and list->hdev is being destroyed. |
306 | + * if we add remove_wait_queue() here we can hit a race. |
307 | + */ |
308 | + if (!list->hdev || !list->hdev->debug) { |
309 | + ret = -EIO; |
310 | + set_current_state(TASK_RUNNING); |
311 | + goto out; |
312 | } |
313 | |
314 | - set_current_state(TASK_RUNNING); |
315 | - remove_wait_queue(&list->hdev->debug_wait, &wait); |
316 | + /* allow O_NONBLOCK from other threads */ |
317 | + mutex_unlock(&list->read_mutex); |
318 | + schedule(); |
319 | + mutex_lock(&list->read_mutex); |
320 | + set_current_state(TASK_INTERRUPTIBLE); |
321 | } |
322 | |
323 | - if (ret) |
324 | - goto out; |
325 | + __set_current_state(TASK_RUNNING); |
326 | + remove_wait_queue(&list->hdev->debug_wait, &wait); |
327 | |
328 | - /* pass the ringbuffer contents to userspace */ |
329 | -copy_rest: |
330 | - if (list->tail == list->head) |
331 | + if (ret) |
332 | goto out; |
333 | - if (list->tail > list->head) { |
334 | - len = list->tail - list->head; |
335 | - if (len > count) |
336 | - len = count; |
337 | - |
338 | - if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) { |
339 | - ret = -EFAULT; |
340 | - goto out; |
341 | - } |
342 | - ret += len; |
343 | - list->head += len; |
344 | - } else { |
345 | - len = HID_DEBUG_BUFSIZE - list->head; |
346 | - if (len > count) |
347 | - len = count; |
348 | - |
349 | - if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) { |
350 | - ret = -EFAULT; |
351 | - goto out; |
352 | - } |
353 | - list->head = 0; |
354 | - ret += len; |
355 | - count -= len; |
356 | - if (count > 0) |
357 | - goto copy_rest; |
358 | - } |
359 | - |
360 | } |
361 | + |
362 | + /* pass the fifo content to userspace, locking is not needed with only |
363 | + * one concurrent reader and one concurrent writer |
364 | + */ |
365 | + ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied); |
366 | + if (ret) |
367 | + goto out; |
368 | + ret = copied; |
369 | out: |
370 | mutex_unlock(&list->read_mutex); |
371 | return ret; |
372 | @@ -1188,7 +1163,7 @@ static unsigned int hid_debug_events_poll(struct file *file, poll_table *wait) |
373 | struct hid_debug_list *list = file->private_data; |
374 | |
375 | poll_wait(file, &list->hdev->debug_wait, wait); |
376 | - if (list->head != list->tail) |
377 | + if (!kfifo_is_empty(&list->hid_debug_fifo)) |
378 | return POLLIN | POLLRDNORM; |
379 | if (!list->hdev->debug) |
380 | return POLLERR | POLLHUP; |
381 | @@ -1203,7 +1178,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file) |
382 | spin_lock_irqsave(&list->hdev->debug_list_lock, flags); |
383 | list_del(&list->node); |
384 | spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); |
385 | - kfree(list->hid_debug_buf); |
386 | + kfifo_free(&list->hid_debug_fifo); |
387 | kfree(list); |
388 | |
389 | return 0; |
390 | @@ -1254,4 +1229,3 @@ void hid_debug_exit(void) |
391 | { |
392 | debugfs_remove_recursive(hid_debug_root); |
393 | } |
394 | - |
395 | diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c |
396 | index ef761a508630..dad2a8be6830 100644 |
397 | --- a/drivers/iio/chemical/atlas-ph-sensor.c |
398 | +++ b/drivers/iio/chemical/atlas-ph-sensor.c |
399 | @@ -453,9 +453,8 @@ static int atlas_read_raw(struct iio_dev *indio_dev, |
400 | case IIO_CHAN_INFO_SCALE: |
401 | switch (chan->type) { |
402 | case IIO_TEMP: |
403 | - *val = 1; /* 0.01 */ |
404 | - *val2 = 100; |
405 | - break; |
406 | + *val = 10; |
407 | + return IIO_VAL_INT; |
408 | case IIO_PH: |
409 | *val = 1; /* 0.001 */ |
410 | *val2 = 1000; |
411 | @@ -486,7 +485,7 @@ static int atlas_write_raw(struct iio_dev *indio_dev, |
412 | int val, int val2, long mask) |
413 | { |
414 | struct atlas_data *data = iio_priv(indio_dev); |
415 | - __be32 reg = cpu_to_be32(val); |
416 | + __be32 reg = cpu_to_be32(val / 10); |
417 | |
418 | if (val2 != 0 || val < 0 || val > 20000) |
419 | return -EINVAL; |
420 | diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c |
421 | index c344483fa7d6..9f257c53e6d4 100644 |
422 | --- a/drivers/misc/vexpress-syscfg.c |
423 | +++ b/drivers/misc/vexpress-syscfg.c |
424 | @@ -61,7 +61,7 @@ static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func, |
425 | int tries; |
426 | long timeout; |
427 | |
428 | - if (WARN_ON(index > func->num_templates)) |
429 | + if (WARN_ON(index >= func->num_templates)) |
430 | return -EINVAL; |
431 | |
432 | command = readl(syscfg->base + SYS_CFGCTRL); |
433 | diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c |
434 | index 141bd70a49c2..b9509230ce4d 100644 |
435 | --- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c |
436 | +++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c |
437 | @@ -168,9 +168,10 @@ int gpmi_init(struct gpmi_nand_data *this) |
438 | |
439 | /* |
440 | * Reset BCH here, too. We got failures otherwise :( |
441 | - * See later BCH reset for explanation of MX23 handling |
442 | + * See later BCH reset for explanation of MX23 and MX28 handling |
443 | */ |
444 | - ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); |
445 | + ret = gpmi_reset_block(r->bch_regs, |
446 | + GPMI_IS_MX23(this) || GPMI_IS_MX28(this)); |
447 | if (ret) |
448 | goto err_out; |
449 | |
450 | @@ -275,13 +276,11 @@ int bch_set_geometry(struct gpmi_nand_data *this) |
451 | |
452 | /* |
453 | * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this |
454 | - * chip, otherwise it will lock up. So we skip resetting BCH on the MX23. |
455 | - * On the other hand, the MX28 needs the reset, because one case has been |
456 | - * seen where the BCH produced ECC errors constantly after 10000 |
457 | - * consecutive reboots. The latter case has not been seen on the MX23 |
458 | - * yet, still we don't know if it could happen there as well. |
459 | + * chip, otherwise it will lock up. So we skip resetting BCH on the MX23 |
460 | + * and MX28. |
461 | */ |
462 | - ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); |
463 | + ret = gpmi_reset_block(r->bch_regs, |
464 | + GPMI_IS_MX23(this) || GPMI_IS_MX28(this)); |
465 | if (ret) |
466 | goto err_out; |
467 | |
468 | diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig |
469 | index 8bef27b8f85d..e7b478b49985 100644 |
470 | --- a/fs/cifs/Kconfig |
471 | +++ b/fs/cifs/Kconfig |
472 | @@ -111,7 +111,7 @@ config CIFS_XATTR |
473 | |
474 | config CIFS_POSIX |
475 | bool "CIFS POSIX Extensions" |
476 | - depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY && CIFS_XATTR |
477 | + depends on CIFS_XATTR |
478 | help |
479 | Enabling this option will cause the cifs client to attempt to |
480 | negotiate a newer dialect with servers, such as Samba 3.0.5 |
481 | diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c |
482 | index 3d7de9f4f545..77e9cd7a0137 100644 |
483 | --- a/fs/debugfs/inode.c |
484 | +++ b/fs/debugfs/inode.c |
485 | @@ -732,6 +732,13 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, |
486 | struct dentry *dentry = NULL, *trap; |
487 | struct name_snapshot old_name; |
488 | |
489 | + if (IS_ERR(old_dir)) |
490 | + return old_dir; |
491 | + if (IS_ERR(new_dir)) |
492 | + return new_dir; |
493 | + if (IS_ERR_OR_NULL(old_dentry)) |
494 | + return old_dentry; |
495 | + |
496 | trap = lock_rename(new_dir, old_dir); |
497 | /* Source or destination directories don't exist? */ |
498 | if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir)) |
499 | diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c |
500 | index 12d780718b48..3656f87d11e3 100644 |
501 | --- a/fs/nfsd/nfs4state.c |
502 | +++ b/fs/nfsd/nfs4state.c |
503 | @@ -1472,8 +1472,10 @@ free_session_slots(struct nfsd4_session *ses) |
504 | { |
505 | int i; |
506 | |
507 | - for (i = 0; i < ses->se_fchannel.maxreqs; i++) |
508 | + for (i = 0; i < ses->se_fchannel.maxreqs; i++) { |
509 | + free_svc_cred(&ses->se_slots[i]->sl_cred); |
510 | kfree(ses->se_slots[i]); |
511 | + } |
512 | } |
513 | |
514 | /* |
515 | @@ -2344,14 +2346,18 @@ nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) |
516 | |
517 | dprintk("--> %s slot %p\n", __func__, slot); |
518 | |
519 | + slot->sl_flags |= NFSD4_SLOT_INITIALIZED; |
520 | slot->sl_opcnt = resp->opcnt; |
521 | slot->sl_status = resp->cstate.status; |
522 | + free_svc_cred(&slot->sl_cred); |
523 | + copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred); |
524 | |
525 | - slot->sl_flags |= NFSD4_SLOT_INITIALIZED; |
526 | - if (nfsd4_not_cached(resp)) { |
527 | - slot->sl_datalen = 0; |
528 | + if (!nfsd4_cache_this(resp)) { |
529 | + slot->sl_flags &= ~NFSD4_SLOT_CACHED; |
530 | return; |
531 | } |
532 | + slot->sl_flags |= NFSD4_SLOT_CACHED; |
533 | + |
534 | base = resp->cstate.data_offset; |
535 | slot->sl_datalen = buf->len - base; |
536 | if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen)) |
537 | @@ -2378,8 +2384,16 @@ nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args, |
538 | op = &args->ops[resp->opcnt - 1]; |
539 | nfsd4_encode_operation(resp, op); |
540 | |
541 | - /* Return nfserr_retry_uncached_rep in next operation. */ |
542 | - if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) { |
543 | + if (slot->sl_flags & NFSD4_SLOT_CACHED) |
544 | + return op->status; |
545 | + if (args->opcnt == 1) { |
546 | + /* |
547 | + * The original operation wasn't a solo sequence--we |
548 | + * always cache those--so this retry must not match the |
549 | + * original: |
550 | + */ |
551 | + op->status = nfserr_seq_false_retry; |
552 | + } else { |
553 | op = &args->ops[resp->opcnt++]; |
554 | op->status = nfserr_retry_uncached_rep; |
555 | nfsd4_encode_operation(resp, op); |
556 | @@ -3039,6 +3053,34 @@ static bool nfsd4_request_too_big(struct svc_rqst *rqstp, |
557 | return xb->len > session->se_fchannel.maxreq_sz; |
558 | } |
559 | |
560 | +static bool replay_matches_cache(struct svc_rqst *rqstp, |
561 | + struct nfsd4_sequence *seq, struct nfsd4_slot *slot) |
562 | +{ |
563 | + struct nfsd4_compoundargs *argp = rqstp->rq_argp; |
564 | + |
565 | + if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) != |
566 | + (bool)seq->cachethis) |
567 | + return false; |
568 | + /* |
569 | + * If there's an error than the reply can have fewer ops than |
570 | + * the call. But if we cached a reply with *more* ops than the |
571 | + * call you're sending us now, then this new call is clearly not |
572 | + * really a replay of the old one: |
573 | + */ |
574 | + if (slot->sl_opcnt < argp->opcnt) |
575 | + return false; |
576 | + /* This is the only check explicitly called by spec: */ |
577 | + if (!same_creds(&rqstp->rq_cred, &slot->sl_cred)) |
578 | + return false; |
579 | + /* |
580 | + * There may be more comparisons we could actually do, but the |
581 | + * spec doesn't require us to catch every case where the calls |
582 | + * don't match (that would require caching the call as well as |
583 | + * the reply), so we don't bother. |
584 | + */ |
585 | + return true; |
586 | +} |
587 | + |
588 | __be32 |
589 | nfsd4_sequence(struct svc_rqst *rqstp, |
590 | struct nfsd4_compound_state *cstate, |
591 | @@ -3098,6 +3140,9 @@ nfsd4_sequence(struct svc_rqst *rqstp, |
592 | status = nfserr_seq_misordered; |
593 | if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) |
594 | goto out_put_session; |
595 | + status = nfserr_seq_false_retry; |
596 | + if (!replay_matches_cache(rqstp, seq, slot)) |
597 | + goto out_put_session; |
598 | cstate->slot = slot; |
599 | cstate->session = session; |
600 | cstate->clp = clp; |
601 | diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h |
602 | index 005c911b34ac..86aa92d200e1 100644 |
603 | --- a/fs/nfsd/state.h |
604 | +++ b/fs/nfsd/state.h |
605 | @@ -169,11 +169,13 @@ static inline struct nfs4_delegation *delegstateid(struct nfs4_stid *s) |
606 | struct nfsd4_slot { |
607 | u32 sl_seqid; |
608 | __be32 sl_status; |
609 | + struct svc_cred sl_cred; |
610 | u32 sl_datalen; |
611 | u16 sl_opcnt; |
612 | #define NFSD4_SLOT_INUSE (1 << 0) |
613 | #define NFSD4_SLOT_CACHETHIS (1 << 1) |
614 | #define NFSD4_SLOT_INITIALIZED (1 << 2) |
615 | +#define NFSD4_SLOT_CACHED (1 << 3) |
616 | u8 sl_flags; |
617 | char sl_data[]; |
618 | }; |
619 | diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h |
620 | index 8fda4abdf3b1..448e74e32344 100644 |
621 | --- a/fs/nfsd/xdr4.h |
622 | +++ b/fs/nfsd/xdr4.h |
623 | @@ -645,9 +645,18 @@ static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp) |
624 | return resp->opcnt == 1 && args->ops[0].opnum == OP_SEQUENCE; |
625 | } |
626 | |
627 | -static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp) |
628 | +/* |
629 | + * The session reply cache only needs to cache replies that the client |
630 | + * actually asked us to. But it's almost free for us to cache compounds |
631 | + * consisting of only a SEQUENCE op, so we may as well cache those too. |
632 | + * Also, the protocol doesn't give us a convenient response in the case |
633 | + * of a replay of a solo SEQUENCE op that wasn't cached |
634 | + * (RETRY_UNCACHED_REP can only be returned in the second op of a |
635 | + * compound). |
636 | + */ |
637 | +static inline bool nfsd4_cache_this(struct nfsd4_compoundres *resp) |
638 | { |
639 | - return !(resp->cstate.slot->sl_flags & NFSD4_SLOT_CACHETHIS) |
640 | + return (resp->cstate.slot->sl_flags & NFSD4_SLOT_CACHETHIS) |
641 | || nfsd4_is_solo_sequence(resp); |
642 | } |
643 | |
644 | diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h |
645 | index 8663f216c563..2d6100edf204 100644 |
646 | --- a/include/linux/hid-debug.h |
647 | +++ b/include/linux/hid-debug.h |
648 | @@ -24,7 +24,10 @@ |
649 | |
650 | #ifdef CONFIG_DEBUG_FS |
651 | |
652 | +#include <linux/kfifo.h> |
653 | + |
654 | #define HID_DEBUG_BUFSIZE 512 |
655 | +#define HID_DEBUG_FIFOSIZE 512 |
656 | |
657 | void hid_dump_input(struct hid_device *, struct hid_usage *, __s32); |
658 | void hid_dump_report(struct hid_device *, int , u8 *, int); |
659 | @@ -37,11 +40,8 @@ void hid_debug_init(void); |
660 | void hid_debug_exit(void); |
661 | void hid_debug_event(struct hid_device *, char *); |
662 | |
663 | - |
664 | struct hid_debug_list { |
665 | - char *hid_debug_buf; |
666 | - int head; |
667 | - int tail; |
668 | + DECLARE_KFIFO_PTR(hid_debug_fifo, char); |
669 | struct fasync_struct *fasync; |
670 | struct hid_device *hdev; |
671 | struct list_head node; |
672 | @@ -64,4 +64,3 @@ struct hid_debug_list { |
673 | #endif |
674 | |
675 | #endif |
676 | - |
677 | diff --git a/kernel/signal.c b/kernel/signal.c |
678 | index 049929a5f4ce..798b8f495ae2 100644 |
679 | --- a/kernel/signal.c |
680 | +++ b/kernel/signal.c |
681 | @@ -696,6 +696,48 @@ static inline bool si_fromuser(const struct siginfo *info) |
682 | (!is_si_special(info) && SI_FROMUSER(info)); |
683 | } |
684 | |
685 | +static int dequeue_synchronous_signal(siginfo_t *info) |
686 | +{ |
687 | + struct task_struct *tsk = current; |
688 | + struct sigpending *pending = &tsk->pending; |
689 | + struct sigqueue *q, *sync = NULL; |
690 | + |
691 | + /* |
692 | + * Might a synchronous signal be in the queue? |
693 | + */ |
694 | + if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK)) |
695 | + return 0; |
696 | + |
697 | + /* |
698 | + * Return the first synchronous signal in the queue. |
699 | + */ |
700 | + list_for_each_entry(q, &pending->list, list) { |
701 | + /* Synchronous signals have a postive si_code */ |
702 | + if ((q->info.si_code > SI_USER) && |
703 | + (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) { |
704 | + sync = q; |
705 | + goto next; |
706 | + } |
707 | + } |
708 | + return 0; |
709 | +next: |
710 | + /* |
711 | + * Check if there is another siginfo for the same signal. |
712 | + */ |
713 | + list_for_each_entry_continue(q, &pending->list, list) { |
714 | + if (q->info.si_signo == sync->info.si_signo) |
715 | + goto still_pending; |
716 | + } |
717 | + |
718 | + sigdelset(&pending->signal, sync->info.si_signo); |
719 | + recalc_sigpending(); |
720 | +still_pending: |
721 | + list_del_init(&sync->list); |
722 | + copy_siginfo(info, &sync->info); |
723 | + __sigqueue_free(sync); |
724 | + return info->si_signo; |
725 | +} |
726 | + |
727 | /* |
728 | * called with RCU read lock from check_kill_permission() |
729 | */ |
730 | @@ -2198,6 +2240,11 @@ relock: |
731 | goto relock; |
732 | } |
733 | |
734 | + /* Has this task already been marked for death? */ |
735 | + ksig->info.si_signo = signr = SIGKILL; |
736 | + if (signal_group_exit(signal)) |
737 | + goto fatal; |
738 | + |
739 | for (;;) { |
740 | struct k_sigaction *ka; |
741 | |
742 | @@ -2211,7 +2258,15 @@ relock: |
743 | goto relock; |
744 | } |
745 | |
746 | - signr = dequeue_signal(current, ¤t->blocked, &ksig->info); |
747 | + /* |
748 | + * Signals generated by the execution of an instruction |
749 | + * need to be delivered before any other pending signals |
750 | + * so that the instruction pointer in the signal stack |
751 | + * frame points to the faulting instruction. |
752 | + */ |
753 | + signr = dequeue_synchronous_signal(&ksig->info); |
754 | + if (!signr) |
755 | + signr = dequeue_signal(current, ¤t->blocked, &ksig->info); |
756 | |
757 | if (!signr) |
758 | break; /* will return 0 */ |
759 | @@ -2293,6 +2348,7 @@ relock: |
760 | continue; |
761 | } |
762 | |
763 | + fatal: |
764 | spin_unlock_irq(&sighand->siglock); |
765 | |
766 | /* |
767 | diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c |
768 | index 08ce36147c4c..8f7883b7d717 100644 |
769 | --- a/net/batman-adv/hard-interface.c |
770 | +++ b/net/batman-adv/hard-interface.c |
771 | @@ -19,7 +19,6 @@ |
772 | #include "main.h" |
773 | |
774 | #include <linux/atomic.h> |
775 | -#include <linux/bug.h> |
776 | #include <linux/byteorder/generic.h> |
777 | #include <linux/errno.h> |
778 | #include <linux/fs.h> |
779 | @@ -172,8 +171,10 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev) |
780 | parent_dev = __dev_get_by_index((struct net *)parent_net, |
781 | dev_get_iflink(net_dev)); |
782 | /* if we got a NULL parent_dev there is something broken.. */ |
783 | - if (WARN(!parent_dev, "Cannot find parent device")) |
784 | + if (!parent_dev) { |
785 | + pr_err("Cannot find parent device\n"); |
786 | return false; |
787 | + } |
788 | |
789 | if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net)) |
790 | return false; |
791 | diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c |
792 | index 05bc176decf0..835af771a9fd 100644 |
793 | --- a/net/batman-adv/soft-interface.c |
794 | +++ b/net/batman-adv/soft-interface.c |
795 | @@ -211,6 +211,8 @@ static int batadv_interface_tx(struct sk_buff *skb, |
796 | |
797 | netif_trans_update(soft_iface); |
798 | vid = batadv_get_vid(skb, 0); |
799 | + |
800 | + skb_reset_mac_header(skb); |
801 | ethhdr = eth_hdr(skb); |
802 | |
803 | switch (ntohs(ethhdr->h_proto)) { |
804 | diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c |
805 | index 5a8075d9f2e7..93eb606f7628 100644 |
806 | --- a/net/ceph/messenger.c |
807 | +++ b/net/ceph/messenger.c |
808 | @@ -3186,9 +3186,10 @@ void ceph_con_keepalive(struct ceph_connection *con) |
809 | dout("con_keepalive %p\n", con); |
810 | mutex_lock(&con->mutex); |
811 | clear_standby(con); |
812 | + con_flag_set(con, CON_FLAG_KEEPALIVE_PENDING); |
813 | mutex_unlock(&con->mutex); |
814 | - if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 && |
815 | - con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) |
816 | + |
817 | + if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) |
818 | queue_con(con); |
819 | } |
820 | EXPORT_SYMBOL(ceph_con_keepalive); |
821 | diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c |
822 | index 6a0fb9dbc1ba..f8de166b788a 100644 |
823 | --- a/net/mac80211/tx.c |
824 | +++ b/net/mac80211/tx.c |
825 | @@ -1852,9 +1852,16 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, |
826 | int head_need, bool may_encrypt) |
827 | { |
828 | struct ieee80211_local *local = sdata->local; |
829 | + struct ieee80211_hdr *hdr; |
830 | + bool enc_tailroom; |
831 | int tail_need = 0; |
832 | |
833 | - if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) { |
834 | + hdr = (struct ieee80211_hdr *) skb->data; |
835 | + enc_tailroom = may_encrypt && |
836 | + (sdata->crypto_tx_tailroom_needed_cnt || |
837 | + ieee80211_is_mgmt(hdr->frame_control)); |
838 | + |
839 | + if (enc_tailroom) { |
840 | tail_need = IEEE80211_ENCRYPT_TAILROOM; |
841 | tail_need -= skb_tailroom(skb); |
842 | tail_need = max_t(int, tail_need, 0); |
843 | @@ -1862,8 +1869,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, |
844 | |
845 | if (skb_cloned(skb) && |
846 | (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) || |
847 | - !skb_clone_writable(skb, ETH_HLEN) || |
848 | - (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt))) |
849 | + !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom)) |
850 | I802_DEBUG_INC(local->tx_expand_skb_head_cloned); |
851 | else if (head_need || tail_need) |
852 | I802_DEBUG_INC(local->tx_expand_skb_head); |
853 | diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c |
854 | index 026770884d46..f6f91c3b2de0 100644 |
855 | --- a/net/xfrm/xfrm_user.c |
856 | +++ b/net/xfrm/xfrm_user.c |
857 | @@ -1408,10 +1408,15 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family) |
858 | if (!ut[i].family) |
859 | ut[i].family = family; |
860 | |
861 | - if ((ut[i].mode == XFRM_MODE_TRANSPORT) && |
862 | - (ut[i].family != prev_family)) |
863 | - return -EINVAL; |
864 | - |
865 | + switch (ut[i].mode) { |
866 | + case XFRM_MODE_TUNNEL: |
867 | + case XFRM_MODE_BEET: |
868 | + break; |
869 | + default: |
870 | + if (ut[i].family != prev_family) |
871 | + return -EINVAL; |
872 | + break; |
873 | + } |
874 | if (ut[i].mode >= XFRM_MODE_MAX) |
875 | return -EINVAL; |
876 | |
877 | diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c |
878 | index 57d0d871dcf7..bb9988914a56 100644 |
879 | --- a/samples/mei/mei-amt-version.c |
880 | +++ b/samples/mei/mei-amt-version.c |
881 | @@ -117,7 +117,7 @@ static bool mei_init(struct mei *me, const uuid_le *guid, |
882 | |
883 | me->verbose = verbose; |
884 | |
885 | - me->fd = open("/dev/mei", O_RDWR); |
886 | + me->fd = open("/dev/mei0", O_RDWR); |
887 | if (me->fd == -1) { |
888 | mei_err(me, "Cannot establish a handle to the Intel MEI driver\n"); |
889 | goto err; |