Contents of /trunk/kernel-alx-legacy/patches-4.9/0337-4.9.238-all-fixes.patch
Parent Directory | Revision Log
Revision 3639 -
(show annotations)
(download)
Mon Oct 24 14:07:19 2022 UTC (23 months ago) by niro
File size: 149144 byte(s)
-linux-4.9.238
1 | diff --git a/Documentation/DocBook/libata.tmpl b/Documentation/DocBook/libata.tmpl |
2 | index d7fcdc5a43792..9b55778ab024f 100644 |
3 | --- a/Documentation/DocBook/libata.tmpl |
4 | +++ b/Documentation/DocBook/libata.tmpl |
5 | @@ -324,7 +324,7 @@ Many legacy IDE drivers use ata_bmdma_status() as the bmdma_status() hook. |
6 | |
7 | <sect2><title>High-level taskfile hooks</title> |
8 | <programlisting> |
9 | -void (*qc_prep) (struct ata_queued_cmd *qc); |
10 | +enum ata_completion_errors (*qc_prep) (struct ata_queued_cmd *qc); |
11 | int (*qc_issue) (struct ata_queued_cmd *qc); |
12 | </programlisting> |
13 | |
14 | diff --git a/Documentation/devicetree/bindings/sound/wm8994.txt b/Documentation/devicetree/bindings/sound/wm8994.txt |
15 | index 68c4e8d96bed6..b309de00cd836 100644 |
16 | --- a/Documentation/devicetree/bindings/sound/wm8994.txt |
17 | +++ b/Documentation/devicetree/bindings/sound/wm8994.txt |
18 | @@ -14,9 +14,15 @@ Required properties: |
19 | - #gpio-cells : Must be 2. The first cell is the pin number and the |
20 | second cell is used to specify optional parameters (currently unused). |
21 | |
22 | - - AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, CPVDD-supply, |
23 | - SPKVDD1-supply, SPKVDD2-supply : power supplies for the device, as covered |
24 | - in Documentation/devicetree/bindings/regulator/regulator.txt |
25 | + - power supplies for the device, as covered in |
26 | + Documentation/devicetree/bindings/regulator/regulator.txt, depending |
27 | + on compatible: |
28 | + - for wlf,wm1811 and wlf,wm8958: |
29 | + AVDD1-supply, AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, |
30 | + DCVDD-supply, CPVDD-supply, SPKVDD1-supply, SPKVDD2-supply |
31 | + - for wlf,wm8994: |
32 | + AVDD1-supply, AVDD2-supply, DBVDD-supply, DCVDD-supply, CPVDD-supply, |
33 | + SPKVDD1-supply, SPKVDD2-supply |
34 | |
35 | Optional properties: |
36 | |
37 | @@ -68,11 +74,11 @@ codec: wm8994@1a { |
38 | |
39 | lineout1-se; |
40 | |
41 | + AVDD1-supply = <®ulator>; |
42 | AVDD2-supply = <®ulator>; |
43 | CPVDD-supply = <®ulator>; |
44 | - DBVDD1-supply = <®ulator>; |
45 | - DBVDD2-supply = <®ulator>; |
46 | - DBVDD3-supply = <®ulator>; |
47 | + DBVDD-supply = <®ulator>; |
48 | + DCVDD-supply = <®ulator>; |
49 | SPKVDD1-supply = <®ulator>; |
50 | SPKVDD2-supply = <®ulator>; |
51 | }; |
52 | diff --git a/Makefile b/Makefile |
53 | index 3c78b28c6a0da..41a7d6626e354 100644 |
54 | --- a/Makefile |
55 | +++ b/Makefile |
56 | @@ -1,6 +1,6 @@ |
57 | VERSION = 4 |
58 | PATCHLEVEL = 9 |
59 | -SUBLEVEL = 237 |
60 | +SUBLEVEL = 238 |
61 | EXTRAVERSION = |
62 | NAME = Roaring Lionus |
63 | |
64 | diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c |
65 | index ea89a24f46000..cc0f924bbdd2d 100644 |
66 | --- a/arch/m68k/q40/config.c |
67 | +++ b/arch/m68k/q40/config.c |
68 | @@ -303,6 +303,7 @@ static int q40_get_rtc_pll(struct rtc_pll_info *pll) |
69 | { |
70 | int tmp = Q40_RTC_CTRL; |
71 | |
72 | + pll->pll_ctrl = 0; |
73 | pll->pll_value = tmp & Q40_RTC_PLL_MASK; |
74 | if (tmp & Q40_RTC_PLL_SIGN) |
75 | pll->pll_value = -pll->pll_value; |
76 | diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h |
77 | index bdd6dc18e65c6..941efd8783344 100644 |
78 | --- a/arch/mips/include/asm/cpu-type.h |
79 | +++ b/arch/mips/include/asm/cpu-type.h |
80 | @@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type) |
81 | case CPU_34K: |
82 | case CPU_1004K: |
83 | case CPU_74K: |
84 | + case CPU_1074K: |
85 | case CPU_M14KC: |
86 | case CPU_M14KEC: |
87 | case CPU_INTERAPTIV: |
88 | diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c |
89 | index a559908d180ec..ce49c2b9db7ee 100644 |
90 | --- a/arch/s390/kernel/setup.c |
91 | +++ b/arch/s390/kernel/setup.c |
92 | @@ -529,7 +529,7 @@ static struct notifier_block kdump_mem_nb = { |
93 | /* |
94 | * Make sure that the area behind memory_end is protected |
95 | */ |
96 | -static void reserve_memory_end(void) |
97 | +static void __init reserve_memory_end(void) |
98 | { |
99 | #ifdef CONFIG_CRASH_DUMP |
100 | if (ipl_info.type == IPL_TYPE_FCP_DUMP && |
101 | @@ -547,7 +547,7 @@ static void reserve_memory_end(void) |
102 | /* |
103 | * Make sure that oldmem, where the dump is stored, is protected |
104 | */ |
105 | -static void reserve_oldmem(void) |
106 | +static void __init reserve_oldmem(void) |
107 | { |
108 | #ifdef CONFIG_CRASH_DUMP |
109 | if (OLDMEM_BASE) |
110 | @@ -559,7 +559,7 @@ static void reserve_oldmem(void) |
111 | /* |
112 | * Make sure that oldmem, where the dump is stored, is protected |
113 | */ |
114 | -static void remove_oldmem(void) |
115 | +static void __init remove_oldmem(void) |
116 | { |
117 | #ifdef CONFIG_CRASH_DUMP |
118 | if (OLDMEM_BASE) |
119 | diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h |
120 | index 4af16acc001a3..204a5ce65afda 100644 |
121 | --- a/arch/x86/include/asm/nospec-branch.h |
122 | +++ b/arch/x86/include/asm/nospec-branch.h |
123 | @@ -321,7 +321,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear); |
124 | * combination with microcode which triggers a CPU buffer flush when the |
125 | * instruction is executed. |
126 | */ |
127 | -static inline void mds_clear_cpu_buffers(void) |
128 | +static __always_inline void mds_clear_cpu_buffers(void) |
129 | { |
130 | static const u16 ds = __KERNEL_DS; |
131 | |
132 | @@ -342,7 +342,7 @@ static inline void mds_clear_cpu_buffers(void) |
133 | * |
134 | * Clear CPU buffers if the corresponding static key is enabled |
135 | */ |
136 | -static inline void mds_user_clear_cpu_buffers(void) |
137 | +static __always_inline void mds_user_clear_cpu_buffers(void) |
138 | { |
139 | if (static_branch_likely(&mds_user_clear)) |
140 | mds_clear_cpu_buffers(); |
141 | diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h |
142 | index c50d6dcf4a227..4e7273e176cb7 100644 |
143 | --- a/arch/x86/include/asm/pkeys.h |
144 | +++ b/arch/x86/include/asm/pkeys.h |
145 | @@ -3,6 +3,11 @@ |
146 | |
147 | #define ARCH_DEFAULT_PKEY 0 |
148 | |
149 | +/* |
150 | + * If more than 16 keys are ever supported, a thorough audit |
151 | + * will be necessary to ensure that the types that store key |
152 | + * numbers and masks have sufficient capacity. |
153 | + */ |
154 | #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1) |
155 | |
156 | extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, |
157 | diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c |
158 | index e9d7f461b7fa5..dbd396c913488 100644 |
159 | --- a/arch/x86/kernel/fpu/xstate.c |
160 | +++ b/arch/x86/kernel/fpu/xstate.c |
161 | @@ -871,8 +871,6 @@ const void *get_xsave_field_ptr(int xsave_state) |
162 | |
163 | #ifdef CONFIG_ARCH_HAS_PKEYS |
164 | |
165 | -#define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2) |
166 | -#define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1) |
167 | /* |
168 | * This will go out and modify PKRU register to set the access |
169 | * rights for @pkey to @init_val. |
170 | @@ -891,6 +889,13 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, |
171 | if (!boot_cpu_has(X86_FEATURE_OSPKE)) |
172 | return -EINVAL; |
173 | |
174 | + /* |
175 | + * This code should only be called with valid 'pkey' |
176 | + * values originating from in-kernel users. Complain |
177 | + * if a bad value is observed. |
178 | + */ |
179 | + WARN_ON_ONCE(pkey >= arch_max_pkey()); |
180 | + |
181 | /* Set the bits we need in PKRU: */ |
182 | if (init_val & PKEY_DISABLE_ACCESS) |
183 | new_pkru_bits |= PKRU_AD_BIT; |
184 | diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h |
185 | index 756b14ecc957a..df1076b0eabf3 100644 |
186 | --- a/arch/x86/kvm/mmutrace.h |
187 | +++ b/arch/x86/kvm/mmutrace.h |
188 | @@ -336,7 +336,7 @@ TRACE_EVENT( |
189 | /* These depend on page entry type, so compute them now. */ |
190 | __field(bool, r) |
191 | __field(bool, x) |
192 | - __field(u8, u) |
193 | + __field(signed char, u) |
194 | ), |
195 | |
196 | TP_fast_assign( |
197 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
198 | index 6b7faa14c27bb..3c0f9be107e42 100644 |
199 | --- a/arch/x86/kvm/x86.c |
200 | +++ b/arch/x86/kvm/x86.c |
201 | @@ -4263,10 +4263,13 @@ long kvm_arch_vm_ioctl(struct file *filp, |
202 | r = -EFAULT; |
203 | if (copy_from_user(&u.ps, argp, sizeof u.ps)) |
204 | goto out; |
205 | + mutex_lock(&kvm->lock); |
206 | r = -ENXIO; |
207 | if (!kvm->arch.vpit) |
208 | - goto out; |
209 | + goto set_pit_out; |
210 | r = kvm_vm_ioctl_set_pit(kvm, &u.ps); |
211 | +set_pit_out: |
212 | + mutex_unlock(&kvm->lock); |
213 | break; |
214 | } |
215 | case KVM_GET_PIT2: { |
216 | @@ -4286,10 +4289,13 @@ long kvm_arch_vm_ioctl(struct file *filp, |
217 | r = -EFAULT; |
218 | if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) |
219 | goto out; |
220 | + mutex_lock(&kvm->lock); |
221 | r = -ENXIO; |
222 | if (!kvm->arch.vpit) |
223 | - goto out; |
224 | + goto set_pit2_out; |
225 | r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); |
226 | +set_pit2_out: |
227 | + mutex_unlock(&kvm->lock); |
228 | break; |
229 | } |
230 | case KVM_REINJECT_CONTROL: { |
231 | diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c |
232 | index 307b3e28f34ce..8781b5dc97f1c 100644 |
233 | --- a/drivers/acpi/ec.c |
234 | +++ b/drivers/acpi/ec.c |
235 | @@ -1049,29 +1049,21 @@ void acpi_ec_unblock_transactions(void) |
236 | /* -------------------------------------------------------------------------- |
237 | Event Management |
238 | -------------------------------------------------------------------------- */ |
239 | -static struct acpi_ec_query_handler * |
240 | -acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler) |
241 | -{ |
242 | - if (handler) |
243 | - kref_get(&handler->kref); |
244 | - return handler; |
245 | -} |
246 | - |
247 | static struct acpi_ec_query_handler * |
248 | acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value) |
249 | { |
250 | struct acpi_ec_query_handler *handler; |
251 | - bool found = false; |
252 | |
253 | mutex_lock(&ec->mutex); |
254 | list_for_each_entry(handler, &ec->list, node) { |
255 | if (value == handler->query_bit) { |
256 | - found = true; |
257 | - break; |
258 | + kref_get(&handler->kref); |
259 | + mutex_unlock(&ec->mutex); |
260 | + return handler; |
261 | } |
262 | } |
263 | mutex_unlock(&ec->mutex); |
264 | - return found ? acpi_ec_get_query_handler(handler) : NULL; |
265 | + return NULL; |
266 | } |
267 | |
268 | static void acpi_ec_query_handler_release(struct kref *kref) |
269 | diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c |
270 | index ed6a30cd681a0..98581ae397c12 100644 |
271 | --- a/drivers/ata/acard-ahci.c |
272 | +++ b/drivers/ata/acard-ahci.c |
273 | @@ -72,7 +72,7 @@ struct acard_sg { |
274 | __le32 size; /* bit 31 (EOT) max==0x10000 (64k) */ |
275 | }; |
276 | |
277 | -static void acard_ahci_qc_prep(struct ata_queued_cmd *qc); |
278 | +static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc); |
279 | static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc); |
280 | static int acard_ahci_port_start(struct ata_port *ap); |
281 | static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
282 | @@ -257,7 +257,7 @@ static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) |
283 | return si; |
284 | } |
285 | |
286 | -static void acard_ahci_qc_prep(struct ata_queued_cmd *qc) |
287 | +static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc) |
288 | { |
289 | struct ata_port *ap = qc->ap; |
290 | struct ahci_port_priv *pp = ap->private_data; |
291 | @@ -295,6 +295,8 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc) |
292 | opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; |
293 | |
294 | ahci_fill_cmd_slot(pp, qc->tag, opts); |
295 | + |
296 | + return AC_ERR_OK; |
297 | } |
298 | |
299 | static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc) |
300 | diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c |
301 | index 1610fff19bb39..984260902d0be 100644 |
302 | --- a/drivers/ata/libahci.c |
303 | +++ b/drivers/ata/libahci.c |
304 | @@ -73,7 +73,7 @@ static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); |
305 | static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc); |
306 | static int ahci_port_start(struct ata_port *ap); |
307 | static void ahci_port_stop(struct ata_port *ap); |
308 | -static void ahci_qc_prep(struct ata_queued_cmd *qc); |
309 | +static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc); |
310 | static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc); |
311 | static void ahci_freeze(struct ata_port *ap); |
312 | static void ahci_thaw(struct ata_port *ap); |
313 | @@ -1607,7 +1607,7 @@ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc) |
314 | return sata_pmp_qc_defer_cmd_switch(qc); |
315 | } |
316 | |
317 | -static void ahci_qc_prep(struct ata_queued_cmd *qc) |
318 | +static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc) |
319 | { |
320 | struct ata_port *ap = qc->ap; |
321 | struct ahci_port_priv *pp = ap->private_data; |
322 | @@ -1643,6 +1643,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc) |
323 | opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; |
324 | |
325 | ahci_fill_cmd_slot(pp, qc->tag, opts); |
326 | + |
327 | + return AC_ERR_OK; |
328 | } |
329 | |
330 | static void ahci_fbs_dec_intr(struct ata_port *ap) |
331 | diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c |
332 | index 2aa10cd4c5b75..228a4cfb0e7d2 100644 |
333 | --- a/drivers/ata/libata-core.c |
334 | +++ b/drivers/ata/libata-core.c |
335 | @@ -4895,7 +4895,10 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc) |
336 | return ATA_DEFER_LINK; |
337 | } |
338 | |
339 | -void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } |
340 | +enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc) |
341 | +{ |
342 | + return AC_ERR_OK; |
343 | +} |
344 | |
345 | /** |
346 | * ata_sg_init - Associate command with scatter-gather table. |
347 | @@ -5313,7 +5316,9 @@ void ata_qc_issue(struct ata_queued_cmd *qc) |
348 | return; |
349 | } |
350 | |
351 | - ap->ops->qc_prep(qc); |
352 | + qc->err_mask |= ap->ops->qc_prep(qc); |
353 | + if (unlikely(qc->err_mask)) |
354 | + goto err; |
355 | trace_ata_qc_issue(qc); |
356 | qc->err_mask |= ap->ops->qc_issue(qc); |
357 | if (unlikely(qc->err_mask)) |
358 | diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c |
359 | index 0e2bc5b9a78c1..0edd83cae0fd0 100644 |
360 | --- a/drivers/ata/libata-sff.c |
361 | +++ b/drivers/ata/libata-sff.c |
362 | @@ -2742,12 +2742,14 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc) |
363 | * LOCKING: |
364 | * spin_lock_irqsave(host lock) |
365 | */ |
366 | -void ata_bmdma_qc_prep(struct ata_queued_cmd *qc) |
367 | +enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc) |
368 | { |
369 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
370 | - return; |
371 | + return AC_ERR_OK; |
372 | |
373 | ata_bmdma_fill_sg(qc); |
374 | + |
375 | + return AC_ERR_OK; |
376 | } |
377 | EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); |
378 | |
379 | @@ -2760,12 +2762,14 @@ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); |
380 | * LOCKING: |
381 | * spin_lock_irqsave(host lock) |
382 | */ |
383 | -void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) |
384 | +enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) |
385 | { |
386 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
387 | - return; |
388 | + return AC_ERR_OK; |
389 | |
390 | ata_bmdma_fill_sg_dumb(qc); |
391 | + |
392 | + return AC_ERR_OK; |
393 | } |
394 | EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep); |
395 | |
396 | diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c |
397 | index e347e7acd8edb..d8000bbd1e11d 100644 |
398 | --- a/drivers/ata/pata_macio.c |
399 | +++ b/drivers/ata/pata_macio.c |
400 | @@ -507,7 +507,7 @@ static int pata_macio_cable_detect(struct ata_port *ap) |
401 | return ATA_CBL_PATA40; |
402 | } |
403 | |
404 | -static void pata_macio_qc_prep(struct ata_queued_cmd *qc) |
405 | +static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc) |
406 | { |
407 | unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE); |
408 | struct ata_port *ap = qc->ap; |
409 | @@ -520,7 +520,7 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc) |
410 | __func__, qc, qc->flags, write, qc->dev->devno); |
411 | |
412 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
413 | - return; |
414 | + return AC_ERR_OK; |
415 | |
416 | table = (struct dbdma_cmd *) priv->dma_table_cpu; |
417 | |
418 | @@ -565,6 +565,8 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc) |
419 | table->command = cpu_to_le16(DBDMA_STOP); |
420 | |
421 | dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi); |
422 | + |
423 | + return AC_ERR_OK; |
424 | } |
425 | |
426 | |
427 | diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c |
428 | index f6c46e9a4dc0f..d7186a503e358 100644 |
429 | --- a/drivers/ata/pata_pxa.c |
430 | +++ b/drivers/ata/pata_pxa.c |
431 | @@ -59,25 +59,27 @@ static void pxa_ata_dma_irq(void *d) |
432 | /* |
433 | * Prepare taskfile for submission. |
434 | */ |
435 | -static void pxa_qc_prep(struct ata_queued_cmd *qc) |
436 | +static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc) |
437 | { |
438 | struct pata_pxa_data *pd = qc->ap->private_data; |
439 | struct dma_async_tx_descriptor *tx; |
440 | enum dma_transfer_direction dir; |
441 | |
442 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
443 | - return; |
444 | + return AC_ERR_OK; |
445 | |
446 | dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM); |
447 | tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir, |
448 | DMA_PREP_INTERRUPT); |
449 | if (!tx) { |
450 | ata_dev_err(qc->dev, "prep_slave_sg() failed\n"); |
451 | - return; |
452 | + return AC_ERR_OK; |
453 | } |
454 | tx->callback = pxa_ata_dma_irq; |
455 | tx->callback_param = pd; |
456 | pd->dma_cookie = dmaengine_submit(tx); |
457 | + |
458 | + return AC_ERR_OK; |
459 | } |
460 | |
461 | /* |
462 | diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c |
463 | index 64d682c6ee57e..11da13bea2c93 100644 |
464 | --- a/drivers/ata/pdc_adma.c |
465 | +++ b/drivers/ata/pdc_adma.c |
466 | @@ -132,7 +132,7 @@ static int adma_ata_init_one(struct pci_dev *pdev, |
467 | const struct pci_device_id *ent); |
468 | static int adma_port_start(struct ata_port *ap); |
469 | static void adma_port_stop(struct ata_port *ap); |
470 | -static void adma_qc_prep(struct ata_queued_cmd *qc); |
471 | +static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc); |
472 | static unsigned int adma_qc_issue(struct ata_queued_cmd *qc); |
473 | static int adma_check_atapi_dma(struct ata_queued_cmd *qc); |
474 | static void adma_freeze(struct ata_port *ap); |
475 | @@ -311,7 +311,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc) |
476 | return i; |
477 | } |
478 | |
479 | -static void adma_qc_prep(struct ata_queued_cmd *qc) |
480 | +static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc) |
481 | { |
482 | struct adma_port_priv *pp = qc->ap->private_data; |
483 | u8 *buf = pp->pkt; |
484 | @@ -322,7 +322,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc) |
485 | |
486 | adma_enter_reg_mode(qc->ap); |
487 | if (qc->tf.protocol != ATA_PROT_DMA) |
488 | - return; |
489 | + return AC_ERR_OK; |
490 | |
491 | buf[i++] = 0; /* Response flags */ |
492 | buf[i++] = 0; /* reserved */ |
493 | @@ -387,6 +387,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc) |
494 | printk("%s\n", obuf); |
495 | } |
496 | #endif |
497 | + return AC_ERR_OK; |
498 | } |
499 | |
500 | static inline void adma_packet_start(struct ata_queued_cmd *qc) |
501 | diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c |
502 | index a723ae9297831..100b5a3621ef3 100644 |
503 | --- a/drivers/ata/sata_fsl.c |
504 | +++ b/drivers/ata/sata_fsl.c |
505 | @@ -513,7 +513,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc, |
506 | return num_prde; |
507 | } |
508 | |
509 | -static void sata_fsl_qc_prep(struct ata_queued_cmd *qc) |
510 | +static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc) |
511 | { |
512 | struct ata_port *ap = qc->ap; |
513 | struct sata_fsl_port_priv *pp = ap->private_data; |
514 | @@ -559,6 +559,8 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc) |
515 | |
516 | VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n", |
517 | desc_info, ttl_dwords, num_prde); |
518 | + |
519 | + return AC_ERR_OK; |
520 | } |
521 | |
522 | static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc) |
523 | diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c |
524 | index e81a8217f1ff7..349a175f02675 100644 |
525 | --- a/drivers/ata/sata_inic162x.c |
526 | +++ b/drivers/ata/sata_inic162x.c |
527 | @@ -472,7 +472,7 @@ static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc) |
528 | prd[-1].flags |= PRD_END; |
529 | } |
530 | |
531 | -static void inic_qc_prep(struct ata_queued_cmd *qc) |
532 | +static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc) |
533 | { |
534 | struct inic_port_priv *pp = qc->ap->private_data; |
535 | struct inic_pkt *pkt = pp->pkt; |
536 | @@ -532,6 +532,8 @@ static void inic_qc_prep(struct ata_queued_cmd *qc) |
537 | inic_fill_sg(prd, qc); |
538 | |
539 | pp->cpb_tbl[0] = pp->pkt_dma; |
540 | + |
541 | + return AC_ERR_OK; |
542 | } |
543 | |
544 | static unsigned int inic_qc_issue(struct ata_queued_cmd *qc) |
545 | diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c |
546 | index 2f32782cea6d9..513ef298dd960 100644 |
547 | --- a/drivers/ata/sata_mv.c |
548 | +++ b/drivers/ata/sata_mv.c |
549 | @@ -605,8 +605,8 @@ static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) |
550 | static int mv_port_start(struct ata_port *ap); |
551 | static void mv_port_stop(struct ata_port *ap); |
552 | static int mv_qc_defer(struct ata_queued_cmd *qc); |
553 | -static void mv_qc_prep(struct ata_queued_cmd *qc); |
554 | -static void mv_qc_prep_iie(struct ata_queued_cmd *qc); |
555 | +static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc); |
556 | +static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc); |
557 | static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); |
558 | static int mv_hardreset(struct ata_link *link, unsigned int *class, |
559 | unsigned long deadline); |
560 | @@ -2044,7 +2044,7 @@ static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc) |
561 | * LOCKING: |
562 | * Inherited from caller. |
563 | */ |
564 | -static void mv_qc_prep(struct ata_queued_cmd *qc) |
565 | +static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc) |
566 | { |
567 | struct ata_port *ap = qc->ap; |
568 | struct mv_port_priv *pp = ap->private_data; |
569 | @@ -2056,15 +2056,15 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) |
570 | switch (tf->protocol) { |
571 | case ATA_PROT_DMA: |
572 | if (tf->command == ATA_CMD_DSM) |
573 | - return; |
574 | + return AC_ERR_OK; |
575 | /* fall-thru */ |
576 | case ATA_PROT_NCQ: |
577 | break; /* continue below */ |
578 | case ATA_PROT_PIO: |
579 | mv_rw_multi_errata_sata24(qc); |
580 | - return; |
581 | + return AC_ERR_OK; |
582 | default: |
583 | - return; |
584 | + return AC_ERR_OK; |
585 | } |
586 | |
587 | /* Fill in command request block |
588 | @@ -2111,12 +2111,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) |
589 | * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none |
590 | * of which are defined/used by Linux. If we get here, this |
591 | * driver needs work. |
592 | - * |
593 | - * FIXME: modify libata to give qc_prep a return value and |
594 | - * return error here. |
595 | */ |
596 | - BUG_ON(tf->command); |
597 | - break; |
598 | + ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__, |
599 | + tf->command); |
600 | + return AC_ERR_INVALID; |
601 | } |
602 | mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); |
603 | mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0); |
604 | @@ -2129,8 +2127,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) |
605 | mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ |
606 | |
607 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
608 | - return; |
609 | + return AC_ERR_OK; |
610 | mv_fill_sg(qc); |
611 | + |
612 | + return AC_ERR_OK; |
613 | } |
614 | |
615 | /** |
616 | @@ -2145,7 +2145,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) |
617 | * LOCKING: |
618 | * Inherited from caller. |
619 | */ |
620 | -static void mv_qc_prep_iie(struct ata_queued_cmd *qc) |
621 | +static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc) |
622 | { |
623 | struct ata_port *ap = qc->ap; |
624 | struct mv_port_priv *pp = ap->private_data; |
625 | @@ -2156,9 +2156,9 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) |
626 | |
627 | if ((tf->protocol != ATA_PROT_DMA) && |
628 | (tf->protocol != ATA_PROT_NCQ)) |
629 | - return; |
630 | + return AC_ERR_OK; |
631 | if (tf->command == ATA_CMD_DSM) |
632 | - return; /* use bmdma for this */ |
633 | + return AC_ERR_OK; /* use bmdma for this */ |
634 | |
635 | /* Fill in Gen IIE command request block */ |
636 | if (!(tf->flags & ATA_TFLAG_WRITE)) |
637 | @@ -2199,8 +2199,10 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) |
638 | ); |
639 | |
640 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
641 | - return; |
642 | + return AC_ERR_OK; |
643 | mv_fill_sg(qc); |
644 | + |
645 | + return AC_ERR_OK; |
646 | } |
647 | |
648 | /** |
649 | diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c |
650 | index 734f563b8d37b..bb098c4ae1775 100644 |
651 | --- a/drivers/ata/sata_nv.c |
652 | +++ b/drivers/ata/sata_nv.c |
653 | @@ -313,7 +313,7 @@ static void nv_ck804_freeze(struct ata_port *ap); |
654 | static void nv_ck804_thaw(struct ata_port *ap); |
655 | static int nv_adma_slave_config(struct scsi_device *sdev); |
656 | static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); |
657 | -static void nv_adma_qc_prep(struct ata_queued_cmd *qc); |
658 | +static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc); |
659 | static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc); |
660 | static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance); |
661 | static void nv_adma_irq_clear(struct ata_port *ap); |
662 | @@ -335,7 +335,7 @@ static void nv_mcp55_freeze(struct ata_port *ap); |
663 | static void nv_swncq_error_handler(struct ata_port *ap); |
664 | static int nv_swncq_slave_config(struct scsi_device *sdev); |
665 | static int nv_swncq_port_start(struct ata_port *ap); |
666 | -static void nv_swncq_qc_prep(struct ata_queued_cmd *qc); |
667 | +static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc); |
668 | static void nv_swncq_fill_sg(struct ata_queued_cmd *qc); |
669 | static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc); |
670 | static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis); |
671 | @@ -1382,7 +1382,7 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc) |
672 | return 1; |
673 | } |
674 | |
675 | -static void nv_adma_qc_prep(struct ata_queued_cmd *qc) |
676 | +static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc) |
677 | { |
678 | struct nv_adma_port_priv *pp = qc->ap->private_data; |
679 | struct nv_adma_cpb *cpb = &pp->cpb[qc->tag]; |
680 | @@ -1394,7 +1394,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc) |
681 | (qc->flags & ATA_QCFLAG_DMAMAP)); |
682 | nv_adma_register_mode(qc->ap); |
683 | ata_bmdma_qc_prep(qc); |
684 | - return; |
685 | + return AC_ERR_OK; |
686 | } |
687 | |
688 | cpb->resp_flags = NV_CPB_RESP_DONE; |
689 | @@ -1426,6 +1426,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc) |
690 | cpb->ctl_flags = ctl_flags; |
691 | wmb(); |
692 | cpb->resp_flags = 0; |
693 | + |
694 | + return AC_ERR_OK; |
695 | } |
696 | |
697 | static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) |
698 | @@ -1989,17 +1991,19 @@ static int nv_swncq_port_start(struct ata_port *ap) |
699 | return 0; |
700 | } |
701 | |
702 | -static void nv_swncq_qc_prep(struct ata_queued_cmd *qc) |
703 | +static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc) |
704 | { |
705 | if (qc->tf.protocol != ATA_PROT_NCQ) { |
706 | ata_bmdma_qc_prep(qc); |
707 | - return; |
708 | + return AC_ERR_OK; |
709 | } |
710 | |
711 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
712 | - return; |
713 | + return AC_ERR_OK; |
714 | |
715 | nv_swncq_fill_sg(qc); |
716 | + |
717 | + return AC_ERR_OK; |
718 | } |
719 | |
720 | static void nv_swncq_fill_sg(struct ata_queued_cmd *qc) |
721 | diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c |
722 | index 0fa211e2831cd..8ad8b376a642c 100644 |
723 | --- a/drivers/ata/sata_promise.c |
724 | +++ b/drivers/ata/sata_promise.c |
725 | @@ -155,7 +155,7 @@ static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 va |
726 | static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
727 | static int pdc_common_port_start(struct ata_port *ap); |
728 | static int pdc_sata_port_start(struct ata_port *ap); |
729 | -static void pdc_qc_prep(struct ata_queued_cmd *qc); |
730 | +static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc); |
731 | static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); |
732 | static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); |
733 | static int pdc_check_atapi_dma(struct ata_queued_cmd *qc); |
734 | @@ -649,7 +649,7 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc) |
735 | prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); |
736 | } |
737 | |
738 | -static void pdc_qc_prep(struct ata_queued_cmd *qc) |
739 | +static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc) |
740 | { |
741 | struct pdc_port_priv *pp = qc->ap->private_data; |
742 | unsigned int i; |
743 | @@ -681,6 +681,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc) |
744 | default: |
745 | break; |
746 | } |
747 | + |
748 | + return AC_ERR_OK; |
749 | } |
750 | |
751 | static int pdc_is_sataii_tx4(unsigned long flags) |
752 | diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c |
753 | index af987a4f33d19..80ff3bbfc8269 100644 |
754 | --- a/drivers/ata/sata_qstor.c |
755 | +++ b/drivers/ata/sata_qstor.c |
756 | @@ -116,7 +116,7 @@ static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); |
757 | static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
758 | static int qs_port_start(struct ata_port *ap); |
759 | static void qs_host_stop(struct ata_host *host); |
760 | -static void qs_qc_prep(struct ata_queued_cmd *qc); |
761 | +static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc); |
762 | static unsigned int qs_qc_issue(struct ata_queued_cmd *qc); |
763 | static int qs_check_atapi_dma(struct ata_queued_cmd *qc); |
764 | static void qs_freeze(struct ata_port *ap); |
765 | @@ -276,7 +276,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc) |
766 | return si; |
767 | } |
768 | |
769 | -static void qs_qc_prep(struct ata_queued_cmd *qc) |
770 | +static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc) |
771 | { |
772 | struct qs_port_priv *pp = qc->ap->private_data; |
773 | u8 dflags = QS_DF_PORD, *buf = pp->pkt; |
774 | @@ -288,7 +288,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc) |
775 | |
776 | qs_enter_reg_mode(qc->ap); |
777 | if (qc->tf.protocol != ATA_PROT_DMA) |
778 | - return; |
779 | + return AC_ERR_OK; |
780 | |
781 | nelem = qs_fill_sg(qc); |
782 | |
783 | @@ -311,6 +311,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc) |
784 | |
785 | /* frame information structure (FIS) */ |
786 | ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]); |
787 | + |
788 | + return AC_ERR_OK; |
789 | } |
790 | |
791 | static inline void qs_packet_start(struct ata_queued_cmd *qc) |
792 | diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c |
793 | index 07e146b772ead..301419325b975 100644 |
794 | --- a/drivers/ata/sata_rcar.c |
795 | +++ b/drivers/ata/sata_rcar.c |
796 | @@ -551,12 +551,14 @@ static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc) |
797 | prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND); |
798 | } |
799 | |
800 | -static void sata_rcar_qc_prep(struct ata_queued_cmd *qc) |
801 | +static enum ata_completion_errors sata_rcar_qc_prep(struct ata_queued_cmd *qc) |
802 | { |
803 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
804 | - return; |
805 | + return AC_ERR_OK; |
806 | |
807 | sata_rcar_bmdma_fill_sg(qc); |
808 | + |
809 | + return AC_ERR_OK; |
810 | } |
811 | |
812 | static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc) |
813 | diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c |
814 | index 29bcff086bced..73156a301912f 100644 |
815 | --- a/drivers/ata/sata_sil.c |
816 | +++ b/drivers/ata/sata_sil.c |
817 | @@ -119,7 +119,7 @@ static void sil_dev_config(struct ata_device *dev); |
818 | static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); |
819 | static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); |
820 | static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed); |
821 | -static void sil_qc_prep(struct ata_queued_cmd *qc); |
822 | +static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc); |
823 | static void sil_bmdma_setup(struct ata_queued_cmd *qc); |
824 | static void sil_bmdma_start(struct ata_queued_cmd *qc); |
825 | static void sil_bmdma_stop(struct ata_queued_cmd *qc); |
826 | @@ -333,12 +333,14 @@ static void sil_fill_sg(struct ata_queued_cmd *qc) |
827 | last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT); |
828 | } |
829 | |
830 | -static void sil_qc_prep(struct ata_queued_cmd *qc) |
831 | +static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc) |
832 | { |
833 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
834 | - return; |
835 | + return AC_ERR_OK; |
836 | |
837 | sil_fill_sg(qc); |
838 | + |
839 | + return AC_ERR_OK; |
840 | } |
841 | |
842 | static unsigned char sil_get_device_cache_line(struct pci_dev *pdev) |
843 | diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c |
844 | index 4b1995e2d044b..ffa3bf724054d 100644 |
845 | --- a/drivers/ata/sata_sil24.c |
846 | +++ b/drivers/ata/sata_sil24.c |
847 | @@ -336,7 +336,7 @@ static void sil24_dev_config(struct ata_device *dev); |
848 | static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val); |
849 | static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val); |
850 | static int sil24_qc_defer(struct ata_queued_cmd *qc); |
851 | -static void sil24_qc_prep(struct ata_queued_cmd *qc); |
852 | +static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc); |
853 | static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); |
854 | static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc); |
855 | static void sil24_pmp_attach(struct ata_port *ap); |
856 | @@ -840,7 +840,7 @@ static int sil24_qc_defer(struct ata_queued_cmd *qc) |
857 | return ata_std_qc_defer(qc); |
858 | } |
859 | |
860 | -static void sil24_qc_prep(struct ata_queued_cmd *qc) |
861 | +static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc) |
862 | { |
863 | struct ata_port *ap = qc->ap; |
864 | struct sil24_port_priv *pp = ap->private_data; |
865 | @@ -884,6 +884,8 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc) |
866 | |
867 | if (qc->flags & ATA_QCFLAG_DMAMAP) |
868 | sil24_fill_sg(qc, sge); |
869 | + |
870 | + return AC_ERR_OK; |
871 | } |
872 | |
873 | static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc) |
874 | diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c |
875 | index 48301cb3a3165..043953200b52a 100644 |
876 | --- a/drivers/ata/sata_sx4.c |
877 | +++ b/drivers/ata/sata_sx4.c |
878 | @@ -218,7 +218,7 @@ static void pdc_error_handler(struct ata_port *ap); |
879 | static void pdc_freeze(struct ata_port *ap); |
880 | static void pdc_thaw(struct ata_port *ap); |
881 | static int pdc_port_start(struct ata_port *ap); |
882 | -static void pdc20621_qc_prep(struct ata_queued_cmd *qc); |
883 | +static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc); |
884 | static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); |
885 | static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); |
886 | static unsigned int pdc20621_dimm_init(struct ata_host *host); |
887 | @@ -546,7 +546,7 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc) |
888 | VPRINTK("ata pkt buf ofs %u, mmio copied\n", i); |
889 | } |
890 | |
891 | -static void pdc20621_qc_prep(struct ata_queued_cmd *qc) |
892 | +static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc) |
893 | { |
894 | switch (qc->tf.protocol) { |
895 | case ATA_PROT_DMA: |
896 | @@ -558,6 +558,8 @@ static void pdc20621_qc_prep(struct ata_queued_cmd *qc) |
897 | default: |
898 | break; |
899 | } |
900 | + |
901 | + return AC_ERR_OK; |
902 | } |
903 | |
904 | static void __pdc20621_push_hdma(struct ata_queued_cmd *qc, |
905 | diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c |
906 | index 88819409e0beb..9d16743c49178 100644 |
907 | --- a/drivers/atm/eni.c |
908 | +++ b/drivers/atm/eni.c |
909 | @@ -2243,7 +2243,7 @@ static int eni_init_one(struct pci_dev *pci_dev, |
910 | |
911 | rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)); |
912 | if (rc < 0) |
913 | - goto out; |
914 | + goto err_disable; |
915 | |
916 | rc = -ENOMEM; |
917 | eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL); |
918 | diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c |
919 | index 100cd1de9939d..59e1e94d12c01 100644 |
920 | --- a/drivers/char/tlclk.c |
921 | +++ b/drivers/char/tlclk.c |
922 | @@ -777,17 +777,21 @@ static int __init tlclk_init(void) |
923 | { |
924 | int ret; |
925 | |
926 | + telclk_interrupt = (inb(TLCLK_REG7) & 0x0f); |
927 | + |
928 | + alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL); |
929 | + if (!alarm_events) { |
930 | + ret = -ENOMEM; |
931 | + goto out1; |
932 | + } |
933 | + |
934 | ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops); |
935 | if (ret < 0) { |
936 | printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major); |
937 | + kfree(alarm_events); |
938 | return ret; |
939 | } |
940 | tlclk_major = ret; |
941 | - alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL); |
942 | - if (!alarm_events) { |
943 | - ret = -ENOMEM; |
944 | - goto out1; |
945 | - } |
946 | |
947 | /* Read telecom clock IRQ number (Set by BIOS) */ |
948 | if (!request_region(TLCLK_BASE, 8, "telco_clock")) { |
949 | @@ -796,7 +800,6 @@ static int __init tlclk_init(void) |
950 | ret = -EBUSY; |
951 | goto out2; |
952 | } |
953 | - telclk_interrupt = (inb(TLCLK_REG7) & 0x0f); |
954 | |
955 | if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */ |
956 | printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n", |
957 | @@ -837,8 +840,8 @@ out3: |
958 | release_region(TLCLK_BASE, 8); |
959 | out2: |
960 | kfree(alarm_events); |
961 | -out1: |
962 | unregister_chrdev(tlclk_major, "telco_clock"); |
963 | +out1: |
964 | return ret; |
965 | } |
966 | |
967 | diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c |
968 | index 84eca4f93b828..0fad6cf37bab4 100644 |
969 | --- a/drivers/char/tpm/tpm_ibmvtpm.c |
970 | +++ b/drivers/char/tpm/tpm_ibmvtpm.c |
971 | @@ -550,6 +550,7 @@ static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance) |
972 | */ |
973 | while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) { |
974 | ibmvtpm_crq_process(crq, ibmvtpm); |
975 | + wake_up_interruptible(&ibmvtpm->crq_queue.wq); |
976 | crq->valid = 0; |
977 | smp_wmb(); |
978 | } |
979 | @@ -596,6 +597,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, |
980 | } |
981 | |
982 | crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr); |
983 | + init_waitqueue_head(&crq_q->wq); |
984 | ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr, |
985 | CRQ_RES_BUF_SIZE, |
986 | DMA_BIDIRECTIONAL); |
987 | @@ -648,6 +650,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, |
988 | if (rc) |
989 | goto init_irq_cleanup; |
990 | |
991 | + if (!wait_event_timeout(ibmvtpm->crq_queue.wq, |
992 | + ibmvtpm->rtce_buf != NULL, |
993 | + HZ)) { |
994 | + dev_err(dev, "CRQ response timed out\n"); |
995 | + goto init_irq_cleanup; |
996 | + } |
997 | + |
998 | return tpm_chip_register(chip); |
999 | init_irq_cleanup: |
1000 | do { |
1001 | diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h |
1002 | index 91dfe766d0800..4f6a124601db4 100644 |
1003 | --- a/drivers/char/tpm/tpm_ibmvtpm.h |
1004 | +++ b/drivers/char/tpm/tpm_ibmvtpm.h |
1005 | @@ -31,6 +31,7 @@ struct ibmvtpm_crq_queue { |
1006 | struct ibmvtpm_crq *crq_addr; |
1007 | u32 index; |
1008 | u32 num_entry; |
1009 | + wait_queue_head_t wq; |
1010 | }; |
1011 | |
1012 | struct ibmvtpm_dev { |
1013 | diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c |
1014 | index 255cafb18336a..9345eaf00938e 100644 |
1015 | --- a/drivers/clk/ti/adpll.c |
1016 | +++ b/drivers/clk/ti/adpll.c |
1017 | @@ -193,15 +193,8 @@ static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d, |
1018 | if (err) |
1019 | return NULL; |
1020 | } else { |
1021 | - const char *base_name = "adpll"; |
1022 | - char *buf; |
1023 | - |
1024 | - buf = devm_kzalloc(d->dev, 8 + 1 + strlen(base_name) + 1 + |
1025 | - strlen(postfix), GFP_KERNEL); |
1026 | - if (!buf) |
1027 | - return NULL; |
1028 | - sprintf(buf, "%08lx.%s.%s", d->pa, base_name, postfix); |
1029 | - name = buf; |
1030 | + name = devm_kasprintf(d->dev, GFP_KERNEL, "%08lx.adpll.%s", |
1031 | + d->pa, postfix); |
1032 | } |
1033 | |
1034 | return name; |
1035 | diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c |
1036 | index 546bb180f5a44..8202e49ac64cd 100644 |
1037 | --- a/drivers/clocksource/h8300_timer8.c |
1038 | +++ b/drivers/clocksource/h8300_timer8.c |
1039 | @@ -176,7 +176,7 @@ static int __init h8300_8timer_init(struct device_node *node) |
1040 | return PTR_ERR(clk); |
1041 | } |
1042 | |
1043 | - ret = ENXIO; |
1044 | + ret = -ENXIO; |
1045 | base = of_iomap(node, 0); |
1046 | if (!base) { |
1047 | pr_err("failed to map registers for clockevent\n"); |
1048 | diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c |
1049 | index b4fc65512aad3..c3b05676e0dbe 100644 |
1050 | --- a/drivers/cpufreq/powernv-cpufreq.c |
1051 | +++ b/drivers/cpufreq/powernv-cpufreq.c |
1052 | @@ -802,6 +802,7 @@ static struct notifier_block powernv_cpufreq_reboot_nb = { |
1053 | void powernv_cpufreq_work_fn(struct work_struct *work) |
1054 | { |
1055 | struct chip *chip = container_of(work, struct chip, throttle); |
1056 | + struct cpufreq_policy *policy; |
1057 | unsigned int cpu; |
1058 | cpumask_t mask; |
1059 | |
1060 | @@ -816,12 +817,14 @@ void powernv_cpufreq_work_fn(struct work_struct *work) |
1061 | chip->restore = false; |
1062 | for_each_cpu(cpu, &mask) { |
1063 | int index; |
1064 | - struct cpufreq_policy policy; |
1065 | |
1066 | - cpufreq_get_policy(&policy, cpu); |
1067 | - index = cpufreq_table_find_index_c(&policy, policy.cur); |
1068 | - powernv_cpufreq_target_index(&policy, index); |
1069 | - cpumask_andnot(&mask, &mask, policy.cpus); |
1070 | + policy = cpufreq_cpu_get(cpu); |
1071 | + if (!policy) |
1072 | + continue; |
1073 | + index = cpufreq_table_find_index_c(policy, policy->cur); |
1074 | + powernv_cpufreq_target_index(policy, index); |
1075 | + cpumask_andnot(&mask, &mask, policy->cpus); |
1076 | + cpufreq_cpu_put(policy); |
1077 | } |
1078 | out: |
1079 | put_online_cpus(); |
1080 | diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c |
1081 | index fe9dce0245bf0..a20267d93f8a4 100644 |
1082 | --- a/drivers/devfreq/tegra-devfreq.c |
1083 | +++ b/drivers/devfreq/tegra-devfreq.c |
1084 | @@ -79,6 +79,8 @@ |
1085 | |
1086 | #define KHZ 1000 |
1087 | |
1088 | +#define KHZ_MAX (ULONG_MAX / KHZ) |
1089 | + |
1090 | /* Assume that the bus is saturated if the utilization is 25% */ |
1091 | #define BUS_SATURATION_RATIO 25 |
1092 | |
1093 | @@ -179,7 +181,7 @@ struct tegra_actmon_emc_ratio { |
1094 | }; |
1095 | |
1096 | static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = { |
1097 | - { 1400000, ULONG_MAX }, |
1098 | + { 1400000, KHZ_MAX }, |
1099 | { 1200000, 750000 }, |
1100 | { 1100000, 600000 }, |
1101 | { 1000000, 500000 }, |
1102 | diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c |
1103 | index 4eaf92b2b8868..909739426f78c 100644 |
1104 | --- a/drivers/dma/tegra20-apb-dma.c |
1105 | +++ b/drivers/dma/tegra20-apb-dma.c |
1106 | @@ -1208,8 +1208,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) |
1107 | |
1108 | dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id); |
1109 | |
1110 | - if (tdc->busy) |
1111 | - tegra_dma_terminate_all(dc); |
1112 | + tegra_dma_terminate_all(dc); |
1113 | |
1114 | spin_lock_irqsave(&tdc->lock, flags); |
1115 | list_splice_init(&tdc->pending_sg_req, &sg_req_list); |
1116 | diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c |
1117 | index 9069fb8543196..514763dcc3758 100644 |
1118 | --- a/drivers/dma/xilinx/zynqmp_dma.c |
1119 | +++ b/drivers/dma/xilinx/zynqmp_dma.c |
1120 | @@ -125,10 +125,12 @@ |
1121 | /* Max transfer size per descriptor */ |
1122 | #define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000 |
1123 | |
1124 | +/* Max burst lengths */ |
1125 | +#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U |
1126 | +#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U |
1127 | + |
1128 | /* Reset values for data attributes */ |
1129 | #define ZYNQMP_DMA_AXCACHE_VAL 0xF |
1130 | -#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF |
1131 | -#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF |
1132 | |
1133 | #define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F |
1134 | |
1135 | @@ -527,17 +529,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status) |
1136 | |
1137 | static void zynqmp_dma_config(struct zynqmp_dma_chan *chan) |
1138 | { |
1139 | - u32 val; |
1140 | + u32 val, burst_val; |
1141 | |
1142 | val = readl(chan->regs + ZYNQMP_DMA_CTRL0); |
1143 | val |= ZYNQMP_DMA_POINT_TYPE_SG; |
1144 | writel(val, chan->regs + ZYNQMP_DMA_CTRL0); |
1145 | |
1146 | val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); |
1147 | + burst_val = __ilog2_u32(chan->src_burst_len); |
1148 | val = (val & ~ZYNQMP_DMA_ARLEN) | |
1149 | - (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST); |
1150 | + ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN); |
1151 | + burst_val = __ilog2_u32(chan->dst_burst_len); |
1152 | val = (val & ~ZYNQMP_DMA_AWLEN) | |
1153 | - (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST); |
1154 | + ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN); |
1155 | writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); |
1156 | } |
1157 | |
1158 | @@ -551,8 +555,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan, |
1159 | { |
1160 | struct zynqmp_dma_chan *chan = to_chan(dchan); |
1161 | |
1162 | - chan->src_burst_len = config->src_maxburst; |
1163 | - chan->dst_burst_len = config->dst_maxburst; |
1164 | + chan->src_burst_len = clamp(config->src_maxburst, 1U, |
1165 | + ZYNQMP_DMA_MAX_SRC_BURST_LEN); |
1166 | + chan->dst_burst_len = clamp(config->dst_maxburst, 1U, |
1167 | + ZYNQMP_DMA_MAX_DST_BURST_LEN); |
1168 | |
1169 | return 0; |
1170 | } |
1171 | @@ -968,8 +974,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, |
1172 | return PTR_ERR(chan->regs); |
1173 | |
1174 | chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64; |
1175 | - chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL; |
1176 | - chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL; |
1177 | + chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN; |
1178 | + chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN; |
1179 | err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width); |
1180 | if (err < 0) { |
1181 | dev_err(&pdev->dev, "missing xlnx,bus-width property\n"); |
1182 | diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c |
1183 | index 1b50e6c13fb3f..5fbf99d600587 100644 |
1184 | --- a/drivers/gpu/drm/amd/amdgpu/atom.c |
1185 | +++ b/drivers/gpu/drm/amd/amdgpu/atom.c |
1186 | @@ -748,8 +748,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) |
1187 | cjiffies = jiffies; |
1188 | if (time_after(cjiffies, ctx->last_jump_jiffies)) { |
1189 | cjiffies -= ctx->last_jump_jiffies; |
1190 | - if ((jiffies_to_msecs(cjiffies) > 5000)) { |
1191 | - DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n"); |
1192 | + if ((jiffies_to_msecs(cjiffies) > 10000)) { |
1193 | + DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n"); |
1194 | ctx->abort = true; |
1195 | } |
1196 | } else { |
1197 | diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c |
1198 | index 17db4b4749d5a..2e8479744ca4a 100644 |
1199 | --- a/drivers/gpu/drm/gma500/cdv_intel_display.c |
1200 | +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c |
1201 | @@ -415,6 +415,8 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit, |
1202 | struct gma_crtc *gma_crtc = to_gma_crtc(crtc); |
1203 | struct gma_clock_t clock; |
1204 | |
1205 | + memset(&clock, 0, sizeof(clock)); |
1206 | + |
1207 | switch (refclk) { |
1208 | case 27000: |
1209 | if (target < 200000) { |
1210 | diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c |
1211 | index 136d30484d023..46111e9ee9a25 100644 |
1212 | --- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c |
1213 | +++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c |
1214 | @@ -194,7 +194,7 @@ static int __init omapdss_boot_init(void) |
1215 | dss = of_find_matching_node(NULL, omapdss_of_match); |
1216 | |
1217 | if (dss == NULL || !of_device_is_available(dss)) |
1218 | - return 0; |
1219 | + goto put_node; |
1220 | |
1221 | omapdss_walk_device(dss, true); |
1222 | |
1223 | @@ -219,6 +219,8 @@ static int __init omapdss_boot_init(void) |
1224 | kfree(n); |
1225 | } |
1226 | |
1227 | +put_node: |
1228 | + of_node_put(dss); |
1229 | return 0; |
1230 | } |
1231 | |
1232 | diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c |
1233 | index 80d82c6792d8d..4fd7bfda2f9de 100644 |
1234 | --- a/drivers/i2c/i2c-core.c |
1235 | +++ b/drivers/i2c/i2c-core.c |
1236 | @@ -1858,8 +1858,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap) |
1237 | |
1238 | /* create pre-declared device nodes */ |
1239 | of_i2c_register_devices(adap); |
1240 | - i2c_acpi_register_devices(adap); |
1241 | i2c_acpi_install_space_handler(adap); |
1242 | + i2c_acpi_register_devices(adap); |
1243 | |
1244 | if (adap->nr < __i2c_first_dynamic_bus_num) |
1245 | i2c_scan_static_board_info(adap); |
1246 | diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c |
1247 | index a4f4cd4932657..bb0d728f4b76f 100644 |
1248 | --- a/drivers/infiniband/core/ucma.c |
1249 | +++ b/drivers/infiniband/core/ucma.c |
1250 | @@ -1296,13 +1296,13 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, |
1251 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) |
1252 | return -EFAULT; |
1253 | |
1254 | + if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE)) |
1255 | + return -EINVAL; |
1256 | + |
1257 | ctx = ucma_get_ctx(file, cmd.id); |
1258 | if (IS_ERR(ctx)) |
1259 | return PTR_ERR(ctx); |
1260 | |
1261 | - if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE)) |
1262 | - return -EINVAL; |
1263 | - |
1264 | optval = memdup_user((void __user *) (unsigned long) cmd.optval, |
1265 | cmd.optlen); |
1266 | if (IS_ERR(optval)) { |
1267 | diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c |
1268 | index a04a53acb24ff..a60e1c1b4b5e8 100644 |
1269 | --- a/drivers/infiniband/hw/cxgb4/cm.c |
1270 | +++ b/drivers/infiniband/hw/cxgb4/cm.c |
1271 | @@ -3245,7 +3245,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) |
1272 | if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) { |
1273 | err = pick_local_ipaddrs(dev, cm_id); |
1274 | if (err) |
1275 | - goto fail2; |
1276 | + goto fail3; |
1277 | } |
1278 | |
1279 | /* find a route */ |
1280 | @@ -3267,7 +3267,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) |
1281 | if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) { |
1282 | err = pick_local_ip6addrs(dev, cm_id); |
1283 | if (err) |
1284 | - goto fail2; |
1285 | + goto fail3; |
1286 | } |
1287 | |
1288 | /* find a route */ |
1289 | diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c |
1290 | index 282a726351c81..ce1a4817ab923 100644 |
1291 | --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c |
1292 | +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c |
1293 | @@ -2036,9 +2036,9 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, |
1294 | dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr); |
1295 | if (!dst || dst->error) { |
1296 | if (dst) { |
1297 | - dst_release(dst); |
1298 | i40iw_pr_err("ip6_route_output returned dst->error = %d\n", |
1299 | dst->error); |
1300 | + dst_release(dst); |
1301 | } |
1302 | return rc; |
1303 | } |
1304 | diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c |
1305 | index d6672127808b7..186da467060cc 100644 |
1306 | --- a/drivers/infiniband/sw/rxe/rxe_qp.c |
1307 | +++ b/drivers/infiniband/sw/rxe/rxe_qp.c |
1308 | @@ -597,15 +597,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, |
1309 | struct ib_gid_attr sgid_attr; |
1310 | |
1311 | if (mask & IB_QP_MAX_QP_RD_ATOMIC) { |
1312 | - int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic); |
1313 | + int max_rd_atomic = attr->max_rd_atomic ? |
1314 | + roundup_pow_of_two(attr->max_rd_atomic) : 0; |
1315 | |
1316 | qp->attr.max_rd_atomic = max_rd_atomic; |
1317 | atomic_set(&qp->req.rd_atomic, max_rd_atomic); |
1318 | } |
1319 | |
1320 | if (mask & IB_QP_MAX_DEST_RD_ATOMIC) { |
1321 | - int max_dest_rd_atomic = |
1322 | - __roundup_pow_of_two(attr->max_dest_rd_atomic); |
1323 | + int max_dest_rd_atomic = attr->max_dest_rd_atomic ? |
1324 | + roundup_pow_of_two(attr->max_dest_rd_atomic) : 0; |
1325 | |
1326 | qp->attr.max_dest_rd_atomic = max_dest_rd_atomic; |
1327 | |
1328 | diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h |
1329 | index 7fe7df56fa334..f0939fc1cfe55 100644 |
1330 | --- a/drivers/md/bcache/bcache.h |
1331 | +++ b/drivers/md/bcache/bcache.h |
1332 | @@ -547,6 +547,7 @@ struct cache_set { |
1333 | */ |
1334 | wait_queue_head_t btree_cache_wait; |
1335 | struct task_struct *btree_cache_alloc_lock; |
1336 | + spinlock_t btree_cannibalize_lock; |
1337 | |
1338 | /* |
1339 | * When we free a btree node, we increment the gen of the bucket the |
1340 | diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c |
1341 | index 764d519a7f1c6..26e56a9952d09 100644 |
1342 | --- a/drivers/md/bcache/btree.c |
1343 | +++ b/drivers/md/bcache/btree.c |
1344 | @@ -836,15 +836,17 @@ out: |
1345 | |
1346 | static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op) |
1347 | { |
1348 | - struct task_struct *old; |
1349 | - |
1350 | - old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current); |
1351 | - if (old && old != current) { |
1352 | + spin_lock(&c->btree_cannibalize_lock); |
1353 | + if (likely(c->btree_cache_alloc_lock == NULL)) { |
1354 | + c->btree_cache_alloc_lock = current; |
1355 | + } else if (c->btree_cache_alloc_lock != current) { |
1356 | if (op) |
1357 | prepare_to_wait(&c->btree_cache_wait, &op->wait, |
1358 | TASK_UNINTERRUPTIBLE); |
1359 | + spin_unlock(&c->btree_cannibalize_lock); |
1360 | return -EINTR; |
1361 | } |
1362 | + spin_unlock(&c->btree_cannibalize_lock); |
1363 | |
1364 | return 0; |
1365 | } |
1366 | @@ -879,10 +881,12 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, |
1367 | */ |
1368 | static void bch_cannibalize_unlock(struct cache_set *c) |
1369 | { |
1370 | + spin_lock(&c->btree_cannibalize_lock); |
1371 | if (c->btree_cache_alloc_lock == current) { |
1372 | c->btree_cache_alloc_lock = NULL; |
1373 | wake_up(&c->btree_cache_wait); |
1374 | } |
1375 | + spin_unlock(&c->btree_cannibalize_lock); |
1376 | } |
1377 | |
1378 | static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op, |
1379 | diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c |
1380 | index 95e9a33de06a2..263c0d987929e 100644 |
1381 | --- a/drivers/md/bcache/super.c |
1382 | +++ b/drivers/md/bcache/super.c |
1383 | @@ -1510,6 +1510,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) |
1384 | sema_init(&c->sb_write_mutex, 1); |
1385 | mutex_init(&c->bucket_lock); |
1386 | init_waitqueue_head(&c->btree_cache_wait); |
1387 | + spin_lock_init(&c->btree_cannibalize_lock); |
1388 | init_waitqueue_head(&c->bucket_wait); |
1389 | init_waitqueue_head(&c->gc_wait); |
1390 | sema_init(&c->uuid_write_mutex, 1); |
1391 | diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c |
1392 | index 37ebeef2bbd0b..43343091ea93e 100644 |
1393 | --- a/drivers/media/dvb-frontends/tda10071.c |
1394 | +++ b/drivers/media/dvb-frontends/tda10071.c |
1395 | @@ -483,10 +483,11 @@ static int tda10071_read_status(struct dvb_frontend *fe, enum fe_status *status) |
1396 | goto error; |
1397 | |
1398 | if (dev->delivery_system == SYS_DVBS) { |
1399 | - dev->dvbv3_ber = buf[0] << 24 | buf[1] << 16 | |
1400 | - buf[2] << 8 | buf[3] << 0; |
1401 | - dev->post_bit_error += buf[0] << 24 | buf[1] << 16 | |
1402 | - buf[2] << 8 | buf[3] << 0; |
1403 | + u32 bit_error = buf[0] << 24 | buf[1] << 16 | |
1404 | + buf[2] << 8 | buf[3] << 0; |
1405 | + |
1406 | + dev->dvbv3_ber = bit_error; |
1407 | + dev->post_bit_error += bit_error; |
1408 | c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; |
1409 | c->post_bit_error.stat[0].uvalue = dev->post_bit_error; |
1410 | dev->block_error += buf[4] << 8 | buf[5] << 0; |
1411 | diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c |
1412 | index 563b9636ab63b..803e0794ca131 100644 |
1413 | --- a/drivers/media/platform/ti-vpe/cal.c |
1414 | +++ b/drivers/media/platform/ti-vpe/cal.c |
1415 | @@ -690,12 +690,13 @@ static void pix_proc_config(struct cal_ctx *ctx) |
1416 | } |
1417 | |
1418 | static void cal_wr_dma_config(struct cal_ctx *ctx, |
1419 | - unsigned int width) |
1420 | + unsigned int width, unsigned int height) |
1421 | { |
1422 | u32 val; |
1423 | |
1424 | val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port)); |
1425 | set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK); |
1426 | + set_field(&val, height, CAL_WR_DMA_CTRL_YSIZE_MASK); |
1427 | set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT, |
1428 | CAL_WR_DMA_CTRL_DTAG_MASK); |
1429 | set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST, |
1430 | @@ -1321,7 +1322,8 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count) |
1431 | csi2_lane_config(ctx); |
1432 | csi2_ctx_config(ctx); |
1433 | pix_proc_config(ctx); |
1434 | - cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline); |
1435 | + cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline, |
1436 | + ctx->v_fmt.fmt.pix.height); |
1437 | cal_wr_dma_addr(ctx, addr); |
1438 | csi2_ppi_enable(ctx); |
1439 | |
1440 | diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c |
1441 | index ed9bcaf08d5ec..ddfaabd4c0813 100644 |
1442 | --- a/drivers/media/usb/go7007/go7007-usb.c |
1443 | +++ b/drivers/media/usb/go7007/go7007-usb.c |
1444 | @@ -1052,6 +1052,7 @@ static int go7007_usb_probe(struct usb_interface *intf, |
1445 | struct go7007_usb *usb; |
1446 | const struct go7007_usb_board *board; |
1447 | struct usb_device *usbdev = interface_to_usbdev(intf); |
1448 | + struct usb_host_endpoint *ep; |
1449 | unsigned num_i2c_devs; |
1450 | char *name; |
1451 | int video_pipe, i, v_urb_len; |
1452 | @@ -1147,7 +1148,8 @@ static int go7007_usb_probe(struct usb_interface *intf, |
1453 | if (usb->intr_urb->transfer_buffer == NULL) |
1454 | goto allocfail; |
1455 | |
1456 | - if (go->board_id == GO7007_BOARDID_SENSORAY_2250) |
1457 | + ep = usb->usbdev->ep_in[4]; |
1458 | + if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK) |
1459 | usb_fill_bulk_urb(usb->intr_urb, usb->usbdev, |
1460 | usb_rcvbulkpipe(usb->usbdev, 4), |
1461 | usb->intr_urb->transfer_buffer, 2*sizeof(u16), |
1462 | diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c |
1463 | index 5c8ed2150c8bf..fb687368ac98c 100644 |
1464 | --- a/drivers/mfd/mfd-core.c |
1465 | +++ b/drivers/mfd/mfd-core.c |
1466 | @@ -32,6 +32,11 @@ int mfd_cell_enable(struct platform_device *pdev) |
1467 | const struct mfd_cell *cell = mfd_get_cell(pdev); |
1468 | int err = 0; |
1469 | |
1470 | + if (!cell->enable) { |
1471 | + dev_dbg(&pdev->dev, "No .enable() call-back registered\n"); |
1472 | + return 0; |
1473 | + } |
1474 | + |
1475 | /* only call enable hook if the cell wasn't previously enabled */ |
1476 | if (atomic_inc_return(cell->usage_count) == 1) |
1477 | err = cell->enable(pdev); |
1478 | @@ -49,6 +54,11 @@ int mfd_cell_disable(struct platform_device *pdev) |
1479 | const struct mfd_cell *cell = mfd_get_cell(pdev); |
1480 | int err = 0; |
1481 | |
1482 | + if (!cell->disable) { |
1483 | + dev_dbg(&pdev->dev, "No .disable() call-back registered\n"); |
1484 | + return 0; |
1485 | + } |
1486 | + |
1487 | /* only disable if no other clients are using it */ |
1488 | if (atomic_dec_return(cell->usage_count) == 0) |
1489 | err = cell->disable(pdev); |
1490 | diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c |
1491 | index 00ba09fa6f16d..3c4819a05bf03 100644 |
1492 | --- a/drivers/mtd/chips/cfi_cmdset_0002.c |
1493 | +++ b/drivers/mtd/chips/cfi_cmdset_0002.c |
1494 | @@ -722,7 +722,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) |
1495 | kfree(mtd->eraseregions); |
1496 | kfree(mtd); |
1497 | kfree(cfi->cmdset_priv); |
1498 | - kfree(cfi->cfiq); |
1499 | return NULL; |
1500 | } |
1501 | |
1502 | diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c |
1503 | index fbd5affc0acfe..04fd845de05fb 100644 |
1504 | --- a/drivers/mtd/cmdlinepart.c |
1505 | +++ b/drivers/mtd/cmdlinepart.c |
1506 | @@ -228,12 +228,29 @@ static int mtdpart_setup_real(char *s) |
1507 | struct cmdline_mtd_partition *this_mtd; |
1508 | struct mtd_partition *parts; |
1509 | int mtd_id_len, num_parts; |
1510 | - char *p, *mtd_id; |
1511 | + char *p, *mtd_id, *semicol; |
1512 | + |
1513 | + /* |
1514 | + * Replace the first ';' by a NULL char so strrchr can work |
1515 | + * properly. |
1516 | + */ |
1517 | + semicol = strchr(s, ';'); |
1518 | + if (semicol) |
1519 | + *semicol = '\0'; |
1520 | |
1521 | mtd_id = s; |
1522 | |
1523 | - /* fetch <mtd-id> */ |
1524 | - p = strchr(s, ':'); |
1525 | + /* |
1526 | + * fetch <mtd-id>. We use strrchr to ignore all ':' that could |
1527 | + * be present in the MTD name, only the last one is interpreted |
1528 | + * as an <mtd-id>/<part-definition> separator. |
1529 | + */ |
1530 | + p = strrchr(s, ':'); |
1531 | + |
1532 | + /* Restore the ';' now. */ |
1533 | + if (semicol) |
1534 | + *semicol = ';'; |
1535 | + |
1536 | if (!p) { |
1537 | pr_err("no mtd-id\n"); |
1538 | return -EINVAL; |
1539 | diff --git a/drivers/mtd/nand/omap_elm.c b/drivers/mtd/nand/omap_elm.c |
1540 | index a3f32f939cc17..6736777a41567 100644 |
1541 | --- a/drivers/mtd/nand/omap_elm.c |
1542 | +++ b/drivers/mtd/nand/omap_elm.c |
1543 | @@ -421,6 +421,7 @@ static int elm_probe(struct platform_device *pdev) |
1544 | pm_runtime_enable(&pdev->dev); |
1545 | if (pm_runtime_get_sync(&pdev->dev) < 0) { |
1546 | ret = -EINVAL; |
1547 | + pm_runtime_put_sync(&pdev->dev); |
1548 | pm_runtime_disable(&pdev->dev); |
1549 | dev_err(&pdev->dev, "can't enable clock\n"); |
1550 | return ret; |
1551 | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c |
1552 | index ac03bba10e4fd..8634337e1a99d 100644 |
1553 | --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c |
1554 | +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c |
1555 | @@ -1000,9 +1000,12 @@ static int bnxt_set_pauseparam(struct net_device *dev, |
1556 | if (!BNXT_SINGLE_PF(bp)) |
1557 | return -EOPNOTSUPP; |
1558 | |
1559 | + mutex_lock(&bp->link_lock); |
1560 | if (epause->autoneg) { |
1561 | - if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) |
1562 | - return -EINVAL; |
1563 | + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { |
1564 | + rc = -EINVAL; |
1565 | + goto pause_exit; |
1566 | + } |
1567 | |
1568 | link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; |
1569 | if (bp->hwrm_spec_code >= 0x10201) |
1570 | @@ -1023,11 +1026,11 @@ static int bnxt_set_pauseparam(struct net_device *dev, |
1571 | if (epause->tx_pause) |
1572 | link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; |
1573 | |
1574 | - if (netif_running(dev)) { |
1575 | - mutex_lock(&bp->link_lock); |
1576 | + if (netif_running(dev)) |
1577 | rc = bnxt_hwrm_set_pause(bp); |
1578 | - mutex_unlock(&bp->link_lock); |
1579 | - } |
1580 | + |
1581 | +pause_exit: |
1582 | + mutex_unlock(&bp->link_lock); |
1583 | return rc; |
1584 | } |
1585 | |
1586 | @@ -1671,8 +1674,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) |
1587 | struct bnxt *bp = netdev_priv(dev); |
1588 | struct ethtool_eee *eee = &bp->eee; |
1589 | struct bnxt_link_info *link_info = &bp->link_info; |
1590 | - u32 advertising = |
1591 | - _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); |
1592 | + u32 advertising; |
1593 | int rc = 0; |
1594 | |
1595 | if (!BNXT_SINGLE_PF(bp)) |
1596 | @@ -1681,19 +1683,23 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) |
1597 | if (!(bp->flags & BNXT_FLAG_EEE_CAP)) |
1598 | return -EOPNOTSUPP; |
1599 | |
1600 | + mutex_lock(&bp->link_lock); |
1601 | + advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); |
1602 | if (!edata->eee_enabled) |
1603 | goto eee_ok; |
1604 | |
1605 | if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { |
1606 | netdev_warn(dev, "EEE requires autoneg\n"); |
1607 | - return -EINVAL; |
1608 | + rc = -EINVAL; |
1609 | + goto eee_exit; |
1610 | } |
1611 | if (edata->tx_lpi_enabled) { |
1612 | if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi || |
1613 | edata->tx_lpi_timer < bp->lpi_tmr_lo)) { |
1614 | netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n", |
1615 | bp->lpi_tmr_lo, bp->lpi_tmr_hi); |
1616 | - return -EINVAL; |
1617 | + rc = -EINVAL; |
1618 | + goto eee_exit; |
1619 | } else if (!bp->lpi_tmr_hi) { |
1620 | edata->tx_lpi_timer = eee->tx_lpi_timer; |
1621 | } |
1622 | @@ -1703,7 +1709,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) |
1623 | } else if (edata->advertised & ~advertising) { |
1624 | netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n", |
1625 | edata->advertised, advertising); |
1626 | - return -EINVAL; |
1627 | + rc = -EINVAL; |
1628 | + goto eee_exit; |
1629 | } |
1630 | |
1631 | eee->advertised = edata->advertised; |
1632 | @@ -1715,6 +1722,8 @@ eee_ok: |
1633 | if (netif_running(dev)) |
1634 | rc = bnxt_hwrm_set_link_setting(bp, false, true); |
1635 | |
1636 | +eee_exit: |
1637 | + mutex_unlock(&bp->link_lock); |
1638 | return rc; |
1639 | } |
1640 | |
1641 | diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c |
1642 | index 3b16ee0de246e..c30792b761ee3 100644 |
1643 | --- a/drivers/net/ethernet/intel/e1000/e1000_main.c |
1644 | +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c |
1645 | @@ -568,8 +568,13 @@ void e1000_reinit_locked(struct e1000_adapter *adapter) |
1646 | WARN_ON(in_interrupt()); |
1647 | while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) |
1648 | msleep(1); |
1649 | - e1000_down(adapter); |
1650 | - e1000_up(adapter); |
1651 | + |
1652 | + /* only run the task if not already down */ |
1653 | + if (!test_bit(__E1000_DOWN, &adapter->flags)) { |
1654 | + e1000_down(adapter); |
1655 | + e1000_up(adapter); |
1656 | + } |
1657 | + |
1658 | clear_bit(__E1000_RESETTING, &adapter->flags); |
1659 | } |
1660 | |
1661 | @@ -1456,10 +1461,15 @@ int e1000_close(struct net_device *netdev) |
1662 | struct e1000_hw *hw = &adapter->hw; |
1663 | int count = E1000_CHECK_RESET_COUNT; |
1664 | |
1665 | - while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) |
1666 | + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--) |
1667 | usleep_range(10000, 20000); |
1668 | |
1669 | - WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); |
1670 | + WARN_ON(count < 0); |
1671 | + |
1672 | + /* signal that we're down so that the reset task will no longer run */ |
1673 | + set_bit(__E1000_DOWN, &adapter->flags); |
1674 | + clear_bit(__E1000_RESETTING, &adapter->flags); |
1675 | + |
1676 | e1000_down(adapter); |
1677 | e1000_power_down_phy(adapter); |
1678 | e1000_free_irq(adapter); |
1679 | diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c |
1680 | index 1b980f12663af..a605dfb15bb75 100644 |
1681 | --- a/drivers/net/ieee802154/adf7242.c |
1682 | +++ b/drivers/net/ieee802154/adf7242.c |
1683 | @@ -834,7 +834,9 @@ static int adf7242_rx(struct adf7242_local *lp) |
1684 | int ret; |
1685 | u8 lqi, len_u8, *data; |
1686 | |
1687 | - adf7242_read_reg(lp, 0, &len_u8); |
1688 | + ret = adf7242_read_reg(lp, 0, &len_u8); |
1689 | + if (ret) |
1690 | + return ret; |
1691 | |
1692 | len = len_u8; |
1693 | |
1694 | diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c |
1695 | index 2f55873060220..a3ba95e96695e 100644 |
1696 | --- a/drivers/net/phy/phy_device.c |
1697 | +++ b/drivers/net/phy/phy_device.c |
1698 | @@ -1013,7 +1013,8 @@ void phy_detach(struct phy_device *phydev) |
1699 | phydev->attached_dev = NULL; |
1700 | phy_suspend(phydev); |
1701 | |
1702 | - module_put(phydev->mdio.dev.driver->owner); |
1703 | + if (phydev->mdio.dev.driver) |
1704 | + module_put(phydev->mdio.dev.driver->owner); |
1705 | |
1706 | /* If the device had no specific driver before (i.e. - it |
1707 | * was using the generic driver), we unbind the device |
1708 | diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c |
1709 | index 8a9aced850be3..63546d1317982 100644 |
1710 | --- a/drivers/net/wan/hdlc_ppp.c |
1711 | +++ b/drivers/net/wan/hdlc_ppp.c |
1712 | @@ -386,11 +386,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, |
1713 | } |
1714 | |
1715 | for (opt = data; len; len -= opt[1], opt += opt[1]) { |
1716 | - if (len < 2 || len < opt[1]) { |
1717 | - dev->stats.rx_errors++; |
1718 | - kfree(out); |
1719 | - return; /* bad packet, drop silently */ |
1720 | - } |
1721 | + if (len < 2 || opt[1] < 2 || len < opt[1]) |
1722 | + goto err_out; |
1723 | |
1724 | if (pid == PID_LCP) |
1725 | switch (opt[0]) { |
1726 | @@ -398,6 +395,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, |
1727 | continue; /* MRU always OK and > 1500 bytes? */ |
1728 | |
1729 | case LCP_OPTION_ACCM: /* async control character map */ |
1730 | + if (opt[1] < sizeof(valid_accm)) |
1731 | + goto err_out; |
1732 | if (!memcmp(opt, valid_accm, |
1733 | sizeof(valid_accm))) |
1734 | continue; |
1735 | @@ -409,6 +408,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, |
1736 | } |
1737 | break; |
1738 | case LCP_OPTION_MAGIC: |
1739 | + if (len < 6) |
1740 | + goto err_out; |
1741 | if (opt[1] != 6 || (!opt[2] && !opt[3] && |
1742 | !opt[4] && !opt[5])) |
1743 | break; /* reject invalid magic number */ |
1744 | @@ -427,6 +428,11 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, |
1745 | ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data); |
1746 | |
1747 | kfree(out); |
1748 | + return; |
1749 | + |
1750 | +err_out: |
1751 | + dev->stats.rx_errors++; |
1752 | + kfree(out); |
1753 | } |
1754 | |
1755 | static int ppp_rx(struct sk_buff *skb) |
1756 | diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c |
1757 | index e492c7f0d311a..9f4ee1d125b68 100644 |
1758 | --- a/drivers/net/wireless/ath/ar5523/ar5523.c |
1759 | +++ b/drivers/net/wireless/ath/ar5523/ar5523.c |
1760 | @@ -1769,6 +1769,8 @@ static struct usb_device_id ar5523_id_table[] = { |
1761 | AR5523_DEVICE_UX(0x0846, 0x4300), /* Netgear / WG111U */ |
1762 | AR5523_DEVICE_UG(0x0846, 0x4250), /* Netgear / WG111T */ |
1763 | AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */ |
1764 | + AR5523_DEVICE_UG(0x083a, 0x4506), /* SMC / EZ Connect |
1765 | + SMCWUSBT-G2 */ |
1766 | AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */ |
1767 | AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */ |
1768 | AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */ |
1769 | diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h |
1770 | index 395d6ece2cacb..341f6ed5b3556 100644 |
1771 | --- a/drivers/net/wireless/marvell/mwifiex/fw.h |
1772 | +++ b/drivers/net/wireless/marvell/mwifiex/fw.h |
1773 | @@ -921,7 +921,7 @@ struct mwifiex_tkip_param { |
1774 | struct mwifiex_aes_param { |
1775 | u8 pn[WPA_PN_SIZE]; |
1776 | __le16 key_len; |
1777 | - u8 key[WLAN_KEY_LEN_CCMP]; |
1778 | + u8 key[WLAN_KEY_LEN_CCMP_256]; |
1779 | } __packed; |
1780 | |
1781 | struct mwifiex_wapi_param { |
1782 | diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c |
1783 | index 1e26936c0d727..aa84fdb709830 100644 |
1784 | --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c |
1785 | +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c |
1786 | @@ -625,7 +625,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, |
1787 | key_v2 = &resp->params.key_material_v2; |
1788 | |
1789 | len = le16_to_cpu(key_v2->key_param_set.key_params.aes.key_len); |
1790 | - if (len > WLAN_KEY_LEN_CCMP) |
1791 | + if (len > sizeof(key_v2->key_param_set.key_params.aes.key)) |
1792 | return -EINVAL; |
1793 | |
1794 | if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) { |
1795 | @@ -641,7 +641,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, |
1796 | return 0; |
1797 | |
1798 | memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0, |
1799 | - WLAN_KEY_LEN_CCMP); |
1800 | + sizeof(key_v2->key_param_set.key_params.aes.key)); |
1801 | priv->aes_key_v2.key_param_set.key_params.aes.key_len = |
1802 | cpu_to_le16(len); |
1803 | memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key, |
1804 | diff --git a/drivers/phy/phy-s5pv210-usb2.c b/drivers/phy/phy-s5pv210-usb2.c |
1805 | index 004d320767e4d..bb36cfd4e3e90 100644 |
1806 | --- a/drivers/phy/phy-s5pv210-usb2.c |
1807 | +++ b/drivers/phy/phy-s5pv210-usb2.c |
1808 | @@ -142,6 +142,10 @@ static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on) |
1809 | udelay(10); |
1810 | rst &= ~rstbits; |
1811 | writel(rst, drv->reg_phy + S5PV210_UPHYRST); |
1812 | + /* The following delay is necessary for the reset sequence to be |
1813 | + * completed |
1814 | + */ |
1815 | + udelay(80); |
1816 | } else { |
1817 | pwr = readl(drv->reg_phy + S5PV210_UPHYPWR); |
1818 | pwr |= phypwr; |
1819 | diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c |
1820 | index 065f11a1964d4..39deea8601d68 100644 |
1821 | --- a/drivers/scsi/aacraid/aachba.c |
1822 | +++ b/drivers/scsi/aacraid/aachba.c |
1823 | @@ -1929,13 +1929,13 @@ static int aac_read(struct scsi_cmnd * scsicmd) |
1824 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | |
1825 | SAM_STAT_CHECK_CONDITION; |
1826 | set_sense(&dev->fsa_dev[cid].sense_data, |
1827 | - HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, |
1828 | + ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE, |
1829 | ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); |
1830 | memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, |
1831 | min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), |
1832 | SCSI_SENSE_BUFFERSIZE)); |
1833 | scsicmd->scsi_done(scsicmd); |
1834 | - return 1; |
1835 | + return 0; |
1836 | } |
1837 | |
1838 | dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n", |
1839 | @@ -2023,13 +2023,13 @@ static int aac_write(struct scsi_cmnd * scsicmd) |
1840 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | |
1841 | SAM_STAT_CHECK_CONDITION; |
1842 | set_sense(&dev->fsa_dev[cid].sense_data, |
1843 | - HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, |
1844 | + ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE, |
1845 | ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); |
1846 | memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, |
1847 | min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), |
1848 | SCSI_SENSE_BUFFERSIZE)); |
1849 | scsicmd->scsi_done(scsicmd); |
1850 | - return 1; |
1851 | + return 0; |
1852 | } |
1853 | |
1854 | dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n", |
1855 | diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c |
1856 | index 52afbcff362f9..b7940fffca637 100644 |
1857 | --- a/drivers/scsi/lpfc/lpfc_ct.c |
1858 | +++ b/drivers/scsi/lpfc/lpfc_ct.c |
1859 | @@ -1541,8 +1541,8 @@ lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) |
1860 | struct lpfc_fdmi_attr_entry *ae; |
1861 | uint32_t size; |
1862 | |
1863 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
1864 | - memset(ae, 0, sizeof(struct lpfc_name)); |
1865 | + ae = &ad->AttrValue; |
1866 | + memset(ae, 0, sizeof(*ae)); |
1867 | |
1868 | memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName, |
1869 | sizeof(struct lpfc_name)); |
1870 | @@ -1558,8 +1558,8 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, |
1871 | struct lpfc_fdmi_attr_entry *ae; |
1872 | uint32_t len, size; |
1873 | |
1874 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
1875 | - memset(ae, 0, 256); |
1876 | + ae = &ad->AttrValue; |
1877 | + memset(ae, 0, sizeof(*ae)); |
1878 | |
1879 | /* This string MUST be consistent with other FC platforms |
1880 | * supported by Broadcom. |
1881 | @@ -1583,8 +1583,8 @@ lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) |
1882 | struct lpfc_fdmi_attr_entry *ae; |
1883 | uint32_t len, size; |
1884 | |
1885 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
1886 | - memset(ae, 0, 256); |
1887 | + ae = &ad->AttrValue; |
1888 | + memset(ae, 0, sizeof(*ae)); |
1889 | |
1890 | strncpy(ae->un.AttrString, phba->SerialNumber, |
1891 | sizeof(ae->un.AttrString)); |
1892 | @@ -1605,8 +1605,8 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, |
1893 | struct lpfc_fdmi_attr_entry *ae; |
1894 | uint32_t len, size; |
1895 | |
1896 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
1897 | - memset(ae, 0, 256); |
1898 | + ae = &ad->AttrValue; |
1899 | + memset(ae, 0, sizeof(*ae)); |
1900 | |
1901 | strncpy(ae->un.AttrString, phba->ModelName, |
1902 | sizeof(ae->un.AttrString)); |
1903 | @@ -1626,8 +1626,8 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, |
1904 | struct lpfc_fdmi_attr_entry *ae; |
1905 | uint32_t len, size; |
1906 | |
1907 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
1908 | - memset(ae, 0, 256); |
1909 | + ae = &ad->AttrValue; |
1910 | + memset(ae, 0, sizeof(*ae)); |
1911 | |
1912 | strncpy(ae->un.AttrString, phba->ModelDesc, |
1913 | sizeof(ae->un.AttrString)); |
1914 | @@ -1649,8 +1649,8 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, |
1915 | struct lpfc_fdmi_attr_entry *ae; |
1916 | uint32_t i, j, incr, size; |
1917 | |
1918 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
1919 | - memset(ae, 0, 256); |
1920 | + ae = &ad->AttrValue; |
1921 | + memset(ae, 0, sizeof(*ae)); |
1922 | |
1923 | /* Convert JEDEC ID to ascii for hardware version */ |
1924 | incr = vp->rev.biuRev; |
1925 | @@ -1679,8 +1679,8 @@ lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, |
1926 | struct lpfc_fdmi_attr_entry *ae; |
1927 | uint32_t len, size; |
1928 | |
1929 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
1930 | - memset(ae, 0, 256); |
1931 | + ae = &ad->AttrValue; |
1932 | + memset(ae, 0, sizeof(*ae)); |
1933 | |
1934 | strncpy(ae->un.AttrString, lpfc_release_version, |
1935 | sizeof(ae->un.AttrString)); |
1936 | @@ -1701,8 +1701,8 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, |
1937 | struct lpfc_fdmi_attr_entry *ae; |
1938 | uint32_t len, size; |
1939 | |
1940 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
1941 | - memset(ae, 0, 256); |
1942 | + ae = &ad->AttrValue; |
1943 | + memset(ae, 0, sizeof(*ae)); |
1944 | |
1945 | if (phba->sli_rev == LPFC_SLI_REV4) |
1946 | lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); |
1947 | @@ -1726,8 +1726,8 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, |
1948 | struct lpfc_fdmi_attr_entry *ae; |
1949 | uint32_t len, size; |
1950 | |
1951 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
1952 | - memset(ae, 0, 256); |
1953 | + ae = &ad->AttrValue; |
1954 | + memset(ae, 0, sizeof(*ae)); |
1955 | |
1956 | lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); |
1957 | len = strnlen(ae->un.AttrString, |
1958 | @@ -1746,8 +1746,8 @@ lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, |
1959 | struct lpfc_fdmi_attr_entry *ae; |
1960 | uint32_t len, size; |
1961 | |
1962 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
1963 | - memset(ae, 0, 256); |
1964 | + ae = &ad->AttrValue; |
1965 | + memset(ae, 0, sizeof(*ae)); |
1966 | |
1967 | snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s", |
1968 | init_utsname()->sysname, |
1969 | @@ -1769,7 +1769,7 @@ lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, |
1970 | struct lpfc_fdmi_attr_entry *ae; |
1971 | uint32_t size; |
1972 | |
1973 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
1974 | + ae = &ad->AttrValue; |
1975 | |
1976 | ae->un.AttrInt = cpu_to_be32(LPFC_MAX_CT_SIZE); |
1977 | size = FOURBYTES + sizeof(uint32_t); |
1978 | @@ -1785,8 +1785,8 @@ lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, |
1979 | struct lpfc_fdmi_attr_entry *ae; |
1980 | uint32_t len, size; |
1981 | |
1982 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
1983 | - memset(ae, 0, 256); |
1984 | + ae = &ad->AttrValue; |
1985 | + memset(ae, 0, sizeof(*ae)); |
1986 | |
1987 | len = lpfc_vport_symbolic_node_name(vport, |
1988 | ae->un.AttrString, 256); |
1989 | @@ -1804,7 +1804,7 @@ lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, |
1990 | struct lpfc_fdmi_attr_entry *ae; |
1991 | uint32_t size; |
1992 | |
1993 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
1994 | + ae = &ad->AttrValue; |
1995 | |
1996 | /* Nothing is defined for this currently */ |
1997 | ae->un.AttrInt = cpu_to_be32(0); |
1998 | @@ -1821,7 +1821,7 @@ lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, |
1999 | struct lpfc_fdmi_attr_entry *ae; |
2000 | uint32_t size; |
2001 | |
2002 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2003 | + ae = &ad->AttrValue; |
2004 | |
2005 | /* Each driver instance corresponds to a single port */ |
2006 | ae->un.AttrInt = cpu_to_be32(1); |
2007 | @@ -1838,8 +1838,8 @@ lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, |
2008 | struct lpfc_fdmi_attr_entry *ae; |
2009 | uint32_t size; |
2010 | |
2011 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2012 | - memset(ae, 0, sizeof(struct lpfc_name)); |
2013 | + ae = &ad->AttrValue; |
2014 | + memset(ae, 0, sizeof(*ae)); |
2015 | |
2016 | memcpy(&ae->un.AttrWWN, &vport->fabric_nodename, |
2017 | sizeof(struct lpfc_name)); |
2018 | @@ -1857,8 +1857,8 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, |
2019 | struct lpfc_fdmi_attr_entry *ae; |
2020 | uint32_t len, size; |
2021 | |
2022 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2023 | - memset(ae, 0, 256); |
2024 | + ae = &ad->AttrValue; |
2025 | + memset(ae, 0, sizeof(*ae)); |
2026 | |
2027 | lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); |
2028 | len = strnlen(ae->un.AttrString, |
2029 | @@ -1877,7 +1877,7 @@ lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, |
2030 | struct lpfc_fdmi_attr_entry *ae; |
2031 | uint32_t size; |
2032 | |
2033 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2034 | + ae = &ad->AttrValue; |
2035 | |
2036 | /* Driver doesn't have access to this information */ |
2037 | ae->un.AttrInt = cpu_to_be32(0); |
2038 | @@ -1894,8 +1894,8 @@ lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, |
2039 | struct lpfc_fdmi_attr_entry *ae; |
2040 | uint32_t len, size; |
2041 | |
2042 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2043 | - memset(ae, 0, 256); |
2044 | + ae = &ad->AttrValue; |
2045 | + memset(ae, 0, sizeof(*ae)); |
2046 | |
2047 | strncpy(ae->un.AttrString, "EMULEX", |
2048 | sizeof(ae->un.AttrString)); |
2049 | @@ -1916,8 +1916,8 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, |
2050 | struct lpfc_fdmi_attr_entry *ae; |
2051 | uint32_t size; |
2052 | |
2053 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2054 | - memset(ae, 0, 32); |
2055 | + ae = &ad->AttrValue; |
2056 | + memset(ae, 0, sizeof(*ae)); |
2057 | |
2058 | ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */ |
2059 | ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */ |
2060 | @@ -1936,7 +1936,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, |
2061 | struct lpfc_fdmi_attr_entry *ae; |
2062 | uint32_t size; |
2063 | |
2064 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2065 | + ae = &ad->AttrValue; |
2066 | |
2067 | ae->un.AttrInt = 0; |
2068 | if (!(phba->hba_flag & HBA_FCOE_MODE)) { |
2069 | @@ -1986,7 +1986,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, |
2070 | struct lpfc_fdmi_attr_entry *ae; |
2071 | uint32_t size; |
2072 | |
2073 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2074 | + ae = &ad->AttrValue; |
2075 | |
2076 | if (!(phba->hba_flag & HBA_FCOE_MODE)) { |
2077 | switch (phba->fc_linkspeed) { |
2078 | @@ -2050,7 +2050,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, |
2079 | struct lpfc_fdmi_attr_entry *ae; |
2080 | uint32_t size; |
2081 | |
2082 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2083 | + ae = &ad->AttrValue; |
2084 | |
2085 | hsp = (struct serv_parm *)&vport->fc_sparam; |
2086 | ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb) << 8) | |
2087 | @@ -2070,8 +2070,8 @@ lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, |
2088 | struct lpfc_fdmi_attr_entry *ae; |
2089 | uint32_t len, size; |
2090 | |
2091 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2092 | - memset(ae, 0, 256); |
2093 | + ae = &ad->AttrValue; |
2094 | + memset(ae, 0, sizeof(*ae)); |
2095 | |
2096 | snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), |
2097 | "/sys/class/scsi_host/host%d", shost->host_no); |
2098 | @@ -2091,8 +2091,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, |
2099 | struct lpfc_fdmi_attr_entry *ae; |
2100 | uint32_t len, size; |
2101 | |
2102 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2103 | - memset(ae, 0, 256); |
2104 | + ae = &ad->AttrValue; |
2105 | + memset(ae, 0, sizeof(*ae)); |
2106 | |
2107 | snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s", |
2108 | init_utsname()->nodename); |
2109 | @@ -2112,8 +2112,8 @@ lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, |
2110 | struct lpfc_fdmi_attr_entry *ae; |
2111 | uint32_t size; |
2112 | |
2113 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2114 | - memset(ae, 0, sizeof(struct lpfc_name)); |
2115 | + ae = &ad->AttrValue; |
2116 | + memset(ae, 0, sizeof(*ae)); |
2117 | |
2118 | memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName, |
2119 | sizeof(struct lpfc_name)); |
2120 | @@ -2130,8 +2130,8 @@ lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, |
2121 | struct lpfc_fdmi_attr_entry *ae; |
2122 | uint32_t size; |
2123 | |
2124 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2125 | - memset(ae, 0, sizeof(struct lpfc_name)); |
2126 | + ae = &ad->AttrValue; |
2127 | + memset(ae, 0, sizeof(*ae)); |
2128 | |
2129 | memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName, |
2130 | sizeof(struct lpfc_name)); |
2131 | @@ -2148,8 +2148,8 @@ lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, |
2132 | struct lpfc_fdmi_attr_entry *ae; |
2133 | uint32_t len, size; |
2134 | |
2135 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2136 | - memset(ae, 0, 256); |
2137 | + ae = &ad->AttrValue; |
2138 | + memset(ae, 0, sizeof(*ae)); |
2139 | |
2140 | len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256); |
2141 | len += (len & 3) ? (4 - (len & 3)) : 4; |
2142 | @@ -2167,7 +2167,7 @@ lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, |
2143 | struct lpfc_fdmi_attr_entry *ae; |
2144 | uint32_t size; |
2145 | |
2146 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2147 | + ae = &ad->AttrValue; |
2148 | if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) |
2149 | ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT); |
2150 | else |
2151 | @@ -2185,7 +2185,7 @@ lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, |
2152 | struct lpfc_fdmi_attr_entry *ae; |
2153 | uint32_t size; |
2154 | |
2155 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2156 | + ae = &ad->AttrValue; |
2157 | ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3); |
2158 | size = FOURBYTES + sizeof(uint32_t); |
2159 | ad->AttrLen = cpu_to_be16(size); |
2160 | @@ -2200,8 +2200,8 @@ lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, |
2161 | struct lpfc_fdmi_attr_entry *ae; |
2162 | uint32_t size; |
2163 | |
2164 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2165 | - memset(ae, 0, sizeof(struct lpfc_name)); |
2166 | + ae = &ad->AttrValue; |
2167 | + memset(ae, 0, sizeof(*ae)); |
2168 | |
2169 | memcpy(&ae->un.AttrWWN, &vport->fabric_portname, |
2170 | sizeof(struct lpfc_name)); |
2171 | @@ -2218,8 +2218,8 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, |
2172 | struct lpfc_fdmi_attr_entry *ae; |
2173 | uint32_t size; |
2174 | |
2175 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2176 | - memset(ae, 0, 32); |
2177 | + ae = &ad->AttrValue; |
2178 | + memset(ae, 0, sizeof(*ae)); |
2179 | |
2180 | ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */ |
2181 | ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */ |
2182 | @@ -2237,7 +2237,7 @@ lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, |
2183 | struct lpfc_fdmi_attr_entry *ae; |
2184 | uint32_t size; |
2185 | |
2186 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2187 | + ae = &ad->AttrValue; |
2188 | /* Link Up - operational */ |
2189 | ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE); |
2190 | size = FOURBYTES + sizeof(uint32_t); |
2191 | @@ -2253,7 +2253,7 @@ lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, |
2192 | struct lpfc_fdmi_attr_entry *ae; |
2193 | uint32_t size; |
2194 | |
2195 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2196 | + ae = &ad->AttrValue; |
2197 | vport->fdmi_num_disc = lpfc_find_map_node(vport); |
2198 | ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc); |
2199 | size = FOURBYTES + sizeof(uint32_t); |
2200 | @@ -2269,7 +2269,7 @@ lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, |
2201 | struct lpfc_fdmi_attr_entry *ae; |
2202 | uint32_t size; |
2203 | |
2204 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2205 | + ae = &ad->AttrValue; |
2206 | ae->un.AttrInt = cpu_to_be32(vport->fc_myDID); |
2207 | size = FOURBYTES + sizeof(uint32_t); |
2208 | ad->AttrLen = cpu_to_be16(size); |
2209 | @@ -2284,8 +2284,8 @@ lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, |
2210 | struct lpfc_fdmi_attr_entry *ae; |
2211 | uint32_t len, size; |
2212 | |
2213 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2214 | - memset(ae, 0, 256); |
2215 | + ae = &ad->AttrValue; |
2216 | + memset(ae, 0, sizeof(*ae)); |
2217 | |
2218 | strncpy(ae->un.AttrString, "Smart SAN Initiator", |
2219 | sizeof(ae->un.AttrString)); |
2220 | @@ -2305,8 +2305,8 @@ lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, |
2221 | struct lpfc_fdmi_attr_entry *ae; |
2222 | uint32_t size; |
2223 | |
2224 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2225 | - memset(ae, 0, 256); |
2226 | + ae = &ad->AttrValue; |
2227 | + memset(ae, 0, sizeof(*ae)); |
2228 | |
2229 | memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName, |
2230 | sizeof(struct lpfc_name)); |
2231 | @@ -2326,8 +2326,8 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, |
2232 | struct lpfc_fdmi_attr_entry *ae; |
2233 | uint32_t len, size; |
2234 | |
2235 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2236 | - memset(ae, 0, 256); |
2237 | + ae = &ad->AttrValue; |
2238 | + memset(ae, 0, sizeof(*ae)); |
2239 | |
2240 | strncpy(ae->un.AttrString, "Smart SAN Version 2.0", |
2241 | sizeof(ae->un.AttrString)); |
2242 | @@ -2348,8 +2348,8 @@ lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, |
2243 | struct lpfc_fdmi_attr_entry *ae; |
2244 | uint32_t len, size; |
2245 | |
2246 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2247 | - memset(ae, 0, 256); |
2248 | + ae = &ad->AttrValue; |
2249 | + memset(ae, 0, sizeof(*ae)); |
2250 | |
2251 | strncpy(ae->un.AttrString, phba->ModelName, |
2252 | sizeof(ae->un.AttrString)); |
2253 | @@ -2368,7 +2368,7 @@ lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, |
2254 | struct lpfc_fdmi_attr_entry *ae; |
2255 | uint32_t size; |
2256 | |
2257 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2258 | + ae = &ad->AttrValue; |
2259 | |
2260 | /* SRIOV (type 3) is not supported */ |
2261 | if (vport->vpi) |
2262 | @@ -2388,7 +2388,7 @@ lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, |
2263 | struct lpfc_fdmi_attr_entry *ae; |
2264 | uint32_t size; |
2265 | |
2266 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2267 | + ae = &ad->AttrValue; |
2268 | ae->un.AttrInt = cpu_to_be32(0); |
2269 | size = FOURBYTES + sizeof(uint32_t); |
2270 | ad->AttrLen = cpu_to_be16(size); |
2271 | @@ -2403,7 +2403,7 @@ lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, |
2272 | struct lpfc_fdmi_attr_entry *ae; |
2273 | uint32_t size; |
2274 | |
2275 | - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; |
2276 | + ae = &ad->AttrValue; |
2277 | ae->un.AttrInt = cpu_to_be32(1); |
2278 | size = FOURBYTES + sizeof(uint32_t); |
2279 | ad->AttrLen = cpu_to_be16(size); |
2280 | @@ -2551,7 +2551,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
2281 | /* Registered Port List */ |
2282 | /* One entry (port) per adapter */ |
2283 | rh->rpl.EntryCnt = cpu_to_be32(1); |
2284 | - memcpy(&rh->rpl.pe, &phba->pport->fc_sparam.portName, |
2285 | + memcpy(&rh->rpl.pe.PortName, |
2286 | + &phba->pport->fc_sparam.portName, |
2287 | sizeof(struct lpfc_name)); |
2288 | |
2289 | /* point to the HBA attribute block */ |
2290 | diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h |
2291 | index 3b970d3706008..daab21f940fb8 100644 |
2292 | --- a/drivers/scsi/lpfc/lpfc_hw.h |
2293 | +++ b/drivers/scsi/lpfc/lpfc_hw.h |
2294 | @@ -1289,25 +1289,8 @@ struct fc_rdp_res_frame { |
2295 | /* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */ |
2296 | #define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */ |
2297 | |
2298 | -/* |
2299 | - * Registered Port List Format |
2300 | - */ |
2301 | -struct lpfc_fdmi_reg_port_list { |
2302 | - uint32_t EntryCnt; |
2303 | - uint32_t pe; /* Variable-length array */ |
2304 | -}; |
2305 | - |
2306 | - |
2307 | /* Definitions for HBA / Port attribute entries */ |
2308 | |
2309 | -struct lpfc_fdmi_attr_def { /* Defined in TLV format */ |
2310 | - /* Structure is in Big Endian format */ |
2311 | - uint32_t AttrType:16; |
2312 | - uint32_t AttrLen:16; |
2313 | - uint32_t AttrValue; /* Marks start of Value (ATTRIBUTE_ENTRY) */ |
2314 | -}; |
2315 | - |
2316 | - |
2317 | /* Attribute Entry */ |
2318 | struct lpfc_fdmi_attr_entry { |
2319 | union { |
2320 | @@ -1318,7 +1301,13 @@ struct lpfc_fdmi_attr_entry { |
2321 | } un; |
2322 | }; |
2323 | |
2324 | -#define LPFC_FDMI_MAX_AE_SIZE sizeof(struct lpfc_fdmi_attr_entry) |
2325 | +struct lpfc_fdmi_attr_def { /* Defined in TLV format */ |
2326 | + /* Structure is in Big Endian format */ |
2327 | + uint32_t AttrType:16; |
2328 | + uint32_t AttrLen:16; |
2329 | + /* Marks start of Value (ATTRIBUTE_ENTRY) */ |
2330 | + struct lpfc_fdmi_attr_entry AttrValue; |
2331 | +} __packed; |
2332 | |
2333 | /* |
2334 | * HBA Attribute Block |
2335 | @@ -1342,13 +1331,20 @@ struct lpfc_fdmi_hba_ident { |
2336 | struct lpfc_name PortName; |
2337 | }; |
2338 | |
2339 | +/* |
2340 | + * Registered Port List Format |
2341 | + */ |
2342 | +struct lpfc_fdmi_reg_port_list { |
2343 | + uint32_t EntryCnt; |
2344 | + struct lpfc_fdmi_port_entry pe; |
2345 | +} __packed; |
2346 | + |
2347 | /* |
2348 | * Register HBA(RHBA) |
2349 | */ |
2350 | struct lpfc_fdmi_reg_hba { |
2351 | struct lpfc_fdmi_hba_ident hi; |
2352 | - struct lpfc_fdmi_reg_port_list rpl; /* variable-length array */ |
2353 | -/* struct lpfc_fdmi_attr_block ab; */ |
2354 | + struct lpfc_fdmi_reg_port_list rpl; |
2355 | }; |
2356 | |
2357 | /* |
2358 | diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c |
2359 | index 1c34dc3355498..08c76c361e8dc 100644 |
2360 | --- a/drivers/scsi/lpfc/lpfc_sli.c |
2361 | +++ b/drivers/scsi/lpfc/lpfc_sli.c |
2362 | @@ -15648,6 +15648,10 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) |
2363 | list_add_tail(&iocbq->list, &first_iocbq->list); |
2364 | } |
2365 | } |
2366 | + /* Free the sequence's header buffer */ |
2367 | + if (!first_iocbq) |
2368 | + lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf); |
2369 | + |
2370 | return first_iocbq; |
2371 | } |
2372 | |
2373 | diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c |
2374 | index 6624cc07ac044..8dbfd4ffd6355 100644 |
2375 | --- a/drivers/tty/serial/8250/8250_core.c |
2376 | +++ b/drivers/tty/serial/8250/8250_core.c |
2377 | @@ -1046,8 +1046,10 @@ int serial8250_register_8250_port(struct uart_8250_port *up) |
2378 | |
2379 | ret = uart_add_one_port(&serial8250_reg, |
2380 | &uart->port); |
2381 | - if (ret == 0) |
2382 | - ret = uart->port.line; |
2383 | + if (ret) |
2384 | + goto err; |
2385 | + |
2386 | + ret = uart->port.line; |
2387 | } else { |
2388 | dev_info(uart->port.dev, |
2389 | "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n", |
2390 | @@ -1061,6 +1063,11 @@ int serial8250_register_8250_port(struct uart_8250_port *up) |
2391 | mutex_unlock(&serial_mutex); |
2392 | |
2393 | return ret; |
2394 | + |
2395 | +err: |
2396 | + uart->port.dev = NULL; |
2397 | + mutex_unlock(&serial_mutex); |
2398 | + return ret; |
2399 | } |
2400 | EXPORT_SYMBOL(serial8250_register_8250_port); |
2401 | |
2402 | diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c |
2403 | index a3adf21f9dcec..d41be02abced2 100644 |
2404 | --- a/drivers/tty/serial/8250/8250_omap.c |
2405 | +++ b/drivers/tty/serial/8250/8250_omap.c |
2406 | @@ -773,7 +773,10 @@ static void __dma_rx_do_complete(struct uart_8250_port *p) |
2407 | dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state); |
2408 | |
2409 | count = dma->rx_size - state.residue; |
2410 | - |
2411 | + if (count < dma->rx_size) |
2412 | + dmaengine_terminate_async(dma->rxchan); |
2413 | + if (!count) |
2414 | + goto unlock; |
2415 | ret = tty_insert_flip_string(tty_port, dma->rx_buf, count); |
2416 | |
2417 | p->port.icount.rx += ret; |
2418 | @@ -811,7 +814,6 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p) |
2419 | spin_unlock_irqrestore(&priv->rx_dma_lock, flags); |
2420 | |
2421 | __dma_rx_do_complete(p); |
2422 | - dmaengine_terminate_all(dma->rxchan); |
2423 | } |
2424 | |
2425 | static int omap_8250_rx_dma(struct uart_8250_port *p) |
2426 | @@ -1194,11 +1196,11 @@ static int omap8250_probe(struct platform_device *pdev) |
2427 | spin_lock_init(&priv->rx_dma_lock); |
2428 | |
2429 | device_init_wakeup(&pdev->dev, true); |
2430 | + pm_runtime_enable(&pdev->dev); |
2431 | pm_runtime_use_autosuspend(&pdev->dev); |
2432 | pm_runtime_set_autosuspend_delay(&pdev->dev, -1); |
2433 | |
2434 | pm_runtime_irq_safe(&pdev->dev); |
2435 | - pm_runtime_enable(&pdev->dev); |
2436 | |
2437 | pm_runtime_get_sync(&pdev->dev); |
2438 | |
2439 | diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c |
2440 | index 5641b877dca53..827a641ac336e 100644 |
2441 | --- a/drivers/tty/serial/8250/8250_port.c |
2442 | +++ b/drivers/tty/serial/8250/8250_port.c |
2443 | @@ -1806,6 +1806,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) |
2444 | unsigned char status; |
2445 | unsigned long flags; |
2446 | struct uart_8250_port *up = up_to_u8250p(port); |
2447 | + bool skip_rx = false; |
2448 | |
2449 | if (iir & UART_IIR_NO_INT) |
2450 | return 0; |
2451 | @@ -1814,7 +1815,20 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) |
2452 | |
2453 | status = serial_port_in(port, UART_LSR); |
2454 | |
2455 | - if (status & (UART_LSR_DR | UART_LSR_BI)) { |
2456 | + /* |
2457 | + * If port is stopped and there are no error conditions in the |
2458 | + * FIFO, then don't drain the FIFO, as this may lead to TTY buffer |
2459 | + * overflow. Not servicing, RX FIFO would trigger auto HW flow |
2460 | + * control when FIFO occupancy reaches preset threshold, thus |
2461 | + * halting RX. This only works when auto HW flow control is |
2462 | + * available. |
2463 | + */ |
2464 | + if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)) && |
2465 | + (port->status & (UPSTAT_AUTOCTS | UPSTAT_AUTORTS)) && |
2466 | + !(port->read_status_mask & UART_LSR_DR)) |
2467 | + skip_rx = true; |
2468 | + |
2469 | + if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) { |
2470 | if (!up->dma || handle_rx_dma(up, iir)) |
2471 | status = serial8250_rx_chars(up, status); |
2472 | } |
2473 | diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c |
2474 | index 4dfdb59061bea..8c89697c53573 100644 |
2475 | --- a/drivers/tty/serial/samsung.c |
2476 | +++ b/drivers/tty/serial/samsung.c |
2477 | @@ -1157,14 +1157,14 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport, |
2478 | struct s3c24xx_uart_info *info = ourport->info; |
2479 | struct clk *clk; |
2480 | unsigned long rate; |
2481 | - unsigned int cnt, baud, quot, clk_sel, best_quot = 0; |
2482 | + unsigned int cnt, baud, quot, best_quot = 0; |
2483 | char clkname[MAX_CLK_NAME_LENGTH]; |
2484 | int calc_deviation, deviation = (1 << 30) - 1; |
2485 | |
2486 | - clk_sel = (ourport->cfg->clk_sel) ? ourport->cfg->clk_sel : |
2487 | - ourport->info->def_clk_sel; |
2488 | for (cnt = 0; cnt < info->num_clks; cnt++) { |
2489 | - if (!(clk_sel & (1 << cnt))) |
2490 | + /* Keep selected clock if provided */ |
2491 | + if (ourport->cfg->clk_sel && |
2492 | + !(ourport->cfg->clk_sel & (1 << cnt))) |
2493 | continue; |
2494 | |
2495 | sprintf(clkname, "clk_uart_baud%d", cnt); |
2496 | diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c |
2497 | index 849806a75f1ce..b29610899c9f6 100644 |
2498 | --- a/drivers/usb/host/ehci-mv.c |
2499 | +++ b/drivers/usb/host/ehci-mv.c |
2500 | @@ -196,12 +196,10 @@ static int mv_ehci_probe(struct platform_device *pdev) |
2501 | hcd->rsrc_len = resource_size(r); |
2502 | hcd->regs = ehci_mv->op_regs; |
2503 | |
2504 | - hcd->irq = platform_get_irq(pdev, 0); |
2505 | - if (!hcd->irq) { |
2506 | - dev_err(&pdev->dev, "Cannot get irq."); |
2507 | - retval = -ENODEV; |
2508 | + retval = platform_get_irq(pdev, 0); |
2509 | + if (retval < 0) |
2510 | goto err_disable_clk; |
2511 | - } |
2512 | + hcd->irq = retval; |
2513 | |
2514 | ehci = hcd_to_ehci(hcd); |
2515 | ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs; |
2516 | diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c |
2517 | index 2254c281cc766..237d5aceb302d 100644 |
2518 | --- a/drivers/vfio/pci/vfio_pci.c |
2519 | +++ b/drivers/vfio/pci/vfio_pci.c |
2520 | @@ -392,6 +392,19 @@ static void vfio_pci_release(void *device_data) |
2521 | if (!(--vdev->refcnt)) { |
2522 | vfio_spapr_pci_eeh_release(vdev->pdev); |
2523 | vfio_pci_disable(vdev); |
2524 | + mutex_lock(&vdev->igate); |
2525 | + if (vdev->err_trigger) { |
2526 | + eventfd_ctx_put(vdev->err_trigger); |
2527 | + vdev->err_trigger = NULL; |
2528 | + } |
2529 | + mutex_unlock(&vdev->igate); |
2530 | + |
2531 | + mutex_lock(&vdev->igate); |
2532 | + if (vdev->req_trigger) { |
2533 | + eventfd_ctx_put(vdev->req_trigger); |
2534 | + vdev->req_trigger = NULL; |
2535 | + } |
2536 | + mutex_unlock(&vdev->igate); |
2537 | } |
2538 | |
2539 | mutex_unlock(&driver_lock); |
2540 | diff --git a/fs/block_dev.c b/fs/block_dev.c |
2541 | index 06f7cbe201326..98b37e77683d3 100644 |
2542 | --- a/fs/block_dev.c |
2543 | +++ b/fs/block_dev.c |
2544 | @@ -1586,6 +1586,16 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) |
2545 | struct gendisk *disk = bdev->bd_disk; |
2546 | struct block_device *victim = NULL; |
2547 | |
2548 | + /* |
2549 | + * Sync early if it looks like we're the last one. If someone else |
2550 | + * opens the block device between now and the decrement of bd_openers |
2551 | + * then we did a sync that we didn't need to, but that's not the end |
2552 | + * of the world and we want to avoid long (could be several minute) |
2553 | + * syncs while holding the mutex. |
2554 | + */ |
2555 | + if (bdev->bd_openers == 1) |
2556 | + sync_blockdev(bdev); |
2557 | + |
2558 | mutex_lock_nested(&bdev->bd_mutex, for_part); |
2559 | if (for_part) |
2560 | bdev->bd_part_count--; |
2561 | diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c |
2562 | index c0033a0d00787..b5bff1e760a34 100644 |
2563 | --- a/fs/btrfs/extent-tree.c |
2564 | +++ b/fs/btrfs/extent-tree.c |
2565 | @@ -9435,8 +9435,6 @@ out: |
2566 | */ |
2567 | if (!for_reloc && root_dropped == false) |
2568 | btrfs_add_dead_root(root); |
2569 | - if (err && err != -EAGAIN) |
2570 | - btrfs_handle_fs_error(fs_info, err, NULL); |
2571 | return err; |
2572 | } |
2573 | |
2574 | diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c |
2575 | index e11aacb35d6b5..cbd92dd89de16 100644 |
2576 | --- a/fs/ceph/caps.c |
2577 | +++ b/fs/ceph/caps.c |
2578 | @@ -1807,12 +1807,24 @@ ack: |
2579 | if (mutex_trylock(&session->s_mutex) == 0) { |
2580 | dout("inverting session/ino locks on %p\n", |
2581 | session); |
2582 | + session = ceph_get_mds_session(session); |
2583 | spin_unlock(&ci->i_ceph_lock); |
2584 | if (took_snap_rwsem) { |
2585 | up_read(&mdsc->snap_rwsem); |
2586 | took_snap_rwsem = 0; |
2587 | } |
2588 | - mutex_lock(&session->s_mutex); |
2589 | + if (session) { |
2590 | + mutex_lock(&session->s_mutex); |
2591 | + ceph_put_mds_session(session); |
2592 | + } else { |
2593 | + /* |
2594 | + * Because we take the reference while |
2595 | + * holding the i_ceph_lock, it should |
2596 | + * never be NULL. Throw a warning if it |
2597 | + * ever is. |
2598 | + */ |
2599 | + WARN_ON_ONCE(true); |
2600 | + } |
2601 | goto retry; |
2602 | } |
2603 | } |
2604 | diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h |
2605 | index 7ae21ad420fbf..a12258c32e8a3 100644 |
2606 | --- a/fs/cifs/cifsglob.h |
2607 | +++ b/fs/cifs/cifsglob.h |
2608 | @@ -242,8 +242,9 @@ struct smb_version_operations { |
2609 | int (*check_message)(char *, unsigned int, struct TCP_Server_Info *); |
2610 | bool (*is_oplock_break)(char *, struct TCP_Server_Info *); |
2611 | int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *); |
2612 | - void (*downgrade_oplock)(struct TCP_Server_Info *, |
2613 | - struct cifsInodeInfo *, bool); |
2614 | + void (*downgrade_oplock)(struct TCP_Server_Info *server, |
2615 | + struct cifsInodeInfo *cinode, __u32 oplock, |
2616 | + unsigned int epoch, bool *purge_cache); |
2617 | /* process transaction2 response */ |
2618 | bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *, |
2619 | char *, int); |
2620 | @@ -1080,6 +1081,8 @@ struct cifsFileInfo { |
2621 | unsigned int f_flags; |
2622 | bool invalidHandle:1; /* file closed via session abend */ |
2623 | bool oplock_break_cancelled:1; |
2624 | + unsigned int oplock_epoch; /* epoch from the lease break */ |
2625 | + __u32 oplock_level; /* oplock/lease level from the lease break */ |
2626 | int count; |
2627 | spinlock_t file_info_lock; /* protects four flag/count fields above */ |
2628 | struct mutex fh_mutex; /* prevents reopen race after dead ses*/ |
2629 | @@ -1191,7 +1194,7 @@ struct cifsInodeInfo { |
2630 | unsigned int epoch; /* used to track lease state changes */ |
2631 | #define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */ |
2632 | #define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */ |
2633 | -#define CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2 (2) /* Downgrade oplock to L2 */ |
2634 | +#define CIFS_INODE_FLAG_UNUSED (2) /* Unused flag */ |
2635 | #define CIFS_INO_DELETE_PENDING (3) /* delete pending on server */ |
2636 | #define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */ |
2637 | #define CIFS_INO_LOCK (5) /* lock bit for synchronization */ |
2638 | diff --git a/fs/cifs/file.c b/fs/cifs/file.c |
2639 | index b2919166855f5..24508b69e78b7 100644 |
2640 | --- a/fs/cifs/file.c |
2641 | +++ b/fs/cifs/file.c |
2642 | @@ -3531,7 +3531,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, |
2643 | break; |
2644 | |
2645 | __SetPageLocked(page); |
2646 | - if (add_to_page_cache_locked(page, mapping, page->index, gfp)) { |
2647 | + rc = add_to_page_cache_locked(page, mapping, page->index, gfp); |
2648 | + if (rc) { |
2649 | __ClearPageLocked(page); |
2650 | break; |
2651 | } |
2652 | @@ -3547,6 +3548,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, |
2653 | struct list_head *page_list, unsigned num_pages) |
2654 | { |
2655 | int rc; |
2656 | + int err = 0; |
2657 | struct list_head tmplist; |
2658 | struct cifsFileInfo *open_file = file->private_data; |
2659 | struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); |
2660 | @@ -3587,7 +3589,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, |
2661 | * the order of declining indexes. When we put the pages in |
2662 | * the rdata->pages, then we want them in increasing order. |
2663 | */ |
2664 | - while (!list_empty(page_list)) { |
2665 | + while (!list_empty(page_list) && !err) { |
2666 | unsigned int i, nr_pages, bytes, rsize; |
2667 | loff_t offset; |
2668 | struct page *page, *tpage; |
2669 | @@ -3610,9 +3612,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, |
2670 | return 0; |
2671 | } |
2672 | |
2673 | - rc = readpages_get_pages(mapping, page_list, rsize, &tmplist, |
2674 | + nr_pages = 0; |
2675 | + err = readpages_get_pages(mapping, page_list, rsize, &tmplist, |
2676 | &nr_pages, &offset, &bytes); |
2677 | - if (rc) { |
2678 | + if (!nr_pages) { |
2679 | add_credits_and_wake_if(server, credits, 0); |
2680 | break; |
2681 | } |
2682 | @@ -3912,12 +3915,13 @@ void cifs_oplock_break(struct work_struct *work) |
2683 | struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); |
2684 | struct TCP_Server_Info *server = tcon->ses->server; |
2685 | int rc = 0; |
2686 | + bool purge_cache = false; |
2687 | |
2688 | wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, |
2689 | TASK_UNINTERRUPTIBLE); |
2690 | |
2691 | - server->ops->downgrade_oplock(server, cinode, |
2692 | - test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags)); |
2693 | + server->ops->downgrade_oplock(server, cinode, cfile->oplock_level, |
2694 | + cfile->oplock_epoch, &purge_cache); |
2695 | |
2696 | if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) && |
2697 | cifs_has_mand_locks(cinode)) { |
2698 | @@ -3932,18 +3936,21 @@ void cifs_oplock_break(struct work_struct *work) |
2699 | else |
2700 | break_lease(inode, O_WRONLY); |
2701 | rc = filemap_fdatawrite(inode->i_mapping); |
2702 | - if (!CIFS_CACHE_READ(cinode)) { |
2703 | + if (!CIFS_CACHE_READ(cinode) || purge_cache) { |
2704 | rc = filemap_fdatawait(inode->i_mapping); |
2705 | mapping_set_error(inode->i_mapping, rc); |
2706 | cifs_zap_mapping(inode); |
2707 | } |
2708 | cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc); |
2709 | + if (CIFS_CACHE_WRITE(cinode)) |
2710 | + goto oplock_break_ack; |
2711 | } |
2712 | |
2713 | rc = cifs_push_locks(cfile); |
2714 | if (rc) |
2715 | cifs_dbg(VFS, "Push locks rc = %d\n", rc); |
2716 | |
2717 | +oplock_break_ack: |
2718 | /* |
2719 | * releasing stale oplock after recent reconnect of smb session using |
2720 | * a now incorrect file handle is not a data integrity issue but do |
2721 | diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c |
2722 | index 5e75df69062d8..bdf151e949166 100644 |
2723 | --- a/fs/cifs/misc.c |
2724 | +++ b/fs/cifs/misc.c |
2725 | @@ -481,21 +481,10 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) |
2726 | set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, |
2727 | &pCifsInode->flags); |
2728 | |
2729 | - /* |
2730 | - * Set flag if the server downgrades the oplock |
2731 | - * to L2 else clear. |
2732 | - */ |
2733 | - if (pSMB->OplockLevel) |
2734 | - set_bit( |
2735 | - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, |
2736 | - &pCifsInode->flags); |
2737 | - else |
2738 | - clear_bit( |
2739 | - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, |
2740 | - &pCifsInode->flags); |
2741 | - |
2742 | - cifs_queue_oplock_break(netfile); |
2743 | + netfile->oplock_epoch = 0; |
2744 | + netfile->oplock_level = pSMB->OplockLevel; |
2745 | netfile->oplock_break_cancelled = false; |
2746 | + cifs_queue_oplock_break(netfile); |
2747 | |
2748 | spin_unlock(&tcon->open_file_lock); |
2749 | spin_unlock(&cifs_tcp_ses_lock); |
2750 | diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c |
2751 | index 6f5d78b172bac..9a1f01c2f0209 100644 |
2752 | --- a/fs/cifs/smb1ops.c |
2753 | +++ b/fs/cifs/smb1ops.c |
2754 | @@ -378,12 +378,10 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr) |
2755 | |
2756 | static void |
2757 | cifs_downgrade_oplock(struct TCP_Server_Info *server, |
2758 | - struct cifsInodeInfo *cinode, bool set_level2) |
2759 | + struct cifsInodeInfo *cinode, __u32 oplock, |
2760 | + unsigned int epoch, bool *purge_cache) |
2761 | { |
2762 | - if (set_level2) |
2763 | - cifs_set_oplock_level(cinode, OPLOCK_READ); |
2764 | - else |
2765 | - cifs_set_oplock_level(cinode, 0); |
2766 | + cifs_set_oplock_level(cinode, oplock); |
2767 | } |
2768 | |
2769 | static bool |
2770 | diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c |
2771 | index 7b7b47e26dbd4..bddb2d7b39824 100644 |
2772 | --- a/fs/cifs/smb2misc.c |
2773 | +++ b/fs/cifs/smb2misc.c |
2774 | @@ -491,7 +491,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, |
2775 | |
2776 | cifs_dbg(FYI, "found in the open list\n"); |
2777 | cifs_dbg(FYI, "lease key match, lease break 0x%x\n", |
2778 | - le32_to_cpu(rsp->NewLeaseState)); |
2779 | + lease_state); |
2780 | |
2781 | if (ack_req) |
2782 | cfile->oplock_break_cancelled = false; |
2783 | @@ -500,17 +500,8 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, |
2784 | |
2785 | set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags); |
2786 | |
2787 | - /* |
2788 | - * Set or clear flags depending on the lease state being READ. |
2789 | - * HANDLE caching flag should be added when the client starts |
2790 | - * to defer closing remote file handles with HANDLE leases. |
2791 | - */ |
2792 | - if (lease_state & SMB2_LEASE_READ_CACHING_HE) |
2793 | - set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, |
2794 | - &cinode->flags); |
2795 | - else |
2796 | - clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, |
2797 | - &cinode->flags); |
2798 | + cfile->oplock_epoch = le16_to_cpu(rsp->Epoch); |
2799 | + cfile->oplock_level = lease_state; |
2800 | |
2801 | cifs_queue_oplock_break(cfile); |
2802 | kfree(lw); |
2803 | @@ -533,7 +524,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, |
2804 | |
2805 | cifs_dbg(FYI, "found in the pending open list\n"); |
2806 | cifs_dbg(FYI, "lease key match, lease break 0x%x\n", |
2807 | - le32_to_cpu(rsp->NewLeaseState)); |
2808 | + lease_state); |
2809 | |
2810 | open->oplock = lease_state; |
2811 | } |
2812 | @@ -645,18 +636,9 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) |
2813 | set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, |
2814 | &cinode->flags); |
2815 | |
2816 | - /* |
2817 | - * Set flag if the server downgrades the oplock |
2818 | - * to L2 else clear. |
2819 | - */ |
2820 | - if (rsp->OplockLevel) |
2821 | - set_bit( |
2822 | - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, |
2823 | - &cinode->flags); |
2824 | - else |
2825 | - clear_bit( |
2826 | - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, |
2827 | - &cinode->flags); |
2828 | + cfile->oplock_epoch = 0; |
2829 | + cfile->oplock_level = rsp->OplockLevel; |
2830 | + |
2831 | spin_unlock(&cfile->file_info_lock); |
2832 | |
2833 | cifs_queue_oplock_break(cfile); |
2834 | diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c |
2835 | index edd4c7292be00..67edd6e03f803 100644 |
2836 | --- a/fs/cifs/smb2ops.c |
2837 | +++ b/fs/cifs/smb2ops.c |
2838 | @@ -1379,22 +1379,38 @@ static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode, |
2839 | |
2840 | static void |
2841 | smb2_downgrade_oplock(struct TCP_Server_Info *server, |
2842 | - struct cifsInodeInfo *cinode, bool set_level2) |
2843 | + struct cifsInodeInfo *cinode, __u32 oplock, |
2844 | + unsigned int epoch, bool *purge_cache) |
2845 | { |
2846 | - if (set_level2) |
2847 | - server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II, |
2848 | - 0, NULL); |
2849 | - else |
2850 | - server->ops->set_oplock_level(cinode, 0, 0, NULL); |
2851 | + server->ops->set_oplock_level(cinode, oplock, 0, NULL); |
2852 | } |
2853 | |
2854 | static void |
2855 | -smb21_downgrade_oplock(struct TCP_Server_Info *server, |
2856 | - struct cifsInodeInfo *cinode, bool set_level2) |
2857 | +smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, |
2858 | + unsigned int epoch, bool *purge_cache); |
2859 | + |
2860 | +static void |
2861 | +smb3_downgrade_oplock(struct TCP_Server_Info *server, |
2862 | + struct cifsInodeInfo *cinode, __u32 oplock, |
2863 | + unsigned int epoch, bool *purge_cache) |
2864 | { |
2865 | - server->ops->set_oplock_level(cinode, |
2866 | - set_level2 ? SMB2_LEASE_READ_CACHING_HE : |
2867 | - 0, 0, NULL); |
2868 | + unsigned int old_state = cinode->oplock; |
2869 | + unsigned int old_epoch = cinode->epoch; |
2870 | + unsigned int new_state; |
2871 | + |
2872 | + if (epoch > old_epoch) { |
2873 | + smb21_set_oplock_level(cinode, oplock, 0, NULL); |
2874 | + cinode->epoch = epoch; |
2875 | + } |
2876 | + |
2877 | + new_state = cinode->oplock; |
2878 | + *purge_cache = false; |
2879 | + |
2880 | + if ((old_state & CIFS_CACHE_READ_FLG) != 0 && |
2881 | + (new_state & CIFS_CACHE_READ_FLG) == 0) |
2882 | + *purge_cache = true; |
2883 | + else if (old_state == new_state && (epoch - old_epoch > 1)) |
2884 | + *purge_cache = true; |
2885 | } |
2886 | |
2887 | static void |
2888 | @@ -1709,7 +1725,7 @@ struct smb_version_operations smb21_operations = { |
2889 | .print_stats = smb2_print_stats, |
2890 | .is_oplock_break = smb2_is_valid_oplock_break, |
2891 | .handle_cancelled_mid = smb2_handle_cancelled_mid, |
2892 | - .downgrade_oplock = smb21_downgrade_oplock, |
2893 | + .downgrade_oplock = smb2_downgrade_oplock, |
2894 | .need_neg = smb2_need_neg, |
2895 | .negotiate = smb2_negotiate, |
2896 | .negotiate_wsize = smb2_negotiate_wsize, |
2897 | @@ -1793,7 +1809,7 @@ struct smb_version_operations smb30_operations = { |
2898 | .dump_share_caps = smb2_dump_share_caps, |
2899 | .is_oplock_break = smb2_is_valid_oplock_break, |
2900 | .handle_cancelled_mid = smb2_handle_cancelled_mid, |
2901 | - .downgrade_oplock = smb21_downgrade_oplock, |
2902 | + .downgrade_oplock = smb3_downgrade_oplock, |
2903 | .need_neg = smb2_need_neg, |
2904 | .negotiate = smb2_negotiate, |
2905 | .negotiate_wsize = smb2_negotiate_wsize, |
2906 | @@ -1883,7 +1899,7 @@ struct smb_version_operations smb311_operations = { |
2907 | .dump_share_caps = smb2_dump_share_caps, |
2908 | .is_oplock_break = smb2_is_valid_oplock_break, |
2909 | .handle_cancelled_mid = smb2_handle_cancelled_mid, |
2910 | - .downgrade_oplock = smb21_downgrade_oplock, |
2911 | + .downgrade_oplock = smb3_downgrade_oplock, |
2912 | .need_neg = smb2_need_neg, |
2913 | .negotiate = smb2_negotiate, |
2914 | .negotiate_wsize = smb2_negotiate_wsize, |
2915 | diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h |
2916 | index 1af7afae3ad18..1a0c480745738 100644 |
2917 | --- a/fs/cifs/smb2pdu.h |
2918 | +++ b/fs/cifs/smb2pdu.h |
2919 | @@ -1025,7 +1025,7 @@ struct smb2_oplock_break { |
2920 | struct smb2_lease_break { |
2921 | struct smb2_hdr hdr; |
2922 | __le16 StructureSize; /* Must be 44 */ |
2923 | - __le16 Reserved; |
2924 | + __le16 Epoch; |
2925 | __le32 Flags; |
2926 | __u8 LeaseKey[16]; |
2927 | __le32 CurrentLeaseState; |
2928 | diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c |
2929 | index b99225e117120..f0129c033bd66 100644 |
2930 | --- a/fs/fuse/dev.c |
2931 | +++ b/fs/fuse/dev.c |
2932 | @@ -825,7 +825,6 @@ static int fuse_check_page(struct page *page) |
2933 | { |
2934 | if (page_mapcount(page) || |
2935 | page->mapping != NULL || |
2936 | - page_count(page) != 1 || |
2937 | (page->flags & PAGE_FLAGS_CHECK_AT_PREP & |
2938 | ~(1 << PG_locked | |
2939 | 1 << PG_referenced | |
2940 | diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c |
2941 | index 97be412153328..9213a9e046ae0 100644 |
2942 | --- a/fs/ubifs/io.c |
2943 | +++ b/fs/ubifs/io.c |
2944 | @@ -237,7 +237,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum) |
2945 | int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, |
2946 | int offs, int quiet, int must_chk_crc) |
2947 | { |
2948 | - int err = -EINVAL, type, node_len; |
2949 | + int err = -EINVAL, type, node_len, dump_node = 1; |
2950 | uint32_t crc, node_crc, magic; |
2951 | const struct ubifs_ch *ch = buf; |
2952 | |
2953 | @@ -290,10 +290,22 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, |
2954 | out_len: |
2955 | if (!quiet) |
2956 | ubifs_err(c, "bad node length %d", node_len); |
2957 | + if (type == UBIFS_DATA_NODE && node_len > UBIFS_DATA_NODE_SZ) |
2958 | + dump_node = 0; |
2959 | out: |
2960 | if (!quiet) { |
2961 | ubifs_err(c, "bad node at LEB %d:%d", lnum, offs); |
2962 | - ubifs_dump_node(c, buf); |
2963 | + if (dump_node) { |
2964 | + ubifs_dump_node(c, buf); |
2965 | + } else { |
2966 | + int safe_len = min3(node_len, c->leb_size - offs, |
2967 | + (int)UBIFS_MAX_DATA_NODE_SZ); |
2968 | + pr_err("\tprevent out-of-bounds memory access\n"); |
2969 | + pr_err("\ttruncated data node length %d\n", safe_len); |
2970 | + pr_err("\tcorrupted data node:\n"); |
2971 | + print_hex_dump(KERN_ERR, "\t", DUMP_PREFIX_OFFSET, 32, 1, |
2972 | + buf, safe_len, 0); |
2973 | + } |
2974 | dump_stack(); |
2975 | } |
2976 | return err; |
2977 | diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c |
2978 | index 7b9dd76403bfd..537acde2c497b 100644 |
2979 | --- a/fs/xfs/libxfs/xfs_attr_leaf.c |
2980 | +++ b/fs/xfs/libxfs/xfs_attr_leaf.c |
2981 | @@ -1332,7 +1332,9 @@ xfs_attr3_leaf_add_work( |
2982 | for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) { |
2983 | if (ichdr->freemap[i].base == tmp) { |
2984 | ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t); |
2985 | - ichdr->freemap[i].size -= sizeof(xfs_attr_leaf_entry_t); |
2986 | + ichdr->freemap[i].size -= |
2987 | + min_t(uint16_t, ichdr->freemap[i].size, |
2988 | + sizeof(xfs_attr_leaf_entry_t)); |
2989 | } |
2990 | } |
2991 | ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index); |
2992 | diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c |
2993 | index bbd1238852b3c..df7f33e60a4f6 100644 |
2994 | --- a/fs/xfs/libxfs/xfs_dir2_node.c |
2995 | +++ b/fs/xfs/libxfs/xfs_dir2_node.c |
2996 | @@ -212,6 +212,7 @@ __xfs_dir3_free_read( |
2997 | xfs_buf_ioerror(*bpp, -EFSCORRUPTED); |
2998 | xfs_verifier_error(*bpp); |
2999 | xfs_trans_brelse(tp, *bpp); |
3000 | + *bpp = NULL; |
3001 | return -EFSCORRUPTED; |
3002 | } |
3003 | |
3004 | diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h |
3005 | index b20a0945b5500..7aea750538840 100644 |
3006 | --- a/include/linux/debugfs.h |
3007 | +++ b/include/linux/debugfs.h |
3008 | @@ -77,6 +77,8 @@ static const struct file_operations __fops = { \ |
3009 | .llseek = generic_file_llseek, \ |
3010 | } |
3011 | |
3012 | +typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); |
3013 | + |
3014 | #if defined(CONFIG_DEBUG_FS) |
3015 | |
3016 | struct dentry *debugfs_create_file(const char *name, umode_t mode, |
3017 | @@ -96,7 +98,6 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent); |
3018 | struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, |
3019 | const char *dest); |
3020 | |
3021 | -typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); |
3022 | struct dentry *debugfs_create_automount(const char *name, |
3023 | struct dentry *parent, |
3024 | debugfs_automount_t f, |
3025 | @@ -211,7 +212,7 @@ static inline struct dentry *debugfs_create_symlink(const char *name, |
3026 | |
3027 | static inline struct dentry *debugfs_create_automount(const char *name, |
3028 | struct dentry *parent, |
3029 | - struct vfsmount *(*f)(void *), |
3030 | + debugfs_automount_t f, |
3031 | void *data) |
3032 | { |
3033 | return ERR_PTR(-ENODEV); |
3034 | diff --git a/include/linux/libata.h b/include/linux/libata.h |
3035 | index e2dac33eae964..3fabf57fd6e0d 100644 |
3036 | --- a/include/linux/libata.h |
3037 | +++ b/include/linux/libata.h |
3038 | @@ -499,6 +499,7 @@ enum hsm_task_states { |
3039 | }; |
3040 | |
3041 | enum ata_completion_errors { |
3042 | + AC_ERR_OK = 0, /* no error */ |
3043 | AC_ERR_DEV = (1 << 0), /* device reported error */ |
3044 | AC_ERR_HSM = (1 << 1), /* host state machine violation */ |
3045 | AC_ERR_TIMEOUT = (1 << 2), /* timeout */ |
3046 | @@ -903,9 +904,9 @@ struct ata_port_operations { |
3047 | /* |
3048 | * Command execution |
3049 | */ |
3050 | - int (*qc_defer)(struct ata_queued_cmd *qc); |
3051 | - int (*check_atapi_dma)(struct ata_queued_cmd *qc); |
3052 | - void (*qc_prep)(struct ata_queued_cmd *qc); |
3053 | + int (*qc_defer)(struct ata_queued_cmd *qc); |
3054 | + int (*check_atapi_dma)(struct ata_queued_cmd *qc); |
3055 | + enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc); |
3056 | unsigned int (*qc_issue)(struct ata_queued_cmd *qc); |
3057 | bool (*qc_fill_rtf)(struct ata_queued_cmd *qc); |
3058 | |
3059 | @@ -1168,7 +1169,7 @@ extern int ata_xfer_mode2shift(unsigned long xfer_mode); |
3060 | extern const char *ata_mode_string(unsigned long xfer_mask); |
3061 | extern unsigned long ata_id_xfermask(const u16 *id); |
3062 | extern int ata_std_qc_defer(struct ata_queued_cmd *qc); |
3063 | -extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); |
3064 | +extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc); |
3065 | extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, |
3066 | unsigned int n_elem); |
3067 | extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); |
3068 | @@ -1881,9 +1882,9 @@ extern const struct ata_port_operations ata_bmdma_port_ops; |
3069 | .sg_tablesize = LIBATA_MAX_PRD, \ |
3070 | .dma_boundary = ATA_DMA_BOUNDARY |
3071 | |
3072 | -extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc); |
3073 | +extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc); |
3074 | extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc); |
3075 | -extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); |
3076 | +extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); |
3077 | extern unsigned int ata_bmdma_port_intr(struct ata_port *ap, |
3078 | struct ata_queued_cmd *qc); |
3079 | extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance); |
3080 | diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h |
3081 | index b5b43f94f3116..01b990e4b228a 100644 |
3082 | --- a/include/linux/mtd/map.h |
3083 | +++ b/include/linux/mtd/map.h |
3084 | @@ -312,7 +312,7 @@ void map_destroy(struct mtd_info *mtd); |
3085 | ({ \ |
3086 | int i, ret = 1; \ |
3087 | for (i = 0; i < map_words(map); i++) { \ |
3088 | - if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) { \ |
3089 | + if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \ |
3090 | ret = 0; \ |
3091 | break; \ |
3092 | } \ |
3093 | diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h |
3094 | index ead97654c4e9a..1613fe5c668e1 100644 |
3095 | --- a/include/linux/seqlock.h |
3096 | +++ b/include/linux/seqlock.h |
3097 | @@ -242,6 +242,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s) |
3098 | * usual consistency guarantee. It is one wmb cheaper, because we can |
3099 | * collapse the two back-to-back wmb()s. |
3100 | * |
3101 | + * Note that, writes surrounding the barrier should be declared atomic (e.g. |
3102 | + * via WRITE_ONCE): a) to ensure the writes become visible to other threads |
3103 | + * atomically, avoiding compiler optimizations; b) to document which writes are |
3104 | + * meant to propagate to the reader critical section. This is necessary because |
3105 | + * neither writes before and after the barrier are enclosed in a seq-writer |
3106 | + * critical section that would ensure readers are aware of ongoing writes. |
3107 | + * |
3108 | * seqcount_t seq; |
3109 | * bool X = true, Y = false; |
3110 | * |
3111 | @@ -261,11 +268,11 @@ static inline void raw_write_seqcount_end(seqcount_t *s) |
3112 | * |
3113 | * void write(void) |
3114 | * { |
3115 | - * Y = true; |
3116 | + * WRITE_ONCE(Y, true); |
3117 | * |
3118 | * raw_write_seqcount_barrier(seq); |
3119 | * |
3120 | - * X = false; |
3121 | + * WRITE_ONCE(X, false); |
3122 | * } |
3123 | */ |
3124 | static inline void raw_write_seqcount_barrier(seqcount_t *s) |
3125 | diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h |
3126 | index e37112ac332f7..67b798b7115d8 100644 |
3127 | --- a/include/linux/skbuff.h |
3128 | +++ b/include/linux/skbuff.h |
3129 | @@ -1549,6 +1549,18 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_) |
3130 | return list_->qlen; |
3131 | } |
3132 | |
3133 | +/** |
3134 | + * skb_queue_len_lockless - get queue length |
3135 | + * @list_: list to measure |
3136 | + * |
3137 | + * Return the length of an &sk_buff queue. |
3138 | + * This variant can be used in lockless contexts. |
3139 | + */ |
3140 | +static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_) |
3141 | +{ |
3142 | + return READ_ONCE(list_->qlen); |
3143 | +} |
3144 | + |
3145 | /** |
3146 | * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head |
3147 | * @list: queue to initialize |
3148 | @@ -1752,7 +1764,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) |
3149 | { |
3150 | struct sk_buff *next, *prev; |
3151 | |
3152 | - list->qlen--; |
3153 | + WRITE_ONCE(list->qlen, list->qlen - 1); |
3154 | next = skb->next; |
3155 | prev = skb->prev; |
3156 | skb->next = skb->prev = NULL; |
3157 | @@ -2795,7 +2807,7 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) |
3158 | * is untouched. Otherwise it is extended. Returns zero on |
3159 | * success. The skb is freed on error. |
3160 | */ |
3161 | -static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) |
3162 | +static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len) |
3163 | { |
3164 | unsigned int size = skb->len; |
3165 | |
3166 | diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c |
3167 | index 712469a3103ac..54b30c9bd8b13 100644 |
3168 | --- a/kernel/audit_watch.c |
3169 | +++ b/kernel/audit_watch.c |
3170 | @@ -316,8 +316,6 @@ static void audit_update_watch(struct audit_parent *parent, |
3171 | if (oentry->rule.exe) |
3172 | audit_remove_mark(oentry->rule.exe); |
3173 | |
3174 | - audit_watch_log_rule_change(r, owatch, "updated_rules"); |
3175 | - |
3176 | call_rcu(&oentry->rcu, audit_free_rule_rcu); |
3177 | } |
3178 | |
3179 | diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c |
3180 | index 8648d7d297081..1253261fdb3ba 100644 |
3181 | --- a/kernel/bpf/hashtab.c |
3182 | +++ b/kernel/bpf/hashtab.c |
3183 | @@ -427,15 +427,7 @@ static void htab_elem_free_rcu(struct rcu_head *head) |
3184 | struct htab_elem *l = container_of(head, struct htab_elem, rcu); |
3185 | struct bpf_htab *htab = l->htab; |
3186 | |
3187 | - /* must increment bpf_prog_active to avoid kprobe+bpf triggering while |
3188 | - * we're calling kfree, otherwise deadlock is possible if kprobes |
3189 | - * are placed somewhere inside of slub |
3190 | - */ |
3191 | - preempt_disable(); |
3192 | - __this_cpu_inc(bpf_prog_active); |
3193 | htab_elem_free(htab, l); |
3194 | - __this_cpu_dec(bpf_prog_active); |
3195 | - preempt_enable(); |
3196 | } |
3197 | |
3198 | static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) |
3199 | diff --git a/kernel/kprobes.c b/kernel/kprobes.c |
3200 | index 9aa2dbe6a4568..3938e4670b89b 100644 |
3201 | --- a/kernel/kprobes.c |
3202 | +++ b/kernel/kprobes.c |
3203 | @@ -2012,6 +2012,9 @@ static void kill_kprobe(struct kprobe *p) |
3204 | { |
3205 | struct kprobe *kp; |
3206 | |
3207 | + if (WARN_ON_ONCE(kprobe_gone(p))) |
3208 | + return; |
3209 | + |
3210 | p->flags |= KPROBE_FLAG_GONE; |
3211 | if (kprobe_aggrprobe(p)) { |
3212 | /* |
3213 | @@ -2032,9 +2035,10 @@ static void kill_kprobe(struct kprobe *p) |
3214 | |
3215 | /* |
3216 | * The module is going away. We should disarm the kprobe which |
3217 | - * is using ftrace. |
3218 | + * is using ftrace, because ftrace framework is still available at |
3219 | + * MODULE_STATE_GOING notification. |
3220 | */ |
3221 | - if (kprobe_ftrace(p)) |
3222 | + if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed) |
3223 | disarm_kprobe_ftrace(p); |
3224 | } |
3225 | |
3226 | @@ -2154,7 +2158,10 @@ static int kprobes_module_callback(struct notifier_block *nb, |
3227 | mutex_lock(&kprobe_mutex); |
3228 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
3229 | head = &kprobe_table[i]; |
3230 | - hlist_for_each_entry_rcu(p, head, hlist) |
3231 | + hlist_for_each_entry_rcu(p, head, hlist) { |
3232 | + if (kprobe_gone(p)) |
3233 | + continue; |
3234 | + |
3235 | if (within_module_init((unsigned long)p->addr, mod) || |
3236 | (checkcore && |
3237 | within_module_core((unsigned long)p->addr, mod))) { |
3238 | @@ -2165,6 +2172,7 @@ static int kprobes_module_callback(struct notifier_block *nb, |
3239 | */ |
3240 | kill_kprobe(p); |
3241 | } |
3242 | + } |
3243 | } |
3244 | mutex_unlock(&kprobe_mutex); |
3245 | return NOTIFY_DONE; |
3246 | diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c |
3247 | index c1873d325ebda..7acae2f2478d9 100644 |
3248 | --- a/kernel/printk/printk.c |
3249 | +++ b/kernel/printk/printk.c |
3250 | @@ -2035,6 +2035,9 @@ static int __init console_setup(char *str) |
3251 | char *s, *options, *brl_options = NULL; |
3252 | int idx; |
3253 | |
3254 | + if (str[0] == 0) |
3255 | + return 1; |
3256 | + |
3257 | if (_braille_console_setup(&str, &brl_options)) |
3258 | return 1; |
3259 | |
3260 | diff --git a/kernel/sys.c b/kernel/sys.c |
3261 | index 157277cbf83aa..546cdc911dad4 100644 |
3262 | --- a/kernel/sys.c |
3263 | +++ b/kernel/sys.c |
3264 | @@ -1183,11 +1183,13 @@ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name) |
3265 | |
3266 | SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name) |
3267 | { |
3268 | - struct oldold_utsname tmp = {}; |
3269 | + struct oldold_utsname tmp; |
3270 | |
3271 | if (!name) |
3272 | return -EFAULT; |
3273 | |
3274 | + memset(&tmp, 0, sizeof(tmp)); |
3275 | + |
3276 | down_read(&uts_sem); |
3277 | memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN); |
3278 | memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN); |
3279 | diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c |
3280 | index e24e1f0c56906..e21b4d8b72405 100644 |
3281 | --- a/kernel/time/timekeeping.c |
3282 | +++ b/kernel/time/timekeeping.c |
3283 | @@ -950,9 +950,8 @@ static int scale64_check_overflow(u64 mult, u64 div, u64 *base) |
3284 | ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem))) |
3285 | return -EOVERFLOW; |
3286 | tmp *= mult; |
3287 | - rem *= mult; |
3288 | |
3289 | - do_div(rem, div); |
3290 | + rem = div64_u64(rem * mult, div); |
3291 | *base = tmp + rem; |
3292 | return 0; |
3293 | } |
3294 | diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c |
3295 | index b2fb25aefb2fc..2388fb50d1885 100644 |
3296 | --- a/kernel/trace/trace.c |
3297 | +++ b/kernel/trace/trace.c |
3298 | @@ -2600,6 +2600,9 @@ int trace_array_printk(struct trace_array *tr, |
3299 | if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) |
3300 | return 0; |
3301 | |
3302 | + if (!tr) |
3303 | + return -ENOENT; |
3304 | + |
3305 | va_start(ap, fmt); |
3306 | ret = trace_array_vprintk(tr, ip, fmt, ap); |
3307 | va_end(ap); |
3308 | @@ -7693,7 +7696,7 @@ __init static int tracer_alloc_buffers(void) |
3309 | goto out_free_buffer_mask; |
3310 | |
3311 | /* Only allocate trace_printk buffers if a trace_printk exists */ |
3312 | - if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt) |
3313 | + if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt) |
3314 | /* Must be called before global_trace.buffer is allocated */ |
3315 | trace_printk_init_buffers(); |
3316 | |
3317 | diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h |
3318 | index d1cc37e78f997..1430f6bbb1a07 100644 |
3319 | --- a/kernel/trace/trace_entries.h |
3320 | +++ b/kernel/trace/trace_entries.h |
3321 | @@ -178,7 +178,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry, |
3322 | |
3323 | F_STRUCT( |
3324 | __field( int, size ) |
3325 | - __dynamic_array(unsigned long, caller ) |
3326 | + __array( unsigned long, caller, FTRACE_STACK_ENTRIES ) |
3327 | ), |
3328 | |
3329 | F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n" |
3330 | diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c |
3331 | index af969f753e5e9..5bf072e437c41 100644 |
3332 | --- a/kernel/trace/trace_events.c |
3333 | +++ b/kernel/trace/trace_events.c |
3334 | @@ -790,6 +790,8 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) |
3335 | char *event = NULL, *sub = NULL, *match; |
3336 | int ret; |
3337 | |
3338 | + if (!tr) |
3339 | + return -ENOENT; |
3340 | /* |
3341 | * The buf format can be <subsystem>:<event-name> |
3342 | * *:<event-name> means any event by that name. |
3343 | diff --git a/lib/string.c b/lib/string.c |
3344 | index 8f1a2a04e22f5..d099762a9bd60 100644 |
3345 | --- a/lib/string.c |
3346 | +++ b/lib/string.c |
3347 | @@ -235,6 +235,30 @@ ssize_t strscpy(char *dest, const char *src, size_t count) |
3348 | EXPORT_SYMBOL(strscpy); |
3349 | #endif |
3350 | |
3351 | +/** |
3352 | + * stpcpy - copy a string from src to dest returning a pointer to the new end |
3353 | + * of dest, including src's %NUL-terminator. May overrun dest. |
3354 | + * @dest: pointer to end of string being copied into. Must be large enough |
3355 | + * to receive copy. |
3356 | + * @src: pointer to the beginning of string being copied from. Must not overlap |
3357 | + * dest. |
3358 | + * |
3359 | + * stpcpy differs from strcpy in a key way: the return value is a pointer |
3360 | + * to the new %NUL-terminating character in @dest. (For strcpy, the return |
3361 | + * value is a pointer to the start of @dest). This interface is considered |
3362 | + * unsafe as it doesn't perform bounds checking of the inputs. As such it's |
3363 | + * not recommended for usage. Instead, its definition is provided in case |
3364 | + * the compiler lowers other libcalls to stpcpy. |
3365 | + */ |
3366 | +char *stpcpy(char *__restrict__ dest, const char *__restrict__ src); |
3367 | +char *stpcpy(char *__restrict__ dest, const char *__restrict__ src) |
3368 | +{ |
3369 | + while ((*dest++ = *src++) != '\0') |
3370 | + /* nothing */; |
3371 | + return --dest; |
3372 | +} |
3373 | +EXPORT_SYMBOL(stpcpy); |
3374 | + |
3375 | #ifndef __HAVE_ARCH_STRCAT |
3376 | /** |
3377 | * strcat - Append one %NUL-terminated string to another |
3378 | diff --git a/mm/filemap.c b/mm/filemap.c |
3379 | index b046d8f147e20..05af91f495f53 100644 |
3380 | --- a/mm/filemap.c |
3381 | +++ b/mm/filemap.c |
3382 | @@ -2474,6 +2474,14 @@ filler: |
3383 | unlock_page(page); |
3384 | goto out; |
3385 | } |
3386 | + |
3387 | + /* |
3388 | + * A previous I/O error may have been due to temporary |
3389 | + * failures. |
3390 | + * Clear page error before actual read, PG_error will be |
3391 | + * set again if read page fails. |
3392 | + */ |
3393 | + ClearPageError(page); |
3394 | goto filler; |
3395 | |
3396 | out: |
3397 | diff --git a/mm/mmap.c b/mm/mmap.c |
3398 | index 7109f886e739e..7c8815636c482 100644 |
3399 | --- a/mm/mmap.c |
3400 | +++ b/mm/mmap.c |
3401 | @@ -2028,6 +2028,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, |
3402 | info.low_limit = mm->mmap_base; |
3403 | info.high_limit = TASK_SIZE; |
3404 | info.align_mask = 0; |
3405 | + info.align_offset = 0; |
3406 | return vm_unmapped_area(&info); |
3407 | } |
3408 | #endif |
3409 | @@ -2069,6 +2070,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, |
3410 | info.low_limit = max(PAGE_SIZE, mmap_min_addr); |
3411 | info.high_limit = mm->mmap_base; |
3412 | info.align_mask = 0; |
3413 | + info.align_offset = 0; |
3414 | addr = vm_unmapped_area(&info); |
3415 | |
3416 | /* |
3417 | diff --git a/mm/pagewalk.c b/mm/pagewalk.c |
3418 | index d95341cffc2f6..8d6290502631a 100644 |
3419 | --- a/mm/pagewalk.c |
3420 | +++ b/mm/pagewalk.c |
3421 | @@ -14,9 +14,9 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, |
3422 | err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk); |
3423 | if (err) |
3424 | break; |
3425 | - addr += PAGE_SIZE; |
3426 | - if (addr == end) |
3427 | + if (addr >= end - PAGE_SIZE) |
3428 | break; |
3429 | + addr += PAGE_SIZE; |
3430 | pte++; |
3431 | } |
3432 | |
3433 | diff --git a/net/atm/lec.c b/net/atm/lec.c |
3434 | index 704892d79bf19..756429c95e859 100644 |
3435 | --- a/net/atm/lec.c |
3436 | +++ b/net/atm/lec.c |
3437 | @@ -1290,6 +1290,12 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry) |
3438 | entry->vcc = NULL; |
3439 | } |
3440 | if (entry->recv_vcc) { |
3441 | + struct atm_vcc *vcc = entry->recv_vcc; |
3442 | + struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc); |
3443 | + |
3444 | + kfree(vpriv); |
3445 | + vcc->user_back = NULL; |
3446 | + |
3447 | entry->recv_vcc->push = entry->old_recv_push; |
3448 | vcc_release_async(entry->recv_vcc, -EPIPE); |
3449 | entry->recv_vcc = NULL; |
3450 | diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c |
3451 | index e545b42ab0b98..516c45771d59b 100644 |
3452 | --- a/net/batman-adv/bridge_loop_avoidance.c |
3453 | +++ b/net/batman-adv/bridge_loop_avoidance.c |
3454 | @@ -36,6 +36,7 @@ |
3455 | #include <linux/lockdep.h> |
3456 | #include <linux/netdevice.h> |
3457 | #include <linux/netlink.h> |
3458 | +#include <linux/preempt.h> |
3459 | #include <linux/rculist.h> |
3460 | #include <linux/rcupdate.h> |
3461 | #include <linux/seq_file.h> |
3462 | @@ -95,11 +96,12 @@ static inline u32 batadv_choose_claim(const void *data, u32 size) |
3463 | */ |
3464 | static inline u32 batadv_choose_backbone_gw(const void *data, u32 size) |
3465 | { |
3466 | - const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data; |
3467 | + const struct batadv_bla_backbone_gw *gw; |
3468 | u32 hash = 0; |
3469 | |
3470 | - hash = jhash(&claim->addr, sizeof(claim->addr), hash); |
3471 | - hash = jhash(&claim->vid, sizeof(claim->vid), hash); |
3472 | + gw = (struct batadv_bla_backbone_gw *)data; |
3473 | + hash = jhash(&gw->orig, sizeof(gw->orig), hash); |
3474 | + hash = jhash(&gw->vid, sizeof(gw->vid), hash); |
3475 | |
3476 | return hash % size; |
3477 | } |
3478 | @@ -1820,7 +1822,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, |
3479 | * @bat_priv: the bat priv with all the soft interface information |
3480 | * @skb: the frame to be checked |
3481 | * @vid: the VLAN ID of the frame |
3482 | - * @is_bcast: the packet came in a broadcast packet type. |
3483 | + * @packet_type: the batman packet type this frame came in |
3484 | * |
3485 | * batadv_bla_rx avoidance checks if: |
3486 | * * we have to race for a claim |
3487 | @@ -1832,7 +1834,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, |
3488 | * further process the skb. |
3489 | */ |
3490 | bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, |
3491 | - unsigned short vid, bool is_bcast) |
3492 | + unsigned short vid, int packet_type) |
3493 | { |
3494 | struct batadv_bla_backbone_gw *backbone_gw; |
3495 | struct ethhdr *ethhdr; |
3496 | @@ -1854,9 +1856,24 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, |
3497 | goto handled; |
3498 | |
3499 | if (unlikely(atomic_read(&bat_priv->bla.num_requests))) |
3500 | - /* don't allow broadcasts while requests are in flight */ |
3501 | - if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) |
3502 | - goto handled; |
3503 | + /* don't allow multicast packets while requests are in flight */ |
3504 | + if (is_multicast_ether_addr(ethhdr->h_dest)) |
3505 | + /* Both broadcast flooding or multicast-via-unicasts |
3506 | + * delivery might send to multiple backbone gateways |
3507 | + * sharing the same LAN and therefore need to coordinate |
3508 | + * which backbone gateway forwards into the LAN, |
3509 | + * by claiming the payload source address. |
3510 | + * |
3511 | + * Broadcast flooding and multicast-via-unicasts |
3512 | + * delivery use the following two batman packet types. |
3513 | + * Note: explicitly exclude BATADV_UNICAST_4ADDR, |
3514 | + * as the DHCP gateway feature will send explicitly |
3515 | + * to only one BLA gateway, so the claiming process |
3516 | + * should be avoided there. |
3517 | + */ |
3518 | + if (packet_type == BATADV_BCAST || |
3519 | + packet_type == BATADV_UNICAST) |
3520 | + goto handled; |
3521 | |
3522 | ether_addr_copy(search_claim.addr, ethhdr->h_source); |
3523 | search_claim.vid = vid; |
3524 | @@ -1884,13 +1901,14 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, |
3525 | goto allow; |
3526 | } |
3527 | |
3528 | - /* if it is a broadcast ... */ |
3529 | - if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) { |
3530 | + /* if it is a multicast ... */ |
3531 | + if (is_multicast_ether_addr(ethhdr->h_dest) && |
3532 | + (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) { |
3533 | /* ... drop it. the responsible gateway is in charge. |
3534 | * |
3535 | - * We need to check is_bcast because with the gateway |
3536 | + * We need to check packet type because with the gateway |
3537 | * feature, broadcasts (like DHCP requests) may be sent |
3538 | - * using a unicast packet type. |
3539 | + * using a unicast 4 address packet type. See comment above. |
3540 | */ |
3541 | goto handled; |
3542 | } else { |
3543 | diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h |
3544 | index 1ae93e46fb984..40b8ec9d4b1b5 100644 |
3545 | --- a/net/batman-adv/bridge_loop_avoidance.h |
3546 | +++ b/net/batman-adv/bridge_loop_avoidance.h |
3547 | @@ -29,7 +29,7 @@ struct sk_buff; |
3548 | |
3549 | #ifdef CONFIG_BATMAN_ADV_BLA |
3550 | bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, |
3551 | - unsigned short vid, bool is_bcast); |
3552 | + unsigned short vid, int packet_type); |
3553 | bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, |
3554 | unsigned short vid); |
3555 | bool batadv_bla_is_backbone_gw(struct sk_buff *skb, |
3556 | @@ -56,7 +56,7 @@ int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb); |
3557 | |
3558 | static inline bool batadv_bla_rx(struct batadv_priv *bat_priv, |
3559 | struct sk_buff *skb, unsigned short vid, |
3560 | - bool is_bcast) |
3561 | + int packet_type) |
3562 | { |
3563 | return false; |
3564 | } |
3565 | diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c |
3566 | index 19059ae26e519..1ba205c3ea9fa 100644 |
3567 | --- a/net/batman-adv/routing.c |
3568 | +++ b/net/batman-adv/routing.c |
3569 | @@ -803,6 +803,10 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, |
3570 | vid = batadv_get_vid(skb, hdr_len); |
3571 | ethhdr = (struct ethhdr *)(skb->data + hdr_len); |
3572 | |
3573 | + /* do not reroute multicast frames in a unicast header */ |
3574 | + if (is_multicast_ether_addr(ethhdr->h_dest)) |
3575 | + return true; |
3576 | + |
3577 | /* check if the destination client was served by this node and it is now |
3578 | * roaming. In this case, it means that the node has got a ROAM_ADV |
3579 | * message and that it knows the new destination in the mesh to re-route |
3580 | diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c |
3581 | index 99d2c453c8722..af0a8439cf08a 100644 |
3582 | --- a/net/batman-adv/soft-interface.c |
3583 | +++ b/net/batman-adv/soft-interface.c |
3584 | @@ -415,10 +415,10 @@ void batadv_interface_rx(struct net_device *soft_iface, |
3585 | struct vlan_ethhdr *vhdr; |
3586 | struct ethhdr *ethhdr; |
3587 | unsigned short vid; |
3588 | - bool is_bcast; |
3589 | + int packet_type; |
3590 | |
3591 | batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data; |
3592 | - is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST); |
3593 | + packet_type = batadv_bcast_packet->packet_type; |
3594 | |
3595 | skb_pull_rcsum(skb, hdr_size); |
3596 | skb_reset_mac_header(skb); |
3597 | @@ -463,7 +463,7 @@ void batadv_interface_rx(struct net_device *soft_iface, |
3598 | /* Let the bridge loop avoidance check the packet. If will |
3599 | * not handle it, we can safely push it up. |
3600 | */ |
3601 | - if (batadv_bla_rx(bat_priv, skb, vid, is_bcast)) |
3602 | + if (batadv_bla_rx(bat_priv, skb, vid, packet_type)) |
3603 | goto out; |
3604 | |
3605 | if (orig_node) |
3606 | diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c |
3607 | index 757977c54d9ef..d6da119f5082e 100644 |
3608 | --- a/net/bluetooth/hci_event.c |
3609 | +++ b/net/bluetooth/hci_event.c |
3610 | @@ -41,12 +41,27 @@ |
3611 | |
3612 | /* Handle HCI Event packets */ |
3613 | |
3614 | -static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) |
3615 | +static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb, |
3616 | + u8 *new_status) |
3617 | { |
3618 | __u8 status = *((__u8 *) skb->data); |
3619 | |
3620 | BT_DBG("%s status 0x%2.2x", hdev->name, status); |
3621 | |
3622 | + /* It is possible that we receive Inquiry Complete event right |
3623 | + * before we receive Inquiry Cancel Command Complete event, in |
3624 | + * which case the latter event should have status of Command |
3625 | + * Disallowed (0x0c). This should not be treated as error, since |
3626 | + * we actually achieve what Inquiry Cancel wants to achieve, |
3627 | + * which is to end the last Inquiry session. |
3628 | + */ |
3629 | + if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) { |
3630 | + bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command"); |
3631 | + status = 0x00; |
3632 | + } |
3633 | + |
3634 | + *new_status = status; |
3635 | + |
3636 | if (status) |
3637 | return; |
3638 | |
3639 | @@ -2772,7 +2787,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, |
3640 | |
3641 | switch (*opcode) { |
3642 | case HCI_OP_INQUIRY_CANCEL: |
3643 | - hci_cc_inquiry_cancel(hdev, skb); |
3644 | + hci_cc_inquiry_cancel(hdev, skb, status); |
3645 | break; |
3646 | |
3647 | case HCI_OP_PERIODIC_INQ: |
3648 | @@ -5257,6 +5272,11 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) |
3649 | u8 status = 0, event = hdr->evt, req_evt = 0; |
3650 | u16 opcode = HCI_OP_NOP; |
3651 | |
3652 | + if (!event) { |
3653 | + bt_dev_warn(hdev, "Received unexpected HCI Event 00000000"); |
3654 | + goto done; |
3655 | + } |
3656 | + |
3657 | if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) { |
3658 | struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data; |
3659 | opcode = __le16_to_cpu(cmd_hdr->opcode); |
3660 | @@ -5468,6 +5488,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) |
3661 | req_complete_skb(hdev, status, opcode, orig_skb); |
3662 | } |
3663 | |
3664 | +done: |
3665 | kfree_skb(orig_skb); |
3666 | kfree_skb(skb); |
3667 | hdev->stat.evt_rx++; |
3668 | diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c |
3669 | index 11012a5090708..5e3f5c1ba07d6 100644 |
3670 | --- a/net/bluetooth/l2cap_core.c |
3671 | +++ b/net/bluetooth/l2cap_core.c |
3672 | @@ -414,6 +414,9 @@ static void l2cap_chan_timeout(struct work_struct *work) |
3673 | BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); |
3674 | |
3675 | mutex_lock(&conn->chan_lock); |
3676 | + /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling |
3677 | + * this work. No need to call l2cap_chan_hold(chan) here again. |
3678 | + */ |
3679 | l2cap_chan_lock(chan); |
3680 | |
3681 | if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) |
3682 | @@ -426,12 +429,12 @@ static void l2cap_chan_timeout(struct work_struct *work) |
3683 | |
3684 | l2cap_chan_close(chan, reason); |
3685 | |
3686 | - l2cap_chan_unlock(chan); |
3687 | - |
3688 | chan->ops->close(chan); |
3689 | - mutex_unlock(&conn->chan_lock); |
3690 | |
3691 | + l2cap_chan_unlock(chan); |
3692 | l2cap_chan_put(chan); |
3693 | + |
3694 | + mutex_unlock(&conn->chan_lock); |
3695 | } |
3696 | |
3697 | struct l2cap_chan *l2cap_chan_create(void) |
3698 | @@ -1725,9 +1728,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) |
3699 | |
3700 | l2cap_chan_del(chan, err); |
3701 | |
3702 | - l2cap_chan_unlock(chan); |
3703 | - |
3704 | chan->ops->close(chan); |
3705 | + |
3706 | + l2cap_chan_unlock(chan); |
3707 | l2cap_chan_put(chan); |
3708 | } |
3709 | |
3710 | @@ -4104,7 +4107,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, |
3711 | return 0; |
3712 | } |
3713 | |
3714 | - if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { |
3715 | + if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 && |
3716 | + chan->state != BT_CONNECTED) { |
3717 | cmd_reject_invalid_cid(conn, cmd->ident, chan->scid, |
3718 | chan->dcid); |
3719 | goto unlock; |
3720 | @@ -4327,6 +4331,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, |
3721 | return 0; |
3722 | } |
3723 | |
3724 | + l2cap_chan_hold(chan); |
3725 | l2cap_chan_lock(chan); |
3726 | |
3727 | rsp.dcid = cpu_to_le16(chan->scid); |
3728 | @@ -4335,12 +4340,11 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, |
3729 | |
3730 | chan->ops->set_shutdown(chan); |
3731 | |
3732 | - l2cap_chan_hold(chan); |
3733 | l2cap_chan_del(chan, ECONNRESET); |
3734 | |
3735 | - l2cap_chan_unlock(chan); |
3736 | - |
3737 | chan->ops->close(chan); |
3738 | + |
3739 | + l2cap_chan_unlock(chan); |
3740 | l2cap_chan_put(chan); |
3741 | |
3742 | mutex_unlock(&conn->chan_lock); |
3743 | @@ -4372,20 +4376,21 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, |
3744 | return 0; |
3745 | } |
3746 | |
3747 | + l2cap_chan_hold(chan); |
3748 | l2cap_chan_lock(chan); |
3749 | |
3750 | if (chan->state != BT_DISCONN) { |
3751 | l2cap_chan_unlock(chan); |
3752 | + l2cap_chan_put(chan); |
3753 | mutex_unlock(&conn->chan_lock); |
3754 | return 0; |
3755 | } |
3756 | |
3757 | - l2cap_chan_hold(chan); |
3758 | l2cap_chan_del(chan, 0); |
3759 | |
3760 | - l2cap_chan_unlock(chan); |
3761 | - |
3762 | chan->ops->close(chan); |
3763 | + |
3764 | + l2cap_chan_unlock(chan); |
3765 | l2cap_chan_put(chan); |
3766 | |
3767 | mutex_unlock(&conn->chan_lock); |
3768 | diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c |
3769 | index a8ba752732c98..bbf08c6092f4a 100644 |
3770 | --- a/net/bluetooth/l2cap_sock.c |
3771 | +++ b/net/bluetooth/l2cap_sock.c |
3772 | @@ -1038,7 +1038,7 @@ done: |
3773 | } |
3774 | |
3775 | /* Kill socket (only if zapped and orphan) |
3776 | - * Must be called on unlocked socket. |
3777 | + * Must be called on unlocked socket, with l2cap channel lock. |
3778 | */ |
3779 | static void l2cap_sock_kill(struct sock *sk) |
3780 | { |
3781 | @@ -1189,6 +1189,7 @@ static int l2cap_sock_release(struct socket *sock) |
3782 | { |
3783 | struct sock *sk = sock->sk; |
3784 | int err; |
3785 | + struct l2cap_chan *chan; |
3786 | |
3787 | BT_DBG("sock %p, sk %p", sock, sk); |
3788 | |
3789 | @@ -1198,9 +1199,17 @@ static int l2cap_sock_release(struct socket *sock) |
3790 | bt_sock_unlink(&l2cap_sk_list, sk); |
3791 | |
3792 | err = l2cap_sock_shutdown(sock, 2); |
3793 | + chan = l2cap_pi(sk)->chan; |
3794 | + |
3795 | + l2cap_chan_hold(chan); |
3796 | + l2cap_chan_lock(chan); |
3797 | |
3798 | sock_orphan(sk); |
3799 | l2cap_sock_kill(sk); |
3800 | + |
3801 | + l2cap_chan_unlock(chan); |
3802 | + l2cap_chan_put(chan); |
3803 | + |
3804 | return err; |
3805 | } |
3806 | |
3807 | @@ -1218,12 +1227,15 @@ static void l2cap_sock_cleanup_listen(struct sock *parent) |
3808 | BT_DBG("child chan %p state %s", chan, |
3809 | state_to_string(chan->state)); |
3810 | |
3811 | + l2cap_chan_hold(chan); |
3812 | l2cap_chan_lock(chan); |
3813 | + |
3814 | __clear_chan_timer(chan); |
3815 | l2cap_chan_close(chan, ECONNRESET); |
3816 | - l2cap_chan_unlock(chan); |
3817 | - |
3818 | l2cap_sock_kill(sk); |
3819 | + |
3820 | + l2cap_chan_unlock(chan); |
3821 | + l2cap_chan_put(chan); |
3822 | } |
3823 | } |
3824 | |
3825 | diff --git a/net/core/neighbour.c b/net/core/neighbour.c |
3826 | index 6578d1f8e6c4a..d267dc04d9f74 100644 |
3827 | --- a/net/core/neighbour.c |
3828 | +++ b/net/core/neighbour.c |
3829 | @@ -2797,6 +2797,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
3830 | *pos = cpu+1; |
3831 | return per_cpu_ptr(tbl->stats, cpu); |
3832 | } |
3833 | + (*pos)++; |
3834 | return NULL; |
3835 | } |
3836 | |
3837 | diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c |
3838 | index aead5ac4dbf68..ea07c91ab3488 100644 |
3839 | --- a/net/hsr/hsr_device.c |
3840 | +++ b/net/hsr/hsr_device.c |
3841 | @@ -315,7 +315,8 @@ static void send_hsr_supervision_frame(struct hsr_port *master, |
3842 | hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(struct hsr_sup_payload)); |
3843 | ether_addr_copy(hsr_sp->MacAddressA, master->dev->dev_addr); |
3844 | |
3845 | - skb_put_padto(skb, ETH_ZLEN + HSR_HLEN); |
3846 | + if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN)) |
3847 | + return; |
3848 | |
3849 | hsr_forward_skb(skb, master); |
3850 | return; |
3851 | diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c |
3852 | index 4f3decbe6a3a5..c37e9598262e5 100644 |
3853 | --- a/net/ipv4/ip_output.c |
3854 | +++ b/net/ipv4/ip_output.c |
3855 | @@ -73,6 +73,7 @@ |
3856 | #include <net/icmp.h> |
3857 | #include <net/checksum.h> |
3858 | #include <net/inetpeer.h> |
3859 | +#include <net/inet_ecn.h> |
3860 | #include <net/lwtunnel.h> |
3861 | #include <linux/igmp.h> |
3862 | #include <linux/netfilter_ipv4.h> |
3863 | @@ -1611,7 +1612,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, |
3864 | if (IS_ERR(rt)) |
3865 | return; |
3866 | |
3867 | - inet_sk(sk)->tos = arg->tos; |
3868 | + inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK; |
3869 | |
3870 | sk->sk_priority = skb->priority; |
3871 | sk->sk_protocol = ip_hdr(skb)->protocol; |
3872 | diff --git a/net/ipv4/route.c b/net/ipv4/route.c |
3873 | index c8c51bd2d695b..e9aae4686536a 100644 |
3874 | --- a/net/ipv4/route.c |
3875 | +++ b/net/ipv4/route.c |
3876 | @@ -271,6 +271,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
3877 | *pos = cpu+1; |
3878 | return &per_cpu(rt_cache_stat, cpu); |
3879 | } |
3880 | + (*pos)++; |
3881 | return NULL; |
3882 | |
3883 | } |
3884 | diff --git a/net/key/af_key.c b/net/key/af_key.c |
3885 | index d2ec620319d76..76a008b1cbe5f 100644 |
3886 | --- a/net/key/af_key.c |
3887 | +++ b/net/key/af_key.c |
3888 | @@ -1873,6 +1873,13 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms |
3889 | if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { |
3890 | struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1]; |
3891 | |
3892 | + if ((xfilter->sadb_x_filter_splen >= |
3893 | + (sizeof(xfrm_address_t) << 3)) || |
3894 | + (xfilter->sadb_x_filter_dplen >= |
3895 | + (sizeof(xfrm_address_t) << 3))) { |
3896 | + mutex_unlock(&pfk->dump_lock); |
3897 | + return -EINVAL; |
3898 | + } |
3899 | filter = kmalloc(sizeof(*filter), GFP_KERNEL); |
3900 | if (filter == NULL) { |
3901 | mutex_unlock(&pfk->dump_lock); |
3902 | diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c |
3903 | index bcd1a5e6ebf42..2f873a0dc5836 100644 |
3904 | --- a/net/mac802154/tx.c |
3905 | +++ b/net/mac802154/tx.c |
3906 | @@ -42,11 +42,11 @@ void ieee802154_xmit_worker(struct work_struct *work) |
3907 | if (res) |
3908 | goto err_tx; |
3909 | |
3910 | - ieee802154_xmit_complete(&local->hw, skb, false); |
3911 | - |
3912 | dev->stats.tx_packets++; |
3913 | dev->stats.tx_bytes += skb->len; |
3914 | |
3915 | + ieee802154_xmit_complete(&local->hw, skb, false); |
3916 | + |
3917 | return; |
3918 | |
3919 | err_tx: |
3920 | @@ -86,6 +86,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) |
3921 | |
3922 | /* async is priority, otherwise sync is fallback */ |
3923 | if (local->ops->xmit_async) { |
3924 | + unsigned int len = skb->len; |
3925 | + |
3926 | ret = drv_xmit_async(local, skb); |
3927 | if (ret) { |
3928 | ieee802154_wake_queue(&local->hw); |
3929 | @@ -93,7 +95,7 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) |
3930 | } |
3931 | |
3932 | dev->stats.tx_packets++; |
3933 | - dev->stats.tx_bytes += skb->len; |
3934 | + dev->stats.tx_bytes += len; |
3935 | } else { |
3936 | local->tx_skb = skb; |
3937 | queue_work(local->workqueue, &local->tx_work); |
3938 | diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c |
3939 | index 42ce3ed216376..56e4ac8e2e994 100644 |
3940 | --- a/net/sunrpc/svc_xprt.c |
3941 | +++ b/net/sunrpc/svc_xprt.c |
3942 | @@ -103,8 +103,17 @@ void svc_unreg_xprt_class(struct svc_xprt_class *xcl) |
3943 | } |
3944 | EXPORT_SYMBOL_GPL(svc_unreg_xprt_class); |
3945 | |
3946 | -/* |
3947 | - * Format the transport list for printing |
3948 | +/** |
3949 | + * svc_print_xprts - Format the transport list for printing |
3950 | + * @buf: target buffer for formatted address |
3951 | + * @maxlen: length of target buffer |
3952 | + * |
3953 | + * Fills in @buf with a string containing a list of transport names, each name |
3954 | + * terminated with '\n'. If the buffer is too small, some entries may be |
3955 | + * missing, but it is guaranteed that all lines in the output buffer are |
3956 | + * complete. |
3957 | + * |
3958 | + * Returns positive length of the filled-in string. |
3959 | */ |
3960 | int svc_print_xprts(char *buf, int maxlen) |
3961 | { |
3962 | @@ -117,9 +126,9 @@ int svc_print_xprts(char *buf, int maxlen) |
3963 | list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) { |
3964 | int slen; |
3965 | |
3966 | - sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload); |
3967 | - slen = strlen(tmpstr); |
3968 | - if (len + slen > maxlen) |
3969 | + slen = snprintf(tmpstr, sizeof(tmpstr), "%s %d\n", |
3970 | + xcl->xcl_name, xcl->xcl_max_payload); |
3971 | + if (slen >= sizeof(tmpstr) || len + slen >= maxlen) |
3972 | break; |
3973 | len += slen; |
3974 | strcat(buf, tmpstr); |
3975 | diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c |
3976 | index 6035c5a380a6b..b3d48c6243c80 100644 |
3977 | --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c |
3978 | +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c |
3979 | @@ -277,6 +277,7 @@ xprt_rdma_bc_put(struct rpc_xprt *xprt) |
3980 | { |
3981 | dprintk("svcrdma: %s: xprt %p\n", __func__, xprt); |
3982 | |
3983 | + xprt_rdma_free_addresses(xprt); |
3984 | xprt_free(xprt); |
3985 | module_put(THIS_MODULE); |
3986 | } |
3987 | diff --git a/net/tipc/msg.c b/net/tipc/msg.c |
3988 | index 912f1fb97c06d..ea554756a786d 100644 |
3989 | --- a/net/tipc/msg.c |
3990 | +++ b/net/tipc/msg.c |
3991 | @@ -140,7 +140,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) |
3992 | if (fragid == FIRST_FRAGMENT) { |
3993 | if (unlikely(head)) |
3994 | goto err; |
3995 | - if (unlikely(skb_unclone(frag, GFP_ATOMIC))) |
3996 | + frag = skb_unshare(frag, GFP_ATOMIC); |
3997 | + if (unlikely(!frag)) |
3998 | goto err; |
3999 | head = *headbuf = frag; |
4000 | *buf = NULL; |
4001 | diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c |
4002 | index 32ae82a5596d9..bcd6ed6e7e25c 100644 |
4003 | --- a/net/unix/af_unix.c |
4004 | +++ b/net/unix/af_unix.c |
4005 | @@ -191,11 +191,17 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk) |
4006 | return unix_peer(osk) == NULL || unix_our_peer(sk, osk); |
4007 | } |
4008 | |
4009 | -static inline int unix_recvq_full(struct sock const *sk) |
4010 | +static inline int unix_recvq_full(const struct sock *sk) |
4011 | { |
4012 | return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; |
4013 | } |
4014 | |
4015 | +static inline int unix_recvq_full_lockless(const struct sock *sk) |
4016 | +{ |
4017 | + return skb_queue_len_lockless(&sk->sk_receive_queue) > |
4018 | + READ_ONCE(sk->sk_max_ack_backlog); |
4019 | +} |
4020 | + |
4021 | struct sock *unix_peer_get(struct sock *s) |
4022 | { |
4023 | struct sock *peer; |
4024 | @@ -1793,7 +1799,8 @@ restart_locked: |
4025 | * - unix_peer(sk) == sk by time of get but disconnected before lock |
4026 | */ |
4027 | if (other != sk && |
4028 | - unlikely(unix_peer(other) != sk && unix_recvq_full(other))) { |
4029 | + unlikely(unix_peer(other) != sk && |
4030 | + unix_recvq_full_lockless(other))) { |
4031 | if (timeo) { |
4032 | timeo = unix_wait_for_peer(other, timeo); |
4033 | |
4034 | diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c |
4035 | index 72c145dd799f1..ef1226c1c3add 100644 |
4036 | --- a/security/selinux/selinuxfs.c |
4037 | +++ b/security/selinux/selinuxfs.c |
4038 | @@ -1416,6 +1416,7 @@ static struct avc_cache_stats *sel_avc_get_stat_idx(loff_t *idx) |
4039 | *idx = cpu + 1; |
4040 | return &per_cpu(avc_cache_stats, cpu); |
4041 | } |
4042 | + (*idx)++; |
4043 | return NULL; |
4044 | } |
4045 | |
4046 | diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c |
4047 | index 0e81ea89a5965..e3f68a76d90eb 100644 |
4048 | --- a/sound/hda/hdac_bus.c |
4049 | +++ b/sound/hda/hdac_bus.c |
4050 | @@ -155,6 +155,7 @@ static void process_unsol_events(struct work_struct *work) |
4051 | struct hdac_driver *drv; |
4052 | unsigned int rp, caddr, res; |
4053 | |
4054 | + spin_lock_irq(&bus->reg_lock); |
4055 | while (bus->unsol_rp != bus->unsol_wp) { |
4056 | rp = (bus->unsol_rp + 1) % HDA_UNSOL_QUEUE_SIZE; |
4057 | bus->unsol_rp = rp; |
4058 | @@ -166,10 +167,13 @@ static void process_unsol_events(struct work_struct *work) |
4059 | codec = bus->caddr_tbl[caddr & 0x0f]; |
4060 | if (!codec || !codec->dev.driver) |
4061 | continue; |
4062 | + spin_unlock_irq(&bus->reg_lock); |
4063 | drv = drv_to_hdac_driver(codec->dev.driver); |
4064 | if (drv->unsol_event) |
4065 | drv->unsol_event(codec, res); |
4066 | + spin_lock_irq(&bus->reg_lock); |
4067 | } |
4068 | + spin_unlock_irq(&bus->reg_lock); |
4069 | } |
4070 | |
4071 | /** |
4072 | diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c |
4073 | index 3ef9af53ef497..0d5ff00cdabca 100644 |
4074 | --- a/sound/pci/asihpi/hpioctl.c |
4075 | +++ b/sound/pci/asihpi/hpioctl.c |
4076 | @@ -346,7 +346,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev, |
4077 | struct hpi_message hm; |
4078 | struct hpi_response hr; |
4079 | struct hpi_adapter adapter; |
4080 | - struct hpi_pci pci; |
4081 | + struct hpi_pci pci = { 0 }; |
4082 | |
4083 | memset(&adapter, 0, sizeof(adapter)); |
4084 | |
4085 | @@ -502,7 +502,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev, |
4086 | return 0; |
4087 | |
4088 | err: |
4089 | - for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) { |
4090 | + while (--idx >= 0) { |
4091 | if (pci.ap_mem_base[idx]) { |
4092 | iounmap(pci.ap_mem_base[idx]); |
4093 | pci.ap_mem_base[idx] = NULL; |
4094 | diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c |
4095 | index bd0e4710d15d7..79043b481d7b6 100644 |
4096 | --- a/sound/pci/hda/hda_controller.c |
4097 | +++ b/sound/pci/hda/hda_controller.c |
4098 | @@ -1158,16 +1158,23 @@ irqreturn_t azx_interrupt(int irq, void *dev_id) |
4099 | if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update)) |
4100 | active = true; |
4101 | |
4102 | - /* clear rirb int */ |
4103 | status = azx_readb(chip, RIRBSTS); |
4104 | if (status & RIRB_INT_MASK) { |
4105 | + /* |
4106 | + * Clearing the interrupt status here ensures that no |
4107 | + * interrupt gets masked after the RIRB wp is read in |
4108 | + * snd_hdac_bus_update_rirb. This avoids a possible |
4109 | + * race condition where codec response in RIRB may |
4110 | + * remain unserviced by IRQ, eventually falling back |
4111 | + * to polling mode in azx_rirb_get_response. |
4112 | + */ |
4113 | + azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); |
4114 | active = true; |
4115 | if (status & RIRB_INT_RESPONSE) { |
4116 | if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) |
4117 | udelay(80); |
4118 | snd_hdac_bus_update_rirb(bus); |
4119 | } |
4120 | - azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); |
4121 | } |
4122 | } while (active && ++repeat < 10); |
4123 | |
4124 | diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c |
4125 | index dafd22e874e99..e655425e4819e 100644 |
4126 | --- a/sound/soc/kirkwood/kirkwood-dma.c |
4127 | +++ b/sound/soc/kirkwood/kirkwood-dma.c |
4128 | @@ -136,7 +136,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream) |
4129 | err = request_irq(priv->irq, kirkwood_dma_irq, IRQF_SHARED, |
4130 | "kirkwood-i2s", priv); |
4131 | if (err) |
4132 | - return -EBUSY; |
4133 | + return err; |
4134 | |
4135 | /* |
4136 | * Enable Error interrupts. We're only ack'ing them but |
4137 | diff --git a/sound/usb/midi.c b/sound/usb/midi.c |
4138 | index 0676e7d485def..b8d4b5b3e54a1 100644 |
4139 | --- a/sound/usb/midi.c |
4140 | +++ b/sound/usb/midi.c |
4141 | @@ -1805,6 +1805,28 @@ static int snd_usbmidi_create_endpoints(struct snd_usb_midi *umidi, |
4142 | return 0; |
4143 | } |
4144 | |
4145 | +static struct usb_ms_endpoint_descriptor *find_usb_ms_endpoint_descriptor( |
4146 | + struct usb_host_endpoint *hostep) |
4147 | +{ |
4148 | + unsigned char *extra = hostep->extra; |
4149 | + int extralen = hostep->extralen; |
4150 | + |
4151 | + while (extralen > 3) { |
4152 | + struct usb_ms_endpoint_descriptor *ms_ep = |
4153 | + (struct usb_ms_endpoint_descriptor *)extra; |
4154 | + |
4155 | + if (ms_ep->bLength > 3 && |
4156 | + ms_ep->bDescriptorType == USB_DT_CS_ENDPOINT && |
4157 | + ms_ep->bDescriptorSubtype == UAC_MS_GENERAL) |
4158 | + return ms_ep; |
4159 | + if (!extra[0]) |
4160 | + break; |
4161 | + extralen -= extra[0]; |
4162 | + extra += extra[0]; |
4163 | + } |
4164 | + return NULL; |
4165 | +} |
4166 | + |
4167 | /* |
4168 | * Returns MIDIStreaming device capabilities. |
4169 | */ |
4170 | @@ -1842,11 +1864,8 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi, |
4171 | ep = get_ep_desc(hostep); |
4172 | if (!usb_endpoint_xfer_bulk(ep) && !usb_endpoint_xfer_int(ep)) |
4173 | continue; |
4174 | - ms_ep = (struct usb_ms_endpoint_descriptor *)hostep->extra; |
4175 | - if (hostep->extralen < 4 || |
4176 | - ms_ep->bLength < 4 || |
4177 | - ms_ep->bDescriptorType != USB_DT_CS_ENDPOINT || |
4178 | - ms_ep->bDescriptorSubtype != UAC_MS_GENERAL) |
4179 | + ms_ep = find_usb_ms_endpoint_descriptor(hostep); |
4180 | + if (!ms_ep) |
4181 | continue; |
4182 | if (usb_endpoint_dir_out(ep)) { |
4183 | if (endpoints[epidx].out_ep) { |
4184 | diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c |
4185 | index 08e1af85af384..66b7ccb33c7b1 100644 |
4186 | --- a/sound/usb/quirks.c |
4187 | +++ b/sound/usb/quirks.c |
4188 | @@ -1320,12 +1320,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, |
4189 | && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) |
4190 | mdelay(20); |
4191 | |
4192 | - /* Zoom R16/24, Logitech H650e, Jabra 550a, Kingston HyperX needs a tiny |
4193 | - * delay here, otherwise requests like get/set frequency return as |
4194 | - * failed despite actually succeeding. |
4195 | + /* Zoom R16/24, Logitech H650e/H570e, Jabra 550a, Kingston HyperX |
4196 | + * needs a tiny delay here, otherwise requests like get/set |
4197 | + * frequency return as failed despite actually succeeding. |
4198 | */ |
4199 | if ((chip->usb_id == USB_ID(0x1686, 0x00dd) || |
4200 | chip->usb_id == USB_ID(0x046d, 0x0a46) || |
4201 | + chip->usb_id == USB_ID(0x046d, 0x0a56) || |
4202 | chip->usb_id == USB_ID(0x0b0e, 0x0349) || |
4203 | chip->usb_id == USB_ID(0x0951, 0x16ad)) && |
4204 | (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) |
4205 | diff --git a/tools/gpio/gpio-hammer.c b/tools/gpio/gpio-hammer.c |
4206 | index 37b3f141053df..85f45800f881f 100644 |
4207 | --- a/tools/gpio/gpio-hammer.c |
4208 | +++ b/tools/gpio/gpio-hammer.c |
4209 | @@ -171,7 +171,14 @@ int main(int argc, char **argv) |
4210 | device_name = optarg; |
4211 | break; |
4212 | case 'o': |
4213 | - lines[i] = strtoul(optarg, NULL, 10); |
4214 | + /* |
4215 | + * Avoid overflow. Do not immediately error, we want to |
4216 | + * be able to accurately report on the amount of times |
4217 | + * '-o' was given to give an accurate error message |
4218 | + */ |
4219 | + if (i < GPIOHANDLES_MAX) |
4220 | + lines[i] = strtoul(optarg, NULL, 10); |
4221 | + |
4222 | i++; |
4223 | break; |
4224 | case '?': |
4225 | @@ -179,6 +186,14 @@ int main(int argc, char **argv) |
4226 | return -1; |
4227 | } |
4228 | } |
4229 | + |
4230 | + if (i >= GPIOHANDLES_MAX) { |
4231 | + fprintf(stderr, |
4232 | + "Only %d occurences of '-o' are allowed, %d were found\n", |
4233 | + GPIOHANDLES_MAX, i + 1); |
4234 | + return -1; |
4235 | + } |
4236 | + |
4237 | nlines = i; |
4238 | |
4239 | if (!device_name || !nlines) { |
4240 | diff --git a/tools/objtool/check.c b/tools/objtool/check.c |
4241 | index c7399d7f4bc77..31c512f19662e 100644 |
4242 | --- a/tools/objtool/check.c |
4243 | +++ b/tools/objtool/check.c |
4244 | @@ -502,7 +502,7 @@ static int add_jump_destinations(struct objtool_file *file) |
4245 | insn->type != INSN_JUMP_UNCONDITIONAL) |
4246 | continue; |
4247 | |
4248 | - if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET) |
4249 | + if (insn->offset == FAKE_JUMP_OFFSET) |
4250 | continue; |
4251 | |
4252 | rela = find_rela_by_dest_range(insn->sec, insn->offset, |
4253 | diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c |
4254 | index 031e64ce71564..013e3f5102258 100644 |
4255 | --- a/tools/perf/util/sort.c |
4256 | +++ b/tools/perf/util/sort.c |
4257 | @@ -2532,7 +2532,7 @@ static char *prefix_if_not_in(const char *pre, char *str) |
4258 | return str; |
4259 | |
4260 | if (asprintf(&n, "%s,%s", pre, str) < 0) |
4261 | - return NULL; |
4262 | + n = NULL; |
4263 | |
4264 | free(str); |
4265 | return n; |
4266 | diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c |
4267 | index 5a50326c8158f..e155783c601ab 100644 |
4268 | --- a/tools/perf/util/symbol-elf.c |
4269 | +++ b/tools/perf/util/symbol-elf.c |
4270 | @@ -1421,6 +1421,7 @@ struct kcore_copy_info { |
4271 | u64 first_symbol; |
4272 | u64 last_symbol; |
4273 | u64 first_module; |
4274 | + u64 first_module_symbol; |
4275 | u64 last_module_symbol; |
4276 | struct phdr_data kernel_map; |
4277 | struct phdr_data modules_map; |
4278 | @@ -1435,6 +1436,8 @@ static int kcore_copy__process_kallsyms(void *arg, const char *name, char type, |
4279 | return 0; |
4280 | |
4281 | if (strchr(name, '[')) { |
4282 | + if (!kci->first_module_symbol || start < kci->first_module_symbol) |
4283 | + kci->first_module_symbol = start; |
4284 | if (start > kci->last_module_symbol) |
4285 | kci->last_module_symbol = start; |
4286 | return 0; |
4287 | @@ -1559,6 +1562,10 @@ static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir, |
4288 | kci->etext += page_size; |
4289 | } |
4290 | |
4291 | + if (kci->first_module_symbol && |
4292 | + (!kci->first_module || kci->first_module_symbol < kci->first_module)) |
4293 | + kci->first_module = kci->first_module_symbol; |
4294 | + |
4295 | kci->first_module = round_down(kci->first_module, page_size); |
4296 | |
4297 | if (kci->last_module_symbol) { |
4298 | diff --git a/tools/testing/selftests/x86/syscall_nt.c b/tools/testing/selftests/x86/syscall_nt.c |
4299 | index 43fcab367fb0a..74e6b3fc2d09e 100644 |
4300 | --- a/tools/testing/selftests/x86/syscall_nt.c |
4301 | +++ b/tools/testing/selftests/x86/syscall_nt.c |
4302 | @@ -67,6 +67,7 @@ static void do_it(unsigned long extraflags) |
4303 | set_eflags(get_eflags() | extraflags); |
4304 | syscall(SYS_getpid); |
4305 | flags = get_eflags(); |
4306 | + set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED); |
4307 | if ((flags & extraflags) == extraflags) { |
4308 | printf("[OK]\tThe syscall worked and flags are still set\n"); |
4309 | } else { |
4310 | diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c |
4311 | index 4e4bb5dd2dcd5..5bddabb3de7c3 100644 |
4312 | --- a/virt/kvm/kvm_main.c |
4313 | +++ b/virt/kvm/kvm_main.c |
4314 | @@ -154,6 +154,7 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn) |
4315 | */ |
4316 | if (pfn_valid(pfn)) |
4317 | return PageReserved(pfn_to_page(pfn)) && |
4318 | + !is_zero_pfn(pfn) && |
4319 | !kvm_is_zone_device_pfn(pfn); |
4320 | |
4321 | return true; |
4322 | @@ -3639,7 +3640,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, |
4323 | void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, |
4324 | struct kvm_io_device *dev) |
4325 | { |
4326 | - int i; |
4327 | + int i, j; |
4328 | struct kvm_io_bus *new_bus, *bus; |
4329 | |
4330 | bus = kvm->buses[bus_idx]; |
4331 | @@ -3656,17 +3657,20 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, |
4332 | |
4333 | new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) * |
4334 | sizeof(struct kvm_io_range)), GFP_KERNEL); |
4335 | - if (!new_bus) { |
4336 | + if (new_bus) { |
4337 | + memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); |
4338 | + new_bus->dev_count--; |
4339 | + memcpy(new_bus->range + i, bus->range + i + 1, |
4340 | + (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); |
4341 | + } else { |
4342 | pr_err("kvm: failed to shrink bus, removing it completely\n"); |
4343 | - goto broken; |
4344 | + for (j = 0; j < bus->dev_count; j++) { |
4345 | + if (j == i) |
4346 | + continue; |
4347 | + kvm_iodevice_destructor(bus->range[j].dev); |
4348 | + } |
4349 | } |
4350 | |
4351 | - memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); |
4352 | - new_bus->dev_count--; |
4353 | - memcpy(new_bus->range + i, bus->range + i + 1, |
4354 | - (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); |
4355 | - |
4356 | -broken: |
4357 | rcu_assign_pointer(kvm->buses[bus_idx], new_bus); |
4358 | synchronize_srcu_expedited(&kvm->srcu); |
4359 | kfree(bus); |