Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0122-4.9.23-all-fixes.patch



Revision 2956
Mon Jul 24 12:03:46 2017 UTC by niro
File size: 40950 bytes
-added patches-4.9
diff --git a/Makefile b/Makefile
index 4bf4648d97db..0de75976cad5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 22
+SUBLEVEL = 23
EXTRAVERSION =
NAME = Roaring Lionus

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 9a6e11b6f457..5a4f2eb9d0d5 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -9,6 +9,7 @@ config MIPS
select HAVE_CONTEXT_TRACKING
select HAVE_GENERIC_DMA_COHERENT
select HAVE_IDE
+ select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 6bf10e796553..956db6e201d1 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -17,6 +17,18 @@

#include <irq.h>

+#define IRQ_STACK_SIZE THREAD_SIZE
+
+extern void *irq_stack[NR_CPUS];
+
+static inline bool on_irq_stack(int cpu, unsigned long sp)
+{
+ unsigned long low = (unsigned long)irq_stack[cpu];
+ unsigned long high = low + IRQ_STACK_SIZE;
+
+ return (low <= sp && sp <= high);
+}
+
#ifdef CONFIG_I8259
static inline int irq_canonicalize(int irq)
{
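
The on_irq_stack() helper above brackets a stack pointer against one CPU's IRQ stack. Note the upper bound is inclusive: the stack grows downward, so an empty IRQ stack has sp sitting exactly one byte past the region, at low + IRQ_STACK_SIZE. A minimal userspace model of the same test, with invented addresses and an assumed 16 KiB THREAD_SIZE:

    #include <stdbool.h>
    #include <stdio.h>

    #define IRQ_STACK_SIZE 0x4000UL                     /* assume THREAD_SIZE = 16 KiB */

    static unsigned long irq_stack_base = 0x81000000UL; /* hypothetical CPU0 base */

    static bool on_irq_stack(unsigned long sp)
    {
            unsigned long low = irq_stack_base;
            unsigned long high = low + IRQ_STACK_SIZE;

            /* sp == high is legal: a freshly switched (empty) stack */
            return low <= sp && sp <= high;
    }

    int main(void)
    {
            printf("%d\n", on_irq_stack(0x81004000UL)); /* 1: empty stack */
            printf("%d\n", on_irq_stack(0x81003f80UL)); /* 1: stack in use */
            printf("%d\n", on_irq_stack(0x81004008UL)); /* 0: out of range */
            return 0;
    }
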
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index eebf39549606..2f182bdf024f 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -216,12 +216,19 @@
LONG_S $25, PT_R25(sp)
LONG_S $28, PT_R28(sp)
LONG_S $31, PT_R31(sp)
+
+ /* Set thread_info if we're coming from user mode */
+ mfc0 k0, CP0_STATUS
+ sll k0, 3 /* extract cu0 bit */
+ bltz k0, 9f
+
ori $28, sp, _THREAD_MASK
xori $28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
.set mips64
pref 0, 0($28) /* Prefetch the current pointer */
#endif
+9:
.set pop
.endm

diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index fae2f9447792..4be2763f835d 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -102,6 +102,7 @@ void output_thread_info_defines(void)
OFFSET(TI_REGS, thread_info, regs);
DEFINE(_THREAD_SIZE, THREAD_SIZE);
DEFINE(_THREAD_MASK, THREAD_MASK);
+ DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
BLANK();
}

diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 52a4fdfc8513..2ac6c2625c13 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -187,9 +187,44 @@ NESTED(handle_int, PT_SIZE, sp)

LONG_L s0, TI_REGS($28)
LONG_S sp, TI_REGS($28)
- PTR_LA ra, ret_from_irq
- PTR_LA v0, plat_irq_dispatch
- jr v0
+
+ /*
+ * SAVE_ALL ensures we are using a valid kernel stack for the thread.
+ * Check if we are already using the IRQ stack.
+ */
+ move s1, sp # Preserve the sp
+
+ /* Get IRQ stack for this CPU */
+ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+ lui k1, %hi(irq_stack)
+#else
+ lui k1, %highest(irq_stack)
+ daddiu k1, %higher(irq_stack)
+ dsll k1, 16
+ daddiu k1, %hi(irq_stack)
+ dsll k1, 16
+#endif
+ LONG_SRL k0, SMP_CPUID_PTRSHIFT
+ LONG_ADDU k1, k0
+ LONG_L t0, %lo(irq_stack)(k1)
+
+ # Check if already on IRQ stack
+ PTR_LI t1, ~(_THREAD_SIZE-1)
+ and t1, t1, sp
+ beq t0, t1, 2f
+
+ /* Switch to IRQ stack */
+ li t1, _IRQ_STACK_SIZE
+ PTR_ADD sp, t0, t1
+
+2:
+ jal plat_irq_dispatch
+
+ /* Restore sp */
+ move sp, s1
+
+ j ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
nop
#endif
@@ -262,8 +297,44 @@ NESTED(except_vec_vi_handler, 0, sp)

LONG_L s0, TI_REGS($28)
LONG_S sp, TI_REGS($28)
- PTR_LA ra, ret_from_irq
- jr v0
+
+ /*
+ * SAVE_ALL ensures we are using a valid kernel stack for the thread.
+ * Check if we are already using the IRQ stack.
+ */
+ move s1, sp # Preserve the sp
+
+ /* Get IRQ stack for this CPU */
+ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+ lui k1, %hi(irq_stack)
+#else
+ lui k1, %highest(irq_stack)
+ daddiu k1, %higher(irq_stack)
+ dsll k1, 16
+ daddiu k1, %hi(irq_stack)
+ dsll k1, 16
+#endif
+ LONG_SRL k0, SMP_CPUID_PTRSHIFT
+ LONG_ADDU k1, k0
+ LONG_L t0, %lo(irq_stack)(k1)
+
+ # Check if already on IRQ stack
+ PTR_LI t1, ~(_THREAD_SIZE-1)
+ and t1, t1, sp
+ beq t0, t1, 2f
+
+ /* Switch to IRQ stack */
+ li t1, _IRQ_STACK_SIZE
+ PTR_ADD sp, t0, t1
+
+2:
+ jalr v0
+
+ /* Restore sp */
+ move sp, s1
+
+ j ret_from_irq
END(except_vec_vi_handler)

/*
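
Both hunks above add the same prologue to handle_int and except_vec_vi_handler: save sp in s1, load this CPU's irq_stack[] slot, and switch sp to the top of the IRQ stack unless the masked sp shows we are already there (a nested interrupt). A C rendering of that control flow, with invented stack bases; the real code does this in assembly on the k0/k1/t0/t1 scratch registers:

    #include <stdio.h>

    #define THREAD_SIZE    0x4000UL
    #define IRQ_STACK_SIZE THREAD_SIZE

    /* hypothetical per-CPU IRQ stack bases, THREAD_SIZE aligned */
    static unsigned long irq_stack[2] = { 0x81000000UL, 0x81008000UL };

    static unsigned long pick_sp(int cpu, unsigned long sp)
    {
            /* PTR_LI t1, ~(_THREAD_SIZE-1); and t1, t1, sp:
             * mask sp down to the base of whatever stack it is on. */
            unsigned long stack_base = sp & ~(THREAD_SIZE - 1);

            /* beq t0, t1, 2f: already on the IRQ stack, keep sp. */
            if (stack_base == irq_stack[cpu])
                    return sp;

            /* PTR_ADD sp, t0, t1: switch to the IRQ stack top. */
            return irq_stack[cpu] + IRQ_STACK_SIZE;
    }

    int main(void)
    {
            printf("%#lx\n", pick_sp(0, 0x82003f00UL)); /* thread stack -> 0x81004000 */
            printf("%#lx\n", pick_sp(0, 0x81003f00UL)); /* nested irq   -> unchanged  */
            return 0;
    }
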
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index f25f7eab7307..2b0a371b42af 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -25,6 +25,8 @@
#include <linux/atomic.h>
#include <asm/uaccess.h>

+void *irq_stack[NR_CPUS];
+
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
@@ -58,6 +60,15 @@ void __init init_IRQ(void)
clear_c0_status(ST0_IM);

arch_init_irq();
+
+ for_each_possible_cpu(i) {
+ int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
+ void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
+
+ irq_stack[i] = s;
+ pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
+ irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE);
+ }
}

#ifdef CONFIG_DEBUG_STACKOVERFLOW
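
init_IRQ() now allocates one IRQ stack per possible CPU. For reference, __get_free_pages() takes an allocation order (log2 of the page count), so passing the page count IRQ_STACK_SIZE / PAGE_SIZE where an order is expected reserves 2^(page count) pages per CPU. A userspace model of the sizing, with assumed values:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE      4096UL
    #define IRQ_STACK_SIZE (4 * PAGE_SIZE)  /* assume THREAD_SIZE = 16 KiB */
    #define NR_CPUS        2

    static void *irq_stack[NR_CPUS];

    int main(void)
    {
            for (int i = 0; i < NR_CPUS; i++) {
                    int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;   /* 4 */
                    /* __get_free_pages(gfp, order) returns 2^order pages,
                     * so an order of 4 yields 16 pages (64 KiB) here. */
                    size_t bytes = (1UL << irq_pages) * PAGE_SIZE;

                    irq_stack[i] = aligned_alloc(PAGE_SIZE, bytes);
                    printf("CPU%d IRQ stack at %p - %p\n", i, irq_stack[i],
                           (void *)((char *)irq_stack[i] + IRQ_STACK_SIZE));
            }
            return 0;
    }
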
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 1652f36acad1..fbbf5fcc695a 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -33,6 +33,7 @@
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
+#include <asm/irq.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
@@ -556,7 +557,19 @@ EXPORT_SYMBOL(unwind_stack_by_address);
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
unsigned long pc, unsigned long *ra)
{
- unsigned long stack_page = (unsigned long)task_stack_page(task);
+ unsigned long stack_page = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ if (on_irq_stack(cpu, *sp)) {
+ stack_page = (unsigned long)irq_stack[cpu];
+ break;
+ }
+ }
+
+ if (!stack_page)
+ stack_page = (unsigned long)task_stack_page(task);
+
return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif
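
With IRQ stacks in play, unwind_stack() must pick the right stack page before delegating to unwind_stack_by_address(): first probe every CPU's IRQ stack, then fall back to the task's thread stack. A self-contained model of that selection, reusing the invented bases from the sketches above:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS        2
    #define IRQ_STACK_SIZE 0x4000UL

    static unsigned long irq_stack[NR_CPUS] = { 0x81000000UL, 0x81008000UL };

    static bool on_irq_stack(int cpu, unsigned long sp)
    {
            return irq_stack[cpu] <= sp && sp <= irq_stack[cpu] + IRQ_STACK_SIZE;
    }

    static unsigned long pick_stack_page(unsigned long sp, unsigned long task_stack)
    {
            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                    if (on_irq_stack(cpu, sp))
                            return irq_stack[cpu];  /* unwind against the IRQ stack */
            return task_stack;                      /* fall back to the thread stack */
    }

    int main(void)
    {
            printf("%#lx\n", pick_stack_page(0x81009000UL, 0x82000000UL)); /* CPU1 IRQ stack */
            printf("%#lx\n", pick_stack_page(0x82001000UL, 0x82000000UL)); /* thread stack   */
            return 0;
    }
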
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ee54ad01f7ac..7b597ec4e9c5 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1474,7 +1474,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
INIT_LIST_HEAD(&tags->page_list);

tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
set->numa_node);
if (!tags->rqs) {
blk_mq_free_tags(tags);
@@ -1500,7 +1500,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,

do {
page = alloc_pages_node(set->numa_node,
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
this_order);
if (page)
break;
@@ -1521,7 +1521,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
* Allow kmemleak to scan these pages as they contain pointers
* to additional allocations like via ops->init_request().
*/
- kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
+ kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
entries_per_page = order_to_size(this_order) / rq_size;
to_do = min(entries_per_page, set->queue_depth - i);
left -= to_do * rq_size;
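
The three GFP_KERNEL-to-GFP_NOIO switches share one rationale: these allocations run while a block device's tag maps are being set up, and GFP_KERNEL permits direct reclaim to issue I/O, which can recurse into that same block layer; GFP_NOIO forbids I/O during reclaim. For comparison, a hedged sketch of the scoped alternative that exists in this kernel series for call chains that cannot pass flags down explicitly:

    /* Sketch only: mark a region as no-I/O so GFP_KERNEL allocations
     * made anywhere inside it behave like GFP_NOIO. */
    unsigned int noio_flags = memalloc_noio_save();

    tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
                             GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
                             set->numa_node);

    memalloc_noio_restore(noio_flags);
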
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 851015e652b8..354a16ab5a16 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -506,7 +506,7 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
ctx->dev = caam_jr_alloc();

if (IS_ERR(ctx->dev)) {
- dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n");
+ pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(ctx->dev);
}

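The caampkc change fixes a use of an error-encoded pointer: when caam_jr_alloc() fails, ctx->dev holds an ERR_PTR() value, so passing it to dev_err() dereferences something that is not a device. The general shape of the pattern:

    /* ERR_PTR values are error codes folded into a pointer; decode them
     * with PTR_ERR(), never use them as a real device. */
    ctx->dev = caam_jr_alloc();
    if (IS_ERR(ctx->dev)) {
            pr_err("Job Ring Device allocation for transform failed\n");
            return PTR_ERR(ctx->dev);       /* e.g. -ENODEV */
    }
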
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index e483b78c6343..98468b96c32f 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -282,7 +282,8 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
/* Try to run it through DECO0 */
ret = run_descriptor_deco0(ctrldev, desc, &status);

- if (ret || status) {
+ if (ret ||
+ (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
dev_err(ctrldev,
"Failed to deinstantiate RNG4 SH%d\n",
sh_idx);
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index cf04d249a6a4..6b54e02da10c 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -303,6 +303,9 @@ static const struct file_operations dma_buf_fops = {
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,
.unlocked_ioctl = dma_buf_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = dma_buf_ioctl,
+#endif
};

/*
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 670beebc32f6..923150de46cb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -240,6 +240,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_IRQ_ACTIVE:
case I915_PARAM_ALLOW_BATCHBUFFER:
case I915_PARAM_LAST_DISPATCH:
+ case I915_PARAM_HAS_EXEC_CONSTANTS:
/* Reject all old ums/dri params. */
return -ENODEV;
case I915_PARAM_CHIPSET_ID:
@@ -266,9 +267,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_BSD2:
value = intel_engine_initialized(&dev_priv->engine[VCS2]);
break;
- case I915_PARAM_HAS_EXEC_CONSTANTS:
- value = INTEL_GEN(dev_priv) >= 4;
- break;
case I915_PARAM_HAS_LLC:
value = HAS_LLC(dev_priv);
break;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index da832d3cdca7..e0d72457b23c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1225,7 +1225,7 @@ struct intel_gen6_power_mgmt {
unsigned boosts;

/* manual wa residency calculations */
- struct intel_rps_ei up_ei, down_ei;
+ struct intel_rps_ei ei;

/*
* Protects RPS/RC6 register access and PCU communication.
@@ -1751,8 +1751,6 @@ struct drm_i915_private {

const struct intel_device_info info;

- int relative_constants_mode;
-
void __iomem *regs;

struct intel_uncore uncore;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 00eb4814b913..7b2030925825 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4587,8 +4587,6 @@ i915_gem_load_init(struct drm_device *dev)
init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

- dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
-
init_waitqueue_head(&dev_priv->pending_flip_queue);

dev_priv->mm.interruptible = true;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 0c400f852a76..2117f172d7a2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1454,10 +1454,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas)
{
- struct drm_i915_private *dev_priv = params->request->i915;
u64 exec_start, exec_len;
- int instp_mode;
- u32 instp_mask;
int ret;

ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
@@ -1468,56 +1465,11 @@ execbuf_submit(struct i915_execbuffer_params *params,
if (ret)
return ret;

- instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
- instp_mask = I915_EXEC_CONSTANTS_MASK;
- switch (instp_mode) {
- case I915_EXEC_CONSTANTS_REL_GENERAL:
- case I915_EXEC_CONSTANTS_ABSOLUTE:
- case I915_EXEC_CONSTANTS_REL_SURFACE:
- if (instp_mode != 0 && params->engine->id != RCS) {
- DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
- return -EINVAL;
- }
-
- if (instp_mode != dev_priv->relative_constants_mode) {
- if (INTEL_INFO(dev_priv)->gen < 4) {
- DRM_DEBUG("no rel constants on pre-gen4\n");
- return -EINVAL;
- }
-
- if (INTEL_INFO(dev_priv)->gen > 5 &&
- instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
- DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
- return -EINVAL;
- }
-
- /* The HW changed the meaning on this bit on gen6 */
- if (INTEL_INFO(dev_priv)->gen >= 6)
- instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
- }
- break;
- default:
- DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
+ if (args->flags & I915_EXEC_CONSTANTS_MASK) {
+ DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n");
return -EINVAL;
}

- if (params->engine->id == RCS &&
- instp_mode != dev_priv->relative_constants_mode) {
- struct intel_ring *ring = params->request->ring;
-
- ret = intel_ring_begin(params->request, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, INSTPM);
- intel_ring_emit(ring, instp_mask << 16 | instp_mode);
- intel_ring_advance(ring);
-
- dev_priv->relative_constants_mode = instp_mode;
- }
-
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(params->request);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 1c237d02f30b..755d78832a66 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -233,7 +233,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE);
- rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
+ synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */

return freed;
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3fc286cd1157..02908e37c228 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -990,68 +990,51 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

-static bool vlv_c0_above(struct drm_i915_private *dev_priv,
- const struct intel_rps_ei *old,
- const struct intel_rps_ei *now,
- int threshold)
-{
- u64 time, c0;
- unsigned int mul = 100;
-
- if (old->cz_clock == 0)
- return false;
-
- if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
- mul <<= 8;
-
- time = now->cz_clock - old->cz_clock;
- time *= threshold * dev_priv->czclk_freq;
-
- /* Workload can be split between render + media, e.g. SwapBuffers
- * being blitted in X after being rendered in mesa. To account for
- * this we need to combine both engines into our activity counter.
- */
- c0 = now->render_c0 - old->render_c0;
- c0 += now->media_c0 - old->media_c0;
- c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
-
- return c0 >= time;
-}
-
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
- vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
- dev_priv->rps.up_ei = dev_priv->rps.down_ei;
+ memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
+ const struct intel_rps_ei *prev = &dev_priv->rps.ei;
struct intel_rps_ei now;
u32 events = 0;

- if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
+ if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
return 0;

vlv_c0_read(dev_priv, &now);
if (now.cz_clock == 0)
return 0;

- if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
- if (!vlv_c0_above(dev_priv,
- &dev_priv->rps.down_ei, &now,
- dev_priv->rps.down_threshold))
- events |= GEN6_PM_RP_DOWN_THRESHOLD;
- dev_priv->rps.down_ei = now;
- }
+ if (prev->cz_clock) {
+ u64 time, c0;
+ unsigned int mul;
+
+ mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
+ if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+ mul <<= 8;

- if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
- if (vlv_c0_above(dev_priv,
- &dev_priv->rps.up_ei, &now,
- dev_priv->rps.up_threshold))
- events |= GEN6_PM_RP_UP_THRESHOLD;
- dev_priv->rps.up_ei = now;
+ time = now.cz_clock - prev->cz_clock;
+ time *= dev_priv->czclk_freq;
+
+ /* Workload can be split between render + media,
+ * e.g. SwapBuffers being blitted in X after being rendered in
+ * mesa. To account for this we need to combine both engines
+ * into our activity counter.
+ */
+ c0 = now.render_c0 - prev->render_c0;
+ c0 += now.media_c0 - prev->media_c0;
+ c0 *= mul;
+
+ if (c0 > time * dev_priv->rps.up_threshold)
+ events = GEN6_PM_RP_UP_THRESHOLD;
+ else if (c0 < time * dev_priv->rps.down_threshold)
+ events = GEN6_PM_RP_DOWN_THRESHOLD;
}

+ dev_priv->rps.ei = now;
return events;
}

@@ -4490,7 +4473,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
- dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
+ dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

@@ -4531,6 +4514,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (!IS_GEN2(dev_priv))
dev->vblank_disable_immediate = true;

+ /* Most platforms treat the display irq block as an always-on
+ * power domain. vlv/chv can disable it at runtime and need
+ * special care to avoid writing any of the display block registers
+ * outside of the power domain. We defer setting up the display irqs
+ * in this case to the runtime pm.
+ */
+ dev_priv->display_irqs_enabled = true;
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ dev_priv->display_irqs_enabled = false;
+
dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

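The rewritten vlv_wa_c0_ei() collapses the old up/down expiry bookkeeping into a single EI sample: scale the combined render+media C0 delta to a percentage and compare it against the elapsed CZ time multiplied by each threshold. A unit-normalized userspace model with invented counter values (czclk_freq taken as 1 so the scaling cancels):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long up_threshold = 95;    /* % busy to upclock */
            unsigned long long down_threshold = 85;  /* % busy to downclock */
            unsigned long long mul = 100;            /* scale delta to percent */

            unsigned long long dtime = 1000;         /* CZ clock ticks in the EI */
            unsigned long long dc0 = 980;            /* render + media C0 delta */

            unsigned long long time = dtime;         /* czclk_freq normalized */
            unsigned long long c0 = dc0 * mul;       /* 98000, i.e. 98% busy */

            if (c0 > time * up_threshold)            /* 98000 > 95000 */
                    puts("GEN6_PM_RP_UP_THRESHOLD");
            else if (c0 < time * down_threshold)
                    puts("GEN6_PM_RP_DOWN_THRESHOLD");
            else
                    puts("stay at current frequency");
            return 0;
    }
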
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b9be8a6141d8..5dc6082639db 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3696,10 +3696,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
crtc->base.mode = crtc->base.state->mode;

- DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
- old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
- pipe_config->pipe_src_w, pipe_config->pipe_src_h);
-
/*
* Update pipe size and adjust fitter if needed: the reason for this is
* that in compute_mode_changes we check the native mode (not the pfit
@@ -4832,23 +4828,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
struct intel_crtc_scaler_state *scaler_state =
&crtc->config->scaler_state;

- DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
-
if (crtc->config->pch_pfit.enabled) {
int id;

- if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
- DRM_ERROR("Requesting pfit without getting a scaler first\n");
+ if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
return;
- }

id = scaler_state->scaler_id;
I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
-
- DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
}
}

diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 334d47b5811a..db3afdf698ca 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -219,7 +219,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
}
}
}
- if (dev_priv->display.hpd_irq_setup)
+ if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);

@@ -425,7 +425,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
}
}

- if (storm_detected)
+ if (storm_detected && dev_priv->display_irqs_enabled)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock(&dev_priv->irq_lock);

@@ -471,10 +471,12 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked checks happy.
*/
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled)
+ dev_priv->display.hpd_irq_setup(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+ }
}

static void i915_hpd_poll_init_work(struct work_struct *work)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 4147e51cf893..67db1577ee49 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2152,42 +2152,30 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,

void intel_lr_context_resume(struct drm_i915_private *dev_priv)
{
+ struct i915_gem_context *ctx = dev_priv->kernel_context;
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
-
- /* Because we emit WA_TAIL_DWORDS there may be a disparity
- * between our bookkeeping in ce->ring->head and ce->ring->tail and
- * that stored in context. As we only write new commands from
- * ce->ring->tail onwards, everything before that is junk. If the GPU
- * starts reading from its RING_HEAD from the context, it may try to
- * execute that junk and die.
- *
- * So to avoid that we reset the context images upon resume. For
- * simplicity, we just zero everything out.
- */
- list_for_each_entry(ctx, &dev_priv->context_list, link) {
- for_each_engine(engine, dev_priv) {
- struct intel_context *ce = &ctx->engine[engine->id];
- u32 *reg;

- if (!ce->state)
- continue;
+ for_each_engine(engine, dev_priv) {
+ struct intel_context *ce = &ctx->engine[engine->id];
+ void *vaddr;
+ uint32_t *reg_state;

- reg = i915_gem_object_pin_map(ce->state->obj,
- I915_MAP_WB);
- if (WARN_ON(IS_ERR(reg)))
- continue;
+ if (!ce->state)
+ continue;

- reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
- reg[CTX_RING_HEAD+1] = 0;
- reg[CTX_RING_TAIL+1] = 0;
+ vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
+ if (WARN_ON(IS_ERR(vaddr)))
+ continue;

- ce->state->obj->dirty = true;
- i915_gem_object_unpin_map(ce->state->obj);
+ reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;

- ce->ring->head = ce->ring->tail = 0;
- ce->ring->last_retired_head = -1;
- intel_ring_update_space(ce->ring);
- }
+ reg_state[CTX_RING_HEAD+1] = 0;
+ reg_state[CTX_RING_TAIL+1] = 0;
+
+ ce->state->obj->dirty = true;
+ i915_gem_object_unpin_map(ce->state->obj);
+
+ ce->ring->head = 0;
+ ce->ring->tail = 0;
}
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e559a45ff1f7..2c6d59d4b6d3 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4903,6 +4903,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
break;
}

+ /* When byt can survive without system hang with dynamic
+ * sw freq adjustments, this restriction can be lifted.
+ */
+ if (IS_VALLEYVIEW(dev_priv))
+ goto skip_hw_write;
+
I915_WRITE(GEN6_RP_UP_EI,
GT_INTERVAL_FROM_US(dev_priv, ei_up));
I915_WRITE(GEN6_RP_UP_THRESHOLD,
@@ -4923,6 +4929,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);

+skip_hw_write:
dev_priv->rps.power = new_power;
dev_priv->rps.up_threshold = threshold_up;
dev_priv->rps.down_threshold = threshold_down;
@@ -4933,8 +4940,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
u32 mask = 0;

+ /* We use UP_EI_EXPIRED interupts for both up/down in manual mode */
if (val > dev_priv->rps.min_freq_softlimit)
- mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
if (val < dev_priv->rps.max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

@@ -5034,7 +5042,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
+ if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
gen6_rps_reset_ei(dev_priv);
I915_WRITE(GEN6_PMINTRMSK,
gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
@@ -7960,10 +7968,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
* @timeout_base_ms: timeout for polling with preemption enabled
*
* Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
- * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
* The request is acknowledged once the PCODE reply dword equals @reply after
* applying @reply_mask. Polling is first attempted with preemption enabled
- * for @timeout_base_ms and if this times out for another 10 ms with
+ * for @timeout_base_ms and if this times out for another 50 ms with
* preemption disabled.
*
* Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
@@ -7999,14 +8007,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
* worst case) _and_ PCODE was busy for some reason even after a
* (queued) request and @timeout_base_ms delay. As a workaround retry
* the poll with preemption disabled to maximize the number of
- * requests. Increase the timeout from @timeout_base_ms to 10ms to
+ * requests. Increase the timeout from @timeout_base_ms to 50ms to
* account for interrupts that could reduce the number of these
- * requests.
+ * requests, and for any quirks of the PCODE firmware that delays
+ * the request completion.
*/
DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
WARN_ON_ONCE(timeout_base_ms > 3);
preempt_disable();
- ret = wait_for_atomic(COND, 10);
+ ret = wait_for_atomic(COND, 50);
preempt_enable();

out:
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index d4f3239b5686..f283b714aa79 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -64,6 +64,7 @@ struct bcm2835_i2c_dev {
int irq;
struct i2c_adapter adapter;
struct completion completion;
+ struct i2c_msg *curr_msg;
u32 msg_err;
u8 *msg_buf;
size_t msg_buf_remaining;
@@ -126,14 +127,13 @@ static irqreturn_t bcm2835_i2c_isr(int this_irq, void *data)
return IRQ_HANDLED;
}

- if (val & BCM2835_I2C_S_RXD) {
- bcm2835_drain_rxfifo(i2c_dev);
- if (!(val & BCM2835_I2C_S_DONE))
- return IRQ_HANDLED;
- }
-
if (val & BCM2835_I2C_S_DONE) {
- if (i2c_dev->msg_buf_remaining)
+ if (i2c_dev->curr_msg->flags & I2C_M_RD) {
+ bcm2835_drain_rxfifo(i2c_dev);
+ val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S);
+ }
+
+ if ((val & BCM2835_I2C_S_RXD) || i2c_dev->msg_buf_remaining)
i2c_dev->msg_err = BCM2835_I2C_S_LEN;
else
i2c_dev->msg_err = 0;
@@ -141,11 +141,16 @@ static irqreturn_t bcm2835_i2c_isr(int this_irq, void *data)
return IRQ_HANDLED;
}

- if (val & BCM2835_I2C_S_TXD) {
+ if (val & BCM2835_I2C_S_TXW) {
bcm2835_fill_txfifo(i2c_dev);
return IRQ_HANDLED;
}

+ if (val & BCM2835_I2C_S_RXR) {
+ bcm2835_drain_rxfifo(i2c_dev);
+ return IRQ_HANDLED;
+ }
+
return IRQ_NONE;
}

@@ -155,6 +160,7 @@ static int bcm2835_i2c_xfer_msg(struct bcm2835_i2c_dev *i2c_dev,
u32 c;
unsigned long time_left;

+ i2c_dev->curr_msg = msg;
i2c_dev->msg_buf = msg->buf;
i2c_dev->msg_buf_remaining = msg->len;
reinit_completion(&i2c_dev->completion);
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 377947580203..283ff7e17a0f 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -229,12 +229,10 @@ static int bcm47xxpart_parse(struct mtd_info *master,

last_trx_part = curr_part - 1;

- /*
- * We have whole TRX scanned, skip to the next part. Use
- * roundown (not roundup), as the loop will increase
- * offset in next step.
- */
- offset = rounddown(offset + trx->length, blocksize);
+ /* Jump to the end of TRX */
+ offset = roundup(offset + trx->length, blocksize);
+ /* Next loop iteration will increase the offset */
+ offset -= blocksize;
continue;
}

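The bcm47xxpart change matters precisely when a TRX image ends on a block boundary: rounddown() left offset at that boundary, and the loop's subsequent += blocksize step jumped over the first block after the image, so roundup() minus one block keeps the next probe on it. A worked example with invented sizes:

    #include <stdio.h>

    #define rounddown(x, y) ((x) - ((x) % (y)))
    #define roundup(x, y)   ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
            unsigned int blocksize = 0x10000;
            unsigned int offset = 0x10000;
            unsigned int trx_length = 0x20000;  /* TRX ends block-aligned at 0x30000 */

            unsigned int old_next = rounddown(offset + trx_length, blocksize)
                                    + blocksize;  /* loop adds blocksize */
            unsigned int new_next = roundup(offset + trx_length, blocksize)
                                    - blocksize + blocksize;

            printf("old: %#x (skips the block at 0x30000)\n", old_next); /* 0x40000 */
            printf("new: %#x\n", new_next);                              /* 0x30000 */
            return 0;
    }
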
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index a849da92f857..6b8635378f1f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
struct mlx4_cq *cq;

+ rcu_read_lock();
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cqn & (dev->caps.num_cqs - 1));
+ rcu_read_unlock();
+
if (!cq) {
mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
return;
}

+ /* Acessing the CQ outside of rcu_read_lock is safe, because
+ * the CQ is freed only after interrupt handling is completed.
+ */
++cq->arm_sn;

cq->comp(cq);
@@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
struct mlx4_cq *cq;

- spin_lock(&cq_table->lock);
-
+ rcu_read_lock();
cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
- if (cq)
- atomic_inc(&cq->refcount);
-
- spin_unlock(&cq_table->lock);
+ rcu_read_unlock();

if (!cq) {
- mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+ mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
return;
}

+ /* Acessing the CQ outside of rcu_read_lock is safe, because
+ * the CQ is freed only after interrupt handling is completed.
+ */
cq->event(cq, event_type);
-
- if (atomic_dec_and_test(&cq->refcount))
- complete(&cq->free);
}

static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
if (err)
return err;

- spin_lock_irq(&cq_table->lock);
+ spin_lock(&cq_table->lock);
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
- spin_unlock_irq(&cq_table->lock);
+ spin_unlock(&cq_table->lock);
if (err)
goto err_icm;

@@ -349,9 +351,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
return 0;

err_radix:
- spin_lock_irq(&cq_table->lock);
+ spin_lock(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock_irq(&cq_table->lock);
+ spin_unlock(&cq_table->lock);

err_icm:
mlx4_cq_free_icm(dev, cq->cqn);
@@ -370,15 +372,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

+ spin_lock(&cq_table->lock);
+ radix_tree_delete(&cq_table->tree, cq->cqn);
+ spin_unlock(&cq_table->lock);
+
synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);

- spin_lock_irq(&cq_table->lock);
- radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock_irq(&cq_table->lock);
-
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);
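
The mlx4 locking rework moves CQ lookup in the completion/event paths from a spinlock plus refcount to a plain RCU read-side critical section, and makes the radix-tree delete in mlx4_cq_free() happen before synchronize_irq(), which is what makes touching the CQ after rcu_read_unlock() safe. The lookup pattern in isolation (kernel context, not a standalone program):

    struct mlx4_cq *cq;

    rcu_read_lock();
    cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
    rcu_read_unlock();

    if (!cq)
            return;         /* bogus CQN, nothing registered */

    /* No reference held: safe because mlx4_cq_free() removes the
     * radix-tree entry and then synchronize_irq()s before freeing,
     * so the CQ cannot vanish while this handler is running. */
    ++cq->arm_sn;
    cq->comp(cq);
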
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 4d3ddc2f7e43..5d484581becd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -444,8 +444,14 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;

ring->stride = stride;
- if (ring->stride <= TXBB_SIZE)
+ if (ring->stride <= TXBB_SIZE) {
+ /* Stamp first unused send wqe */
+ __be32 *ptr = (__be32 *)ring->buf;
+ __be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
+ *ptr = stamp;
+ /* Move pointer to start of rx section */
ring->buf += TXBB_SIZE;
+ }

ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index c548beaaf910..32f76bf018c3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2980,6 +2980,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
put_res(dev, slave, srqn, RES_SRQ);
qp->srq = srq;
}
+
+ /* Save param3 for dynamic changes from VST back to VGT */
+ qp->param3 = qpc->param3;
put_res(dev, slave, rcqn, RES_CQ);
put_res(dev, slave, mtt_base, RES_MTT);
res_end_move(dev, slave, RES_QP, qpn);
@@ -3772,7 +3775,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
u8 orig_sched_queue;
- __be32 orig_param3 = qpc->param3;
u8 orig_vlan_control = qpc->pri_path.vlan_control;
u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
u8 orig_pri_path_fl = qpc->pri_path.fl;
@@ -3814,7 +3816,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
*/
if (!err) {
qp->sched_queue = orig_sched_queue;
- qp->param3 = orig_param3;
qp->vlan_control = orig_vlan_control;
qp->fvl_rx = orig_fvl_rx;
qp->pri_path_fl = orig_pri_path_fl;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 4e0c5653054b..b7273be9303d 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1422,7 +1422,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
cancel_work_sync(&rt2x00dev->intf_work);
cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
cancel_work_sync(&rt2x00dev->sleep_work);
-#ifdef CONFIG_RT2X00_LIB_USB
+#if IS_ENABLED(CONFIG_RT2X00_LIB_USB)
if (rt2x00_is_usb(rt2x00dev)) {
usb_kill_anchored_urbs(rt2x00dev->anchor);
hrtimer_cancel(&rt2x00dev->txstatus_timer);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 6005e14213ca..662705e31136 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -319,10 +319,8 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
entry->skb->data, length,
rt2x00usb_interrupt_txdone, entry);

- usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
if (status) {
- usb_unanchor_urb(entry_priv->urb);
if (status == -ENODEV)
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@@ -410,10 +408,8 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
entry->skb->data, entry->skb->len,
rt2x00usb_interrupt_rxdone, entry);

- usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
if (status) {
- usb_unanchor_urb(entry_priv->urb);
if (status == -ENODEV)
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@@ -824,10 +820,6 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
if (retval)
goto exit_free_device;

- retval = rt2x00lib_probe_dev(rt2x00dev);
- if (retval)
- goto exit_free_reg;
-
rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
sizeof(struct usb_anchor),
GFP_KERNEL);
@@ -835,10 +827,17 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
retval = -ENOMEM;
goto exit_free_reg;
}
-
init_usb_anchor(rt2x00dev->anchor);
+
+ retval = rt2x00lib_probe_dev(rt2x00dev);
+ if (retval)
+ goto exit_free_anchor;
+
return 0;

+exit_free_anchor:
+ usb_kill_anchored_urbs(rt2x00dev->anchor);
+
exit_free_reg:
rt2x00usb_free_reg(rt2x00dev);

diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index c28ccf1b5a1f..35fb2bef0e45 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2650,8 +2650,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
if (ret < 0)
return ret;

- /* The port state is unknown until the reset completes. */
- if (!(portstatus & USB_PORT_STAT_RESET))
+ /*
+ * The port state is unknown until the reset completes.
+ *
+ * On top of that, some chips may require additional time
+ * to re-establish a connection after the reset is complete,
+ * so also wait for the connection to be re-established.
+ */
+ if (!(portstatus & USB_PORT_STAT_RESET) &&
+ (portstatus & USB_PORT_STAT_CONNECTION))
break;

/* switch to the long delay after two short delay failures */
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
index 516ffb4dc9a0..f419dd999581 100644
--- a/fs/orangefs/devorangefs-req.c
+++ b/fs/orangefs/devorangefs-req.c
@@ -402,8 +402,9 @@ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
/* remove the op from the in progress hash table */
op = orangefs_devreq_remove_op(head.tag);
if (!op) {
- gossip_err("WARNING: No one's waiting for tag %llu\n",
- llu(head.tag));
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "%s: No one's waiting for tag %llu\n",
+ __func__, llu(head.tag));
return ret;
}

diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index 38887cc5577f..0748a26598fc 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -671,8 +671,10 @@ int orangefs_prepare_debugfs_help_string(int at_boot)
*/
cdm_element_count =
orangefs_prepare_cdm_array(client_debug_array_string);
- if (cdm_element_count <= 0)
+ if (cdm_element_count <= 0) {
+ kfree(new);
goto out;
+ }

for (i = 0; i < cdm_element_count; i++) {
strlcat(new, "\t", string_size);
@@ -963,13 +965,13 @@ int orangefs_debugfs_new_client_string(void __user *arg)
int ret;

ret = copy_from_user(&client_debug_array_string,
- (void __user *)arg,
- ORANGEFS_MAX_DEBUG_STRING_LEN);
+ (void __user *)arg,
+ ORANGEFS_MAX_DEBUG_STRING_LEN);

if (ret != 0) {
pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
__func__);
- return -EIO;
+ return -EFAULT;
}

/*
@@ -984,17 +986,18 @@ int orangefs_debugfs_new_client_string(void __user *arg)
*/
client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN - 1] =
'\0';
-
+
pr_info("%s: client debug array string has been received.\n",
__func__);

if (!help_string_initialized) {

/* Build a proper debug help string. */
- if (orangefs_prepare_debugfs_help_string(0)) {
+ ret = orangefs_prepare_debugfs_help_string(0);
+ if (ret) {
gossip_err("%s: no debug help string \n",
__func__);
- return -EIO;
+ return ret;
}

}
@@ -1007,7 +1010,7 @@ int orangefs_debugfs_new_client_string(void __user *arg)

help_string_initialized++;

- return ret;
+ return 0;
}

int orangefs_debugfs_new_debug(void __user *arg)
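
The -EIO to -EFAULT change in orangefs_debugfs_new_client_string() follows the kernel convention: copy_from_user() returns the number of bytes it failed to copy, and any nonzero result signals a bad user pointer, reported as -EFAULT. The canonical shape:

    /* copy_from_user() returns bytes NOT copied; nonzero => -EFAULT. */
    if (copy_from_user(&client_debug_array_string, arg,
                       ORANGEFS_MAX_DEBUG_STRING_LEN))
            return -EFAULT;
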
diff --git a/fs/orangefs/orangefs-dev-proto.h b/fs/orangefs/orangefs-dev-proto.h
index a3d84ffee905..f380f9ed1b28 100644
--- a/fs/orangefs/orangefs-dev-proto.h
+++ b/fs/orangefs/orangefs-dev-proto.h
@@ -50,8 +50,7 @@
* Misc constants. Please retain them as multiples of 8!
* Otherwise 32-64 bit interactions will be messed up :)
*/
-#define ORANGEFS_MAX_DEBUG_STRING_LEN 0x00000400
-#define ORANGEFS_MAX_DEBUG_ARRAY_LEN 0x00000800
+#define ORANGEFS_MAX_DEBUG_STRING_LEN 0x00000800

/*
* The maximum number of directory entries in a single request is 96.
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f2b04a77258d..8ab0974f4ee2 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4235,8 +4235,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
goto out;
if (po->tp_version >= TPACKET_V3 &&
- (int)(req->tp_block_size -
- BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0
+ req->tp_block_size <=
+ BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
goto out;
if (unlikely(req->tp_frame_size < po->tp_hdrlen +
po->tp_reserve))
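
The af_packet change closes a 32-bit wrap in the TPACKET_V3 block-size check (the issue tracked as CVE-2017-7308): a huge tp_sizeof_priv made BLK_PLUS_PRIV() overflow, so the old signed difference looked positive and an undersized block passed validation; promoting the operand to u64 keeps the comparison exact. A userspace demonstration, with the macro bodies approximated:

    #include <stdint.h>
    #include <stdio.h>

    #define V3_ALIGNMENT 8u
    #define BLK_HDR_LEN  48u  /* approximate header size, illustrative */
    #define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((__typeof__(x))(a) - 1))
    #define BLK_PLUS_PRIV(sz) (BLK_HDR_LEN + ALIGN_UP((sz), V3_ALIGNMENT))

    int main(void)
    {
            uint32_t tp_block_size = 4096;
            uint32_t tp_sizeof_priv = 0xffffffffu;

            /* Old check: 32-bit ALIGN wraps to 0, difference looks positive. */
            int old_ok = (int)(tp_block_size - BLK_PLUS_PRIV(tp_sizeof_priv)) > 0;

            /* New check: promote to u64 first, so nothing wraps. */
            int new_ok = tp_block_size > BLK_PLUS_PRIV((uint64_t)tp_sizeof_priv);

            printf("old check passes: %d (bad)\n", old_ok);  /* 1 */
            printf("new check passes: %d\n", new_ok);        /* 0 */
            return 0;
    }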