Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0300-4.9.201-all-fixes.patch



Revision 3575
Thu Aug 13 10:21:17 2020 UTC by niro
File size: 158843 bytes
linux-201
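
For reference, a minimal sketch of how a stable-series patch like this is normally applied to the matching base tree. The directory name linux-4.9.200/ and the locally saved file name 0300-4.9.201-all-fixes.patch are illustrative assumptions, not part of the patch itself:

    cd linux-4.9.200
    # dry run first to confirm all hunks apply cleanly against the 4.9.200 base
    patch -p1 --dry-run < ../0300-4.9.201-all-fixes.patch
    # then apply for real; in a git checkout, git apply ../0300-4.9.201-all-fixes.patch works equally well
    patch -p1 < ../0300-4.9.201-all-fixes.patch
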
1 diff --git a/Makefile b/Makefile
2 index 84410351b27c..4741bbdfaa10 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 200
9 +SUBLEVEL = 201
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
14 index 112e3c4636b4..5f72b473f3ed 100644
15 --- a/arch/x86/events/amd/ibs.c
16 +++ b/arch/x86/events/amd/ibs.c
17 @@ -388,7 +388,8 @@ static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
18 struct hw_perf_event *hwc, u64 config)
19 {
20 config &= ~perf_ibs->cnt_mask;
21 - wrmsrl(hwc->config_base, config);
22 + if (boot_cpu_data.x86 == 0x10)
23 + wrmsrl(hwc->config_base, config);
24 config &= ~perf_ibs->enable_mask;
25 wrmsrl(hwc->config_base, config);
26 }
27 @@ -563,7 +564,8 @@ static struct perf_ibs perf_ibs_op = {
28 },
29 .msr = MSR_AMD64_IBSOPCTL,
30 .config_mask = IBS_OP_CONFIG_MASK,
31 - .cnt_mask = IBS_OP_MAX_CNT,
32 + .cnt_mask = IBS_OP_MAX_CNT | IBS_OP_CUR_CNT |
33 + IBS_OP_CUR_CNT_RAND,
34 .enable_mask = IBS_OP_ENABLE,
35 .valid_mask = IBS_OP_VAL,
36 .max_period = IBS_OP_MAX_CNT << 4,
37 @@ -624,7 +626,7 @@ fail:
38 if (event->attr.sample_type & PERF_SAMPLE_RAW)
39 offset_max = perf_ibs->offset_max;
40 else if (check_rip)
41 - offset_max = 2;
42 + offset_max = 3;
43 else
44 offset_max = 1;
45 do {
46 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
47 index d25fb6beb2f0..dcaf7100b69c 100644
48 --- a/arch/x86/include/asm/smp.h
49 +++ b/arch/x86/include/asm/smp.h
50 @@ -177,16 +177,6 @@ extern int safe_smp_processor_id(void);
51 #endif
52
53 #ifdef CONFIG_X86_LOCAL_APIC
54 -
55 -#ifndef CONFIG_X86_64
56 -static inline int logical_smp_processor_id(void)
57 -{
58 - /* we don't want to mark this access volatile - bad code generation */
59 - return GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
60 -}
61 -
62 -#endif
63 -
64 extern int hard_smp_processor_id(void);
65
66 #else /* CONFIG_X86_LOCAL_APIC */
67 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
68 index 232350519062..722a76b88bcc 100644
69 --- a/arch/x86/kernel/apic/apic.c
70 +++ b/arch/x86/kernel/apic/apic.c
71 @@ -1281,6 +1281,56 @@ static void lapic_setup_esr(void)
72 oldvalue, value);
73 }
74
75 +static void apic_pending_intr_clear(void)
76 +{
77 + long long max_loops = cpu_khz ? cpu_khz : 1000000;
78 + unsigned long long tsc = 0, ntsc;
79 + unsigned int value, queued;
80 + int i, j, acked = 0;
81 +
82 + if (boot_cpu_has(X86_FEATURE_TSC))
83 + tsc = rdtsc();
84 + /*
85 + * After a crash, we no longer service the interrupts and a pending
86 + * interrupt from previous kernel might still have ISR bit set.
87 + *
88 + * Most probably by now CPU has serviced that pending interrupt and
89 + * it might not have done the ack_APIC_irq() because it thought,
90 + * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
91 + * does not clear the ISR bit and cpu thinks it has already serivced
92 + * the interrupt. Hence a vector might get locked. It was noticed
93 + * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
94 + */
95 + do {
96 + queued = 0;
97 + for (i = APIC_ISR_NR - 1; i >= 0; i--)
98 + queued |= apic_read(APIC_IRR + i*0x10);
99 +
100 + for (i = APIC_ISR_NR - 1; i >= 0; i--) {
101 + value = apic_read(APIC_ISR + i*0x10);
102 + for (j = 31; j >= 0; j--) {
103 + if (value & (1<<j)) {
104 + ack_APIC_irq();
105 + acked++;
106 + }
107 + }
108 + }
109 + if (acked > 256) {
110 + printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n",
111 + acked);
112 + break;
113 + }
114 + if (queued) {
115 + if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) {
116 + ntsc = rdtsc();
117 + max_loops = (cpu_khz << 10) - (ntsc - tsc);
118 + } else
119 + max_loops--;
120 + }
121 + } while (queued && max_loops > 0);
122 + WARN_ON(max_loops <= 0);
123 +}
124 +
125 /**
126 * setup_local_APIC - setup the local APIC
127 *
128 @@ -1290,13 +1340,8 @@ static void lapic_setup_esr(void)
129 void setup_local_APIC(void)
130 {
131 int cpu = smp_processor_id();
132 - unsigned int value, queued;
133 - int i, j, acked = 0;
134 - unsigned long long tsc = 0, ntsc;
135 - long long max_loops = cpu_khz ? cpu_khz : 1000000;
136 + unsigned int value;
137
138 - if (boot_cpu_has(X86_FEATURE_TSC))
139 - tsc = rdtsc();
140
141 if (disable_apic) {
142 disable_ioapic_support();
143 @@ -1336,16 +1381,21 @@ void setup_local_APIC(void)
144 apic->init_apic_ldr();
145
146 #ifdef CONFIG_X86_32
147 - /*
148 - * APIC LDR is initialized. If logical_apicid mapping was
149 - * initialized during get_smp_config(), make sure it matches the
150 - * actual value.
151 - */
152 - i = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
153 - WARN_ON(i != BAD_APICID && i != logical_smp_processor_id());
154 - /* always use the value from LDR */
155 - early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
156 - logical_smp_processor_id();
157 + if (apic->dest_logical) {
158 + int logical_apicid, ldr_apicid;
159 +
160 + /*
161 + * APIC LDR is initialized. If logical_apicid mapping was
162 + * initialized during get_smp_config(), make sure it matches
163 + * the actual value.
164 + */
165 + logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
166 + ldr_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
167 + if (logical_apicid != BAD_APICID)
168 + WARN_ON(logical_apicid != ldr_apicid);
169 + /* Always use the value from LDR. */
170 + early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid;
171 + }
172 #endif
173
174 /*
175 @@ -1356,45 +1406,7 @@ void setup_local_APIC(void)
176 value &= ~APIC_TPRI_MASK;
177 apic_write(APIC_TASKPRI, value);
178
179 - /*
180 - * After a crash, we no longer service the interrupts and a pending
181 - * interrupt from previous kernel might still have ISR bit set.
182 - *
183 - * Most probably by now CPU has serviced that pending interrupt and
184 - * it might not have done the ack_APIC_irq() because it thought,
185 - * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
186 - * does not clear the ISR bit and cpu thinks it has already serivced
187 - * the interrupt. Hence a vector might get locked. It was noticed
188 - * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
189 - */
190 - do {
191 - queued = 0;
192 - for (i = APIC_ISR_NR - 1; i >= 0; i--)
193 - queued |= apic_read(APIC_IRR + i*0x10);
194 -
195 - for (i = APIC_ISR_NR - 1; i >= 0; i--) {
196 - value = apic_read(APIC_ISR + i*0x10);
197 - for (j = 31; j >= 0; j--) {
198 - if (value & (1<<j)) {
199 - ack_APIC_irq();
200 - acked++;
201 - }
202 - }
203 - }
204 - if (acked > 256) {
205 - printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n",
206 - acked);
207 - break;
208 - }
209 - if (queued) {
210 - if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) {
211 - ntsc = rdtsc();
212 - max_loops = (cpu_khz << 10) - (ntsc - tsc);
213 - } else
214 - max_loops--;
215 - }
216 - } while (queued && max_loops > 0);
217 - WARN_ON(max_loops <= 0);
218 + apic_pending_intr_clear();
219
220 /*
221 * Now that we are all set up, enable the APIC
222 diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
223 index 8288fe4d17c3..cd271f782605 100644
224 --- a/drivers/dma/xilinx/xilinx_dma.c
225 +++ b/drivers/dma/xilinx/xilinx_dma.c
226 @@ -72,6 +72,9 @@
227 #define XILINX_DMA_DMACR_CIRC_EN BIT(1)
228 #define XILINX_DMA_DMACR_RUNSTOP BIT(0)
229 #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
230 +#define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
231 +#define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
232 +#define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)
233
234 #define XILINX_DMA_REG_DMASR 0x0004
235 #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
236 @@ -2054,8 +2057,10 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
237 chan->config.gen_lock = cfg->gen_lock;
238 chan->config.master = cfg->master;
239
240 + dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
241 if (cfg->gen_lock && chan->genlock) {
242 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
243 + dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
244 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
245 }
246
247 @@ -2069,11 +2074,13 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
248 chan->config.delay = cfg->delay;
249
250 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
251 + dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
252 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
253 chan->config.coalesc = cfg->coalesc;
254 }
255
256 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
257 + dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
258 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
259 chan->config.delay = cfg->delay;
260 }
261 diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
262 index 48e99ab525c3..ae5c0952a7a3 100644
263 --- a/drivers/gpu/drm/drm_gem.c
264 +++ b/drivers/gpu/drm/drm_gem.c
265 @@ -996,6 +996,15 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
266 return -EACCES;
267 }
268
269 + if (node->readonly) {
270 + if (vma->vm_flags & VM_WRITE) {
271 + drm_gem_object_unreference_unlocked(obj);
272 + return -EINVAL;
273 + }
274 +
275 + vma->vm_flags &= ~VM_MAYWRITE;
276 + }
277 +
278 ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
279 vma);
280
281 diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
282 index 70980f82a15b..1e104518192d 100644
283 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c
284 +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
285 @@ -26,6 +26,7 @@
286 */
287
288 #include "i915_drv.h"
289 +#include "intel_ringbuffer.h"
290
291 /**
292 * DOC: batch buffer command parser
293 @@ -50,13 +51,11 @@
294 * granting userspace undue privileges. There are three categories of privilege.
295 *
296 * First, commands which are explicitly defined as privileged or which should
297 - * only be used by the kernel driver. The parser generally rejects such
298 - * commands, though it may allow some from the drm master process.
299 + * only be used by the kernel driver. The parser rejects such commands
300 *
301 * Second, commands which access registers. To support correct/enhanced
302 * userspace functionality, particularly certain OpenGL extensions, the parser
303 - * provides a whitelist of registers which userspace may safely access (for both
304 - * normal and drm master processes).
305 + * provides a whitelist of registers which userspace may safely access
306 *
307 * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
308 * The parser always rejects such commands.
309 @@ -81,11 +80,104 @@
310 * in the per-engine command tables.
311 *
312 * Other command table entries map fairly directly to high level categories
313 - * mentioned above: rejected, master-only, register whitelist. The parser
314 - * implements a number of checks, including the privileged memory checks, via a
315 - * general bitmasking mechanism.
316 + * mentioned above: rejected, register whitelist. The parser implements a number
317 + * of checks, including the privileged memory checks, via a general bitmasking
318 + * mechanism.
319 */
320
321 +/*
322 + * A command that requires special handling by the command parser.
323 + */
324 +struct drm_i915_cmd_descriptor {
325 + /*
326 + * Flags describing how the command parser processes the command.
327 + *
328 + * CMD_DESC_FIXED: The command has a fixed length if this is set,
329 + * a length mask if not set
330 + * CMD_DESC_SKIP: The command is allowed but does not follow the
331 + * standard length encoding for the opcode range in
332 + * which it falls
333 + * CMD_DESC_REJECT: The command is never allowed
334 + * CMD_DESC_REGISTER: The command should be checked against the
335 + * register whitelist for the appropriate ring
336 + */
337 + u32 flags;
338 +#define CMD_DESC_FIXED (1<<0)
339 +#define CMD_DESC_SKIP (1<<1)
340 +#define CMD_DESC_REJECT (1<<2)
341 +#define CMD_DESC_REGISTER (1<<3)
342 +#define CMD_DESC_BITMASK (1<<4)
343 +
344 + /*
345 + * The command's unique identification bits and the bitmask to get them.
346 + * This isn't strictly the opcode field as defined in the spec and may
347 + * also include type, subtype, and/or subop fields.
348 + */
349 + struct {
350 + u32 value;
351 + u32 mask;
352 + } cmd;
353 +
354 + /*
355 + * The command's length. The command is either fixed length (i.e. does
356 + * not include a length field) or has a length field mask. The flag
357 + * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
358 + * a length mask. All command entries in a command table must include
359 + * length information.
360 + */
361 + union {
362 + u32 fixed;
363 + u32 mask;
364 + } length;
365 +
366 + /*
367 + * Describes where to find a register address in the command to check
368 + * against the ring's register whitelist. Only valid if flags has the
369 + * CMD_DESC_REGISTER bit set.
370 + *
371 + * A non-zero step value implies that the command may access multiple
372 + * registers in sequence (e.g. LRI), in that case step gives the
373 + * distance in dwords between individual offset fields.
374 + */
375 + struct {
376 + u32 offset;
377 + u32 mask;
378 + u32 step;
379 + } reg;
380 +
381 +#define MAX_CMD_DESC_BITMASKS 3
382 + /*
383 + * Describes command checks where a particular dword is masked and
384 + * compared against an expected value. If the command does not match
385 + * the expected value, the parser rejects it. Only valid if flags has
386 + * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
387 + * are valid.
388 + *
389 + * If the check specifies a non-zero condition_mask then the parser
390 + * only performs the check when the bits specified by condition_mask
391 + * are non-zero.
392 + */
393 + struct {
394 + u32 offset;
395 + u32 mask;
396 + u32 expected;
397 + u32 condition_offset;
398 + u32 condition_mask;
399 + } bits[MAX_CMD_DESC_BITMASKS];
400 +};
401 +
402 +/*
403 + * A table of commands requiring special handling by the command parser.
404 + *
405 + * Each engine has an array of tables. Each table consists of an array of
406 + * command descriptors, which must be sorted with command opcodes in
407 + * ascending order.
408 + */
409 +struct drm_i915_cmd_table {
410 + const struct drm_i915_cmd_descriptor *table;
411 + int count;
412 +};
413 +
414 #define STD_MI_OPCODE_SHIFT (32 - 9)
415 #define STD_3D_OPCODE_SHIFT (32 - 16)
416 #define STD_2D_OPCODE_SHIFT (32 - 10)
417 @@ -95,7 +187,7 @@
418 #define CMD(op, opm, f, lm, fl, ...) \
419 { \
420 .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
421 - .cmd = { (op), ~0u << (opm) }, \
422 + .cmd = { (op & ~0u << (opm)), ~0u << (opm) }, \
423 .length = { (lm) }, \
424 __VA_ARGS__ \
425 }
426 @@ -110,14 +202,13 @@
427 #define R CMD_DESC_REJECT
428 #define W CMD_DESC_REGISTER
429 #define B CMD_DESC_BITMASK
430 -#define M CMD_DESC_MASTER
431
432 /* Command Mask Fixed Len Action
433 ---------------------------------------------------------- */
434 -static const struct drm_i915_cmd_descriptor common_cmds[] = {
435 +static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
436 CMD( MI_NOOP, SMI, F, 1, S ),
437 CMD( MI_USER_INTERRUPT, SMI, F, 1, R ),
438 - CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ),
439 + CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, R ),
440 CMD( MI_ARB_CHECK, SMI, F, 1, S ),
441 CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
442 CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
443 @@ -147,7 +238,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
444 CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
445 };
446
447 -static const struct drm_i915_cmd_descriptor render_cmds[] = {
448 +static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = {
449 CMD( MI_FLUSH, SMI, F, 1, S ),
450 CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
451 CMD( MI_PREDICATE, SMI, F, 1, S ),
452 @@ -214,7 +305,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
453 CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
454 CMD( MI_SET_APPID, SMI, F, 1, S ),
455 CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
456 - CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
457 + CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
458 CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
459 CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
460 .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
461 @@ -231,7 +322,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
462 CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ),
463 };
464
465 -static const struct drm_i915_cmd_descriptor video_cmds[] = {
466 +static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = {
467 CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
468 CMD( MI_SET_APPID, SMI, F, 1, S ),
469 CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
470 @@ -275,7 +366,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = {
471 CMD( MFX_WAIT, SMFX, F, 1, S ),
472 };
473
474 -static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
475 +static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = {
476 CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
477 CMD( MI_SET_APPID, SMI, F, 1, S ),
478 CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
479 @@ -313,7 +404,7 @@ static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
480 }}, ),
481 };
482
483 -static const struct drm_i915_cmd_descriptor blt_cmds[] = {
484 +static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = {
485 CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
486 CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B,
487 .bits = {{
488 @@ -347,10 +438,64 @@ static const struct drm_i915_cmd_descriptor blt_cmds[] = {
489 };
490
491 static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
492 - CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
493 + CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
494 CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
495 };
496
497 +/*
498 + * For Gen9 we can still rely on the h/w to enforce cmd security, and only
499 + * need to re-enforce the register access checks. We therefore only need to
500 + * teach the cmdparser how to find the end of each command, and identify
501 + * register accesses. The table doesn't need to reject any commands, and so
502 + * the only commands listed here are:
503 + * 1) Those that touch registers
504 + * 2) Those that do not have the default 8-bit length
505 + *
506 + * Note that the default MI length mask chosen for this table is 0xFF, not
507 + * the 0x3F used on older devices. This is because the vast majority of MI
508 + * cmds on Gen9 use a standard 8-bit Length field.
509 + * All the Gen9 blitter instructions are standard 0xFF length mask, and
510 + * none allow access to non-general registers, so in fact no BLT cmds are
511 + * included in the table at all.
512 + *
513 + */
514 +static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
515 + CMD( MI_NOOP, SMI, F, 1, S ),
516 + CMD( MI_USER_INTERRUPT, SMI, F, 1, S ),
517 + CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, S ),
518 + CMD( MI_FLUSH, SMI, F, 1, S ),
519 + CMD( MI_ARB_CHECK, SMI, F, 1, S ),
520 + CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
521 + CMD( MI_ARB_ON_OFF, SMI, F, 1, S ),
522 + CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
523 + CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, S ),
524 + CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, S ),
525 + CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, S ),
526 + CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
527 + .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
528 + CMD( MI_UPDATE_GTT, SMI, !F, 0x3FF, S ),
529 + CMD( MI_STORE_REGISTER_MEM_GEN8, SMI, F, 4, W,
530 + .reg = { .offset = 1, .mask = 0x007FFFFC } ),
531 + CMD( MI_FLUSH_DW, SMI, !F, 0x3F, S ),
532 + CMD( MI_LOAD_REGISTER_MEM_GEN8, SMI, F, 4, W,
533 + .reg = { .offset = 1, .mask = 0x007FFFFC } ),
534 + CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
535 + .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
536 +
537 + /*
538 + * We allow BB_START but apply further checks. We just sanitize the
539 + * basic fields here.
540 + */
541 +#define MI_BB_START_OPERAND_MASK GENMASK(SMI-1, 0)
542 +#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1)
543 + CMD( MI_BATCH_BUFFER_START_GEN8, SMI, !F, 0xFF, B,
544 + .bits = {{
545 + .offset = 0,
546 + .mask = MI_BB_START_OPERAND_MASK,
547 + .expected = MI_BB_START_OPERAND_EXPECT,
548 + }}, ),
549 +};
550 +
551 static const struct drm_i915_cmd_descriptor noop_desc =
552 CMD(MI_NOOP, SMI, F, 1, S);
553
554 @@ -364,40 +509,44 @@ static const struct drm_i915_cmd_descriptor noop_desc =
555 #undef R
556 #undef W
557 #undef B
558 -#undef M
559
560 -static const struct drm_i915_cmd_table gen7_render_cmds[] = {
561 - { common_cmds, ARRAY_SIZE(common_cmds) },
562 - { render_cmds, ARRAY_SIZE(render_cmds) },
563 +static const struct drm_i915_cmd_table gen7_render_cmd_table[] = {
564 + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
565 + { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
566 };
567
568 -static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
569 - { common_cmds, ARRAY_SIZE(common_cmds) },
570 - { render_cmds, ARRAY_SIZE(render_cmds) },
571 +static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = {
572 + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
573 + { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
574 { hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
575 };
576
577 -static const struct drm_i915_cmd_table gen7_video_cmds[] = {
578 - { common_cmds, ARRAY_SIZE(common_cmds) },
579 - { video_cmds, ARRAY_SIZE(video_cmds) },
580 +static const struct drm_i915_cmd_table gen7_video_cmd_table[] = {
581 + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
582 + { gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) },
583 };
584
585 -static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
586 - { common_cmds, ARRAY_SIZE(common_cmds) },
587 - { vecs_cmds, ARRAY_SIZE(vecs_cmds) },
588 +static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = {
589 + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
590 + { gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) },
591 };
592
593 -static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
594 - { common_cmds, ARRAY_SIZE(common_cmds) },
595 - { blt_cmds, ARRAY_SIZE(blt_cmds) },
596 +static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = {
597 + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
598 + { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
599 };
600
601 -static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
602 - { common_cmds, ARRAY_SIZE(common_cmds) },
603 - { blt_cmds, ARRAY_SIZE(blt_cmds) },
604 +static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = {
605 + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
606 + { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
607 { hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
608 };
609
610 +static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = {
611 + { gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) },
612 +};
613 +
614 +
615 /*
616 * Register whitelists, sorted by increasing register offset.
617 */
618 @@ -450,7 +599,6 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
619 REG64(PS_INVOCATION_COUNT),
620 REG64(PS_DEPTH_COUNT),
621 REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
622 - REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
623 REG64(MI_PREDICATE_SRC0),
624 REG64(MI_PREDICATE_SRC1),
625 REG32(GEN7_3DPRIM_END_OFFSET),
626 @@ -514,17 +662,27 @@ static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
627 REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
628 };
629
630 -static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
631 - REG32(FORCEWAKE_MT),
632 - REG32(DERRMR),
633 - REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
634 - REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
635 - REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
636 -};
637 -
638 -static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
639 - REG32(FORCEWAKE_MT),
640 - REG32(DERRMR),
641 +static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
642 + REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
643 + REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
644 + REG32(BCS_SWCTRL),
645 + REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
646 + REG64_IDX(BCS_GPR, 0),
647 + REG64_IDX(BCS_GPR, 1),
648 + REG64_IDX(BCS_GPR, 2),
649 + REG64_IDX(BCS_GPR, 3),
650 + REG64_IDX(BCS_GPR, 4),
651 + REG64_IDX(BCS_GPR, 5),
652 + REG64_IDX(BCS_GPR, 6),
653 + REG64_IDX(BCS_GPR, 7),
654 + REG64_IDX(BCS_GPR, 8),
655 + REG64_IDX(BCS_GPR, 9),
656 + REG64_IDX(BCS_GPR, 10),
657 + REG64_IDX(BCS_GPR, 11),
658 + REG64_IDX(BCS_GPR, 12),
659 + REG64_IDX(BCS_GPR, 13),
660 + REG64_IDX(BCS_GPR, 14),
661 + REG64_IDX(BCS_GPR, 15),
662 };
663
664 #undef REG64
665 @@ -533,33 +691,32 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
666 struct drm_i915_reg_table {
667 const struct drm_i915_reg_descriptor *regs;
668 int num_regs;
669 - bool master;
670 };
671
672 static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
673 - { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
674 - { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
675 + { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
676 };
677
678 static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
679 - { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
680 - { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
681 + { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
682 };
683
684 static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
685 - { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
686 - { hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false },
687 - { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
688 + { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
689 + { hsw_render_regs, ARRAY_SIZE(hsw_render_regs) },
690 };
691
692 static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
693 - { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
694 - { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
695 + { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
696 +};
697 +
698 +static const struct drm_i915_reg_table gen9_blt_reg_tables[] = {
699 + { gen9_blt_regs, ARRAY_SIZE(gen9_blt_regs) },
700 };
701
702 static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
703 {
704 - u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
705 + u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
706 u32 subclient =
707 (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
708
709 @@ -578,7 +735,7 @@ static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
710
711 static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
712 {
713 - u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
714 + u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
715 u32 subclient =
716 (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
717 u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;
718 @@ -601,7 +758,7 @@ static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
719
720 static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
721 {
722 - u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
723 + u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
724
725 if (client == INSTR_MI_CLIENT)
726 return 0x3F;
727 @@ -612,6 +769,17 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
728 return 0;
729 }
730
731 +static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header)
732 +{
733 + u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
734 +
735 + if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
736 + return 0xFF;
737 +
738 + DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
739 + return 0;
740 +}
741 +
742 static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
743 const struct drm_i915_cmd_table *cmd_tables,
744 int cmd_table_count)
745 @@ -703,22 +871,15 @@ struct cmd_node {
746 */
747 static inline u32 cmd_header_key(u32 x)
748 {
749 - u32 shift;
750 -
751 switch (x >> INSTR_CLIENT_SHIFT) {
752 default:
753 case INSTR_MI_CLIENT:
754 - shift = STD_MI_OPCODE_SHIFT;
755 - break;
756 + return x >> STD_MI_OPCODE_SHIFT;
757 case INSTR_RC_CLIENT:
758 - shift = STD_3D_OPCODE_SHIFT;
759 - break;
760 + return x >> STD_3D_OPCODE_SHIFT;
761 case INSTR_BC_CLIENT:
762 - shift = STD_2D_OPCODE_SHIFT;
763 - break;
764 + return x >> STD_2D_OPCODE_SHIFT;
765 }
766 -
767 - return x >> shift;
768 }
769
770 static int init_hash_table(struct intel_engine_cs *engine,
771 @@ -776,18 +937,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
772 int cmd_table_count;
773 int ret;
774
775 - if (!IS_GEN7(engine->i915))
776 + if (!IS_GEN7(engine->i915) && !(IS_GEN9(engine->i915) &&
777 + engine->id == BCS))
778 return;
779
780 switch (engine->id) {
781 case RCS:
782 if (IS_HASWELL(engine->i915)) {
783 - cmd_tables = hsw_render_ring_cmds;
784 + cmd_tables = hsw_render_ring_cmd_table;
785 cmd_table_count =
786 - ARRAY_SIZE(hsw_render_ring_cmds);
787 + ARRAY_SIZE(hsw_render_ring_cmd_table);
788 } else {
789 - cmd_tables = gen7_render_cmds;
790 - cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
791 + cmd_tables = gen7_render_cmd_table;
792 + cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table);
793 }
794
795 if (IS_HASWELL(engine->i915)) {
796 @@ -797,36 +959,46 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
797 engine->reg_tables = ivb_render_reg_tables;
798 engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
799 }
800 -
801 engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
802 break;
803 case VCS:
804 - cmd_tables = gen7_video_cmds;
805 - cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
806 + cmd_tables = gen7_video_cmd_table;
807 + cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table);
808 engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
809 break;
810 case BCS:
811 - if (IS_HASWELL(engine->i915)) {
812 - cmd_tables = hsw_blt_ring_cmds;
813 - cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
814 + engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
815 + if (IS_GEN9(engine->i915)) {
816 + cmd_tables = gen9_blt_cmd_table;
817 + cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table);
818 + engine->get_cmd_length_mask =
819 + gen9_blt_get_cmd_length_mask;
820 +
821 + /* BCS Engine unsafe without parser */
822 + engine->flags |= I915_ENGINE_REQUIRES_CMD_PARSER;
823 + } else if (IS_HASWELL(engine->i915)) {
824 + cmd_tables = hsw_blt_ring_cmd_table;
825 + cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table);
826 } else {
827 - cmd_tables = gen7_blt_cmds;
828 - cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
829 + cmd_tables = gen7_blt_cmd_table;
830 + cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
831 }
832
833 - if (IS_HASWELL(engine->i915)) {
834 + if (IS_GEN9(engine->i915)) {
835 + engine->reg_tables = gen9_blt_reg_tables;
836 + engine->reg_table_count =
837 + ARRAY_SIZE(gen9_blt_reg_tables);
838 + } else if (IS_HASWELL(engine->i915)) {
839 engine->reg_tables = hsw_blt_reg_tables;
840 engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
841 } else {
842 engine->reg_tables = ivb_blt_reg_tables;
843 engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
844 }
845 -
846 - engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
847 break;
848 case VECS:
849 - cmd_tables = hsw_vebox_cmds;
850 - cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
851 + cmd_tables = hsw_vebox_cmd_table;
852 + cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table);
853 /* VECS can use the same length_mask function as VCS */
854 engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
855 break;
856 @@ -852,7 +1024,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
857 return;
858 }
859
860 - engine->needs_cmd_parser = true;
861 + engine->flags |= I915_ENGINE_USING_CMD_PARSER;
862 }
863
864 /**
865 @@ -864,7 +1036,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
866 */
867 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
868 {
869 - if (!engine->needs_cmd_parser)
870 + if (!intel_engine_using_cmd_parser(engine))
871 return;
872
873 fini_hash_table(engine);
874 @@ -938,22 +1110,16 @@ __find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr)
875 }
876
877 static const struct drm_i915_reg_descriptor *
878 -find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
879 +find_reg(const struct intel_engine_cs *engine, u32 addr)
880 {
881 const struct drm_i915_reg_table *table = engine->reg_tables;
882 + const struct drm_i915_reg_descriptor *reg = NULL;
883 int count = engine->reg_table_count;
884
885 - do {
886 - if (!table->master || is_master) {
887 - const struct drm_i915_reg_descriptor *reg;
888 -
889 - reg = __find_reg(table->regs, table->num_regs, addr);
890 - if (reg != NULL)
891 - return reg;
892 - }
893 - } while (table++, --count);
894 + for (; !reg && (count > 0); ++table, --count)
895 + reg = __find_reg(table->regs, table->num_regs, addr);
896
897 - return NULL;
898 + return reg;
899 }
900
901 /* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
902 @@ -1036,32 +1202,9 @@ unpin_src:
903 return dst;
904 }
905
906 -/**
907 - * intel_engine_needs_cmd_parser() - should a given engine use software
908 - * command parsing?
909 - * @engine: the engine in question
910 - *
911 - * Only certain platforms require software batch buffer command parsing, and
912 - * only when enabled via module parameter.
913 - *
914 - * Return: true if the engine requires software command parsing
915 - */
916 -bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine)
917 -{
918 - if (!engine->needs_cmd_parser)
919 - return false;
920 -
921 - if (!USES_PPGTT(engine->i915))
922 - return false;
923 -
924 - return (i915.enable_cmd_parser == 1);
925 -}
926 -
927 static bool check_cmd(const struct intel_engine_cs *engine,
928 const struct drm_i915_cmd_descriptor *desc,
929 - const u32 *cmd, u32 length,
930 - const bool is_master,
931 - bool *oacontrol_set)
932 + const u32 *cmd, u32 length)
933 {
934 if (desc->flags & CMD_DESC_SKIP)
935 return true;
936 @@ -1071,12 +1214,6 @@ static bool check_cmd(const struct intel_engine_cs *engine,
937 return false;
938 }
939
940 - if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
941 - DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
942 - *cmd);
943 - return false;
944 - }
945 -
946 if (desc->flags & CMD_DESC_REGISTER) {
947 /*
948 * Get the distance between individual register offset
949 @@ -1090,7 +1227,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
950 offset += step) {
951 const u32 reg_addr = cmd[offset] & desc->reg.mask;
952 const struct drm_i915_reg_descriptor *reg =
953 - find_reg(engine, is_master, reg_addr);
954 + find_reg(engine, reg_addr);
955
956 if (!reg) {
957 DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (exec_id=%d)\n",
958 @@ -1098,31 +1235,6 @@ static bool check_cmd(const struct intel_engine_cs *engine,
959 return false;
960 }
961
962 - /*
963 - * OACONTROL requires some special handling for
964 - * writes. We want to make sure that any batch which
965 - * enables OA also disables it before the end of the
966 - * batch. The goal is to prevent one process from
967 - * snooping on the perf data from another process. To do
968 - * that, we need to check the value that will be written
969 - * to the register. Hence, limit OACONTROL writes to
970 - * only MI_LOAD_REGISTER_IMM commands.
971 - */
972 - if (reg_addr == i915_mmio_reg_offset(OACONTROL)) {
973 - if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
974 - DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
975 - return false;
976 - }
977 -
978 - if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
979 - DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n");
980 - return false;
981 - }
982 -
983 - if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
984 - *oacontrol_set = (cmd[offset + 1] != 0);
985 - }
986 -
987 /*
988 * Check the value written to the register against the
989 * allowed mask/value pair given in the whitelist entry.
990 @@ -1170,6 +1282,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
991 continue;
992 }
993
994 + if (desc->bits[i].offset >= length) {
995 + DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
996 + *cmd, engine->name);
997 + return false;
998 + }
999 +
1000 dword = cmd[desc->bits[i].offset] &
1001 desc->bits[i].mask;
1002
1003 @@ -1187,16 +1305,112 @@ static bool check_cmd(const struct intel_engine_cs *engine,
1004 return true;
1005 }
1006
1007 +static int check_bbstart(const struct i915_gem_context *ctx,
1008 + u32 *cmd, u32 offset, u32 length,
1009 + u32 batch_len,
1010 + u64 batch_start,
1011 + u64 shadow_batch_start)
1012 +{
1013 + u64 jump_offset, jump_target;
1014 + u32 target_cmd_offset, target_cmd_index;
1015 +
1016 + /* For igt compatibility on older platforms */
1017 + if (CMDPARSER_USES_GGTT(ctx->i915)) {
1018 + DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
1019 + return -EACCES;
1020 + }
1021 +
1022 + if (length != 3) {
1023 + DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
1024 + length);
1025 + return -EINVAL;
1026 + }
1027 +
1028 + jump_target = *(u64*)(cmd+1);
1029 + jump_offset = jump_target - batch_start;
1030 +
1031 + /*
1032 + * Any underflow of jump_target is guaranteed to be outside the range
1033 + * of a u32, so >= test catches both too large and too small
1034 + */
1035 + if (jump_offset >= batch_len) {
1036 + DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
1037 + jump_target);
1038 + return -EINVAL;
1039 + }
1040 +
1041 + /*
1042 + * This cannot overflow a u32 because we already checked jump_offset
1043 + * is within the BB, and the batch_len is a u32
1044 + */
1045 + target_cmd_offset = lower_32_bits(jump_offset);
1046 + target_cmd_index = target_cmd_offset / sizeof(u32);
1047 +
1048 + *(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset;
1049 +
1050 + if (target_cmd_index == offset)
1051 + return 0;
1052 +
1053 + if (ctx->jump_whitelist_cmds <= target_cmd_index) {
1054 + DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n");
1055 + return -EINVAL;
1056 + } else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) {
1057 + DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
1058 + jump_target);
1059 + return -EINVAL;
1060 + }
1061 +
1062 + return 0;
1063 +}
1064 +
1065 +static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len)
1066 +{
1067 + const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32));
1068 + const u32 exact_size = BITS_TO_LONGS(batch_cmds);
1069 + u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
1070 + unsigned long *next_whitelist;
1071 +
1072 + if (CMDPARSER_USES_GGTT(ctx->i915))
1073 + return;
1074 +
1075 + if (batch_cmds <= ctx->jump_whitelist_cmds) {
1076 + bitmap_zero(ctx->jump_whitelist, batch_cmds);
1077 + return;
1078 + }
1079 +
1080 +again:
1081 + next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL);
1082 + if (next_whitelist) {
1083 + kfree(ctx->jump_whitelist);
1084 + ctx->jump_whitelist = next_whitelist;
1085 + ctx->jump_whitelist_cmds =
1086 + next_size * BITS_PER_BYTE * sizeof(long);
1087 + return;
1088 + }
1089 +
1090 + if (next_size > exact_size) {
1091 + next_size = exact_size;
1092 + goto again;
1093 + }
1094 +
1095 + DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
1096 + bitmap_zero(ctx->jump_whitelist, ctx->jump_whitelist_cmds);
1097 +
1098 + return;
1099 +}
1100 +
1101 #define LENGTH_BIAS 2
1102
1103 /**
1104 * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
1105 + * @ctx: the context in which the batch is to execute
1106 * @engine: the engine on which the batch is to execute
1107 * @batch_obj: the batch buffer in question
1108 - * @shadow_batch_obj: copy of the batch buffer in question
1109 + * @batch_start: Canonical base address of batch
1110 * @batch_start_offset: byte offset in the batch at which execution starts
1111 * @batch_len: length of the commands in batch_obj
1112 - * @is_master: is the submitting process the drm master?
1113 + * @shadow_batch_obj: copy of the batch buffer in question
1114 + * @shadow_batch_start: Canonical base address of shadow_batch_obj
1115 *
1116 * Parses the specified batch buffer looking for privilege violations as
1117 * described in the overview.
1118 @@ -1204,17 +1418,19 @@ static bool check_cmd(const struct intel_engine_cs *engine,
1119 * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
1120 * if the batch appears legal but should use hardware parsing
1121 */
1122 -int intel_engine_cmd_parser(struct intel_engine_cs *engine,
1123 +
1124 +int intel_engine_cmd_parser(struct i915_gem_context *ctx,
1125 + struct intel_engine_cs *engine,
1126 struct drm_i915_gem_object *batch_obj,
1127 - struct drm_i915_gem_object *shadow_batch_obj,
1128 + u64 batch_start,
1129 u32 batch_start_offset,
1130 u32 batch_len,
1131 - bool is_master)
1132 + struct drm_i915_gem_object *shadow_batch_obj,
1133 + u64 shadow_batch_start)
1134 {
1135 - u32 *cmd, *batch_end;
1136 + u32 *cmd, *batch_end, offset = 0;
1137 struct drm_i915_cmd_descriptor default_desc = noop_desc;
1138 const struct drm_i915_cmd_descriptor *desc = &default_desc;
1139 - bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
1140 bool needs_clflush_after = false;
1141 int ret = 0;
1142
1143 @@ -1226,13 +1442,15 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
1144 return PTR_ERR(cmd);
1145 }
1146
1147 + init_whitelist(ctx, batch_len);
1148 +
1149 /*
1150 * We use the batch length as size because the shadow object is as
1151 * large or larger and copy_batch() will write MI_NOPs to the extra
1152 * space. Parsing should be faster in some cases this way.
1153 */
1154 batch_end = cmd + (batch_len / sizeof(*batch_end));
1155 - while (cmd < batch_end) {
1156 + do {
1157 u32 length;
1158
1159 if (*cmd == MI_BATCH_BUFFER_END)
1160 @@ -1243,17 +1461,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
1161 DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
1162 *cmd);
1163 ret = -EINVAL;
1164 - break;
1165 - }
1166 -
1167 - /*
1168 - * If the batch buffer contains a chained batch, return an
1169 - * error that tells the caller to abort and dispatch the
1170 - * workload as a non-secure batch.
1171 - */
1172 - if (desc->cmd.value == MI_BATCH_BUFFER_START) {
1173 - ret = -EACCES;
1174 - break;
1175 + goto err;
1176 }
1177
1178 if (desc->flags & CMD_DESC_FIXED)
1179 @@ -1267,32 +1475,44 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
1180 length,
1181 batch_end - cmd);
1182 ret = -EINVAL;
1183 - break;
1184 + goto err;
1185 }
1186
1187 - if (!check_cmd(engine, desc, cmd, length, is_master,
1188 - &oacontrol_set)) {
1189 - ret = -EINVAL;
1190 + if (!check_cmd(engine, desc, cmd, length)) {
1191 + ret = -EACCES;
1192 + goto err;
1193 + }
1194 +
1195 + if (desc->cmd.value == MI_BATCH_BUFFER_START) {
1196 + ret = check_bbstart(ctx, cmd, offset, length,
1197 + batch_len, batch_start,
1198 + shadow_batch_start);
1199 +
1200 + if (ret)
1201 + goto err;
1202 break;
1203 }
1204
1205 - cmd += length;
1206 - }
1207 + if (ctx->jump_whitelist_cmds > offset)
1208 + set_bit(offset, ctx->jump_whitelist);
1209
1210 - if (oacontrol_set) {
1211 - DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
1212 - ret = -EINVAL;
1213 - }
1214 + cmd += length;
1215 + offset += length;
1216 + if (cmd >= batch_end) {
1217 + DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
1218 + ret = -EINVAL;
1219 + goto err;
1220 + }
1221 + } while (1);
1222
1223 - if (cmd >= batch_end) {
1224 - DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
1225 - ret = -EINVAL;
1226 + if (needs_clflush_after) {
1227 + void *ptr = ptr_mask_bits(shadow_batch_obj->mapping);
1228 + drm_clflush_virt_range(ptr,
1229 + (void *)(cmd + 1) - ptr);
1230 }
1231
1232 - if (ret == 0 && needs_clflush_after)
1233 - drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len);
1234 +err:
1235 i915_gem_object_unpin_map(shadow_batch_obj);
1236 -
1237 return ret;
1238 }
1239
1240 @@ -1312,7 +1532,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
1241
1242 /* If the command parser is not enabled, report 0 - unsupported */
1243 for_each_engine(engine, dev_priv) {
1244 - if (intel_engine_needs_cmd_parser(engine)) {
1245 + if (intel_engine_using_cmd_parser(engine)) {
1246 active = true;
1247 break;
1248 }
1249 @@ -1332,6 +1552,12 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
1250 * 5. GPGPU dispatch compute indirect registers.
1251 * 6. TIMESTAMP register and Haswell CS GPR registers
1252 * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
1253 + * 8. Don't report cmd_check() failures as EINVAL errors to userspace;
1254 + * rely on the HW to NOOP disallowed commands as it would without
1255 + * the parser enabled.
1256 + * 9. Don't whitelist or handle oacontrol specially, as ownership
1257 + * for oacontrol state is moving to i915-perf.
1258 + * 10. Support for Gen9 BCS Parsing
1259 */
1260 - return 7;
1261 + return 10;
1262 }
1263 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
1264 index bae62cf934cf..ff61229d963b 100644
1265 --- a/drivers/gpu/drm/i915/i915_drv.c
1266 +++ b/drivers/gpu/drm/i915/i915_drv.c
1267 @@ -280,7 +280,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
1268 value = i915.semaphores;
1269 break;
1270 case I915_PARAM_HAS_SECURE_BATCHES:
1271 - value = capable(CAP_SYS_ADMIN);
1272 + value = HAS_SECURE_BATCHES(dev_priv) && capable(CAP_SYS_ADMIN);
1273 break;
1274 case I915_PARAM_CMD_PARSER_VERSION:
1275 value = i915_cmd_parser_get_version(dev_priv);
1276 @@ -1470,6 +1470,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
1277 disable_rpm_wakeref_asserts(dev_priv);
1278
1279 intel_display_set_init_power(dev_priv, false);
1280 + i915_rc6_ctx_wa_suspend(dev_priv);
1281
1282 fw_csr = !IS_BROXTON(dev_priv) &&
1283 suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
1284 @@ -1706,6 +1707,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
1285 else
1286 intel_display_set_init_power(dev_priv, true);
1287
1288 + i915_rc6_ctx_wa_resume(dev_priv);
1289 +
1290 enable_rpm_wakeref_asserts(dev_priv);
1291
1292 out:
1293 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
1294 index e23748cca0c0..c4f155663ca9 100644
1295 --- a/drivers/gpu/drm/i915/i915_drv.h
1296 +++ b/drivers/gpu/drm/i915/i915_drv.h
1297 @@ -943,6 +943,13 @@ struct i915_gem_context {
1298 struct list_head link;
1299
1300 u8 remap_slice;
1301 +
1302 + /** jump_whitelist: Bit array for tracking cmds during cmdparsing */
1303 + unsigned long *jump_whitelist;
1304 +
1305 + /** jump_whitelist_cmds: No of cmd slots available */
1306 + u32 jump_whitelist_cmds;
1307 +
1308 bool closed:1;
1309 };
1310
1311 @@ -1221,6 +1228,7 @@ struct intel_gen6_power_mgmt {
1312 bool client_boost;
1313
1314 bool enabled;
1315 + bool ctx_corrupted;
1316 struct delayed_work autoenable_work;
1317 unsigned boosts;
1318
1319 @@ -2339,6 +2347,18 @@ i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
1320 __deprecated
1321 extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
1322
1323 +static inline void
1324 +i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
1325 +{
1326 + obj->base.vma_node.readonly = true;
1327 +}
1328 +
1329 +static inline bool
1330 +i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
1331 +{
1332 + return obj->base.vma_node.readonly;
1333 +}
1334 +
1335 static inline bool
1336 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
1337 {
1338 @@ -2476,102 +2496,6 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
1339 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
1340 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
1341
1342 -/*
1343 - * A command that requires special handling by the command parser.
1344 - */
1345 -struct drm_i915_cmd_descriptor {
1346 - /*
1347 - * Flags describing how the command parser processes the command.
1348 - *
1349 - * CMD_DESC_FIXED: The command has a fixed length if this is set,
1350 - * a length mask if not set
1351 - * CMD_DESC_SKIP: The command is allowed but does not follow the
1352 - * standard length encoding for the opcode range in
1353 - * which it falls
1354 - * CMD_DESC_REJECT: The command is never allowed
1355 - * CMD_DESC_REGISTER: The command should be checked against the
1356 - * register whitelist for the appropriate ring
1357 - * CMD_DESC_MASTER: The command is allowed if the submitting process
1358 - * is the DRM master
1359 - */
1360 - u32 flags;
1361 -#define CMD_DESC_FIXED (1<<0)
1362 -#define CMD_DESC_SKIP (1<<1)
1363 -#define CMD_DESC_REJECT (1<<2)
1364 -#define CMD_DESC_REGISTER (1<<3)
1365 -#define CMD_DESC_BITMASK (1<<4)
1366 -#define CMD_DESC_MASTER (1<<5)
1367 -
1368 - /*
1369 - * The command's unique identification bits and the bitmask to get them.
1370 - * This isn't strictly the opcode field as defined in the spec and may
1371 - * also include type, subtype, and/or subop fields.
1372 - */
1373 - struct {
1374 - u32 value;
1375 - u32 mask;
1376 - } cmd;
1377 -
1378 - /*
1379 - * The command's length. The command is either fixed length (i.e. does
1380 - * not include a length field) or has a length field mask. The flag
1381 - * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
1382 - * a length mask. All command entries in a command table must include
1383 - * length information.
1384 - */
1385 - union {
1386 - u32 fixed;
1387 - u32 mask;
1388 - } length;
1389 -
1390 - /*
1391 - * Describes where to find a register address in the command to check
1392 - * against the ring's register whitelist. Only valid if flags has the
1393 - * CMD_DESC_REGISTER bit set.
1394 - *
1395 - * A non-zero step value implies that the command may access multiple
1396 - * registers in sequence (e.g. LRI), in that case step gives the
1397 - * distance in dwords between individual offset fields.
1398 - */
1399 - struct {
1400 - u32 offset;
1401 - u32 mask;
1402 - u32 step;
1403 - } reg;
1404 -
1405 -#define MAX_CMD_DESC_BITMASKS 3
1406 - /*
1407 - * Describes command checks where a particular dword is masked and
1408 - * compared against an expected value. If the command does not match
1409 - * the expected value, the parser rejects it. Only valid if flags has
1410 - * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
1411 - * are valid.
1412 - *
1413 - * If the check specifies a non-zero condition_mask then the parser
1414 - * only performs the check when the bits specified by condition_mask
1415 - * are non-zero.
1416 - */
1417 - struct {
1418 - u32 offset;
1419 - u32 mask;
1420 - u32 expected;
1421 - u32 condition_offset;
1422 - u32 condition_mask;
1423 - } bits[MAX_CMD_DESC_BITMASKS];
1424 -};
1425 -
1426 -/*
1427 - * A table of commands requiring special handling by the command parser.
1428 - *
1429 - * Each engine has an array of tables. Each table consists of an array of
1430 - * command descriptors, which must be sorted with command opcodes in
1431 - * ascending order.
1432 - */
1433 -struct drm_i915_cmd_table {
1434 - const struct drm_i915_cmd_descriptor *table;
1435 - int count;
1436 -};
1437 -
1438 /* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
1439 #define __I915__(p) ({ \
1440 struct drm_i915_private *__p; \
1441 @@ -2729,6 +2653,12 @@ struct drm_i915_cmd_table {
1442 #define IS_GEN8(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(7)))
1443 #define IS_GEN9(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(8)))
1444
1445 +/*
1446 + * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
1447 + * All later gens can run the final buffer from the ppgtt
1448 + */
1449 +#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN7(dev_priv)
1450 +
1451 #define ENGINE_MASK(id) BIT(id)
1452 #define RENDER_RING ENGINE_MASK(RCS)
1453 #define BSD_RING ENGINE_MASK(VCS)
1454 @@ -2745,6 +2675,8 @@ struct drm_i915_cmd_table {
1455 #define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
1456 #define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
1457
1458 +#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
1459 +
1460 #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
1461 #define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
1462 #define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
1463 @@ -2764,11 +2696,13 @@ struct drm_i915_cmd_table {
1464 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
1465 #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
1466
1467 +#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \
1468 + (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) == 9)
1469 +
1470 /* WaRsDisableCoarsePowerGating:skl,bxt */
1471 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
1472 (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \
1473 - IS_SKL_GT3(dev_priv) || \
1474 - IS_SKL_GT4(dev_priv))
1475 + (INTEL_GEN(dev_priv) == 9))
1476
1477 /*
1478 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
1479 @@ -3098,6 +3032,14 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
1480 u64 alignment,
1481 u64 flags);
1482
1483 +struct i915_vma * __must_check
1484 +i915_gem_object_pin(struct drm_i915_gem_object *obj,
1485 + struct i915_address_space *vm,
1486 + const struct i915_ggtt_view *view,
1487 + u64 size,
1488 + u64 alignment,
1489 + u64 flags);
1490 +
1491 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
1492 u32 flags);
1493 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
1494 @@ -3551,13 +3493,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
1495 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
1496 void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
1497 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
1498 -bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine);
1499 -int intel_engine_cmd_parser(struct intel_engine_cs *engine,
1500 +int intel_engine_cmd_parser(struct i915_gem_context *cxt,
1501 + struct intel_engine_cs *engine,
1502 struct drm_i915_gem_object *batch_obj,
1503 - struct drm_i915_gem_object *shadow_batch_obj,
1504 + u64 user_batch_start,
1505 u32 batch_start_offset,
1506 u32 batch_len,
1507 - bool is_master);
1508 + struct drm_i915_gem_object *shadow_batch_obj,
1509 + u64 shadow_batch_start);
1510
1511 /* i915_suspend.c */
1512 extern int i915_save_state(struct drm_device *dev);
1513 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1514 index 26c4befcd234..3fb4f9acacba 100644
1515 --- a/drivers/gpu/drm/i915/i915_gem.c
1516 +++ b/drivers/gpu/drm/i915/i915_gem.c
1517 @@ -1773,6 +1773,10 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
1518 unsigned int flags;
1519 int ret;
1520
1521 + /* Sanity check that we allow writing into this object */
1522 + if (i915_gem_object_is_readonly(obj) && write)
1523 + return VM_FAULT_SIGBUS;
1524 +
1525 /* We don't use vmf->pgoff since that has the fake offset */
1526 page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
1527 PAGE_SHIFT;
1528 @@ -2759,6 +2763,12 @@ i915_gem_idle_work_handler(struct work_struct *work)
1529
1530 if (INTEL_GEN(dev_priv) >= 6)
1531 gen6_rps_idle(dev_priv);
1532 +
1533 + if (NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv)) {
1534 + i915_rc6_ctx_wa_check(dev_priv);
1535 + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1536 + }
1537 +
1538 intel_runtime_pm_put(dev_priv);
1539 out_unlock:
1540 mutex_unlock(&dev->struct_mutex);
1541 @@ -3822,6 +3832,19 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
1542 u64 flags)
1543 {
1544 struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
1545 +
1546 + return i915_gem_object_pin(obj, vm, view, size, alignment,
1547 + flags | PIN_GLOBAL);
1548 +}
1549 +
1550 +struct i915_vma *
1551 +i915_gem_object_pin(struct drm_i915_gem_object *obj,
1552 + struct i915_address_space *vm,
1553 + const struct i915_ggtt_view *view,
1554 + u64 size,
1555 + u64 alignment,
1556 + u64 flags)
1557 +{
1558 struct i915_vma *vma;
1559 int ret;
1560
1561 @@ -3846,7 +3869,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
1562 return ERR_PTR(ret);
1563 }
1564
1565 - ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
1566 + ret = i915_vma_pin(vma, size, alignment, flags);
1567 if (ret)
1568 return ERR_PTR(ret);
1569
1570 diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
1571 index df10f4e95736..5d55cd159e89 100644
1572 --- a/drivers/gpu/drm/i915/i915_gem_context.c
1573 +++ b/drivers/gpu/drm/i915/i915_gem_context.c
1574 @@ -158,6 +158,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
1575 i915_vma_put(ce->state);
1576 }
1577
1578 + kfree(ctx->jump_whitelist);
1579 +
1580 put_pid(ctx->pid);
1581 list_del(&ctx->link);
1582
1583 @@ -327,6 +329,9 @@ __create_hw_context(struct drm_device *dev,
1584 GEN8_CTX_ADDRESSING_MODE_SHIFT;
1585 ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
1586
1587 + ctx->jump_whitelist = NULL;
1588 + ctx->jump_whitelist_cmds = 0;
1589 +
1590 return ctx;
1591
1592 err_out:
1593 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
1594 index 2117f172d7a2..4548d89abcdc 100644
1595 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
1596 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
1597 @@ -55,6 +55,7 @@ struct i915_execbuffer_params {
1598 struct i915_vma *batch;
1599 u32 dispatch_flags;
1600 u32 args_batch_start_offset;
1601 + u64 args_batch_len;
1602 struct intel_engine_cs *engine;
1603 struct i915_gem_context *ctx;
1604 struct drm_i915_gem_request *request;
1605 @@ -1401,41 +1402,85 @@ i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
1606 return 0;
1607 }
1608
1609 +static struct i915_vma*
1610 +shadow_batch_pin(struct drm_i915_gem_object *obj, struct i915_address_space *vm)
1611 +{
1612 + struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
1613 + u64 flags;
1614 +
1615 + /*
1616 + * PPGTT backed shadow buffers must be mapped RO, to prevent
1617 + * post-scan tampering
1618 + */
1619 + if (CMDPARSER_USES_GGTT(dev_priv)) {
1620 + flags = PIN_GLOBAL;
1621 + vm = &dev_priv->ggtt.base;
1622 + } else if (vm->has_read_only) {
1623 + flags = PIN_USER;
1624 + i915_gem_object_set_readonly(obj);
1625 + } else {
1626 + DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
1627 + return ERR_PTR(-EINVAL);
1628 + }
1629 +
1630 + return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags);
1631 +}
1632 +
1633 static struct i915_vma *
1634 i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
1635 struct drm_i915_gem_exec_object2 *shadow_exec_entry,
1636 - struct drm_i915_gem_object *batch_obj,
1637 + struct i915_execbuffer_params *params,
1638 struct eb_vmas *eb,
1639 - u32 batch_start_offset,
1640 - u32 batch_len,
1641 - bool is_master)
1642 + struct i915_address_space *vm)
1643 {
1644 + struct drm_i915_gem_object *batch_obj = params->batch->obj;
1645 struct drm_i915_gem_object *shadow_batch_obj;
1646 struct i915_vma *vma;
1647 + u64 batch_start;
1648 + u32 batch_start_offset = params->args_batch_start_offset;
1649 + u32 batch_len = params->args_batch_len;
1650 + u64 shadow_batch_start;
1651 int ret;
1652
1653 +
1654 shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
1655 PAGE_ALIGN(batch_len));
1656 if (IS_ERR(shadow_batch_obj))
1657 return ERR_CAST(shadow_batch_obj);
1658
1659 - ret = intel_engine_cmd_parser(engine,
1660 + vma = shadow_batch_pin(shadow_batch_obj, vm);
1661 + if (IS_ERR(vma))
1662 + goto out;
1663 +
1664 + batch_start = gen8_canonical_addr(params->batch->node.start) +
1665 + batch_start_offset;
1666 + shadow_batch_start = gen8_canonical_addr(vma->node.start);
1667 +
1668 + ret = intel_engine_cmd_parser(params->ctx,
1669 + engine,
1670 batch_obj,
1671 - shadow_batch_obj,
1672 + batch_start,
1673 batch_start_offset,
1674 batch_len,
1675 - is_master);
1676 + shadow_batch_obj,
1677 + shadow_batch_start);
1678 if (ret) {
1679 - if (ret == -EACCES) /* unhandled chained batch */
1680 + i915_vma_unpin(vma);
1681 +
1682 + /*
1683 + * Unsafe GGTT-backed buffers can still be submitted safely
1684 + * as non-secure.
1685 + * For PPGTT backing however, we have no choice but to forcibly
1686 + * reject unsafe buffers
1687 + */
1688 + if (CMDPARSER_USES_GGTT(eb->i915) && (ret == -EACCES))
1689 + /* Execute original buffer non-secure */
1690 vma = NULL;
1691 else
1692 vma = ERR_PTR(ret);
1693 - goto out;
1694 - }
1695
1696 - vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
1697 - if (IS_ERR(vma))
1698 goto out;
1699 + }
1700
1701 memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
1702
1703 @@ -1476,13 +1521,10 @@ execbuf_submit(struct i915_execbuffer_params *params,
1704 return ret;
1705 }
1706
1707 - exec_len = args->batch_len;
1708 + exec_len = params->args_batch_len;
1709 exec_start = params->batch->node.start +
1710 params->args_batch_start_offset;
1711
1712 - if (exec_len == 0)
1713 - exec_len = params->batch->size - params->args_batch_start_offset;
1714 -
1715 ret = params->engine->emit_bb_start(params->request,
1716 exec_start, exec_len,
1717 params->dispatch_flags);
1718 @@ -1601,8 +1643,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1719
1720 dispatch_flags = 0;
1721 if (args->flags & I915_EXEC_SECURE) {
1722 + if (INTEL_GEN(dev_priv) >= 11)
1723 + return -ENODEV;
1724 +
1725 + /* Return -EPERM to trigger fallback code on old binaries. */
1726 + if (!HAS_SECURE_BATCHES(dev_priv))
1727 + return -EPERM;
1728 +
1729 if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
1730 - return -EPERM;
1731 + return -EPERM;
1732
1733 dispatch_flags |= I915_DISPATCH_SECURE;
1734 }
1735 @@ -1710,32 +1759,26 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1736 goto err;
1737 }
1738
1739 + params->ctx = ctx;
1740 params->args_batch_start_offset = args->batch_start_offset;
1741 - if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {
1742 + params->args_batch_len = args->batch_len;
1743 + if (args->batch_len == 0)
1744 + params->args_batch_len = params->batch->size - params->args_batch_start_offset;
1745 +
1746 + if (intel_engine_requires_cmd_parser(engine) ||
1747 + (intel_engine_using_cmd_parser(engine) && args->batch_len)) {
1748 struct i915_vma *vma;
1749
1750 vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
1751 - params->batch->obj,
1752 - eb,
1753 - args->batch_start_offset,
1754 - args->batch_len,
1755 - drm_is_current_master(file));
1756 + params, eb, vm);
1757 if (IS_ERR(vma)) {
1758 ret = PTR_ERR(vma);
1759 goto err;
1760 }
1761
1762 if (vma) {
1763 - /*
1764 - * Batch parsed and accepted:
1765 - *
1766 - * Set the DISPATCH_SECURE bit to remove the NON_SECURE
1767 - * bit from MI_BATCH_BUFFER_START commands issued in
1768 - * the dispatch_execbuffer implementations. We
1769 - * specifically don't want that set on batches the
1770 - * command parser has accepted.
1771 - */
1772 - dispatch_flags |= I915_DISPATCH_SECURE;
1773 + if (CMDPARSER_USES_GGTT(dev_priv))
1774 + dispatch_flags |= I915_DISPATCH_SECURE;
1775 params->args_batch_start_offset = 0;
1776 params->batch = vma;
1777 }
1778 @@ -1798,7 +1841,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1779 params->file = file;
1780 params->engine = engine;
1781 params->dispatch_flags = dispatch_flags;
1782 - params->ctx = ctx;
1783
1784 ret = execbuf_submit(params, args, &eb->vmas);
1785 err_request:
1786 diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
1787 index 0bb4232f66bc..16f56f14f4d0 100644
1788 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
1789 +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
1790 @@ -140,7 +140,8 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
1791 if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
1792 return 0;
1793
1794 - if (enable_ppgtt == 1)
1795 + /* Full PPGTT is required by the Gen9 cmdparser */
1796 + if (enable_ppgtt == 1 && INTEL_GEN(dev_priv) != 9)
1797 return 1;
1798
1799 if (enable_ppgtt == 2 && has_full_ppgtt)
1800 @@ -177,8 +178,8 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
1801
1802 vma->pages = vma->obj->pages;
1803
1804 - /* Currently applicable only to VLV */
1805 - if (vma->obj->gt_ro)
1806 + /* Applicable to VLV, and gen8+ */
1807 + if (i915_gem_object_is_readonly(vma->obj))
1808 pte_flags |= PTE_READ_ONLY;
1809
1810 vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
1811 @@ -197,11 +198,14 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
1812
1813 static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
1814 enum i915_cache_level level,
1815 - bool valid)
1816 + bool valid, u32 flags)
1817 {
1818 gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
1819 pte |= addr;
1820
1821 + if (unlikely(flags & PTE_READ_ONLY))
1822 + pte &= ~_PAGE_RW;
1823 +
1824 switch (level) {
1825 case I915_CACHE_NONE:
1826 pte |= PPAT_UNCACHED_INDEX;
1827 @@ -472,7 +476,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
1828 gen8_pte_t scratch_pte;
1829
1830 scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
1831 - I915_CACHE_LLC, true);
1832 + I915_CACHE_LLC, true, 0);
1833
1834 fill_px(vm->dev, pt, scratch_pte);
1835 }
1836 @@ -769,7 +773,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
1837 {
1838 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1839 gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
1840 - I915_CACHE_LLC, use_scratch);
1841 + I915_CACHE_LLC, use_scratch, 0);
1842
1843 if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
1844 gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
1845 @@ -790,7 +794,8 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
1846 struct i915_page_directory_pointer *pdp,
1847 struct sg_page_iter *sg_iter,
1848 uint64_t start,
1849 - enum i915_cache_level cache_level)
1850 + enum i915_cache_level cache_level,
1851 + u32 flags)
1852 {
1853 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1854 gen8_pte_t *pt_vaddr;
1855 @@ -809,7 +814,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
1856
1857 pt_vaddr[pte] =
1858 gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
1859 - cache_level, true);
1860 + cache_level, true, flags);
1861 if (++pte == GEN8_PTES) {
1862 kunmap_px(ppgtt, pt_vaddr);
1863 pt_vaddr = NULL;
1864 @@ -830,7 +835,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
1865 struct sg_table *pages,
1866 uint64_t start,
1867 enum i915_cache_level cache_level,
1868 - u32 unused)
1869 + u32 flags)
1870 {
1871 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1872 struct sg_page_iter sg_iter;
1873 @@ -839,7 +844,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
1874
1875 if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
1876 gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
1877 - cache_level);
1878 + cache_level, flags);
1879 } else {
1880 struct i915_page_directory_pointer *pdp;
1881 uint64_t pml4e;
1882 @@ -847,7 +852,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
1883
1884 gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
1885 gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
1886 - start, cache_level);
1887 + start, cache_level, flags);
1888 }
1889 }
1890 }
1891 @@ -1452,7 +1457,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1892 uint64_t start = ppgtt->base.start;
1893 uint64_t length = ppgtt->base.total;
1894 gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
1895 - I915_CACHE_LLC, true);
1896 + I915_CACHE_LLC, true, 0);
1897
1898 if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
1899 gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
1900 @@ -1520,6 +1525,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1901 ppgtt->base.clear_range = gen8_ppgtt_clear_range;
1902 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1903 ppgtt->base.bind_vma = ppgtt_bind_vma;
1904 +
1905 + /*
1906 + * From bdw, there is support for read-only pages in the PPGTT.
1907 + *
1908 + * XXX GVT is not honouring the lack of RW in the PTE bits.
1909 + */
1910 + ppgtt->base.has_read_only = !intel_vgpu_active(to_i915(ppgtt->base.dev));
1911 +
1912 ppgtt->debug_dump = gen8_dump_ppgtt;
1913
1914 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
1915 @@ -2321,7 +2334,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
1916
1917 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
1918
1919 - gen8_set_pte(pte, gen8_pte_encode(addr, level, true));
1920 + gen8_set_pte(pte, gen8_pte_encode(addr, level, true, 0));
1921
1922 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
1923 POSTING_READ(GFX_FLSH_CNTL_GEN6);
1924 @@ -2332,7 +2345,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
1925 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
1926 struct sg_table *st,
1927 uint64_t start,
1928 - enum i915_cache_level level, u32 unused)
1929 + enum i915_cache_level level, u32 flags)
1930 {
1931 struct drm_i915_private *dev_priv = to_i915(vm->dev);
1932 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
1933 @@ -2343,12 +2356,20 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
1934 int rpm_atomic_seq;
1935 int i = 0;
1936
1937 + /* The GTT does not support read-only mappings */
1938 + GEM_BUG_ON(flags & PTE_READ_ONLY);
1939 +
1940 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
1941
1942 + /*
1943 + * Note that we ignore PTE_READ_ONLY here. The caller must be careful
1944 + * not to allow the user to override access to a read only page.
1945 + */
1946 +
1947 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
1948
1949 for_each_sgt_dma(addr, sgt_iter, st) {
1950 - gtt_entry = gen8_pte_encode(addr, level, true);
1951 + gtt_entry = gen8_pte_encode(addr, level, true, 0);
1952 gen8_set_pte(&gtt_entries[i++], gtt_entry);
1953 }
1954
1955 @@ -2499,7 +2520,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
1956
1957 scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
1958 I915_CACHE_LLC,
1959 - use_scratch);
1960 + use_scratch, 0);
1961 for (i = 0; i < num_entries; i++)
1962 gen8_set_pte(&gtt_base[i], scratch_pte);
1963 readl(gtt_base);
1964 @@ -2604,8 +2625,8 @@ static int ggtt_bind_vma(struct i915_vma *vma,
1965 if (ret)
1966 return ret;
1967
1968 - /* Currently applicable only to VLV */
1969 - if (obj->gt_ro)
1970 + /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
1971 + if (i915_gem_object_is_readonly(obj))
1972 pte_flags |= PTE_READ_ONLY;
1973
1974 vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
1975 @@ -2634,7 +2655,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
1976
1977 /* Currently applicable only to VLV */
1978 pte_flags = 0;
1979 - if (vma->obj->gt_ro)
1980 + if (i915_gem_object_is_readonly(vma->obj))
1981 pte_flags |= PTE_READ_ONLY;
1982
1983
1984 @@ -3193,6 +3214,10 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
1985 ggtt->base.total -= PAGE_SIZE;
1986 i915_address_space_init(&ggtt->base, dev_priv);
1987 ggtt->base.total += PAGE_SIZE;
1988 +
1989 + /* Only VLV supports read-only GGTT mappings */
1990 + ggtt->base.has_read_only = IS_VALLEYVIEW(dev_priv);
1991 +
1992 if (!HAS_LLC(dev_priv))
1993 ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
1994
1995 diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
1996 index ec78be2f8c77..43a0192242eb 100644
1997 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h
1998 +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
1999 @@ -392,6 +392,9 @@ struct i915_address_space {
2000 */
2001 struct list_head unbound_list;
2002
2003 + /* Some systems support read-only mappings for GGTT and/or PPGTT */
2004 + bool has_read_only:1;
2005 +
2006 /* FIXME: Need a more generic return type */
2007 gen6_pte_t (*pte_encode)(dma_addr_t addr,
2008 enum i915_cache_level level,
2009 diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
2010 index 8832f8ec1583..f597261c264f 100644
2011 --- a/drivers/gpu/drm/i915/i915_gem_request.c
2012 +++ b/drivers/gpu/drm/i915/i915_gem_request.c
2013 @@ -558,6 +558,10 @@ static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
2014 return;
2015
2016 intel_runtime_pm_get_noresume(dev_priv);
2017 +
2018 + if (NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv))
2019 + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2020 +
2021 dev_priv->gt.awake = true;
2022
2023 intel_enable_gt_powersave(dev_priv);
2024 diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
2025 index 768ad89d9cd4..9d9dfe194b9b 100644
2026 --- a/drivers/gpu/drm/i915/i915_params.c
2027 +++ b/drivers/gpu/drm/i915/i915_params.c
2028 @@ -49,7 +49,7 @@ struct i915_params i915 __read_mostly = {
2029 .reset = true,
2030 .invert_brightness = 0,
2031 .disable_display = 0,
2032 - .enable_cmd_parser = 1,
2033 + .enable_cmd_parser = true,
2034 .use_mmio_flip = 0,
2035 .mmio_debug = 0,
2036 .verbose_state_checks = 1,
2037 @@ -178,9 +178,9 @@ MODULE_PARM_DESC(invert_brightness,
2038 module_param_named(disable_display, i915.disable_display, bool, 0400);
2039 MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
2040
2041 -module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
2042 +module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, bool, 0400);
2043 MODULE_PARM_DESC(enable_cmd_parser,
2044 - "Enable command parsing (1=enabled [default], 0=disabled)");
2045 + "Enable command parsing (true=enabled [default], false=disabled)");
2046
2047 module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600);
2048 MODULE_PARM_DESC(use_mmio_flip,
2049 diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
2050 index 3a0dd78ddb38..82ac6e886eed 100644
2051 --- a/drivers/gpu/drm/i915/i915_params.h
2052 +++ b/drivers/gpu/drm/i915/i915_params.h
2053 @@ -44,7 +44,6 @@ struct i915_params {
2054 int disable_power_well;
2055 int enable_ips;
2056 int invert_brightness;
2057 - int enable_cmd_parser;
2058 int enable_guc_loading;
2059 int enable_guc_submission;
2060 int guc_log_level;
2061 @@ -53,6 +52,7 @@ struct i915_params {
2062 int edp_vswing;
2063 unsigned int inject_load_failure;
2064 /* leave bools at the end to not create holes */
2065 + bool enable_cmd_parser;
2066 bool enable_hangcheck;
2067 bool fastboot;
2068 bool prefault_disable;
2069 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
2070 index 70d96162def6..5468e69bf520 100644
2071 --- a/drivers/gpu/drm/i915/i915_reg.h
2072 +++ b/drivers/gpu/drm/i915/i915_reg.h
2073 @@ -223,6 +223,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
2074 #define GEN8_CONFIG0 _MMIO(0xD00)
2075 #define GEN9_DEFAULT_FIXES (1 << 3 | 1 << 2 | 1 << 1)
2076
2077 +#define GEN8_RC6_CTX_INFO _MMIO(0x8504)
2078 +
2079 #define GAC_ECO_BITS _MMIO(0x14090)
2080 #define ECOBITS_SNB_BIT (1<<13)
2081 #define ECOBITS_PPGTT_CACHE64B (3<<8)
2082 @@ -295,7 +297,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
2083 * Instruction field definitions used by the command parser
2084 */
2085 #define INSTR_CLIENT_SHIFT 29
2086 -#define INSTR_CLIENT_MASK 0xE0000000
2087 #define INSTR_MI_CLIENT 0x0
2088 #define INSTR_BC_CLIENT 0x2
2089 #define INSTR_RC_CLIENT 0x3
2090 @@ -569,6 +570,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
2091 */
2092 #define BCS_SWCTRL _MMIO(0x22200)
2093
2094 +/* There are 16 GPR registers */
2095 +#define BCS_GPR(n) _MMIO(0x22600 + (n) * 8)
2096 +#define BCS_GPR_UDW(n) _MMIO(0x22600 + (n) * 8 + 4)
2097 +
2098 #define GPGPU_THREADS_DISPATCHED _MMIO(0x2290)
2099 #define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4)
2100 #define HS_INVOCATION_COUNT _MMIO(0x2300)
2101 @@ -5936,6 +5941,10 @@ enum {
2102 #define SKL_CSR_DC5_DC6_COUNT _MMIO(0x8002C)
2103 #define BXT_CSR_DC3_DC5_COUNT _MMIO(0x80038)
2104
2105 +/* Display Internal Timeout Register */
2106 +#define RM_TIMEOUT _MMIO(0x42060)
2107 +#define MMIO_TIMEOUT_US(us) ((us) << 0)
2108 +
2109 /* interrupts */
2110 #define DE_MASTER_IRQ_CONTROL (1 << 31)
2111 #define DE_SPRITEB_FLIP_DONE (1 << 29)
2112 diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
2113 index 8aafb9601540..b3af565b7027 100644
2114 --- a/drivers/gpu/drm/i915/intel_drv.h
2115 +++ b/drivers/gpu/drm/i915/intel_drv.h
2116 @@ -1730,6 +1730,9 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
2117 void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv);
2118 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
2119 void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
2120 +bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915);
2121 +void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915);
2122 +void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915);
2123 void gen6_rps_busy(struct drm_i915_private *dev_priv);
2124 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
2125 void gen6_rps_idle(struct drm_i915_private *dev_priv);
2126 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
2127 index 05427d292457..07d2a8e7f78c 100644
2128 --- a/drivers/gpu/drm/i915/intel_pm.c
2129 +++ b/drivers/gpu/drm/i915/intel_pm.c
2130 @@ -105,6 +105,13 @@ static void bxt_init_clock_gating(struct drm_device *dev)
2131 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
2132 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
2133 PWM1_GATING_DIS | PWM2_GATING_DIS);
2134 + /*
2135 + * Lower the display internal timeout.
2136 + * This is needed to avoid any hard hangs when DSI port PLL
2137 + * is off and a MMIO access is attempted by any privilege
2138 + * application, using batch buffers or any other means.
2139 + */
2140 + I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
2141 }
2142
2143 static void i915_pineview_get_mem_freq(struct drm_device *dev)
2144 @@ -5149,19 +5156,23 @@ static void gen9_disable_rps(struct drm_i915_private *dev_priv)
2145 I915_WRITE(GEN6_RP_CONTROL, 0);
2146 }
2147
2148 -static void gen6_disable_rps(struct drm_i915_private *dev_priv)
2149 +static void gen6_disable_rc6(struct drm_i915_private *dev_priv)
2150 {
2151 I915_WRITE(GEN6_RC_CONTROL, 0);
2152 +}
2153 +
2154 +static void gen6_disable_rps(struct drm_i915_private *dev_priv)
2155 +{
2156 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
2157 I915_WRITE(GEN6_RP_CONTROL, 0);
2158 }
2159
2160 -static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
2161 +static void cherryview_disable_rc6(struct drm_i915_private *dev_priv)
2162 {
2163 I915_WRITE(GEN6_RC_CONTROL, 0);
2164 }
2165
2166 -static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
2167 +static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
2168 {
2169 /* we're doing forcewake before Disabling RC6,
2170 * This what the BIOS expects when going into suspend */
2171 @@ -5426,7 +5437,8 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
2172 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
2173
2174 /* 3a: Enable RC6 */
2175 - if (intel_enable_rc6() & INTEL_RC6_ENABLE)
2176 + if (!dev_priv->rps.ctx_corrupted &&
2177 + intel_enable_rc6() & INTEL_RC6_ENABLE)
2178 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
2179 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
2180 /* WaRsUseTimeoutMode */
2181 @@ -5484,7 +5496,8 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
2182 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
2183
2184 /* 3: Enable RC6 */
2185 - if (intel_enable_rc6() & INTEL_RC6_ENABLE)
2186 + if (!dev_priv->rps.ctx_corrupted &&
2187 + intel_enable_rc6() & INTEL_RC6_ENABLE)
2188 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
2189 intel_print_rc6_info(dev_priv, rc6_mask);
2190 if (IS_BROADWELL(dev_priv))
2191 @@ -6655,6 +6668,95 @@ static void intel_init_emon(struct drm_i915_private *dev_priv)
2192 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
2193 }
2194
2195 +static bool i915_rc6_ctx_corrupted(struct drm_i915_private *dev_priv)
2196 +{
2197 + return !I915_READ(GEN8_RC6_CTX_INFO);
2198 +}
2199 +
2200 +static void i915_rc6_ctx_wa_init(struct drm_i915_private *i915)
2201 +{
2202 + if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
2203 + return;
2204 +
2205 + if (i915_rc6_ctx_corrupted(i915)) {
2206 + DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
2207 + i915->rps.ctx_corrupted = true;
2208 + intel_runtime_pm_get(i915);
2209 + }
2210 +}
2211 +
2212 +static void i915_rc6_ctx_wa_cleanup(struct drm_i915_private *i915)
2213 +{
2214 + if (i915->rps.ctx_corrupted) {
2215 + intel_runtime_pm_put(i915);
2216 + i915->rps.ctx_corrupted = false;
2217 + }
2218 +}
2219 +
2220 +/**
2221 + * i915_rc6_ctx_wa_suspend - system suspend sequence for the RC6 CTX WA
2222 + * @i915: i915 device
2223 + *
2224 + * Perform any steps needed to clean up the RC6 CTX WA before system suspend.
2225 + */
2226 +void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915)
2227 +{
2228 + if (i915->rps.ctx_corrupted)
2229 + intel_runtime_pm_put(i915);
2230 +}
2231 +
2232 +/**
2233 + * i915_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
2234 + * @i915: i915 device
2235 + *
2236 + * Perform any steps needed to re-init the RC6 CTX WA after system resume.
2237 + */
2238 +void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915)
2239 +{
2240 + if (!i915->rps.ctx_corrupted)
2241 + return;
2242 +
2243 + if (i915_rc6_ctx_corrupted(i915)) {
2244 + intel_runtime_pm_get(i915);
2245 + return;
2246 + }
2247 +
2248 + DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
2249 + i915->rps.ctx_corrupted = false;
2250 +}
2251 +
2252 +static void intel_disable_rc6(struct drm_i915_private *dev_priv);
2253 +
2254 +/**
2255 + * i915_rc6_ctx_wa_check - check for a new RC6 CTX corruption
2256 + * @i915: i915 device
2257 + *
2258 + * Check if an RC6 CTX corruption has happened since the last check and if so
2259 + * disable RC6 and runtime power management.
2260 + *
2261 + * Return false if no context corruption has happened since the last call of
2262 + * this function, true otherwise.
2263 +*/
2264 +bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915)
2265 +{
2266 + if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
2267 + return false;
2268 +
2269 + if (i915->rps.ctx_corrupted)
2270 + return false;
2271 +
2272 + if (!i915_rc6_ctx_corrupted(i915))
2273 + return false;
2274 +
2275 + DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
2276 +
2277 + intel_disable_rc6(i915);
2278 + i915->rps.ctx_corrupted = true;
2279 + intel_runtime_pm_get_noresume(i915);
2280 +
2281 + return true;
2282 +}
2283 +
2284 void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
2285 {
2286 /*
2287 @@ -6669,6 +6771,8 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
2288 mutex_lock(&dev_priv->drm.struct_mutex);
2289 mutex_lock(&dev_priv->rps.hw_lock);
2290
2291 + i915_rc6_ctx_wa_init(dev_priv);
2292 +
2293 /* Initialize RPS limits (for userspace) */
2294 if (IS_CHERRYVIEW(dev_priv))
2295 cherryview_init_gt_powersave(dev_priv);
2296 @@ -6718,6 +6822,8 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
2297 if (IS_VALLEYVIEW(dev_priv))
2298 valleyview_cleanup_gt_powersave(dev_priv);
2299
2300 + i915_rc6_ctx_wa_cleanup(dev_priv);
2301 +
2302 if (!i915.enable_rc6)
2303 intel_runtime_pm_put(dev_priv);
2304 }
2305 @@ -6749,27 +6855,47 @@ void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
2306 gen6_reset_rps_interrupts(dev_priv);
2307 }
2308
2309 -void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
2310 +static void __intel_disable_rc6(struct drm_i915_private *dev_priv)
2311 {
2312 - if (!READ_ONCE(dev_priv->rps.enabled))
2313 - return;
2314 + if (INTEL_GEN(dev_priv) >= 9)
2315 + gen9_disable_rc6(dev_priv);
2316 + else if (IS_CHERRYVIEW(dev_priv))
2317 + cherryview_disable_rc6(dev_priv);
2318 + else if (IS_VALLEYVIEW(dev_priv))
2319 + valleyview_disable_rc6(dev_priv);
2320 + else if (INTEL_GEN(dev_priv) >= 6)
2321 + gen6_disable_rc6(dev_priv);
2322 +}
2323
2324 +static void intel_disable_rc6(struct drm_i915_private *dev_priv)
2325 +{
2326 mutex_lock(&dev_priv->rps.hw_lock);
2327 + __intel_disable_rc6(dev_priv);
2328 + mutex_unlock(&dev_priv->rps.hw_lock);
2329 +}
2330
2331 - if (INTEL_GEN(dev_priv) >= 9) {
2332 - gen9_disable_rc6(dev_priv);
2333 +static void intel_disable_rps(struct drm_i915_private *dev_priv)
2334 +{
2335 + if (INTEL_GEN(dev_priv) >= 9)
2336 gen9_disable_rps(dev_priv);
2337 - } else if (IS_CHERRYVIEW(dev_priv)) {
2338 - cherryview_disable_rps(dev_priv);
2339 - } else if (IS_VALLEYVIEW(dev_priv)) {
2340 - valleyview_disable_rps(dev_priv);
2341 - } else if (INTEL_GEN(dev_priv) >= 6) {
2342 + else if (INTEL_GEN(dev_priv) >= 6)
2343 gen6_disable_rps(dev_priv);
2344 - } else if (IS_IRONLAKE_M(dev_priv)) {
2345 + else if (IS_IRONLAKE_M(dev_priv))
2346 ironlake_disable_drps(dev_priv);
2347 - }
2348 +}
2349 +
2350 +void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
2351 +{
2352 + if (!READ_ONCE(dev_priv->rps.enabled))
2353 + return;
2354 +
2355 + mutex_lock(&dev_priv->rps.hw_lock);
2356 +
2357 + __intel_disable_rc6(dev_priv);
2358 + intel_disable_rps(dev_priv);
2359
2360 dev_priv->rps.enabled = false;
2361 +
2362 mutex_unlock(&dev_priv->rps.hw_lock);
2363 }
2364
2365 diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
2366 index 8babfe0ce4e3..29c3123840ae 100644
2367 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
2368 +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
2369 @@ -1951,6 +1951,7 @@ void intel_ring_unpin(struct intel_ring *ring)
2370 static struct i915_vma *
2371 intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
2372 {
2373 + struct i915_address_space *vm = &dev_priv->ggtt.base;
2374 struct drm_i915_gem_object *obj;
2375 struct i915_vma *vma;
2376
2377 @@ -1960,10 +1961,14 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
2378 if (IS_ERR(obj))
2379 return ERR_CAST(obj);
2380
2381 - /* mark ring buffers as read-only from GPU side by default */
2382 - obj->gt_ro = 1;
2383 + /*
2384 + * Mark ring buffers as read-only from GPU side (so no stray overwrites)
2385 + * if supported by the platform's GGTT.
2386 + */
2387 + if (vm->has_read_only)
2388 + i915_gem_object_set_readonly(obj);
2389
2390 - vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
2391 + vma = i915_vma_create(obj, vm, NULL);
2392 if (IS_ERR(vma))
2393 goto err;
2394
2395 diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
2396 index ec0b4a0c605d..ce14cd8495e8 100644
2397 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
2398 +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
2399 @@ -341,7 +341,9 @@ struct intel_engine_cs {
2400
2401 struct intel_engine_hangcheck hangcheck;
2402
2403 - bool needs_cmd_parser;
2404 +#define I915_ENGINE_USING_CMD_PARSER BIT(0)
2405 +#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(3)
2406 + unsigned int flags;
2407
2408 /*
2409 * Table of commands the command parser needs to know about
2410 @@ -374,7 +376,19 @@ intel_engine_initialized(const struct intel_engine_cs *engine)
2411 return engine->i915 != NULL;
2412 }
2413
2414 -static inline unsigned
2415 +static inline bool
2416 +intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
2417 +{
2418 + return engine->flags & I915_ENGINE_USING_CMD_PARSER;
2419 +}
2420 +
2421 +static inline bool
2422 +intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
2423 +{
2424 + return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
2425 +}
2426 +
2427 +static inline unsigned int
2428 intel_engine_flag(const struct intel_engine_cs *engine)
2429 {
2430 return 1 << engine->id;
2431 diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
2432 index b82ef5ed727c..ac7ae206f2e7 100644
2433 --- a/drivers/gpu/drm/radeon/si_dpm.c
2434 +++ b/drivers/gpu/drm/radeon/si_dpm.c
2435 @@ -1956,6 +1956,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
2436 case 0x682C:
2437 si_pi->cac_weights = cac_weights_cape_verde_pro;
2438 si_pi->dte_data = dte_data_sun_xt;
2439 + update_dte_from_pl2 = true;
2440 break;
2441 case 0x6825:
2442 case 0x6827:
2443 diff --git a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
2444 index b9b917d2d50d..c41dbb167c91 100644
2445 --- a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
2446 +++ b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
2447 @@ -90,7 +90,7 @@ int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
2448 return 0;
2449 out:
2450 dev_err(&cl->device->dev, "error in allocating Tx pool\n");
2451 - ishtp_cl_free_rx_ring(cl);
2452 + ishtp_cl_free_tx_ring(cl);
2453 return -ENOMEM;
2454 }
2455
2456 diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
2457 index 12898424d838..6f975538996c 100644
2458 --- a/drivers/iio/imu/adis16480.c
2459 +++ b/drivers/iio/imu/adis16480.c
2460 @@ -266,8 +266,11 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
2461 struct adis16480 *st = iio_priv(indio_dev);
2462 unsigned int t;
2463
2464 + if (val < 0 || val2 < 0)
2465 + return -EINVAL;
2466 +
2467 t = val * 1000 + val2 / 1000;
2468 - if (t <= 0)
2469 + if (t == 0)
2470 return -EINVAL;
2471
2472 t = 2460000 / t;
2473 diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
2474 index e5752352e0fb..605d50ad123c 100644
2475 --- a/drivers/infiniband/hw/cxgb4/cm.c
2476 +++ b/drivers/infiniband/hw/cxgb4/cm.c
2477 @@ -490,7 +490,6 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
2478
2479 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
2480 release_ep_resources(ep);
2481 - kfree_skb(skb);
2482 return 0;
2483 }
2484
2485 @@ -501,7 +500,6 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
2486 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
2487 c4iw_put_ep(&ep->parent_ep->com);
2488 release_ep_resources(ep);
2489 - kfree_skb(skb);
2490 return 0;
2491 }
2492
2493 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2494 index c1971bca62fb..d52fd842ef1f 100644
2495 --- a/drivers/net/bonding/bond_main.c
2496 +++ b/drivers/net/bonding/bond_main.c
2497 @@ -1759,7 +1759,8 @@ err_detach:
2498 slave_disable_netpoll(new_slave);
2499
2500 err_close:
2501 - slave_dev->priv_flags &= ~IFF_BONDING;
2502 + if (!netif_is_bond_master(slave_dev))
2503 + slave_dev->priv_flags &= ~IFF_BONDING;
2504 dev_close(slave_dev);
2505
2506 err_restore_mac:
2507 @@ -1960,7 +1961,8 @@ static int __bond_release_one(struct net_device *bond_dev,
2508
2509 dev_set_mtu(slave_dev, slave->original_mtu);
2510
2511 - slave_dev->priv_flags &= ~IFF_BONDING;
2512 + if (!netif_is_bond_master(slave_dev))
2513 + slave_dev->priv_flags &= ~IFF_BONDING;
2514
2515 bond_free_slave(slave);
2516
2517 diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
2518 index e3dccd3200d5..7d35f6737499 100644
2519 --- a/drivers/net/can/c_can/c_can.c
2520 +++ b/drivers/net/can/c_can/c_can.c
2521 @@ -97,6 +97,9 @@
2522 #define BTR_TSEG2_SHIFT 12
2523 #define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT)
2524
2525 +/* interrupt register */
2526 +#define INT_STS_PENDING 0x8000
2527 +
2528 /* brp extension register */
2529 #define BRP_EXT_BRPE_MASK 0x0f
2530 #define BRP_EXT_BRPE_SHIFT 0
2531 @@ -1029,10 +1032,16 @@ static int c_can_poll(struct napi_struct *napi, int quota)
2532 u16 curr, last = priv->last_status;
2533 int work_done = 0;
2534
2535 - priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
2536 - /* Ack status on C_CAN. D_CAN is self clearing */
2537 - if (priv->type != BOSCH_D_CAN)
2538 - priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
2539 + /* Only read the status register if a status interrupt was pending */
2540 + if (atomic_xchg(&priv->sie_pending, 0)) {
2541 + priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
2542 + /* Ack status on C_CAN. D_CAN is self clearing */
2543 + if (priv->type != BOSCH_D_CAN)
2544 + priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
2545 + } else {
2546 + /* no change detected ... */
2547 + curr = last;
2548 + }
2549
2550 /* handle state changes */
2551 if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
2552 @@ -1083,10 +1092,16 @@ static irqreturn_t c_can_isr(int irq, void *dev_id)
2553 {
2554 struct net_device *dev = (struct net_device *)dev_id;
2555 struct c_can_priv *priv = netdev_priv(dev);
2556 + int reg_int;
2557
2558 - if (!priv->read_reg(priv, C_CAN_INT_REG))
2559 + reg_int = priv->read_reg(priv, C_CAN_INT_REG);
2560 + if (!reg_int)
2561 return IRQ_NONE;
2562
2563 + /* save for later use */
2564 + if (reg_int & INT_STS_PENDING)
2565 + atomic_set(&priv->sie_pending, 1);
2566 +
2567 /* disable all interrupts and schedule the NAPI */
2568 c_can_irq_control(priv, false);
2569 napi_schedule(&priv->napi);
2570 diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
2571 index 8acdc7fa4792..d5567a7c1c6d 100644
2572 --- a/drivers/net/can/c_can/c_can.h
2573 +++ b/drivers/net/can/c_can/c_can.h
2574 @@ -198,6 +198,7 @@ struct c_can_priv {
2575 struct net_device *dev;
2576 struct device *device;
2577 atomic_t tx_active;
2578 + atomic_t sie_pending;
2579 unsigned long tx_dir;
2580 int last_status;
2581 u16 (*read_reg) (const struct c_can_priv *priv, enum reg index);
2582 diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
2583 index baef09b9449f..6b866d0451b2 100644
2584 --- a/drivers/net/can/flexcan.c
2585 +++ b/drivers/net/can/flexcan.c
2586 @@ -923,6 +923,7 @@ static int flexcan_chip_start(struct net_device *dev)
2587 reg_mecr = flexcan_read(&regs->mecr);
2588 reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
2589 flexcan_write(reg_mecr, &regs->mecr);
2590 + reg_mecr |= FLEXCAN_MECR_ECCDIS;
2591 reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
2592 FLEXCAN_MECR_FANCEI_MSK);
2593 flexcan_write(reg_mecr, &regs->mecr);
2594 diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
2595 index 5d5012337d9e..014b9ae3dc17 100644
2596 --- a/drivers/net/can/usb/gs_usb.c
2597 +++ b/drivers/net/can/usb/gs_usb.c
2598 @@ -632,6 +632,7 @@ static int gs_can_open(struct net_device *netdev)
2599 rc);
2600
2601 usb_unanchor_urb(urb);
2602 + usb_free_urb(urb);
2603 break;
2604 }
2605
2606 diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
2607 index 838545ce468d..e626c2afbbb1 100644
2608 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c
2609 +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
2610 @@ -108,7 +108,7 @@ struct pcan_usb_msg_context {
2611 u8 *end;
2612 u8 rec_cnt;
2613 u8 rec_idx;
2614 - u8 rec_data_idx;
2615 + u8 rec_ts_idx;
2616 struct net_device *netdev;
2617 struct pcan_usb *pdev;
2618 };
2619 @@ -552,10 +552,15 @@ static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc,
2620 mc->ptr += PCAN_USB_CMD_ARGS;
2621
2622 if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
2623 - int err = pcan_usb_decode_ts(mc, !mc->rec_idx);
2624 + int err = pcan_usb_decode_ts(mc, !mc->rec_ts_idx);
2625
2626 if (err)
2627 return err;
2628 +
2629 + /* Next packet in the buffer will have a timestamp on a single
2630 + * byte
2631 + */
2632 + mc->rec_ts_idx++;
2633 }
2634
2635 switch (f) {
2636 @@ -638,10 +643,13 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
2637
2638 cf->can_dlc = get_can_dlc(rec_len);
2639
2640 - /* first data packet timestamp is a word */
2641 - if (pcan_usb_decode_ts(mc, !mc->rec_data_idx))
2642 + /* Only first packet timestamp is a word */
2643 + if (pcan_usb_decode_ts(mc, !mc->rec_ts_idx))
2644 goto decode_failed;
2645
2646 + /* Next packet in the buffer will have a timestamp on a single byte */
2647 + mc->rec_ts_idx++;
2648 +
2649 /* read data */
2650 memset(cf->data, 0x0, sizeof(cf->data));
2651 if (status_len & PCAN_USB_STATUSLEN_RTR) {
2652 @@ -695,7 +703,6 @@ static int pcan_usb_decode_msg(struct peak_usb_device *dev, u8 *ibuf, u32 lbuf)
2653 /* handle normal can frames here */
2654 } else {
2655 err = pcan_usb_decode_data(&mc, sl);
2656 - mc.rec_data_idx++;
2657 }
2658 }
2659
2660 diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
2661 index ce0a352a5eaa..6cd4317fe94d 100644
2662 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
2663 +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
2664 @@ -774,7 +774,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
2665 dev = netdev_priv(netdev);
2666
2667 /* allocate a buffer large enough to send commands */
2668 - dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
2669 + dev->cmd_buf = kzalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
2670 if (!dev->cmd_buf) {
2671 err = -ENOMEM;
2672 goto lbl_free_candev;
2673 diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
2674 index 27861c417c94..3e4416473607 100644
2675 --- a/drivers/net/can/usb/usb_8dev.c
2676 +++ b/drivers/net/can/usb/usb_8dev.c
2677 @@ -1007,9 +1007,8 @@ static void usb_8dev_disconnect(struct usb_interface *intf)
2678 netdev_info(priv->netdev, "device disconnected\n");
2679
2680 unregister_netdev(priv->netdev);
2681 - free_candev(priv->netdev);
2682 -
2683 unlink_all_urbs(priv);
2684 + free_candev(priv->netdev);
2685 }
2686
2687 }
2688 diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
2689 index c770ca37c9b2..a7d30731d376 100644
2690 --- a/drivers/net/ethernet/arc/emac_rockchip.c
2691 +++ b/drivers/net/ethernet/arc/emac_rockchip.c
2692 @@ -261,6 +261,9 @@ static int emac_rockchip_remove(struct platform_device *pdev)
2693 if (priv->regulator)
2694 regulator_disable(priv->regulator);
2695
2696 + if (priv->soc_data->need_div_macclk)
2697 + clk_disable_unprepare(priv->macclk);
2698 +
2699 free_netdev(ndev);
2700 return err;
2701 }
2702 diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
2703 index 407e1177d9d1..4436a0307f32 100644
2704 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c
2705 +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
2706 @@ -953,7 +953,6 @@ static int hip04_remove(struct platform_device *pdev)
2707
2708 hip04_free_ring(ndev, d);
2709 unregister_netdev(ndev);
2710 - free_irq(ndev->irq, ndev);
2711 of_node_put(priv->phy_node);
2712 cancel_work_sync(&priv->tx_timeout_task);
2713 free_netdev(ndev);
2714 diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
2715 index 2a81f6d72140..8936f19e9325 100644
2716 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
2717 +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
2718 @@ -628,6 +628,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
2719 for (i = 0; i < adapter->num_rx_queues; i++)
2720 rxdr[i].count = rxdr->count;
2721
2722 + err = 0;
2723 if (netif_running(adapter->netdev)) {
2724 /* Try to get new resources before deleting old */
2725 err = e1000_setup_all_rx_resources(adapter);
2726 @@ -648,14 +649,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
2727 adapter->rx_ring = rxdr;
2728 adapter->tx_ring = txdr;
2729 err = e1000_up(adapter);
2730 - if (err)
2731 - goto err_setup;
2732 }
2733 kfree(tx_old);
2734 kfree(rx_old);
2735
2736 clear_bit(__E1000_RESETTING, &adapter->flags);
2737 - return 0;
2738 + return err;
2739 +
2740 err_setup_tx:
2741 e1000_free_all_rx_resources(adapter);
2742 err_setup_rx:
2743 @@ -667,7 +667,6 @@ err_alloc_rx:
2744 err_alloc_tx:
2745 if (netif_running(adapter->netdev))
2746 e1000_up(adapter);
2747 -err_setup:
2748 clear_bit(__E1000_RESETTING, &adapter->flags);
2749 return err;
2750 }
2751 diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
2752 index 7956176c2c73..7e35bd665630 100644
2753 --- a/drivers/net/ethernet/intel/igb/igb_main.c
2754 +++ b/drivers/net/ethernet/intel/igb/igb_main.c
2755 @@ -1677,7 +1677,8 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
2756 if ((hw->phy.media_type == e1000_media_type_copper) &&
2757 (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
2758 swap_now = true;
2759 - } else if (!(connsw & E1000_CONNSW_SERDESD)) {
2760 + } else if ((hw->phy.media_type != e1000_media_type_copper) &&
2761 + !(connsw & E1000_CONNSW_SERDESD)) {
2762 /* copper signal takes time to appear */
2763 if (adapter->copper_tries < 4) {
2764 adapter->copper_tries++;
2765 diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
2766 index 85f46dbecd5b..9b1920b58594 100644
2767 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c
2768 +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
2769 @@ -2619,8 +2619,16 @@ enum qede_remove_mode {
2770 static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
2771 {
2772 struct net_device *ndev = pci_get_drvdata(pdev);
2773 - struct qede_dev *edev = netdev_priv(ndev);
2774 - struct qed_dev *cdev = edev->cdev;
2775 + struct qede_dev *edev;
2776 + struct qed_dev *cdev;
2777 +
2778 + if (!ndev) {
2779 + dev_info(&pdev->dev, "Device has already been removed\n");
2780 + return;
2781 + }
2782 +
2783 + edev = netdev_priv(ndev);
2784 + cdev = edev->cdev;
2785
2786 DP_INFO(edev, "Starting qede_remove\n");
2787
2788 diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
2789 index 7ea8ead4fd1c..bbc983b04561 100644
2790 --- a/drivers/net/fjes/fjes_main.c
2791 +++ b/drivers/net/fjes/fjes_main.c
2792 @@ -1187,8 +1187,17 @@ static int fjes_probe(struct platform_device *plat_dev)
2793 adapter->open_guard = false;
2794
2795 adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
2796 + if (unlikely(!adapter->txrx_wq)) {
2797 + err = -ENOMEM;
2798 + goto err_free_netdev;
2799 + }
2800 +
2801 adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
2802 WQ_MEM_RECLAIM, 0);
2803 + if (unlikely(!adapter->control_wq)) {
2804 + err = -ENOMEM;
2805 + goto err_free_txrx_wq;
2806 + }
2807
2808 INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
2809 INIT_WORK(&adapter->raise_intr_rxdata_task,
2810 @@ -1205,7 +1214,7 @@ static int fjes_probe(struct platform_device *plat_dev)
2811 hw->hw_res.irq = platform_get_irq(plat_dev, 0);
2812 err = fjes_hw_init(&adapter->hw);
2813 if (err)
2814 - goto err_free_netdev;
2815 + goto err_free_control_wq;
2816
2817 /* setup MAC address (02:00:00:00:00:[epid])*/
2818 netdev->dev_addr[0] = 2;
2819 @@ -1225,6 +1234,10 @@ static int fjes_probe(struct platform_device *plat_dev)
2820
2821 err_hw_exit:
2822 fjes_hw_exit(&adapter->hw);
2823 +err_free_control_wq:
2824 + destroy_workqueue(adapter->control_wq);
2825 +err_free_txrx_wq:
2826 + destroy_workqueue(adapter->txrx_wq);
2827 err_free_netdev:
2828 free_netdev(netdev);
2829 err_out:
2830 diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
2831 index 43e28d2b0de7..cbb9b4343d1e 100644
2832 --- a/drivers/net/usb/cdc_ncm.c
2833 +++ b/drivers/net/usb/cdc_ncm.c
2834 @@ -576,8 +576,8 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
2835 /* read current mtu value from device */
2836 err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
2837 USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
2838 - 0, iface_no, &max_datagram_size, 2);
2839 - if (err < 0) {
2840 + 0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
2841 + if (err < sizeof(max_datagram_size)) {
2842 dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
2843 goto out;
2844 }
2845 @@ -588,7 +588,7 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
2846 max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
2847 err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
2848 USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
2849 - 0, iface_no, &max_datagram_size, 2);
2850 + 0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
2851 if (err < 0)
2852 dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
2853
2854 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2855 index 0d48714c3f28..de7b431fdd6b 100644
2856 --- a/drivers/net/usb/qmi_wwan.c
2857 +++ b/drivers/net/usb/qmi_wwan.c
2858 @@ -951,6 +951,7 @@ static const struct usb_device_id products[] = {
2859 {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
2860 {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
2861 {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
2862 + {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
2863 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
2864 {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
2865 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
2866 diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
2867 index 712936f5d2d6..f1addfd7b31a 100644
2868 --- a/drivers/nfc/fdp/i2c.c
2869 +++ b/drivers/nfc/fdp/i2c.c
2870 @@ -268,7 +268,7 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev,
2871 *fw_vsc_cfg, len);
2872
2873 if (r) {
2874 - devm_kfree(dev, fw_vsc_cfg);
2875 + devm_kfree(dev, *fw_vsc_cfg);
2876 goto vsc_read_err;
2877 }
2878 } else {
2879 diff --git a/drivers/nfc/st21nfca/core.c b/drivers/nfc/st21nfca/core.c
2880 index dacb9166081b..2f08e16ba566 100644
2881 --- a/drivers/nfc/st21nfca/core.c
2882 +++ b/drivers/nfc/st21nfca/core.c
2883 @@ -719,6 +719,7 @@ static int st21nfca_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
2884 NFC_PROTO_FELICA_MASK;
2885 } else {
2886 kfree_skb(nfcid_skb);
2887 + nfcid_skb = NULL;
2888 /* P2P in type A */
2889 r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE,
2890 ST21NFCA_RF_READER_F_NFCID1,
2891 diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
2892 index 8e101b19c4d6..90be00c1bab9 100644
2893 --- a/drivers/pci/host/pci-tegra.c
2894 +++ b/drivers/pci/host/pci-tegra.c
2895 @@ -603,12 +603,15 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
2896 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
2897 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
2898
2899 -/* Tegra PCIE requires relaxed ordering */
2900 +/* Tegra20 and Tegra30 PCIE requires relaxed ordering */
2901 static void tegra_pcie_relax_enable(struct pci_dev *dev)
2902 {
2903 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
2904 }
2905 -DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
2906 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
2907 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
2908 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
2909 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);
2910
2911 static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
2912 {
2913 diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
2914 index 56a3df4fddb0..21ec7b5b6c85 100644
2915 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c
2916 +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
2917 @@ -759,9 +759,9 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2918
2919 if (!(vport->fc_flag & FC_PT2PT)) {
2920 /* Check config parameter use-adisc or FCP-2 */
2921 - if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
2922 + if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
2923 ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
2924 - (ndlp->nlp_type & NLP_FCP_TARGET))) {
2925 + (ndlp->nlp_type & NLP_FCP_TARGET)))) {
2926 spin_lock_irq(shost->host_lock);
2927 ndlp->nlp_flag |= NLP_NPR_ADISC;
2928 spin_unlock_irq(shost->host_lock);
2929 diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
2930 index 4a6e086279f9..33e4dceb895f 100644
2931 --- a/drivers/scsi/qla2xxx/qla_bsg.c
2932 +++ b/drivers/scsi/qla2xxx/qla_bsg.c
2933 @@ -252,7 +252,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
2934 srb_t *sp;
2935 const char *type;
2936 int req_sg_cnt, rsp_sg_cnt;
2937 - int rval = (DRIVER_ERROR << 16);
2938 + int rval = (DID_ERROR << 16);
2939 uint16_t nextlid = 0;
2940
2941 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
2942 @@ -426,7 +426,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
2943 struct Scsi_Host *host = bsg_job->shost;
2944 scsi_qla_host_t *vha = shost_priv(host);
2945 struct qla_hw_data *ha = vha->hw;
2946 - int rval = (DRIVER_ERROR << 16);
2947 + int rval = (DID_ERROR << 16);
2948 int req_sg_cnt, rsp_sg_cnt;
2949 uint16_t loop_id;
2950 struct fc_port *fcport;
2951 @@ -1911,7 +1911,7 @@ qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
2952 struct Scsi_Host *host = bsg_job->shost;
2953 scsi_qla_host_t *vha = shost_priv(host);
2954 struct qla_hw_data *ha = vha->hw;
2955 - int rval = (DRIVER_ERROR << 16);
2956 + int rval = (DID_ERROR << 16);
2957 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
2958 srb_t *sp;
2959 int req_sg_cnt = 0, rsp_sg_cnt = 0;
2960 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
2961 index c813c9b75a10..3bae56b202f8 100644
2962 --- a/drivers/scsi/qla2xxx/qla_os.c
2963 +++ b/drivers/scsi/qla2xxx/qla_os.c
2964 @@ -3077,6 +3077,10 @@ qla2x00_shutdown(struct pci_dev *pdev)
2965 /* Stop currently executing firmware. */
2966 qla2x00_try_to_stop_firmware(vha);
2967
2968 + /* Disable timer */
2969 + if (vha->timer_active)
2970 + qla2x00_stop_timer(vha);
2971 +
2972 /* Turn adapter off line */
2973 vha->flags.online = 0;
2974
2975 diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
2976 index 94ec2dc27748..e8061b02b7e3 100644
2977 --- a/drivers/usb/core/config.c
2978 +++ b/drivers/usb/core/config.c
2979 @@ -343,6 +343,11 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
2980
2981 /* Validate the wMaxPacketSize field */
2982 maxp = usb_endpoint_maxp(&endpoint->desc);
2983 + if (maxp == 0) {
2984 + dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has wMaxPacketSize 0, skipping\n",
2985 + cfgno, inum, asnum, d->bEndpointAddress);
2986 + goto skip_to_next_endpoint_or_interface_descriptor;
2987 + }
2988
2989 /* Find the highest legal maxpacket size for this endpoint */
2990 i = 0; /* additional transactions per microframe */
2991 diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
2992 index 73dc5a6c6108..7154a93f0114 100644
2993 --- a/drivers/usb/dwc3/core.c
2994 +++ b/drivers/usb/dwc3/core.c
2995 @@ -227,8 +227,7 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
2996
2997 reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
2998 dft = reg & DWC3_GFLADJ_30MHZ_MASK;
2999 - if (!dev_WARN_ONCE(dwc->dev, dft == dwc->fladj,
3000 - "request value same as default, ignoring\n")) {
3001 + if (dft != dwc->fladj) {
3002 reg &= ~DWC3_GFLADJ_30MHZ_MASK;
3003 reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
3004 dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
3005 diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
3006 index 9fa168af847b..854c4ec0af2c 100644
3007 --- a/drivers/usb/gadget/composite.c
3008 +++ b/drivers/usb/gadget/composite.c
3009 @@ -2179,14 +2179,18 @@ void composite_dev_cleanup(struct usb_composite_dev *cdev)
3010 usb_ep_dequeue(cdev->gadget->ep0, cdev->os_desc_req);
3011
3012 kfree(cdev->os_desc_req->buf);
3013 + cdev->os_desc_req->buf = NULL;
3014 usb_ep_free_request(cdev->gadget->ep0, cdev->os_desc_req);
3015 + cdev->os_desc_req = NULL;
3016 }
3017 if (cdev->req) {
3018 if (cdev->setup_pending)
3019 usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
3020
3021 kfree(cdev->req->buf);
3022 + cdev->req->buf = NULL;
3023 usb_ep_free_request(cdev->gadget->ep0, cdev->req);
3024 + cdev->req = NULL;
3025 }
3026 cdev->next_string_id = 0;
3027 device_remove_file(&cdev->gadget->dev, &dev_attr_suspended);
3028 diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
3029 index a5ca409dc97e..b5315a47f0b9 100644
3030 --- a/drivers/usb/gadget/configfs.c
3031 +++ b/drivers/usb/gadget/configfs.c
3032 @@ -60,6 +60,8 @@ struct gadget_info {
3033 bool use_os_desc;
3034 char b_vendor_code;
3035 char qw_sign[OS_STRING_QW_SIGN_LEN];
3036 + spinlock_t spinlock;
3037 + bool unbind;
3038 };
3039
3040 static inline struct gadget_info *to_gadget_info(struct config_item *item)
3041 @@ -1241,6 +1243,7 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
3042 int ret;
3043
3044 /* the gi->lock is hold by the caller */
3045 + gi->unbind = 0;
3046 cdev->gadget = gadget;
3047 set_gadget_data(gadget, cdev);
3048 ret = composite_dev_prepare(composite, cdev);
3049 @@ -1373,31 +1376,128 @@ static void configfs_composite_unbind(struct usb_gadget *gadget)
3050 {
3051 struct usb_composite_dev *cdev;
3052 struct gadget_info *gi;
3053 + unsigned long flags;
3054
3055 /* the gi->lock is hold by the caller */
3056
3057 cdev = get_gadget_data(gadget);
3058 gi = container_of(cdev, struct gadget_info, cdev);
3059 + spin_lock_irqsave(&gi->spinlock, flags);
3060 + gi->unbind = 1;
3061 + spin_unlock_irqrestore(&gi->spinlock, flags);
3062
3063 kfree(otg_desc[0]);
3064 otg_desc[0] = NULL;
3065 purge_configs_funcs(gi);
3066 composite_dev_cleanup(cdev);
3067 usb_ep_autoconfig_reset(cdev->gadget);
3068 + spin_lock_irqsave(&gi->spinlock, flags);
3069 cdev->gadget = NULL;
3070 set_gadget_data(gadget, NULL);
3071 + spin_unlock_irqrestore(&gi->spinlock, flags);
3072 +}
3073 +
3074 +static int configfs_composite_setup(struct usb_gadget *gadget,
3075 + const struct usb_ctrlrequest *ctrl)
3076 +{
3077 + struct usb_composite_dev *cdev;
3078 + struct gadget_info *gi;
3079 + unsigned long flags;
3080 + int ret;
3081 +
3082 + cdev = get_gadget_data(gadget);
3083 + if (!cdev)
3084 + return 0;
3085 +
3086 + gi = container_of(cdev, struct gadget_info, cdev);
3087 + spin_lock_irqsave(&gi->spinlock, flags);
3088 + cdev = get_gadget_data(gadget);
3089 + if (!cdev || gi->unbind) {
3090 + spin_unlock_irqrestore(&gi->spinlock, flags);
3091 + return 0;
3092 + }
3093 +
3094 + ret = composite_setup(gadget, ctrl);
3095 + spin_unlock_irqrestore(&gi->spinlock, flags);
3096 + return ret;
3097 +}
3098 +
3099 +static void configfs_composite_disconnect(struct usb_gadget *gadget)
3100 +{
3101 + struct usb_composite_dev *cdev;
3102 + struct gadget_info *gi;
3103 + unsigned long flags;
3104 +
3105 + cdev = get_gadget_data(gadget);
3106 + if (!cdev)
3107 + return;
3108 +
3109 + gi = container_of(cdev, struct gadget_info, cdev);
3110 + spin_lock_irqsave(&gi->spinlock, flags);
3111 + cdev = get_gadget_data(gadget);
3112 + if (!cdev || gi->unbind) {
3113 + spin_unlock_irqrestore(&gi->spinlock, flags);
3114 + return;
3115 + }
3116 +
3117 + composite_disconnect(gadget);
3118 + spin_unlock_irqrestore(&gi->spinlock, flags);
3119 +}
3120 +
3121 +static void configfs_composite_suspend(struct usb_gadget *gadget)
3122 +{
3123 + struct usb_composite_dev *cdev;
3124 + struct gadget_info *gi;
3125 + unsigned long flags;
3126 +
3127 + cdev = get_gadget_data(gadget);
3128 + if (!cdev)
3129 + return;
3130 +
3131 + gi = container_of(cdev, struct gadget_info, cdev);
3132 + spin_lock_irqsave(&gi->spinlock, flags);
3133 + cdev = get_gadget_data(gadget);
3134 + if (!cdev || gi->unbind) {
3135 + spin_unlock_irqrestore(&gi->spinlock, flags);
3136 + return;
3137 + }
3138 +
3139 + composite_suspend(gadget);
3140 + spin_unlock_irqrestore(&gi->spinlock, flags);
3141 +}
3142 +
3143 +static void configfs_composite_resume(struct usb_gadget *gadget)
3144 +{
3145 + struct usb_composite_dev *cdev;
3146 + struct gadget_info *gi;
3147 + unsigned long flags;
3148 +
3149 + cdev = get_gadget_data(gadget);
3150 + if (!cdev)
3151 + return;
3152 +
3153 + gi = container_of(cdev, struct gadget_info, cdev);
3154 + spin_lock_irqsave(&gi->spinlock, flags);
3155 + cdev = get_gadget_data(gadget);
3156 + if (!cdev || gi->unbind) {
3157 + spin_unlock_irqrestore(&gi->spinlock, flags);
3158 + return;
3159 + }
3160 +
3161 + composite_resume(gadget);
3162 + spin_unlock_irqrestore(&gi->spinlock, flags);
3163 }
3164
3165 static const struct usb_gadget_driver configfs_driver_template = {
3166 .bind = configfs_composite_bind,
3167 .unbind = configfs_composite_unbind,
3168
3169 - .setup = composite_setup,
3170 - .reset = composite_disconnect,
3171 - .disconnect = composite_disconnect,
3172 + .setup = configfs_composite_setup,
3173 + .reset = configfs_composite_disconnect,
3174 + .disconnect = configfs_composite_disconnect,
3175
3176 - .suspend = composite_suspend,
3177 - .resume = composite_resume,
3178 + .suspend = configfs_composite_suspend,
3179 + .resume = configfs_composite_resume,
3180
3181 .max_speed = USB_SPEED_SUPER,
3182 .driver = {
3183 diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
3184 index 9705bcdbc577..57dd3bad9539 100644
3185 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c
3186 +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
3187 @@ -403,9 +403,11 @@ static void submit_request(struct usba_ep *ep, struct usba_request *req)
3188 next_fifo_transaction(ep, req);
3189 if (req->last_transaction) {
3190 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
3191 - usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
3192 + if (ep_is_control(ep))
3193 + usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
3194 } else {
3195 - usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
3196 + if (ep_is_control(ep))
3197 + usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
3198 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
3199 }
3200 }
3201 diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
3202 index 8991a4070792..bd98557caa28 100644
3203 --- a/drivers/usb/gadget/udc/fsl_udc_core.c
3204 +++ b/drivers/usb/gadget/udc/fsl_udc_core.c
3205 @@ -2570,7 +2570,7 @@ static int fsl_udc_remove(struct platform_device *pdev)
3206 dma_pool_destroy(udc_controller->td_pool);
3207 free_irq(udc_controller->irq, udc_controller);
3208 iounmap(dr_regs);
3209 - if (pdata->operating_mode == FSL_USB2_DR_DEVICE)
3210 + if (res && (pdata->operating_mode == FSL_USB2_DR_DEVICE))
3211 release_mem_region(res->start, resource_size(res));
3212
3213 /* free udc --wait for the release() finished */
3214 diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
3215 index 777a4058c407..d47176f9c310 100644
3216 --- a/drivers/usb/usbip/stub_rx.c
3217 +++ b/drivers/usb/usbip/stub_rx.c
3218 @@ -353,14 +353,6 @@ static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
3219
3220 epd = &ep->desc;
3221
3222 - /* validate transfer_buffer_length */
3223 - if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) {
3224 - dev_err(&sdev->udev->dev,
3225 - "CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n",
3226 - pdu->u.cmd_submit.transfer_buffer_length);
3227 - return -1;
3228 - }
3229 -
3230 if (usb_endpoint_xfer_control(epd)) {
3231 if (dir == USBIP_DIR_OUT)
3232 return usb_sndctrlpipe(udev, epnum);
3233 @@ -487,8 +479,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
3234 }
3235
3236 /* allocate urb transfer buffer, if needed */
3237 - if (pdu->u.cmd_submit.transfer_buffer_length > 0 &&
3238 - pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) {
3239 + if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
3240 priv->urb->transfer_buffer =
3241 kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
3242 GFP_KERNEL);
3243 diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
3244 index 9936a2f199b1..8bda6455dfcb 100644
3245 --- a/drivers/usb/usbip/vhci_hcd.c
3246 +++ b/drivers/usb/usbip/vhci_hcd.c
3247 @@ -318,6 +318,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
3248 default:
3249 break;
3250 }
3251 + break;
3252 default:
3253 usbip_dbg_vhci_rh(" ClearPortFeature: default %x\n",
3254 wValue);
3255 @@ -465,13 +466,14 @@ static void vhci_tx_urb(struct urb *urb)
3256 {
3257 struct vhci_device *vdev = get_vdev(urb->dev);
3258 struct vhci_priv *priv;
3259 - struct vhci_hcd *vhci = vdev_to_vhci(vdev);
3260 + struct vhci_hcd *vhci;
3261 unsigned long flags;
3262
3263 if (!vdev) {
3264 pr_err("could not get virtual device");
3265 return;
3266 }
3267 + vhci = vdev_to_vhci(vdev);
3268
3269 priv = kzalloc(sizeof(struct vhci_priv), GFP_ATOMIC);
3270 if (!priv) {
3271 @@ -512,8 +514,10 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
3272 }
3273 vdev = &vhci->vdev[portnum-1];
3274
3275 - /* patch to usb_sg_init() is in 2.5.60 */
3276 - BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length);
3277 + if (!urb->transfer_buffer && urb->transfer_buffer_length) {
3278 + dev_dbg(dev, "Null URB transfer buffer\n");
3279 + return -EINVAL;
3280 + }
3281
3282 spin_lock_irqsave(&vhci->lock, flags);
3283
3284 diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
3285 index 82df349b84f7..f5d9835264aa 100644
3286 --- a/fs/ceph/caps.c
3287 +++ b/fs/ceph/caps.c
3288 @@ -933,6 +933,11 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
3289
3290 dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
3291
3292 + /* remove from inode's cap rbtree, and clear auth cap */
3293 + rb_erase(&cap->ci_node, &ci->i_caps);
3294 + if (ci->i_auth_cap == cap)
3295 + ci->i_auth_cap = NULL;
3296 +
3297 /* remove from session list */
3298 spin_lock(&session->s_cap_lock);
3299 if (session->s_cap_iterator == cap) {
3300 @@ -968,11 +973,6 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
3301
3302 spin_unlock(&session->s_cap_lock);
3303
3304 - /* remove from inode list */
3305 - rb_erase(&cap->ci_node, &ci->i_caps);
3306 - if (ci->i_auth_cap == cap)
3307 - ci->i_auth_cap = NULL;
3308 -
3309 if (removed)
3310 ceph_put_cap(mdsc, cap);
3311
3312 diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
3313 index ccc31fa6f1a7..16eb59adf5aa 100644
3314 --- a/fs/configfs/configfs_internal.h
3315 +++ b/fs/configfs/configfs_internal.h
3316 @@ -34,6 +34,15 @@
3317 #include <linux/list.h>
3318 #include <linux/spinlock.h>
3319
3320 +struct configfs_fragment {
3321 + atomic_t frag_count;
3322 + struct rw_semaphore frag_sem;
3323 + bool frag_dead;
3324 +};
3325 +
3326 +void put_fragment(struct configfs_fragment *);
3327 +struct configfs_fragment *get_fragment(struct configfs_fragment *);
3328 +
3329 struct configfs_dirent {
3330 atomic_t s_count;
3331 int s_dependent_count;
3332 @@ -48,6 +57,7 @@ struct configfs_dirent {
3333 #ifdef CONFIG_LOCKDEP
3334 int s_depth;
3335 #endif
3336 + struct configfs_fragment *s_frag;
3337 };
3338
3339 #define CONFIGFS_ROOT 0x0001
3340 @@ -75,8 +85,8 @@ extern int configfs_create(struct dentry *, umode_t mode, void (*init)(struct in
3341 extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
3342 extern int configfs_create_bin_file(struct config_item *,
3343 const struct configfs_bin_attribute *);
3344 -extern int configfs_make_dirent(struct configfs_dirent *,
3345 - struct dentry *, void *, umode_t, int);
3346 +extern int configfs_make_dirent(struct configfs_dirent *, struct dentry *,
3347 + void *, umode_t, int, struct configfs_fragment *);
3348 extern int configfs_dirent_is_ready(struct configfs_dirent *);
3349
3350 extern void configfs_hash_and_remove(struct dentry * dir, const char * name);
3351 @@ -151,6 +161,7 @@ static inline void release_configfs_dirent(struct configfs_dirent * sd)
3352 {
3353 if (!(sd->s_type & CONFIGFS_ROOT)) {
3354 kfree(sd->s_iattr);
3355 + put_fragment(sd->s_frag);
3356 kmem_cache_free(configfs_dir_cachep, sd);
3357 }
3358 }
3359 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
3360 index a1985a9ad2d6..c2ef617d2f97 100644
3361 --- a/fs/configfs/dir.c
3362 +++ b/fs/configfs/dir.c
3363 @@ -164,11 +164,38 @@ configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd)
3364
3365 #endif /* CONFIG_LOCKDEP */
3366
3367 +static struct configfs_fragment *new_fragment(void)
3368 +{
3369 + struct configfs_fragment *p;
3370 +
3371 + p = kmalloc(sizeof(struct configfs_fragment), GFP_KERNEL);
3372 + if (p) {
3373 + atomic_set(&p->frag_count, 1);
3374 + init_rwsem(&p->frag_sem);
3375 + p->frag_dead = false;
3376 + }
3377 + return p;
3378 +}
3379 +
3380 +void put_fragment(struct configfs_fragment *frag)
3381 +{
3382 + if (frag && atomic_dec_and_test(&frag->frag_count))
3383 + kfree(frag);
3384 +}
3385 +
3386 +struct configfs_fragment *get_fragment(struct configfs_fragment *frag)
3387 +{
3388 + if (likely(frag))
3389 + atomic_inc(&frag->frag_count);
3390 + return frag;
3391 +}
3392 +
3393 /*
3394 * Allocates a new configfs_dirent and links it to the parent configfs_dirent
3395 */
3396 static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *parent_sd,
3397 - void *element, int type)
3398 + void *element, int type,
3399 + struct configfs_fragment *frag)
3400 {
3401 struct configfs_dirent * sd;
3402
3403 @@ -188,6 +215,7 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *paren
3404 kmem_cache_free(configfs_dir_cachep, sd);
3405 return ERR_PTR(-ENOENT);
3406 }
3407 + sd->s_frag = get_fragment(frag);
3408 list_add(&sd->s_sibling, &parent_sd->s_children);
3409 spin_unlock(&configfs_dirent_lock);
3410
3411 @@ -222,11 +250,11 @@ static int configfs_dirent_exists(struct configfs_dirent *parent_sd,
3412
3413 int configfs_make_dirent(struct configfs_dirent * parent_sd,
3414 struct dentry * dentry, void * element,
3415 - umode_t mode, int type)
3416 + umode_t mode, int type, struct configfs_fragment *frag)
3417 {
3418 struct configfs_dirent * sd;
3419
3420 - sd = configfs_new_dirent(parent_sd, element, type);
3421 + sd = configfs_new_dirent(parent_sd, element, type, frag);
3422 if (IS_ERR(sd))
3423 return PTR_ERR(sd);
3424
3425 @@ -273,7 +301,8 @@ static void init_symlink(struct inode * inode)
3426 * until it is validated by configfs_dir_set_ready()
3427 */
3428
3429 -static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
3430 +static int configfs_create_dir(struct config_item *item, struct dentry *dentry,
3431 + struct configfs_fragment *frag)
3432 {
3433 int error;
3434 umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
3435 @@ -286,7 +315,8 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
3436 return error;
3437
3438 error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
3439 - CONFIGFS_DIR | CONFIGFS_USET_CREATING);
3440 + CONFIGFS_DIR | CONFIGFS_USET_CREATING,
3441 + frag);
3442 if (unlikely(error))
3443 return error;
3444
3445 @@ -351,9 +381,10 @@ int configfs_create_link(struct configfs_symlink *sl,
3446 {
3447 int err = 0;
3448 umode_t mode = S_IFLNK | S_IRWXUGO;
3449 + struct configfs_dirent *p = parent->d_fsdata;
3450
3451 - err = configfs_make_dirent(parent->d_fsdata, dentry, sl, mode,
3452 - CONFIGFS_ITEM_LINK);
3453 + err = configfs_make_dirent(p, dentry, sl, mode,
3454 + CONFIGFS_ITEM_LINK, p->s_frag);
3455 if (!err) {
3456 err = configfs_create(dentry, mode, init_symlink);
3457 if (err) {
3458 @@ -612,7 +643,8 @@ static int populate_attrs(struct config_item *item)
3459
3460 static int configfs_attach_group(struct config_item *parent_item,
3461 struct config_item *item,
3462 - struct dentry *dentry);
3463 + struct dentry *dentry,
3464 + struct configfs_fragment *frag);
3465 static void configfs_detach_group(struct config_item *item);
3466
3467 static void detach_groups(struct config_group *group)
3468 @@ -660,7 +692,8 @@ static void detach_groups(struct config_group *group)
3469 * try using vfs_mkdir. Just a thought.
3470 */
3471 static int create_default_group(struct config_group *parent_group,
3472 - struct config_group *group)
3473 + struct config_group *group,
3474 + struct configfs_fragment *frag)
3475 {
3476 int ret;
3477 struct configfs_dirent *sd;
3478 @@ -676,7 +709,7 @@ static int create_default_group(struct config_group *parent_group,
3479 d_add(child, NULL);
3480
3481 ret = configfs_attach_group(&parent_group->cg_item,
3482 - &group->cg_item, child);
3483 + &group->cg_item, child, frag);
3484 if (!ret) {
3485 sd = child->d_fsdata;
3486 sd->s_type |= CONFIGFS_USET_DEFAULT;
3487 @@ -690,13 +723,14 @@ static int create_default_group(struct config_group *parent_group,
3488 return ret;
3489 }
3490
3491 -static int populate_groups(struct config_group *group)
3492 +static int populate_groups(struct config_group *group,
3493 + struct configfs_fragment *frag)
3494 {
3495 struct config_group *new_group;
3496 int ret = 0;
3497
3498 list_for_each_entry(new_group, &group->default_groups, group_entry) {
3499 - ret = create_default_group(group, new_group);
3500 + ret = create_default_group(group, new_group, frag);
3501 if (ret) {
3502 detach_groups(group);
3503 break;
3504 @@ -810,11 +844,12 @@ static void link_group(struct config_group *parent_group, struct config_group *g
3505 */
3506 static int configfs_attach_item(struct config_item *parent_item,
3507 struct config_item *item,
3508 - struct dentry *dentry)
3509 + struct dentry *dentry,
3510 + struct configfs_fragment *frag)
3511 {
3512 int ret;
3513
3514 - ret = configfs_create_dir(item, dentry);
3515 + ret = configfs_create_dir(item, dentry, frag);
3516 if (!ret) {
3517 ret = populate_attrs(item);
3518 if (ret) {
3519 @@ -844,12 +879,13 @@ static void configfs_detach_item(struct config_item *item)
3520
3521 static int configfs_attach_group(struct config_item *parent_item,
3522 struct config_item *item,
3523 - struct dentry *dentry)
3524 + struct dentry *dentry,
3525 + struct configfs_fragment *frag)
3526 {
3527 int ret;
3528 struct configfs_dirent *sd;
3529
3530 - ret = configfs_attach_item(parent_item, item, dentry);
3531 + ret = configfs_attach_item(parent_item, item, dentry, frag);
3532 if (!ret) {
3533 sd = dentry->d_fsdata;
3534 sd->s_type |= CONFIGFS_USET_DIR;
3535 @@ -865,7 +901,7 @@ static int configfs_attach_group(struct config_item *parent_item,
3536 */
3537 inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
3538 configfs_adjust_dir_dirent_depth_before_populate(sd);
3539 - ret = populate_groups(to_config_group(item));
3540 + ret = populate_groups(to_config_group(item), frag);
3541 if (ret) {
3542 configfs_detach_item(item);
3543 d_inode(dentry)->i_flags |= S_DEAD;
3544 @@ -1260,6 +1296,7 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
3545 struct configfs_dirent *sd;
3546 struct config_item_type *type;
3547 struct module *subsys_owner = NULL, *new_item_owner = NULL;
3548 + struct configfs_fragment *frag;
3549 char *name;
3550
3551 sd = dentry->d_parent->d_fsdata;
3552 @@ -1278,6 +1315,12 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
3553 goto out;
3554 }
3555
3556 + frag = new_fragment();
3557 + if (!frag) {
3558 + ret = -ENOMEM;
3559 + goto out;
3560 + }
3561 +
3562 /* Get a working ref for the duration of this function */
3563 parent_item = configfs_get_config_item(dentry->d_parent);
3564 type = parent_item->ci_type;
3565 @@ -1380,9 +1423,9 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
3566 spin_unlock(&configfs_dirent_lock);
3567
3568 if (group)
3569 - ret = configfs_attach_group(parent_item, item, dentry);
3570 + ret = configfs_attach_group(parent_item, item, dentry, frag);
3571 else
3572 - ret = configfs_attach_item(parent_item, item, dentry);
3573 + ret = configfs_attach_item(parent_item, item, dentry, frag);
3574
3575 spin_lock(&configfs_dirent_lock);
3576 sd->s_type &= ~CONFIGFS_USET_IN_MKDIR;
3577 @@ -1419,6 +1462,7 @@ out_put:
3578 * reference.
3579 */
3580 config_item_put(parent_item);
3581 + put_fragment(frag);
3582
3583 out:
3584 return ret;
3585 @@ -1430,6 +1474,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
3586 struct config_item *item;
3587 struct configfs_subsystem *subsys;
3588 struct configfs_dirent *sd;
3589 + struct configfs_fragment *frag;
3590 struct module *subsys_owner = NULL, *dead_item_owner = NULL;
3591 int ret;
3592
3593 @@ -1487,6 +1532,16 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
3594 }
3595 } while (ret == -EAGAIN);
3596
3597 + frag = sd->s_frag;
3598 + if (down_write_killable(&frag->frag_sem)) {
3599 + spin_lock(&configfs_dirent_lock);
3600 + configfs_detach_rollback(dentry);
3601 + spin_unlock(&configfs_dirent_lock);
3602 + return -EINTR;
3603 + }
3604 + frag->frag_dead = true;
3605 + up_write(&frag->frag_sem);
3606 +
3607 /* Get a working ref for the duration of this function */
3608 item = configfs_get_config_item(dentry);
3609
3610 @@ -1587,7 +1642,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file)
3611 */
3612 err = -ENOENT;
3613 if (configfs_dirent_is_ready(parent_sd)) {
3614 - file->private_data = configfs_new_dirent(parent_sd, NULL, 0);
3615 + file->private_data = configfs_new_dirent(parent_sd, NULL, 0, NULL);
3616 if (IS_ERR(file->private_data))
3617 err = PTR_ERR(file->private_data);
3618 else
3619 @@ -1743,8 +1798,13 @@ int configfs_register_group(struct config_group *parent_group,
3620 {
3621 struct configfs_subsystem *subsys = parent_group->cg_subsys;
3622 struct dentry *parent;
3623 + struct configfs_fragment *frag;
3624 int ret;
3625
3626 + frag = new_fragment();
3627 + if (!frag)
3628 + return -ENOMEM;
3629 +
3630 mutex_lock(&subsys->su_mutex);
3631 link_group(parent_group, group);
3632 mutex_unlock(&subsys->su_mutex);
3633 @@ -1752,7 +1812,7 @@ int configfs_register_group(struct config_group *parent_group,
3634 parent = parent_group->cg_item.ci_dentry;
3635
3636 inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
3637 - ret = create_default_group(parent_group, group);
3638 + ret = create_default_group(parent_group, group, frag);
3639 if (ret)
3640 goto err_out;
3641
3642 @@ -1760,12 +1820,14 @@ int configfs_register_group(struct config_group *parent_group,
3643 configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
3644 spin_unlock(&configfs_dirent_lock);
3645 inode_unlock(d_inode(parent));
3646 + put_fragment(frag);
3647 return 0;
3648 err_out:
3649 inode_unlock(d_inode(parent));
3650 mutex_lock(&subsys->su_mutex);
3651 unlink_group(group);
3652 mutex_unlock(&subsys->su_mutex);
3653 + put_fragment(frag);
3654 return ret;
3655 }
3656 EXPORT_SYMBOL(configfs_register_group);
3657 @@ -1781,16 +1843,12 @@ void configfs_unregister_group(struct config_group *group)
3658 struct configfs_subsystem *subsys = group->cg_subsys;
3659 struct dentry *dentry = group->cg_item.ci_dentry;
3660 struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
3661 + struct configfs_dirent *sd = dentry->d_fsdata;
3662 + struct configfs_fragment *frag = sd->s_frag;
3663
3664 - mutex_lock(&subsys->su_mutex);
3665 - if (!group->cg_item.ci_parent->ci_group) {
3666 - /*
3667 - * The parent has already been unlinked and detached
3668 - * due to a rmdir.
3669 - */
3670 - goto unlink_group;
3671 - }
3672 - mutex_unlock(&subsys->su_mutex);
3673 + down_write(&frag->frag_sem);
3674 + frag->frag_dead = true;
3675 + up_write(&frag->frag_sem);
3676
3677 inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
3678 spin_lock(&configfs_dirent_lock);
3679 @@ -1806,7 +1864,6 @@ void configfs_unregister_group(struct config_group *group)
3680 dput(dentry);
3681
3682 mutex_lock(&subsys->su_mutex);
3683 -unlink_group:
3684 unlink_group(group);
3685 mutex_unlock(&subsys->su_mutex);
3686 }
3687 @@ -1863,10 +1920,17 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
3688 struct dentry *dentry;
3689 struct dentry *root;
3690 struct configfs_dirent *sd;
3691 + struct configfs_fragment *frag;
3692 +
3693 + frag = new_fragment();
3694 + if (!frag)
3695 + return -ENOMEM;
3696
3697 root = configfs_pin_fs();
3698 - if (IS_ERR(root))
3699 + if (IS_ERR(root)) {
3700 + put_fragment(frag);
3701 return PTR_ERR(root);
3702 + }
3703
3704 if (!group->cg_item.ci_name)
3705 group->cg_item.ci_name = group->cg_item.ci_namebuf;
3706 @@ -1882,7 +1946,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
3707 d_add(dentry, NULL);
3708
3709 err = configfs_attach_group(sd->s_element, &group->cg_item,
3710 - dentry);
3711 + dentry, frag);
3712 if (err) {
3713 BUG_ON(d_inode(dentry));
3714 d_drop(dentry);
3715 @@ -1900,6 +1964,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
3716 unlink_group(group);
3717 configfs_release_fs();
3718 }
3719 + put_fragment(frag);
3720
3721 return err;
3722 }
3723 @@ -1909,12 +1974,18 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
3724 struct config_group *group = &subsys->su_group;
3725 struct dentry *dentry = group->cg_item.ci_dentry;
3726 struct dentry *root = dentry->d_sb->s_root;
3727 + struct configfs_dirent *sd = dentry->d_fsdata;
3728 + struct configfs_fragment *frag = sd->s_frag;
3729
3730 if (dentry->d_parent != root) {
3731 pr_err("Tried to unregister non-subsystem!\n");
3732 return;
3733 }
3734
3735 + down_write(&frag->frag_sem);
3736 + frag->frag_dead = true;
3737 + up_write(&frag->frag_sem);
3738 +
3739 inode_lock_nested(d_inode(root),
3740 I_MUTEX_PARENT);
3741 inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
3742 diff --git a/fs/configfs/file.c b/fs/configfs/file.c
3743 index 2c6312db8516..7285440bc62e 100644
3744 --- a/fs/configfs/file.c
3745 +++ b/fs/configfs/file.c
3746 @@ -53,40 +53,44 @@ struct configfs_buffer {
3747 bool write_in_progress;
3748 char *bin_buffer;
3749 int bin_buffer_size;
3750 + int cb_max_size;
3751 + struct config_item *item;
3752 + struct module *owner;
3753 + union {
3754 + struct configfs_attribute *attr;
3755 + struct configfs_bin_attribute *bin_attr;
3756 + };
3757 };
3758
3759 +static inline struct configfs_fragment *to_frag(struct file *file)
3760 +{
3761 + struct configfs_dirent *sd = file->f_path.dentry->d_fsdata;
3762
3763 -/**
3764 - * fill_read_buffer - allocate and fill buffer from item.
3765 - * @dentry: dentry pointer.
3766 - * @buffer: data buffer for file.
3767 - *
3768 - * Allocate @buffer->page, if it hasn't been already, then call the
3769 - * config_item's show() method to fill the buffer with this attribute's
3770 - * data.
3771 - * This is called only once, on the file's first read.
3772 - */
3773 -static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buffer)
3774 + return sd->s_frag;
3775 +}
3776 +
3777 +static int fill_read_buffer(struct file *file, struct configfs_buffer *buffer)
3778 {
3779 - struct configfs_attribute * attr = to_attr(dentry);
3780 - struct config_item * item = to_item(dentry->d_parent);
3781 - int ret = 0;
3782 - ssize_t count;
3783 + struct configfs_fragment *frag = to_frag(file);
3784 + ssize_t count = -ENOENT;
3785
3786 if (!buffer->page)
3787 buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
3788 if (!buffer->page)
3789 return -ENOMEM;
3790
3791 - count = attr->show(item, buffer->page);
3792 -
3793 - BUG_ON(count > (ssize_t)SIMPLE_ATTR_SIZE);
3794 - if (count >= 0) {
3795 - buffer->needs_read_fill = 0;
3796 - buffer->count = count;
3797 - } else
3798 - ret = count;
3799 - return ret;
3800 + down_read(&frag->frag_sem);
3801 + if (!frag->frag_dead)
3802 + count = buffer->attr->show(buffer->item, buffer->page);
3803 + up_read(&frag->frag_sem);
3804 +
3805 + if (count < 0)
3806 + return count;
3807 + if (WARN_ON_ONCE(count > (ssize_t)SIMPLE_ATTR_SIZE))
3808 + return -EIO;
3809 + buffer->needs_read_fill = 0;
3810 + buffer->count = count;
3811 + return 0;
3812 }
3813
3814 /**
3815 @@ -111,12 +115,13 @@ static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buf
3816 static ssize_t
3817 configfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3818 {
3819 - struct configfs_buffer * buffer = file->private_data;
3820 + struct configfs_buffer *buffer = file->private_data;
3821 ssize_t retval = 0;
3822
3823 mutex_lock(&buffer->mutex);
3824 if (buffer->needs_read_fill) {
3825 - if ((retval = fill_read_buffer(file->f_path.dentry,buffer)))
3826 + retval = fill_read_buffer(file, buffer);
3827 + if (retval)
3828 goto out;
3829 }
3830 pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
3831 @@ -152,10 +157,8 @@ static ssize_t
3832 configfs_read_bin_file(struct file *file, char __user *buf,
3833 size_t count, loff_t *ppos)
3834 {
3835 + struct configfs_fragment *frag = to_frag(file);
3836 struct configfs_buffer *buffer = file->private_data;
3837 - struct dentry *dentry = file->f_path.dentry;
3838 - struct config_item *item = to_item(dentry->d_parent);
3839 - struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
3840 ssize_t retval = 0;
3841 ssize_t len = min_t(size_t, count, PAGE_SIZE);
3842
3843 @@ -166,18 +169,23 @@ configfs_read_bin_file(struct file *file, char __user *buf,
3844 retval = -ETXTBSY;
3845 goto out;
3846 }
3847 - buffer->read_in_progress = 1;
3848 + buffer->read_in_progress = true;
3849
3850 if (buffer->needs_read_fill) {
3851 /* perform first read with buf == NULL to get extent */
3852 - len = bin_attr->read(item, NULL, 0);
3853 + down_read(&frag->frag_sem);
3854 + if (!frag->frag_dead)
3855 + len = buffer->bin_attr->read(buffer->item, NULL, 0);
3856 + else
3857 + len = -ENOENT;
3858 + up_read(&frag->frag_sem);
3859 if (len <= 0) {
3860 retval = len;
3861 goto out;
3862 }
3863
3864 /* do not exceed the maximum value */
3865 - if (bin_attr->cb_max_size && len > bin_attr->cb_max_size) {
3866 + if (buffer->cb_max_size && len > buffer->cb_max_size) {
3867 retval = -EFBIG;
3868 goto out;
3869 }
3870 @@ -190,7 +198,13 @@ configfs_read_bin_file(struct file *file, char __user *buf,
3871 buffer->bin_buffer_size = len;
3872
3873 /* perform second read to fill buffer */
3874 - len = bin_attr->read(item, buffer->bin_buffer, len);
3875 + down_read(&frag->frag_sem);
3876 + if (!frag->frag_dead)
3877 + len = buffer->bin_attr->read(buffer->item,
3878 + buffer->bin_buffer, len);
3879 + else
3880 + len = -ENOENT;
3881 + up_read(&frag->frag_sem);
3882 if (len < 0) {
3883 retval = len;
3884 vfree(buffer->bin_buffer);
3885 @@ -240,25 +254,17 @@ fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size
3886 return error ? -EFAULT : count;
3887 }
3888
3889 -
3890 -/**
3891 - * flush_write_buffer - push buffer to config_item.
3892 - * @dentry: dentry to the attribute
3893 - * @buffer: data buffer for file.
3894 - * @count: number of bytes
3895 - *
3896 - * Get the correct pointers for the config_item and the attribute we're
3897 - * dealing with, then call the store() method for the attribute,
3898 - * passing the buffer that we acquired in fill_write_buffer().
3899 - */
3900 -
3901 static int
3902 -flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size_t count)
3903 +flush_write_buffer(struct file *file, struct configfs_buffer *buffer, size_t count)
3904 {
3905 - struct configfs_attribute * attr = to_attr(dentry);
3906 - struct config_item * item = to_item(dentry->d_parent);
3907 -
3908 - return attr->store(item, buffer->page, count);
3909 + struct configfs_fragment *frag = to_frag(file);
3910 + int res = -ENOENT;
3911 +
3912 + down_read(&frag->frag_sem);
3913 + if (!frag->frag_dead)
3914 + res = buffer->attr->store(buffer->item, buffer->page, count);
3915 + up_read(&frag->frag_sem);
3916 + return res;
3917 }
3918
3919
3920 @@ -282,13 +288,13 @@ flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size
3921 static ssize_t
3922 configfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
3923 {
3924 - struct configfs_buffer * buffer = file->private_data;
3925 + struct configfs_buffer *buffer = file->private_data;
3926 ssize_t len;
3927
3928 mutex_lock(&buffer->mutex);
3929 len = fill_write_buffer(buffer, buf, count);
3930 if (len > 0)
3931 - len = flush_write_buffer(file->f_path.dentry, buffer, len);
3932 + len = flush_write_buffer(file, buffer, len);
3933 if (len > 0)
3934 *ppos += len;
3935 mutex_unlock(&buffer->mutex);
3936 @@ -313,8 +319,6 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
3937 size_t count, loff_t *ppos)
3938 {
3939 struct configfs_buffer *buffer = file->private_data;
3940 - struct dentry *dentry = file->f_path.dentry;
3941 - struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
3942 void *tbuf = NULL;
3943 ssize_t len;
3944
3945 @@ -325,13 +329,13 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
3946 len = -ETXTBSY;
3947 goto out;
3948 }
3949 - buffer->write_in_progress = 1;
3950 + buffer->write_in_progress = true;
3951
3952 /* buffer grows? */
3953 if (*ppos + count > buffer->bin_buffer_size) {
3954
3955 - if (bin_attr->cb_max_size &&
3956 - *ppos + count > bin_attr->cb_max_size) {
3957 + if (buffer->cb_max_size &&
3958 + *ppos + count > buffer->cb_max_size) {
3959 len = -EFBIG;
3960 goto out;
3961 }
3962 @@ -363,31 +367,51 @@ out:
3963 return len;
3964 }
3965
3966 -static int check_perm(struct inode * inode, struct file * file, int type)
3967 +static int __configfs_open_file(struct inode *inode, struct file *file, int type)
3968 {
3969 - struct config_item *item = configfs_get_config_item(file->f_path.dentry->d_parent);
3970 - struct configfs_attribute * attr = to_attr(file->f_path.dentry);
3971 - struct configfs_bin_attribute *bin_attr = NULL;
3972 - struct configfs_buffer * buffer;
3973 - struct configfs_item_operations * ops = NULL;
3974 - int error = 0;
3975 + struct dentry *dentry = file->f_path.dentry;
3976 + struct configfs_fragment *frag = to_frag(file);
3977 + struct configfs_attribute *attr;
3978 + struct configfs_buffer *buffer;
3979 + int error;
3980
3981 - if (!item || !attr)
3982 - goto Einval;
3983 + error = -ENOMEM;
3984 + buffer = kzalloc(sizeof(struct configfs_buffer), GFP_KERNEL);
3985 + if (!buffer)
3986 + goto out;
3987
3988 - if (type & CONFIGFS_ITEM_BIN_ATTR)
3989 - bin_attr = to_bin_attr(file->f_path.dentry);
3990 + error = -ENOENT;
3991 + down_read(&frag->frag_sem);
3992 + if (unlikely(frag->frag_dead))
3993 + goto out_free_buffer;
3994
3995 - /* Grab the module reference for this attribute if we have one */
3996 - if (!try_module_get(attr->ca_owner)) {
3997 - error = -ENODEV;
3998 - goto Done;
3999 + error = -EINVAL;
4000 + buffer->item = to_item(dentry->d_parent);
4001 + if (!buffer->item)
4002 + goto out_free_buffer;
4003 +
4004 + attr = to_attr(dentry);
4005 + if (!attr)
4006 + goto out_put_item;
4007 +
4008 + if (type & CONFIGFS_ITEM_BIN_ATTR) {
4009 + buffer->bin_attr = to_bin_attr(dentry);
4010 + buffer->cb_max_size = buffer->bin_attr->cb_max_size;
4011 + } else {
4012 + buffer->attr = attr;
4013 }
4014
4015 - if (item->ci_type)
4016 - ops = item->ci_type->ct_item_ops;
4017 - else
4018 - goto Eaccess;
4019 + buffer->owner = attr->ca_owner;
4020 + /* Grab the module reference for this attribute if we have one */
4021 + error = -ENODEV;
4022 + if (!try_module_get(buffer->owner))
4023 + goto out_put_item;
4024 +
4025 + error = -EACCES;
4026 + if (!buffer->item->ci_type)
4027 + goto out_put_module;
4028 +
4029 + buffer->ops = buffer->item->ci_type->ct_item_ops;
4030
4031 /* File needs write support.
4032 * The inode's perms must say it's ok,
4033 @@ -395,13 +419,11 @@ static int check_perm(struct inode * inode, struct file * file, int type)
4034 */
4035 if (file->f_mode & FMODE_WRITE) {
4036 if (!(inode->i_mode & S_IWUGO))
4037 - goto Eaccess;
4038 -
4039 + goto out_put_module;
4040 if ((type & CONFIGFS_ITEM_ATTR) && !attr->store)
4041 - goto Eaccess;
4042 -
4043 - if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->write)
4044 - goto Eaccess;
4045 + goto out_put_module;
4046 + if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->write)
4047 + goto out_put_module;
4048 }
4049
4050 /* File needs read support.
4051 @@ -410,92 +432,72 @@ static int check_perm(struct inode * inode, struct file * file, int type)
4052 */
4053 if (file->f_mode & FMODE_READ) {
4054 if (!(inode->i_mode & S_IRUGO))
4055 - goto Eaccess;
4056 -
4057 + goto out_put_module;
4058 if ((type & CONFIGFS_ITEM_ATTR) && !attr->show)
4059 - goto Eaccess;
4060 -
4061 - if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->read)
4062 - goto Eaccess;
4063 + goto out_put_module;
4064 + if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->read)
4065 + goto out_put_module;
4066 }
4067
4068 - /* No error? Great, allocate a buffer for the file, and store it
4069 - * it in file->private_data for easy access.
4070 - */
4071 - buffer = kzalloc(sizeof(struct configfs_buffer),GFP_KERNEL);
4072 - if (!buffer) {
4073 - error = -ENOMEM;
4074 - goto Enomem;
4075 - }
4076 mutex_init(&buffer->mutex);
4077 buffer->needs_read_fill = 1;
4078 - buffer->read_in_progress = 0;
4079 - buffer->write_in_progress = 0;
4080 - buffer->ops = ops;
4081 + buffer->read_in_progress = false;
4082 + buffer->write_in_progress = false;
4083 file->private_data = buffer;
4084 - goto Done;
4085 + up_read(&frag->frag_sem);
4086 + return 0;
4087
4088 - Einval:
4089 - error = -EINVAL;
4090 - goto Done;
4091 - Eaccess:
4092 - error = -EACCES;
4093 - Enomem:
4094 - module_put(attr->ca_owner);
4095 - Done:
4096 - if (error && item)
4097 - config_item_put(item);
4098 +out_put_module:
4099 + module_put(buffer->owner);
4100 +out_put_item:
4101 + config_item_put(buffer->item);
4102 +out_free_buffer:
4103 + up_read(&frag->frag_sem);
4104 + kfree(buffer);
4105 +out:
4106 return error;
4107 }
4108
4109 static int configfs_release(struct inode *inode, struct file *filp)
4110 {
4111 - struct config_item * item = to_item(filp->f_path.dentry->d_parent);
4112 - struct configfs_attribute * attr = to_attr(filp->f_path.dentry);
4113 - struct module * owner = attr->ca_owner;
4114 - struct configfs_buffer * buffer = filp->private_data;
4115 -
4116 - if (item)
4117 - config_item_put(item);
4118 - /* After this point, attr should not be accessed. */
4119 - module_put(owner);
4120 -
4121 - if (buffer) {
4122 - if (buffer->page)
4123 - free_page((unsigned long)buffer->page);
4124 - mutex_destroy(&buffer->mutex);
4125 - kfree(buffer);
4126 - }
4127 + struct configfs_buffer *buffer = filp->private_data;
4128 +
4129 + module_put(buffer->owner);
4130 + if (buffer->page)
4131 + free_page((unsigned long)buffer->page);
4132 + mutex_destroy(&buffer->mutex);
4133 + kfree(buffer);
4134 return 0;
4135 }
4136
4137 static int configfs_open_file(struct inode *inode, struct file *filp)
4138 {
4139 - return check_perm(inode, filp, CONFIGFS_ITEM_ATTR);
4140 + return __configfs_open_file(inode, filp, CONFIGFS_ITEM_ATTR);
4141 }
4142
4143 static int configfs_open_bin_file(struct inode *inode, struct file *filp)
4144 {
4145 - return check_perm(inode, filp, CONFIGFS_ITEM_BIN_ATTR);
4146 + return __configfs_open_file(inode, filp, CONFIGFS_ITEM_BIN_ATTR);
4147 }
4148
4149 -static int configfs_release_bin_file(struct inode *inode, struct file *filp)
4150 +static int configfs_release_bin_file(struct inode *inode, struct file *file)
4151 {
4152 - struct configfs_buffer *buffer = filp->private_data;
4153 - struct dentry *dentry = filp->f_path.dentry;
4154 - struct config_item *item = to_item(dentry->d_parent);
4155 - struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
4156 - ssize_t len = 0;
4157 - int ret;
4158 + struct configfs_buffer *buffer = file->private_data;
4159
4160 - buffer->read_in_progress = 0;
4161 + buffer->read_in_progress = false;
4162
4163 if (buffer->write_in_progress) {
4164 - buffer->write_in_progress = 0;
4165 -
4166 - len = bin_attr->write(item, buffer->bin_buffer,
4167 - buffer->bin_buffer_size);
4168 -
4169 + struct configfs_fragment *frag = to_frag(file);
4170 + buffer->write_in_progress = false;
4171 +
4172 + down_read(&frag->frag_sem);
4173 + if (!frag->frag_dead) {
4174 + /* result of ->release() is ignored */
4175 + buffer->bin_attr->write(buffer->item,
4176 + buffer->bin_buffer,
4177 + buffer->bin_buffer_size);
4178 + }
4179 + up_read(&frag->frag_sem);
4180 /* vfree on NULL is safe */
4181 vfree(buffer->bin_buffer);
4182 buffer->bin_buffer = NULL;
4183 @@ -503,10 +505,8 @@ static int configfs_release_bin_file(struct inode *inode, struct file *filp)
4184 buffer->needs_read_fill = 1;
4185 }
4186
4187 - ret = configfs_release(inode, filp);
4188 - if (len < 0)
4189 - return len;
4190 - return ret;
4191 + configfs_release(inode, file);
4192 + return 0;
4193 }
4194
4195
4196 @@ -541,7 +541,7 @@ int configfs_create_file(struct config_item * item, const struct configfs_attrib
4197
4198 inode_lock_nested(d_inode(dir), I_MUTEX_NORMAL);
4199 error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode,
4200 - CONFIGFS_ITEM_ATTR);
4201 + CONFIGFS_ITEM_ATTR, parent_sd->s_frag);
4202 inode_unlock(d_inode(dir));
4203
4204 return error;
4205 @@ -563,7 +563,7 @@ int configfs_create_bin_file(struct config_item *item,
4206
4207 inode_lock_nested(dir->d_inode, I_MUTEX_NORMAL);
4208 error = configfs_make_dirent(parent_sd, NULL, (void *) bin_attr, mode,
4209 - CONFIGFS_ITEM_BIN_ATTR);
4210 + CONFIGFS_ITEM_BIN_ATTR, parent_sd->s_frag);
4211 inode_unlock(dir->d_inode);
4212
4213 return error;
4214 diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
4215 index fea6db1ee065..afd79a1a34b3 100644
4216 --- a/fs/configfs/symlink.c
4217 +++ b/fs/configfs/symlink.c
4218 @@ -157,11 +157,42 @@ int configfs_symlink(struct inode *dir, struct dentry *dentry, const char *symna
4219 !type->ct_item_ops->allow_link)
4220 goto out_put;
4221
4222 + /*
4223 + * This is really sick. What they wanted was a hybrid of
4224 + * link(2) and symlink(2) - they wanted the target resolved
4225 + * at syscall time (as link(2) would've done), be a directory
4226 + * (which link(2) would've refused to do) *AND* be a deep
4227 + * fucking magic, making the target busy from rmdir POV.
4228 + * symlink(2) is nothing of that sort, and the locking it
4229 + * gets matches the normal symlink(2) semantics. Without
4230 + * attempts to resolve the target (which might very well
4231 + * not even exist yet) done prior to locking the parent
4232 + * directory. This perversion, OTOH, needs to resolve
4233 + * the target, which would lead to obvious deadlocks if
4234 + * attempted with any directories locked.
4235 + *
4236 + * Unfortunately, that garbage is userland ABI and we should've
4237 + * said "no" back in 2005. Too late now, so we get to
4238 + * play very ugly games with locking.
4239 + *
4240 + * Try *ANYTHING* of that sort in new code, and you will
4241 + * really regret it. Just ask yourself - what could a BOFH
4242 + * do to me and do I want to find it out first-hand?
4243 + *
4244 + * AV, a thoroughly annoyed bastard.
4245 + */
4246 + inode_unlock(dir);
4247 ret = get_target(symname, &path, &target_item, dentry->d_sb);
4248 + inode_lock(dir);
4249 if (ret)
4250 goto out_put;
4251
4252 - ret = type->ct_item_ops->allow_link(parent_item, target_item);
4253 + if (dentry->d_inode || d_unhashed(dentry))
4254 + ret = -EEXIST;
4255 + else
4256 + ret = inode_permission(dir, MAY_WRITE | MAY_EXEC);
4257 + if (!ret)
4258 + ret = type->ct_item_ops->allow_link(parent_item, target_item);
4259 if (!ret) {
4260 mutex_lock(&configfs_symlink_mutex);
4261 ret = create_link(parent_item, target_item, dentry);
4262 diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
4263 index baaed9369ab4..882e9d6830df 100644
4264 --- a/fs/fs-writeback.c
4265 +++ b/fs/fs-writeback.c
4266 @@ -582,10 +582,13 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
4267 spin_unlock(&inode->i_lock);
4268
4269 /*
4270 - * A dying wb indicates that the memcg-blkcg mapping has changed
4271 - * and a new wb is already serving the memcg. Switch immediately.
4272 + * A dying wb indicates that either the blkcg associated with the
4273 + * memcg changed or the associated memcg is dying. In the first
4274 + * case, a replacement wb should already be available and we should
4275 + * refresh the wb immediately. In the second case, trying to
4276 + * refresh will keep failing.
4277 */
4278 - if (unlikely(wb_dying(wbc->wb)))
4279 + if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
4280 inode_switch_wbs(inode, wbc->wb_id);
4281 }
4282
4283 diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
4284 index dff600ae0d74..46afd7cdcc37 100644
4285 --- a/fs/nfs/delegation.c
4286 +++ b/fs/nfs/delegation.c
4287 @@ -52,6 +52,16 @@ nfs4_is_valid_delegation(const struct nfs_delegation *delegation,
4288 return false;
4289 }
4290
4291 +struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode)
4292 +{
4293 + struct nfs_delegation *delegation;
4294 +
4295 + delegation = rcu_dereference(NFS_I(inode)->delegation);
4296 + if (nfs4_is_valid_delegation(delegation, 0))
4297 + return delegation;
4298 + return NULL;
4299 +}
4300 +
4301 static int
4302 nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
4303 {
4304 diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
4305 index e9d555796873..2c6cb7fb7d5e 100644
4306 --- a/fs/nfs/delegation.h
4307 +++ b/fs/nfs/delegation.h
4308 @@ -62,6 +62,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
4309 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
4310 bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, struct rpc_cred **cred);
4311
4312 +struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode);
4313 void nfs_mark_delegation_referenced(struct nfs_delegation *delegation);
4314 int nfs4_have_delegation(struct inode *inode, fmode_t flags);
4315 int nfs4_check_delegation(struct inode *inode, fmode_t flags);
4316 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
4317 index 8354dfae7038..ca4249ae644f 100644
4318 --- a/fs/nfs/nfs4proc.c
4319 +++ b/fs/nfs/nfs4proc.c
4320 @@ -1368,8 +1368,6 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
4321 return 0;
4322 if ((delegation->type & fmode) != fmode)
4323 return 0;
4324 - if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
4325 - return 0;
4326 switch (claim) {
4327 case NFS4_OPEN_CLAIM_NULL:
4328 case NFS4_OPEN_CLAIM_FH:
4329 @@ -1628,7 +1626,6 @@ static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmo
4330 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
4331 {
4332 struct nfs4_state *state = opendata->state;
4333 - struct nfs_inode *nfsi = NFS_I(state->inode);
4334 struct nfs_delegation *delegation;
4335 int open_mode = opendata->o_arg.open_flags;
4336 fmode_t fmode = opendata->o_arg.fmode;
4337 @@ -1645,7 +1642,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
4338 }
4339 spin_unlock(&state->owner->so_lock);
4340 rcu_read_lock();
4341 - delegation = rcu_dereference(nfsi->delegation);
4342 + delegation = nfs4_get_valid_delegation(state->inode);
4343 if (!can_open_delegated(delegation, fmode, claim)) {
4344 rcu_read_unlock();
4345 break;
4346 @@ -2142,7 +2139,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
4347 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
4348 goto out_no_action;
4349 rcu_read_lock();
4350 - delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
4351 + delegation = nfs4_get_valid_delegation(data->state->inode);
4352 if (can_open_delegated(delegation, data->o_arg.fmode, claim))
4353 goto unlock_no_action;
4354 rcu_read_unlock();
4355 diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
4356 index 9c03895dc479..72bf408f887f 100644
4357 --- a/include/drm/drm_vma_manager.h
4358 +++ b/include/drm/drm_vma_manager.h
4359 @@ -42,6 +42,7 @@ struct drm_vma_offset_node {
4360 rwlock_t vm_lock;
4361 struct drm_mm_node vm_node;
4362 struct rb_root vm_files;
4363 + bool readonly:1;
4364 };
4365
4366 struct drm_vma_offset_manager {
4367 diff --git a/include/linux/mm.h b/include/linux/mm.h
4368 index ade072a6fd24..ca6f213fa4f0 100644
4369 --- a/include/linux/mm.h
4370 +++ b/include/linux/mm.h
4371 @@ -504,11 +504,6 @@ static inline int is_vmalloc_or_module_addr(const void *x)
4372
4373 extern void kvfree(const void *addr);
4374
4375 -static inline atomic_t *compound_mapcount_ptr(struct page *page)
4376 -{
4377 - return &page[1].compound_mapcount;
4378 -}
4379 -
4380 static inline int compound_mapcount(struct page *page)
4381 {
4382 VM_BUG_ON_PAGE(!PageCompound(page), page);
4383 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
4384 index 8d6decd50220..21b18b755c0e 100644
4385 --- a/include/linux/mm_types.h
4386 +++ b/include/linux/mm_types.h
4387 @@ -262,6 +262,11 @@ struct page_frag_cache {
4388
4389 typedef unsigned long vm_flags_t;
4390
4391 +static inline atomic_t *compound_mapcount_ptr(struct page *page)
4392 +{
4393 + return &page[1].compound_mapcount;
4394 +}
4395 +
4396 /*
4397 * A region containing a mapping of a non-memory backed file under NOMMU
4398 * conditions. These are held in a global tree and are pinned by the VMAs that
4399 diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
4400 index 74e4dda91238..896e4199623a 100644
4401 --- a/include/linux/page-flags.h
4402 +++ b/include/linux/page-flags.h
4403 @@ -545,12 +545,28 @@ static inline int PageTransCompound(struct page *page)
4404 *
4405 * Unlike PageTransCompound, this is safe to be called only while
4406 * split_huge_pmd() cannot run from under us, like if protected by the
4407 - * MMU notifier, otherwise it may result in page->_mapcount < 0 false
4408 + * MMU notifier, otherwise it may result in page->_mapcount check false
4409 * positives.
4410 + *
4411 + * We have to treat page cache THP differently since every subpage of it
4412 + * would get _mapcount inc'ed once it is PMD mapped. But, it may be PTE
4413 + * mapped in the current process so comparing subpage's _mapcount to
4414 + * compound_mapcount to filter out PTE mapped case.
4415 */
4416 static inline int PageTransCompoundMap(struct page *page)
4417 {
4418 - return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
4419 + struct page *head;
4420 +
4421 + if (!PageTransCompound(page))
4422 + return 0;
4423 +
4424 + if (PageAnon(page))
4425 + return atomic_read(&page->_mapcount) < 0;
4426 +
4427 + head = compound_head(page);
4428 + /* File THP is PMD mapped and not PTE mapped */
4429 + return atomic_read(&page->_mapcount) ==
4430 + atomic_read(compound_mapcount_ptr(head));
4431 }
4432
4433 /*
4434 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
4435 index cd6018a9ee24..a26165744d98 100644
4436 --- a/include/net/ip_vs.h
4437 +++ b/include/net/ip_vs.h
4438 @@ -887,6 +887,7 @@ struct netns_ipvs {
4439 struct delayed_work defense_work; /* Work handler */
4440 int drop_rate;
4441 int drop_counter;
4442 + int old_secure_tcp;
4443 atomic_t dropentry;
4444 /* locks in ctl.c */
4445 spinlock_t dropentry_lock; /* drop entry handling */
4446 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
4447 index f6017ddc4ded..1c0d07376125 100644
4448 --- a/include/net/neighbour.h
4449 +++ b/include/net/neighbour.h
4450 @@ -425,8 +425,8 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
4451 {
4452 unsigned long now = jiffies;
4453
4454 - if (neigh->used != now)
4455 - neigh->used = now;
4456 + if (READ_ONCE(neigh->used) != now)
4457 + WRITE_ONCE(neigh->used, now);
4458 if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
4459 return __neigh_event_send(neigh, skb);
4460 return 0;
4461 diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
4462 index 66f6b84df287..7ba9a624090f 100644
4463 --- a/include/net/netfilter/nf_tables.h
4464 +++ b/include/net/netfilter/nf_tables.h
4465 @@ -705,7 +705,8 @@ struct nft_expr_ops {
4466 */
4467 struct nft_expr {
4468 const struct nft_expr_ops *ops;
4469 - unsigned char data[];
4470 + unsigned char data[]
4471 + __attribute__((aligned(__alignof__(u64))));
4472 };
4473
4474 static inline void *nft_expr_priv(const struct nft_expr *expr)
4475 diff --git a/include/net/sock.h b/include/net/sock.h
4476 index 469c012a6d01..d8d14ae8892a 100644
4477 --- a/include/net/sock.h
4478 +++ b/include/net/sock.h
4479 @@ -2142,7 +2142,7 @@ static inline ktime_t sock_read_timestamp(struct sock *sk)
4480
4481 return kt;
4482 #else
4483 - return sk->sk_stamp;
4484 + return READ_ONCE(sk->sk_stamp);
4485 #endif
4486 }
4487
4488 @@ -2153,7 +2153,7 @@ static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
4489 sk->sk_stamp = kt;
4490 write_sequnlock(&sk->sk_stamp_seq);
4491 #else
4492 - sk->sk_stamp = kt;
4493 + WRITE_ONCE(sk->sk_stamp, kt);
4494 #endif
4495 }
4496
4497 diff --git a/lib/dump_stack.c b/lib/dump_stack.c
4498 index c30d07e99dba..72de6444934d 100644
4499 --- a/lib/dump_stack.c
4500 +++ b/lib/dump_stack.c
4501 @@ -44,7 +44,12 @@ retry:
4502 was_locked = 1;
4503 } else {
4504 local_irq_restore(flags);
4505 - cpu_relax();
4506 + /*
4507 + * Wait for the lock to release before jumping to
4508 + * atomic_cmpxchg() in order to mitigate the thundering herd
4509 + * problem.
4510 + */
4511 + do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
4512 goto retry;
4513 }
4514
4515 diff --git a/mm/filemap.c b/mm/filemap.c
4516 index 6d2f561d517c..b046d8f147e2 100644
4517 --- a/mm/filemap.c
4518 +++ b/mm/filemap.c
4519 @@ -383,7 +383,8 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
4520 .range_end = end,
4521 };
4522
4523 - if (!mapping_cap_writeback_dirty(mapping))
4524 + if (!mapping_cap_writeback_dirty(mapping) ||
4525 + !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
4526 return 0;
4527
4528 wbc_attach_fdatawrite_inode(&wbc, mapping->host);
4529 diff --git a/mm/vmstat.c b/mm/vmstat.c
4530 index 9af8d369e112..e60435d556e3 100644
4531 --- a/mm/vmstat.c
4532 +++ b/mm/vmstat.c
4533 @@ -1794,7 +1794,7 @@ static int __init setup_vmstat(void)
4534 #endif
4535 #ifdef CONFIG_PROC_FS
4536 proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
4537 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
4538 + proc_create("pagetypeinfo", 0400, NULL, &pagetypeinfo_file_ops);
4539 proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
4540 proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
4541 #endif
4542 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
4543 index 90c654012510..6aec95e1fc13 100644
4544 --- a/net/ipv4/fib_semantics.c
4545 +++ b/net/ipv4/fib_semantics.c
4546 @@ -1358,8 +1358,8 @@ int fib_sync_down_addr(struct net_device *dev, __be32 local)
4547 int ret = 0;
4548 unsigned int hash = fib_laddr_hashfn(local);
4549 struct hlist_head *head = &fib_info_laddrhash[hash];
4550 + int tb_id = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
4551 struct net *net = dev_net(dev);
4552 - int tb_id = l3mdev_fib_table(dev);
4553 struct fib_info *fi;
4554
4555 if (!fib_info_laddrhash || local == 0)
4556 diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
4557 index a748b0c2c981..fa5229fd3703 100644
4558 --- a/net/netfilter/ipset/ip_set_core.c
4559 +++ b/net/netfilter/ipset/ip_set_core.c
4560 @@ -1942,8 +1942,9 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
4561 }
4562
4563 req_version->version = IPSET_PROTOCOL;
4564 - ret = copy_to_user(user, req_version,
4565 - sizeof(struct ip_set_req_version));
4566 + if (copy_to_user(user, req_version,
4567 + sizeof(struct ip_set_req_version)))
4568 + ret = -EFAULT;
4569 goto done;
4570 }
4571 case IP_SET_OP_GET_BYNAME: {
4572 @@ -2000,7 +2001,8 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
4573 } /* end of switch(op) */
4574
4575 copy:
4576 - ret = copy_to_user(user, data, copylen);
4577 + if (copy_to_user(user, data, copylen))
4578 + ret = -EFAULT;
4579
4580 done:
4581 vfree(data);
4582 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
4583 index 8037b25ddb76..33125fc009cf 100644
4584 --- a/net/netfilter/ipvs/ip_vs_ctl.c
4585 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
4586 @@ -97,7 +97,6 @@ static bool __ip_vs_addr_is_local_v6(struct net *net,
4587 static void update_defense_level(struct netns_ipvs *ipvs)
4588 {
4589 struct sysinfo i;
4590 - static int old_secure_tcp = 0;
4591 int availmem;
4592 int nomem;
4593 int to_change = -1;
4594 @@ -178,35 +177,35 @@ static void update_defense_level(struct netns_ipvs *ipvs)
4595 spin_lock(&ipvs->securetcp_lock);
4596 switch (ipvs->sysctl_secure_tcp) {
4597 case 0:
4598 - if (old_secure_tcp >= 2)
4599 + if (ipvs->old_secure_tcp >= 2)
4600 to_change = 0;
4601 break;
4602 case 1:
4603 if (nomem) {
4604 - if (old_secure_tcp < 2)
4605 + if (ipvs->old_secure_tcp < 2)
4606 to_change = 1;
4607 ipvs->sysctl_secure_tcp = 2;
4608 } else {
4609 - if (old_secure_tcp >= 2)
4610 + if (ipvs->old_secure_tcp >= 2)
4611 to_change = 0;
4612 }
4613 break;
4614 case 2:
4615 if (nomem) {
4616 - if (old_secure_tcp < 2)
4617 + if (ipvs->old_secure_tcp < 2)
4618 to_change = 1;
4619 } else {
4620 - if (old_secure_tcp >= 2)
4621 + if (ipvs->old_secure_tcp >= 2)
4622 to_change = 0;
4623 ipvs->sysctl_secure_tcp = 1;
4624 }
4625 break;
4626 case 3:
4627 - if (old_secure_tcp < 2)
4628 + if (ipvs->old_secure_tcp < 2)
4629 to_change = 1;
4630 break;
4631 }
4632 - old_secure_tcp = ipvs->sysctl_secure_tcp;
4633 + ipvs->old_secure_tcp = ipvs->sysctl_secure_tcp;
4634 if (to_change >= 0)
4635 ip_vs_protocol_timeout_change(ipvs,
4636 ipvs->sysctl_secure_tcp > 1);
4637 diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
4638 index ad878302924f..d3c8dd5dc817 100644
4639 --- a/net/nfc/netlink.c
4640 +++ b/net/nfc/netlink.c
4641 @@ -1103,7 +1103,6 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
4642
4643 local = nfc_llcp_find_local(dev);
4644 if (!local) {
4645 - nfc_put_device(dev);
4646 rc = -ENODEV;
4647 goto exit;
4648 }
4649 @@ -1163,7 +1162,6 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
4650
4651 local = nfc_llcp_find_local(dev);
4652 if (!local) {
4653 - nfc_put_device(dev);
4654 rc = -ENODEV;
4655 goto exit;
4656 }
4657 diff --git a/sound/core/timer.c b/sound/core/timer.c
4658 index 19d90aa08218..e944d27f79c3 100644
4659 --- a/sound/core/timer.c
4660 +++ b/sound/core/timer.c
4661 @@ -297,11 +297,11 @@ int snd_timer_open(struct snd_timer_instance **ti,
4662 goto unlock;
4663 }
4664 if (!list_empty(&timer->open_list_head)) {
4665 - timeri = list_entry(timer->open_list_head.next,
4666 + struct snd_timer_instance *t =
4667 + list_entry(timer->open_list_head.next,
4668 struct snd_timer_instance, open_list);
4669 - if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
4670 + if (t->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
4671 err = -EBUSY;
4672 - timeri = NULL;
4673 goto unlock;
4674 }
4675 }
4676 diff --git a/sound/firewire/bebob/bebob_focusrite.c b/sound/firewire/bebob/bebob_focusrite.c
4677 index f11090057949..d0a8736613a1 100644
4678 --- a/sound/firewire/bebob/bebob_focusrite.c
4679 +++ b/sound/firewire/bebob/bebob_focusrite.c
4680 @@ -28,6 +28,8 @@
4681 #define SAFFIRE_CLOCK_SOURCE_SPDIF 1
4682
4683 /* clock sources as returned from register of Saffire Pro 10 and 26 */
4684 +#define SAFFIREPRO_CLOCK_SOURCE_SELECT_MASK 0x000000ff
4685 +#define SAFFIREPRO_CLOCK_SOURCE_DETECT_MASK 0x0000ff00
4686 #define SAFFIREPRO_CLOCK_SOURCE_INTERNAL 0
4687 #define SAFFIREPRO_CLOCK_SOURCE_SKIP 1 /* never used on hardware */
4688 #define SAFFIREPRO_CLOCK_SOURCE_SPDIF 2
4689 @@ -190,6 +192,7 @@ saffirepro_both_clk_src_get(struct snd_bebob *bebob, unsigned int *id)
4690 map = saffirepro_clk_maps[1];
4691
4692 /* In a case that this driver cannot handle the value of register. */
4693 + value &= SAFFIREPRO_CLOCK_SOURCE_SELECT_MASK;
4694 if (value >= SAFFIREPRO_CLOCK_SOURCE_COUNT || map[value] < 0) {
4695 err = -EIO;
4696 goto end;
4697 diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
4698 index 280999961226..475b2c6c43d6 100644
4699 --- a/sound/pci/hda/patch_ca0132.c
4700 +++ b/sound/pci/hda/patch_ca0132.c
4701 @@ -4440,7 +4440,7 @@ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
4702 /* Delay enabling the HP amp, to let the mic-detection
4703 * state machine run.
4704 */
4705 - cancel_delayed_work_sync(&spec->unsol_hp_work);
4706 + cancel_delayed_work(&spec->unsol_hp_work);
4707 schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
4708 tbl = snd_hda_jack_tbl_get(codec, cb->nid);
4709 if (tbl)
4710 diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
4711 index 82833ceba339..32f991d28497 100644
4712 --- a/tools/perf/util/hist.c
4713 +++ b/tools/perf/util/hist.c
4714 @@ -1485,7 +1485,7 @@ int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
4715 return 0;
4716 }
4717
4718 -static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
4719 +static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
4720 {
4721 struct hists *hists = a->hists;
4722 struct perf_hpp_fmt *fmt;