Magellan Linux

Contents of /trunk/kernel-magellan/patches-3.11/0105-3.11.6-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 2313 - (show annotations) (download)
Tue Oct 22 06:44:45 2013 UTC (10 years, 6 months ago) by niro
File size: 73295 byte(s)
-linux-3.11.6
1 diff --git a/Makefile b/Makefile
2 index 83121b7..e87ba83 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 11
8 -SUBLEVEL = 5
9 +SUBLEVEL = 6
10 EXTRAVERSION =
11 NAME = Linux for Workgroups
12
13 diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
14 index 442ce5d..43de302 100644
15 --- a/arch/arc/include/asm/delay.h
16 +++ b/arch/arc/include/asm/delay.h
17 @@ -53,11 +53,10 @@ static inline void __udelay(unsigned long usecs)
18 {
19 unsigned long loops;
20
21 - /* (long long) cast ensures 64 bit MPY - real or emulated
22 + /* (u64) cast ensures 64 bit MPY - real or emulated
23 * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
24 */
25 - loops = ((long long)(usecs * 4295 * HZ) *
26 - (long long)(loops_per_jiffy)) >> 32;
27 + loops = ((u64) usecs * 4295 * HZ * loops_per_jiffy) >> 32;
28
29 __delay(loops);
30 }
31 diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
32 index f158197..b6a8c2d 100644
33 --- a/arch/arc/include/asm/spinlock.h
34 +++ b/arch/arc/include/asm/spinlock.h
35 @@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
36
37 static inline void arch_spin_unlock(arch_spinlock_t *lock)
38 {
39 - lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
40 + unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
41 +
42 + __asm__ __volatile__(
43 + " ex %0, [%1] \n"
44 + : "+r" (tmp)
45 + : "r"(&(lock->slock))
46 + : "memory");
47 +
48 smp_mb();
49 }
50
51 diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
52 index 3242082..30c9baf 100644
53 --- a/arch/arc/include/asm/uaccess.h
54 +++ b/arch/arc/include/asm/uaccess.h
55 @@ -43,7 +43,7 @@
56 * Because it essentially checks if buffer end is within limit and @len is
57 * non-ngeative, which implies that buffer start will be within limit too.
58 *
59 - * The reason for rewriting being, for majorit yof cases, @len is generally
60 + * The reason for rewriting being, for majority of cases, @len is generally
61 * compile time constant, causing first sub-expression to be compile time
62 * subsumed.
63 *
64 @@ -53,7 +53,7 @@
65 *
66 */
67 #define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \
68 - (((addr)+(sz)) <= get_fs()))
69 + ((addr) <= (get_fs() - (sz))))
70 #define __access_ok(addr, sz) (unlikely(__kernel_ok) || \
71 likely(__user_ok((addr), (sz))))
72
73 diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
74 index 3332385..5d76706 100644
75 --- a/arch/arc/kernel/ptrace.c
76 +++ b/arch/arc/kernel/ptrace.c
77 @@ -102,7 +102,7 @@ static int genregs_set(struct task_struct *target,
78 REG_IGNORE_ONE(pad2);
79 REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */
80 REG_IGNORE_ONE(efa); /* efa update invalid */
81 - REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */
82 + REG_IGNORE_ONE(stop_pc); /* PC updated via @ret */
83
84 return ret;
85 }
86 diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
87 index ee6ef2f..7e95e1a 100644
88 --- a/arch/arc/kernel/signal.c
89 +++ b/arch/arc/kernel/signal.c
90 @@ -101,7 +101,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
91 {
92 struct rt_sigframe __user *sf;
93 unsigned int magic;
94 - int err;
95 struct pt_regs *regs = current_pt_regs();
96
97 /* Always make any pending restarted system calls return -EINTR */
98 @@ -119,15 +118,16 @@ SYSCALL_DEFINE0(rt_sigreturn)
99 if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
100 goto badframe;
101
102 - err = restore_usr_regs(regs, sf);
103 - err |= __get_user(magic, &sf->sigret_magic);
104 - if (err)
105 + if (__get_user(magic, &sf->sigret_magic))
106 goto badframe;
107
108 if (unlikely(is_do_ss_needed(magic)))
109 if (restore_altstack(&sf->uc.uc_stack))
110 goto badframe;
111
112 + if (restore_usr_regs(regs, sf))
113 + goto badframe;
114 +
115 /* Don't restart from sigreturn */
116 syscall_wont_restart(regs);
117
118 @@ -191,6 +191,15 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
119 return 1;
120
121 /*
122 + * w/o SA_SIGINFO, struct ucontext is partially populated (only
123 + * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
124 + * during signal handler execution. This works for SA_SIGINFO as well
125 + * although the semantics are now overloaded (the same reg state can be
126 + * inspected by userland: but are they allowed to fiddle with it ?
127 + */
128 + err |= stash_usr_regs(sf, regs, set);
129 +
130 + /*
131 * SA_SIGINFO requires 3 args to signal handler:
132 * #1: sig-no (common to any handler)
133 * #2: struct siginfo
134 @@ -213,14 +222,6 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
135 magic = MAGIC_SIGALTSTK;
136 }
137
138 - /*
139 - * w/o SA_SIGINFO, struct ucontext is partially populated (only
140 - * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
141 - * during signal handler execution. This works for SA_SIGINFO as well
142 - * although the semantics are now overloaded (the same reg state can be
143 - * inspected by userland: but are they allowed to fiddle with it ?
144 - */
145 - err |= stash_usr_regs(sf, regs, set);
146 err |= __put_user(magic, &sf->sigret_magic);
147 if (err)
148 return err;
149 diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
150 index c0f832f..00ad070 100644
151 --- a/arch/arc/kernel/unaligned.c
152 +++ b/arch/arc/kernel/unaligned.c
153 @@ -233,6 +233,12 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
154 regs->status32 &= ~STATUS_DE_MASK;
155 } else {
156 regs->ret += state.instr_len;
157 +
158 + /* handle zero-overhead-loop */
159 + if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
160 + regs->ret = regs->lp_start;
161 + regs->lp_count--;
162 + }
163 }
164
165 return 0;
166 diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
167 index bfc198c..863c892 100644
168 --- a/arch/arm/include/asm/jump_label.h
169 +++ b/arch/arm/include/asm/jump_label.h
170 @@ -16,7 +16,7 @@
171
172 static __always_inline bool arch_static_branch(struct static_key *key)
173 {
174 - asm goto("1:\n\t"
175 + asm_volatile_goto("1:\n\t"
176 JUMP_LABEL_NOP "\n\t"
177 ".pushsection __jump_table, \"aw\"\n\t"
178 ".word 1b, %l[l_yes], %c0\n\t"
179 diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
180 index 4d6d77e..e194f95 100644
181 --- a/arch/mips/include/asm/jump_label.h
182 +++ b/arch/mips/include/asm/jump_label.h
183 @@ -22,7 +22,7 @@
184
185 static __always_inline bool arch_static_branch(struct static_key *key)
186 {
187 - asm goto("1:\tnop\n\t"
188 + asm_volatile_goto("1:\tnop\n\t"
189 "nop\n\t"
190 ".pushsection __jump_table, \"aw\"\n\t"
191 WORD_INSN " 1b, %l[l_yes], %0\n\t"
192 diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
193 index 4204d76..029e002 100644
194 --- a/arch/mips/kernel/octeon_switch.S
195 +++ b/arch/mips/kernel/octeon_switch.S
196 @@ -73,7 +73,7 @@
197 3:
198
199 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
200 - PTR_L t8, __stack_chk_guard
201 + PTR_LA t8, __stack_chk_guard
202 LONG_L t9, TASK_STACK_CANARY(a1)
203 LONG_S t9, 0(t8)
204 #endif
205 diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
206 index 38af83f..20b7b04 100644
207 --- a/arch/mips/kernel/r2300_switch.S
208 +++ b/arch/mips/kernel/r2300_switch.S
209 @@ -67,7 +67,7 @@ LEAF(resume)
210 1:
211
212 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
213 - PTR_L t8, __stack_chk_guard
214 + PTR_LA t8, __stack_chk_guard
215 LONG_L t9, TASK_STACK_CANARY(a1)
216 LONG_S t9, 0(t8)
217 #endif
218 diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
219 index 921238a..078de5e 100644
220 --- a/arch/mips/kernel/r4k_switch.S
221 +++ b/arch/mips/kernel/r4k_switch.S
222 @@ -69,7 +69,7 @@
223 1:
224
225 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
226 - PTR_L t8, __stack_chk_guard
227 + PTR_LA t8, __stack_chk_guard
228 LONG_L t9, TASK_STACK_CANARY(a1)
229 LONG_S t9, 0(t8)
230 #endif
231 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
232 index 04e47c6..b3f87a3 100644
233 --- a/arch/parisc/kernel/traps.c
234 +++ b/arch/parisc/kernel/traps.c
235 @@ -805,14 +805,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
236 else {
237
238 /*
239 - * The kernel should never fault on its own address space.
240 + * The kernel should never fault on its own address space,
241 + * unless pagefault_disable() was called before.
242 */
243
244 - if (fault_space == 0)
245 + if (fault_space == 0 && !in_atomic())
246 {
247 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
248 parisc_terminate("Kernel Fault", regs, code, fault_address);
249 -
250 }
251 }
252
253 diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
254 index ae098c4..f016bb6 100644
255 --- a/arch/powerpc/include/asm/jump_label.h
256 +++ b/arch/powerpc/include/asm/jump_label.h
257 @@ -19,7 +19,7 @@
258
259 static __always_inline bool arch_static_branch(struct static_key *key)
260 {
261 - asm goto("1:\n\t"
262 + asm_volatile_goto("1:\n\t"
263 "nop\n\t"
264 ".pushsection __jump_table, \"aw\"\n\t"
265 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
266 diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
267 index b02f91e..7bcd4d6 100644
268 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
269 +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
270 @@ -1054,7 +1054,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
271 BEGIN_FTR_SECTION
272 mfspr r8, SPRN_DSCR
273 ld r7, HSTATE_DSCR(r13)
274 - std r8, VCPU_DSCR(r7)
275 + std r8, VCPU_DSCR(r9)
276 mtspr SPRN_DSCR, r7
277 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
278
279 diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
280 index 6c32190..346b1c8 100644
281 --- a/arch/s390/include/asm/jump_label.h
282 +++ b/arch/s390/include/asm/jump_label.h
283 @@ -15,7 +15,7 @@
284
285 static __always_inline bool arch_static_branch(struct static_key *key)
286 {
287 - asm goto("0: brcl 0,0\n"
288 + asm_volatile_goto("0: brcl 0,0\n"
289 ".pushsection __jump_table, \"aw\"\n"
290 ASM_ALIGN "\n"
291 ASM_PTR " 0b, %l[label], %0\n"
292 diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
293 index 5080d16..ec2e2e2 100644
294 --- a/arch/sparc/include/asm/jump_label.h
295 +++ b/arch/sparc/include/asm/jump_label.h
296 @@ -9,7 +9,7 @@
297
298 static __always_inline bool arch_static_branch(struct static_key *key)
299 {
300 - asm goto("1:\n\t"
301 + asm_volatile_goto("1:\n\t"
302 "nop\n\t"
303 "nop\n\t"
304 ".pushsection __jump_table, \"aw\"\n\t"
305 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
306 index 47538a6..7290585 100644
307 --- a/arch/x86/include/asm/cpufeature.h
308 +++ b/arch/x86/include/asm/cpufeature.h
309 @@ -373,7 +373,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
310 * Catch too early usage of this before alternatives
311 * have run.
312 */
313 - asm goto("1: jmp %l[t_warn]\n"
314 + asm_volatile_goto("1: jmp %l[t_warn]\n"
315 "2:\n"
316 ".section .altinstructions,\"a\"\n"
317 " .long 1b - .\n"
318 @@ -386,7 +386,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
319 : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
320 #endif
321
322 - asm goto("1: jmp %l[t_no]\n"
323 + asm_volatile_goto("1: jmp %l[t_no]\n"
324 "2:\n"
325 ".section .altinstructions,\"a\"\n"
326 " .long 1b - .\n"
327 @@ -448,7 +448,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
328 * have. Thus, we force the jump to the widest, 4-byte, signed relative
329 * offset even though the last would often fit in less bytes.
330 */
331 - asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
332 + asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
333 "2:\n"
334 ".section .altinstructions,\"a\"\n"
335 " .long 1b - .\n" /* src offset */
336 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
337 index cccd07f..779c2ef 100644
338 --- a/arch/x86/include/asm/e820.h
339 +++ b/arch/x86/include/asm/e820.h
340 @@ -29,7 +29,7 @@ extern void e820_setup_gap(void);
341 extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
342 unsigned long start_addr, unsigned long long end_addr);
343 struct setup_data;
344 -extern void parse_e820_ext(struct setup_data *data);
345 +extern void parse_e820_ext(u64 phys_addr, u32 data_len);
346
347 #if defined(CONFIG_X86_64) || \
348 (defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
349 diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
350 index 3a16c14..0297669 100644
351 --- a/arch/x86/include/asm/jump_label.h
352 +++ b/arch/x86/include/asm/jump_label.h
353 @@ -13,7 +13,7 @@
354
355 static __always_inline bool arch_static_branch(struct static_key *key)
356 {
357 - asm goto("1:"
358 + asm_volatile_goto("1:"
359 STATIC_KEY_INITIAL_NOP
360 ".pushsection __jump_table, \"aw\" \n\t"
361 _ASM_ALIGN "\n\t"
362 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
363 index d32abea..174da5f 100644
364 --- a/arch/x86/kernel/e820.c
365 +++ b/arch/x86/kernel/e820.c
366 @@ -658,15 +658,18 @@ __init void e820_setup_gap(void)
367 * boot_params.e820_map, others are passed via SETUP_E820_EXT node of
368 * linked list of struct setup_data, which is parsed here.
369 */
370 -void __init parse_e820_ext(struct setup_data *sdata)
371 +void __init parse_e820_ext(u64 phys_addr, u32 data_len)
372 {
373 int entries;
374 struct e820entry *extmap;
375 + struct setup_data *sdata;
376
377 + sdata = early_memremap(phys_addr, data_len);
378 entries = sdata->len / sizeof(struct e820entry);
379 extmap = (struct e820entry *)(sdata->data);
380 __append_e820_map(extmap, entries);
381 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
382 + early_iounmap(sdata, data_len);
383 printk(KERN_INFO "e820: extended physical RAM map:\n");
384 e820_print_map("extended");
385 }
386 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
387 index f8ec578..234e1e3 100644
388 --- a/arch/x86/kernel/setup.c
389 +++ b/arch/x86/kernel/setup.c
390 @@ -426,25 +426,23 @@ static void __init reserve_initrd(void)
391 static void __init parse_setup_data(void)
392 {
393 struct setup_data *data;
394 - u64 pa_data;
395 + u64 pa_data, pa_next;
396
397 pa_data = boot_params.hdr.setup_data;
398 while (pa_data) {
399 - u32 data_len, map_len;
400 + u32 data_len, map_len, data_type;
401
402 map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK),
403 (u64)sizeof(struct setup_data));
404 data = early_memremap(pa_data, map_len);
405 data_len = data->len + sizeof(struct setup_data);
406 - if (data_len > map_len) {
407 - early_iounmap(data, map_len);
408 - data = early_memremap(pa_data, data_len);
409 - map_len = data_len;
410 - }
411 + data_type = data->type;
412 + pa_next = data->next;
413 + early_iounmap(data, map_len);
414
415 - switch (data->type) {
416 + switch (data_type) {
417 case SETUP_E820_EXT:
418 - parse_e820_ext(data);
419 + parse_e820_ext(pa_data, data_len);
420 break;
421 case SETUP_DTB:
422 add_dtb(pa_data);
423 @@ -452,8 +450,7 @@ static void __init parse_setup_data(void)
424 default:
425 break;
426 }
427 - pa_data = data->next;
428 - early_iounmap(data, map_len);
429 + pa_data = pa_next;
430 }
431 }
432
433 diff --git a/drivers/char/random.c b/drivers/char/random.c
434 index 0d91fe5..92e6c67 100644
435 --- a/drivers/char/random.c
436 +++ b/drivers/char/random.c
437 @@ -1462,12 +1462,11 @@ struct ctl_table random_table[] = {
438
439 static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
440
441 -static int __init random_int_secret_init(void)
442 +int random_int_secret_init(void)
443 {
444 get_random_bytes(random_int_secret, sizeof(random_int_secret));
445 return 0;
446 }
447 -late_initcall(random_int_secret_init);
448
449 /*
450 * Get a random word for internal kernel use only. Similar to urandom but
451 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
452 index 342f1f3..c42d31c 100644
453 --- a/drivers/gpu/drm/i915/i915_reg.h
454 +++ b/drivers/gpu/drm/i915/i915_reg.h
455 @@ -3791,6 +3791,9 @@
456 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
457 #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
458
459 +#define HSW_SCRATCH1 0xb038
460 +#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
461 +
462 #define HSW_FUSE_STRAP 0x42014
463 #define HSW_CDCLK_LIMIT (1 << 24)
464
465 @@ -4624,6 +4627,9 @@
466 #define GEN7_ROW_CHICKEN2_GT2 0xf4f4
467 #define DOP_CLOCK_GATING_DISABLE (1<<0)
468
469 +#define HSW_ROW_CHICKEN3 0xe49c
470 +#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
471 +
472 #define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
473 #define INTEL_AUDIO_DEVCL 0x808629FB
474 #define INTEL_AUDIO_DEVBLC 0x80862801
475 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
476 index 7fc8a76..90a7c17 100644
477 --- a/drivers/gpu/drm/i915/intel_display.c
478 +++ b/drivers/gpu/drm/i915/intel_display.c
479 @@ -3890,8 +3890,6 @@ static void intel_connector_check_state(struct intel_connector *connector)
480 * consider. */
481 void intel_connector_dpms(struct drm_connector *connector, int mode)
482 {
483 - struct intel_encoder *encoder = intel_attached_encoder(connector);
484 -
485 /* All the simple cases only support two dpms states. */
486 if (mode != DRM_MODE_DPMS_ON)
487 mode = DRM_MODE_DPMS_OFF;
488 @@ -3902,10 +3900,8 @@ void intel_connector_dpms(struct drm_connector *connector, int mode)
489 connector->dpms = mode;
490
491 /* Only need to change hw state when actually enabled */
492 - if (encoder->base.crtc)
493 - intel_encoder_dpms(encoder, mode);
494 - else
495 - WARN_ON(encoder->connectors_active != false);
496 + if (connector->encoder)
497 + intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
498
499 intel_modeset_check_state(connector->dev);
500 }
501 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
502 index b0e4a0b..cad0482 100644
503 --- a/drivers/gpu/drm/i915/intel_pm.c
504 +++ b/drivers/gpu/drm/i915/intel_pm.c
505 @@ -3603,8 +3603,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
506 dev_priv->rps.rpe_delay),
507 dev_priv->rps.rpe_delay);
508
509 - INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
510 -
511 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
512
513 /* requires MSI enabled */
514 @@ -4699,6 +4697,11 @@ static void haswell_init_clock_gating(struct drm_device *dev)
515 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
516 GEN7_WA_L3_CHICKEN_MODE);
517
518 + /* L3 caching of data atomics doesn't work -- disable it. */
519 + I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
520 + I915_WRITE(HSW_ROW_CHICKEN3,
521 + _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
522 +
523 /* This is required by WaCatErrorRejectionIssue:hsw */
524 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
525 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
526 @@ -5562,6 +5565,8 @@ void intel_pm_init(struct drm_device *dev)
527
528 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
529 intel_gen6_powersave_work);
530 +
531 + INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
532 }
533
534 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
535 diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
536 index 084e694..639b9aa 100644
537 --- a/drivers/gpu/drm/radeon/btc_dpm.c
538 +++ b/drivers/gpu/drm/radeon/btc_dpm.c
539 @@ -1913,7 +1913,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
540 }
541 j++;
542
543 - if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
544 + if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
545 return -EINVAL;
546
547 tmp = RREG32(MC_PMG_CMD_MRS);
548 @@ -1928,7 +1928,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
549 }
550 j++;
551
552 - if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
553 + if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
554 return -EINVAL;
555 break;
556 case MC_SEQ_RESERVE_M >> 2:
557 @@ -1942,7 +1942,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
558 }
559 j++;
560
561 - if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
562 + if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
563 return -EINVAL;
564 break;
565 default:
566 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
567 index 94dab1e..8307883 100644
568 --- a/drivers/gpu/drm/radeon/evergreen.c
569 +++ b/drivers/gpu/drm/radeon/evergreen.c
570 @@ -3126,7 +3126,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
571 rdev->config.evergreen.sx_max_export_size = 256;
572 rdev->config.evergreen.sx_max_export_pos_size = 64;
573 rdev->config.evergreen.sx_max_export_smx_size = 192;
574 - rdev->config.evergreen.max_hw_contexts = 8;
575 + rdev->config.evergreen.max_hw_contexts = 4;
576 rdev->config.evergreen.sq_num_cf_insts = 2;
577
578 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
579 diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
580 index 20fd17c..6be00c9 100644
581 --- a/drivers/gpu/drm/radeon/evergreend.h
582 +++ b/drivers/gpu/drm/radeon/evergreend.h
583 @@ -1494,7 +1494,7 @@
584 * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
585 */
586 # define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
587 - /* 0 - SRC_ADDR
588 + /* 0 - DST_ADDR
589 * 1 - GDS
590 */
591 # define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
592 @@ -1509,7 +1509,7 @@
593 # define PACKET3_CP_DMA_CP_SYNC (1 << 31)
594 /* COMMAND */
595 # define PACKET3_CP_DMA_DIS_WC (1 << 21)
596 -# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
597 +# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
598 /* 0 - none
599 * 1 - 8 in 16
600 * 2 - 8 in 32
601 diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
602 index 7c78083..d079cb1 100644
603 --- a/drivers/gpu/drm/radeon/r600d.h
604 +++ b/drivers/gpu/drm/radeon/r600d.h
605 @@ -1487,7 +1487,7 @@
606 */
607 # define PACKET3_CP_DMA_CP_SYNC (1 << 31)
608 /* COMMAND */
609 -# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
610 +# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
611 /* 0 - none
612 * 1 - 8 in 16
613 * 2 - 8 in 32
614 diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
615 index f4d6bce..12e8099 100644
616 --- a/drivers/gpu/drm/radeon/radeon_test.c
617 +++ b/drivers/gpu/drm/radeon/radeon_test.c
618 @@ -36,8 +36,8 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
619 struct radeon_bo *vram_obj = NULL;
620 struct radeon_bo **gtt_obj = NULL;
621 uint64_t gtt_addr, vram_addr;
622 - unsigned i, n, size;
623 - int r, ring;
624 + unsigned n, size;
625 + int i, r, ring;
626
627 switch (flag) {
628 case RADEON_TEST_COPY_DMA:
629 diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
630 index 1cfba39..1c23b61 100644
631 --- a/drivers/gpu/drm/radeon/si_dpm.c
632 +++ b/drivers/gpu/drm/radeon/si_dpm.c
633 @@ -5174,7 +5174,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
634 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
635 }
636 j++;
637 - if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
638 + if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
639 return -EINVAL;
640
641 if (!pi->mem_gddr5) {
642 @@ -5184,7 +5184,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
643 table->mc_reg_table_entry[k].mc_data[j] =
644 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
645 j++;
646 - if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
647 + if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
648 return -EINVAL;
649 }
650 break;
651 @@ -5197,7 +5197,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
652 (temp_reg & 0xffff0000) |
653 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
654 j++;
655 - if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
656 + if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
657 return -EINVAL;
658 break;
659 default:
660 diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
661 index 2010d6b..a75d25a 100644
662 --- a/drivers/gpu/drm/radeon/sid.h
663 +++ b/drivers/gpu/drm/radeon/sid.h
664 @@ -1490,7 +1490,7 @@
665 * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
666 */
667 # define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
668 - /* 0 - SRC_ADDR
669 + /* 0 - DST_ADDR
670 * 1 - GDS
671 */
672 # define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
673 @@ -1505,7 +1505,7 @@
674 # define PACKET3_CP_DMA_CP_SYNC (1 << 31)
675 /* COMMAND */
676 # define PACKET3_CP_DMA_DIS_WC (1 << 21)
677 -# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
678 +# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
679 /* 0 - none
680 * 1 - 8 in 16
681 * 2 - 8 in 32
682 diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
683 index 98814d1..3288f13 100644
684 --- a/drivers/hwmon/applesmc.c
685 +++ b/drivers/hwmon/applesmc.c
686 @@ -230,6 +230,7 @@ static int send_argument(const char *key)
687
688 static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
689 {
690 + u8 status, data = 0;
691 int i;
692
693 if (send_command(cmd) || send_argument(key)) {
694 @@ -237,6 +238,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
695 return -EIO;
696 }
697
698 + /* This has no effect on newer (2012) SMCs */
699 if (send_byte(len, APPLESMC_DATA_PORT)) {
700 pr_warn("%.4s: read len fail\n", key);
701 return -EIO;
702 @@ -250,6 +252,17 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
703 buffer[i] = inb(APPLESMC_DATA_PORT);
704 }
705
706 + /* Read the data port until bit0 is cleared */
707 + for (i = 0; i < 16; i++) {
708 + udelay(APPLESMC_MIN_WAIT);
709 + status = inb(APPLESMC_CMD_PORT);
710 + if (!(status & 0x01))
711 + break;
712 + data = inb(APPLESMC_DATA_PORT);
713 + }
714 + if (i)
715 + pr_warn("flushed %d bytes, last value is: %d\n", i, data);
716 +
717 return 0;
718 }
719
720 diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
721 index 142b694d..e6b8dcd 100644
722 --- a/drivers/i2c/busses/i2c-omap.c
723 +++ b/drivers/i2c/busses/i2c-omap.c
724 @@ -944,6 +944,9 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
725 /*
726 * ProDB0017052: Clear ARDY bit twice
727 */
728 + if (stat & OMAP_I2C_STAT_ARDY)
729 + omap_i2c_ack_stat(dev, OMAP_I2C_STAT_ARDY);
730 +
731 if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
732 OMAP_I2C_STAT_AL)) {
733 omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_RRDY |
734 diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
735 index 491419e..5c3d4df 100644
736 --- a/drivers/watchdog/kempld_wdt.c
737 +++ b/drivers/watchdog/kempld_wdt.c
738 @@ -35,7 +35,7 @@
739 #define KEMPLD_WDT_STAGE_TIMEOUT(x) (0x1b + (x) * 4)
740 #define KEMPLD_WDT_STAGE_CFG(x) (0x18 + (x))
741 #define STAGE_CFG_GET_PRESCALER(x) (((x) & 0x30) >> 4)
742 -#define STAGE_CFG_SET_PRESCALER(x) (((x) & 0x30) << 4)
743 +#define STAGE_CFG_SET_PRESCALER(x) (((x) & 0x3) << 4)
744 #define STAGE_CFG_PRESCALER_MASK 0x30
745 #define STAGE_CFG_ACTION_MASK 0x7
746 #define STAGE_CFG_ASSERT (1 << 3)
747 diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
748 index 4da59b4..381999c 100644
749 --- a/drivers/watchdog/ts72xx_wdt.c
750 +++ b/drivers/watchdog/ts72xx_wdt.c
751 @@ -310,7 +310,8 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd,
752
753 case WDIOC_GETSTATUS:
754 case WDIOC_GETBOOTSTATUS:
755 - return put_user(0, p);
756 + error = put_user(0, p);
757 + break;
758
759 case WDIOC_KEEPALIVE:
760 ts72xx_wdt_kick(wdt);
761 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
762 index d3280b2..8220491 100644
763 --- a/fs/btrfs/inode.c
764 +++ b/fs/btrfs/inode.c
765 @@ -8036,7 +8036,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
766
767
768 /* check for collisions, even if the name isn't there */
769 - ret = btrfs_check_dir_item_collision(root, new_dir->i_ino,
770 + ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
771 new_dentry->d_name.name,
772 new_dentry->d_name.len);
773
774 diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
775 index c081e34..03e9beb 100644
776 --- a/fs/ext4/xattr.c
777 +++ b/fs/ext4/xattr.c
778 @@ -1350,6 +1350,8 @@ retry:
779 s_min_extra_isize) {
780 tried_min_extra_isize++;
781 new_extra_isize = s_min_extra_isize;
782 + kfree(is); is = NULL;
783 + kfree(bs); bs = NULL;
784 goto retry;
785 }
786 error = -1;
787 diff --git a/fs/statfs.c b/fs/statfs.c
788 index c219e733..083dc0a 100644
789 --- a/fs/statfs.c
790 +++ b/fs/statfs.c
791 @@ -94,7 +94,7 @@ retry:
792
793 int fd_statfs(int fd, struct kstatfs *st)
794 {
795 - struct fd f = fdget(fd);
796 + struct fd f = fdget_raw(fd);
797 int error = -EBADF;
798 if (f.file) {
799 error = vfs_statfs(&f.file->f_path, st);
800 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
801 index 842de22..ded4299 100644
802 --- a/include/linux/compiler-gcc4.h
803 +++ b/include/linux/compiler-gcc4.h
804 @@ -65,6 +65,21 @@
805 #define __visible __attribute__((externally_visible))
806 #endif
807
808 +/*
809 + * GCC 'asm goto' miscompiles certain code sequences:
810 + *
811 + * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
812 + *
813 + * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
814 + * Fixed in GCC 4.8.2 and later versions.
815 + *
816 + * (asm goto is automatically volatile - the naming reflects this.)
817 + */
818 +#if GCC_VERSION <= 40801
819 +# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
820 +#else
821 +# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
822 +#endif
823
824 #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
825 #if GCC_VERSION >= 40400
826 diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
827 index c4d870b..19c19a5 100644
828 --- a/include/linux/ipc_namespace.h
829 +++ b/include/linux/ipc_namespace.h
830 @@ -22,7 +22,7 @@ struct ipc_ids {
831 int in_use;
832 unsigned short seq;
833 unsigned short seq_max;
834 - struct rw_semaphore rw_mutex;
835 + struct rw_semaphore rwsem;
836 struct idr ipcs_idr;
837 int next_id;
838 };
839 diff --git a/include/linux/random.h b/include/linux/random.h
840 index 3b9377d..6312dd9 100644
841 --- a/include/linux/random.h
842 +++ b/include/linux/random.h
843 @@ -17,6 +17,7 @@ extern void add_interrupt_randomness(int irq, int irq_flags);
844 extern void get_random_bytes(void *buf, int nbytes);
845 extern void get_random_bytes_arch(void *buf, int nbytes);
846 void generate_random_uuid(unsigned char uuid_out[16]);
847 +extern int random_int_secret_init(void);
848
849 #ifndef MODULE
850 extern const struct file_operations random_fops, urandom_fops;
851 diff --git a/init/main.c b/init/main.c
852 index d03d2ec..586cd33 100644
853 --- a/init/main.c
854 +++ b/init/main.c
855 @@ -75,6 +75,7 @@
856 #include <linux/blkdev.h>
857 #include <linux/elevator.h>
858 #include <linux/sched_clock.h>
859 +#include <linux/random.h>
860
861 #include <asm/io.h>
862 #include <asm/bugs.h>
863 @@ -778,6 +779,7 @@ static void __init do_basic_setup(void)
864 do_ctors();
865 usermodehelper_enable();
866 do_initcalls();
867 + random_int_secret_init();
868 }
869
870 static void __init do_pre_smp_initcalls(void)
871 diff --git a/ipc/msg.c b/ipc/msg.c
872 index a877c16..558aa91 100644
873 --- a/ipc/msg.c
874 +++ b/ipc/msg.c
875 @@ -70,8 +70,6 @@ struct msg_sender {
876
877 #define msg_ids(ns) ((ns)->ids[IPC_MSG_IDS])
878
879 -#define msg_unlock(msq) ipc_unlock(&(msq)->q_perm)
880 -
881 static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
882 static int newque(struct ipc_namespace *, struct ipc_params *);
883 #ifdef CONFIG_PROC_FS
884 @@ -181,7 +179,7 @@ static void msg_rcu_free(struct rcu_head *head)
885 * @ns: namespace
886 * @params: ptr to the structure that contains the key and msgflg
887 *
888 - * Called with msg_ids.rw_mutex held (writer)
889 + * Called with msg_ids.rwsem held (writer)
890 */
891 static int newque(struct ipc_namespace *ns, struct ipc_params *params)
892 {
893 @@ -267,8 +265,8 @@ static void expunge_all(struct msg_queue *msq, int res)
894 * removes the message queue from message queue ID IDR, and cleans up all the
895 * messages associated with this queue.
896 *
897 - * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
898 - * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
899 + * msg_ids.rwsem (writer) and the spinlock for this message queue are held
900 + * before freeque() is called. msg_ids.rwsem remains locked on exit.
901 */
902 static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
903 {
904 @@ -278,7 +276,8 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
905 expunge_all(msq, -EIDRM);
906 ss_wakeup(&msq->q_senders, 1);
907 msg_rmid(ns, msq);
908 - msg_unlock(msq);
909 + ipc_unlock_object(&msq->q_perm);
910 + rcu_read_unlock();
911
912 list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
913 atomic_dec(&ns->msg_hdrs);
914 @@ -289,7 +288,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
915 }
916
917 /*
918 - * Called with msg_ids.rw_mutex and ipcp locked.
919 + * Called with msg_ids.rwsem and ipcp locked.
920 */
921 static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
922 {
923 @@ -393,9 +392,9 @@ copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
924 }
925
926 /*
927 - * This function handles some msgctl commands which require the rw_mutex
928 + * This function handles some msgctl commands which require the rwsem
929 * to be held in write mode.
930 - * NOTE: no locks must be held, the rw_mutex is taken inside this function.
931 + * NOTE: no locks must be held, the rwsem is taken inside this function.
932 */
933 static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
934 struct msqid_ds __user *buf, int version)
935 @@ -410,7 +409,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
936 return -EFAULT;
937 }
938
939 - down_write(&msg_ids(ns).rw_mutex);
940 + down_write(&msg_ids(ns).rwsem);
941 rcu_read_lock();
942
943 ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd,
944 @@ -466,7 +465,7 @@ out_unlock0:
945 out_unlock1:
946 rcu_read_unlock();
947 out_up:
948 - up_write(&msg_ids(ns).rw_mutex);
949 + up_write(&msg_ids(ns).rwsem);
950 return err;
951 }
952
953 @@ -501,7 +500,7 @@ static int msgctl_nolock(struct ipc_namespace *ns, int msqid,
954 msginfo.msgmnb = ns->msg_ctlmnb;
955 msginfo.msgssz = MSGSSZ;
956 msginfo.msgseg = MSGSEG;
957 - down_read(&msg_ids(ns).rw_mutex);
958 + down_read(&msg_ids(ns).rwsem);
959 if (cmd == MSG_INFO) {
960 msginfo.msgpool = msg_ids(ns).in_use;
961 msginfo.msgmap = atomic_read(&ns->msg_hdrs);
962 @@ -512,7 +511,7 @@ static int msgctl_nolock(struct ipc_namespace *ns, int msqid,
963 msginfo.msgtql = MSGTQL;
964 }
965 max_id = ipc_get_maxid(&msg_ids(ns));
966 - up_read(&msg_ids(ns).rw_mutex);
967 + up_read(&msg_ids(ns).rwsem);
968 if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
969 return -EFAULT;
970 return (max_id < 0) ? 0 : max_id;
971 diff --git a/ipc/namespace.c b/ipc/namespace.c
972 index 7ee61bf..aba9a58 100644
973 --- a/ipc/namespace.c
974 +++ b/ipc/namespace.c
975 @@ -81,7 +81,7 @@ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
976 int next_id;
977 int total, in_use;
978
979 - down_write(&ids->rw_mutex);
980 + down_write(&ids->rwsem);
981
982 in_use = ids->in_use;
983
984 @@ -89,11 +89,12 @@ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
985 perm = idr_find(&ids->ipcs_idr, next_id);
986 if (perm == NULL)
987 continue;
988 - ipc_lock_by_ptr(perm);
989 + rcu_read_lock();
990 + ipc_lock_object(perm);
991 free(ns, perm);
992 total++;
993 }
994 - up_write(&ids->rw_mutex);
995 + up_write(&ids->rwsem);
996 }
997
998 static void free_ipc_ns(struct ipc_namespace *ns)
999 diff --git a/ipc/sem.c b/ipc/sem.c
1000 index 87614511..8e2bf30 100644
1001 --- a/ipc/sem.c
1002 +++ b/ipc/sem.c
1003 @@ -248,12 +248,20 @@ static void merge_queues(struct sem_array *sma)
1004 * Caller must own sem_perm.lock.
1005 * New simple ops cannot start, because simple ops first check
1006 * that sem_perm.lock is free.
1007 + * that a) sem_perm.lock is free and b) complex_count is 0.
1008 */
1009 static void sem_wait_array(struct sem_array *sma)
1010 {
1011 int i;
1012 struct sem *sem;
1013
1014 + if (sma->complex_count) {
1015 + /* The thread that increased sma->complex_count waited on
1016 + * all sem->lock locks. Thus we don't need to wait again.
1017 + */
1018 + return;
1019 + }
1020 +
1021 for (i = 0; i < sma->sem_nsems; i++) {
1022 sem = sma->sem_base + i;
1023 spin_unlock_wait(&sem->lock);
1024 @@ -365,7 +373,7 @@ static inline void sem_unlock(struct sem_array *sma, int locknum)
1025 }
1026
1027 /*
1028 - * sem_lock_(check_) routines are called in the paths where the rw_mutex
1029 + * sem_lock_(check_) routines are called in the paths where the rwsem
1030 * is not held.
1031 *
1032 * The caller holds the RCU read lock.
1033 @@ -464,7 +472,7 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
1034 * @ns: namespace
1035 * @params: ptr to the structure that contains key, semflg and nsems
1036 *
1037 - * Called with sem_ids.rw_mutex held (as a writer)
1038 + * Called with sem_ids.rwsem held (as a writer)
1039 */
1040
1041 static int newary(struct ipc_namespace *ns, struct ipc_params *params)
1042 @@ -529,7 +537,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
1043
1044
1045 /*
1046 - * Called with sem_ids.rw_mutex and ipcp locked.
1047 + * Called with sem_ids.rwsem and ipcp locked.
1048 */
1049 static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
1050 {
1051 @@ -540,7 +548,7 @@ static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
1052 }
1053
1054 /*
1055 - * Called with sem_ids.rw_mutex and ipcp locked.
1056 + * Called with sem_ids.rwsem and ipcp locked.
1057 */
1058 static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
1059 struct ipc_params *params)
1060 @@ -910,6 +918,24 @@ again:
1061 }
1062
1063 /**
1064 + * set_semotime(sma, sops) - set sem_otime
1065 + * @sma: semaphore array
1066 + * @sops: operations that modified the array, may be NULL
1067 + *
1068 + * sem_otime is replicated to avoid cache line trashing.
1069 + * This function sets one instance to the current time.
1070 + */
1071 +static void set_semotime(struct sem_array *sma, struct sembuf *sops)
1072 +{
1073 + if (sops == NULL) {
1074 + sma->sem_base[0].sem_otime = get_seconds();
1075 + } else {
1076 + sma->sem_base[sops[0].sem_num].sem_otime =
1077 + get_seconds();
1078 + }
1079 +}
1080 +
1081 +/**
1082 * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
1083 * @sma: semaphore array
1084 * @sops: operations that were performed
1085 @@ -959,17 +985,10 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
1086 }
1087 }
1088 }
1089 - if (otime) {
1090 - if (sops == NULL) {
1091 - sma->sem_base[0].sem_otime = get_seconds();
1092 - } else {
1093 - sma->sem_base[sops[0].sem_num].sem_otime =
1094 - get_seconds();
1095 - }
1096 - }
1097 + if (otime)
1098 + set_semotime(sma, sops);
1099 }
1100
1101 -
1102 /* The following counts are associated to each semaphore:
1103 * semncnt number of tasks waiting on semval being nonzero
1104 * semzcnt number of tasks waiting on semval being zero
1105 @@ -1031,8 +1050,8 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum)
1106 return semzcnt;
1107 }
1108
1109 -/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
1110 - * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex
1111 +/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
1112 + * as a writer and the spinlock for this semaphore set hold. sem_ids.rwsem
1113 * remains locked on exit.
1114 */
1115 static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1116 @@ -1152,7 +1171,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
1117 seminfo.semmnu = SEMMNU;
1118 seminfo.semmap = SEMMAP;
1119 seminfo.semume = SEMUME;
1120 - down_read(&sem_ids(ns).rw_mutex);
1121 + down_read(&sem_ids(ns).rwsem);
1122 if (cmd == SEM_INFO) {
1123 seminfo.semusz = sem_ids(ns).in_use;
1124 seminfo.semaem = ns->used_sems;
1125 @@ -1161,7 +1180,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
1126 seminfo.semaem = SEMAEM;
1127 }
1128 max_id = ipc_get_maxid(&sem_ids(ns));
1129 - up_read(&sem_ids(ns).rw_mutex);
1130 + up_read(&sem_ids(ns).rwsem);
1131 if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
1132 return -EFAULT;
1133 return (max_id < 0) ? 0: max_id;
1134 @@ -1467,9 +1486,9 @@ copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1135 }
1136
1137 /*
1138 - * This function handles some semctl commands which require the rw_mutex
1139 + * This function handles some semctl commands which require the rwsem
1140 * to be held in write mode.
1141 - * NOTE: no locks must be held, the rw_mutex is taken inside this function.
1142 + * NOTE: no locks must be held, the rwsem is taken inside this function.
1143 */
1144 static int semctl_down(struct ipc_namespace *ns, int semid,
1145 int cmd, int version, void __user *p)
1146 @@ -1484,7 +1503,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
1147 return -EFAULT;
1148 }
1149
1150 - down_write(&sem_ids(ns).rw_mutex);
1151 + down_write(&sem_ids(ns).rwsem);
1152 rcu_read_lock();
1153
1154 ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
1155 @@ -1523,7 +1542,7 @@ out_unlock0:
1156 out_unlock1:
1157 rcu_read_unlock();
1158 out_up:
1159 - up_write(&sem_ids(ns).rw_mutex);
1160 + up_write(&sem_ids(ns).rwsem);
1161 return err;
1162 }
1163
1164 @@ -1831,12 +1850,17 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1165
1166 error = perform_atomic_semop(sma, sops, nsops, un,
1167 task_tgid_vnr(current));
1168 - if (error <= 0) {
1169 - if (alter && error == 0)
1170 + if (error == 0) {
1171 + /* If the operation was successful, then do
1172 + * the required updates.
1173 + */
1174 + if (alter)
1175 do_smart_update(sma, sops, nsops, 1, &tasks);
1176 -
1177 - goto out_unlock_free;
1178 + else
1179 + set_semotime(sma, sops);
1180 }
1181 + if (error <= 0)
1182 + goto out_unlock_free;
1183
1184 /* We need to sleep on this operation, so we put the current
1185 * task into the pending queue and go to sleep.
1186 @@ -2095,6 +2119,14 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
1187 struct sem_array *sma = it;
1188 time_t sem_otime;
1189
1190 + /*
1191 + * The proc interface isn't aware of sem_lock(), it calls
1192 + * ipc_lock_object() directly (in sysvipc_find_ipc).
1193 + * In order to stay compatible with sem_lock(), we must wait until
1194 + * all simple semop() calls have left their critical regions.
1195 + */
1196 + sem_wait_array(sma);
1197 +
1198 sem_otime = get_semotime(sma);
1199
1200 return seq_printf(s,
1201 diff --git a/ipc/shm.c b/ipc/shm.c
1202 index 2d6833d..d697396 100644
1203 --- a/ipc/shm.c
1204 +++ b/ipc/shm.c
1205 @@ -19,6 +19,9 @@
1206 * namespaces support
1207 * OpenVZ, SWsoft Inc.
1208 * Pavel Emelianov <xemul@openvz.org>
1209 + *
1210 + * Better ipc lock (kern_ipc_perm.lock) handling
1211 + * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
1212 */
1213
1214 #include <linux/slab.h>
1215 @@ -80,8 +83,8 @@ void shm_init_ns(struct ipc_namespace *ns)
1216 }
1217
1218 /*
1219 - * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
1220 - * Only shm_ids.rw_mutex remains locked on exit.
1221 + * Called with shm_ids.rwsem (writer) and the shp structure locked.
1222 + * Only shm_ids.rwsem remains locked on exit.
1223 */
1224 static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1225 {
1226 @@ -124,8 +127,28 @@ void __init shm_init (void)
1227 IPC_SHM_IDS, sysvipc_shm_proc_show);
1228 }
1229
1230 +static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
1231 +{
1232 + struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);
1233 +
1234 + if (IS_ERR(ipcp))
1235 + return ERR_CAST(ipcp);
1236 +
1237 + return container_of(ipcp, struct shmid_kernel, shm_perm);
1238 +}
1239 +
1240 +static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
1241 +{
1242 + struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
1243 +
1244 + if (IS_ERR(ipcp))
1245 + return ERR_CAST(ipcp);
1246 +
1247 + return container_of(ipcp, struct shmid_kernel, shm_perm);
1248 +}
1249 +
1250 /*
1251 - * shm_lock_(check_) routines are called in the paths where the rw_mutex
1252 + * shm_lock_(check_) routines are called in the paths where the rwsem
1253 * is not necessarily held.
1254 */
1255 static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
1256 @@ -144,17 +167,6 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
1257 ipc_lock_object(&ipcp->shm_perm);
1258 }
1259
1260 -static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
1261 - int id)
1262 -{
1263 - struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);
1264 -
1265 - if (IS_ERR(ipcp))
1266 - return (struct shmid_kernel *)ipcp;
1267 -
1268 - return container_of(ipcp, struct shmid_kernel, shm_perm);
1269 -}
1270 -
1271 static void shm_rcu_free(struct rcu_head *head)
1272 {
1273 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
1274 @@ -191,7 +203,7 @@ static void shm_open(struct vm_area_struct *vma)
1275 * @ns: namespace
1276 * @shp: struct to free
1277 *
1278 - * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
1279 + * It has to be called with shp and shm_ids.rwsem (writer) locked,
1280 * but returns with shp unlocked and freed.
1281 */
1282 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
1283 @@ -238,7 +250,7 @@ static void shm_close(struct vm_area_struct *vma)
1284 struct shmid_kernel *shp;
1285 struct ipc_namespace *ns = sfd->ns;
1286
1287 - down_write(&shm_ids(ns).rw_mutex);
1288 + down_write(&shm_ids(ns).rwsem);
1289 /* remove from the list of attaches of the shm segment */
1290 shp = shm_lock(ns, sfd->id);
1291 BUG_ON(IS_ERR(shp));
1292 @@ -249,10 +261,10 @@ static void shm_close(struct vm_area_struct *vma)
1293 shm_destroy(ns, shp);
1294 else
1295 shm_unlock(shp);
1296 - up_write(&shm_ids(ns).rw_mutex);
1297 + up_write(&shm_ids(ns).rwsem);
1298 }
1299
1300 -/* Called with ns->shm_ids(ns).rw_mutex locked */
1301 +/* Called with ns->shm_ids(ns).rwsem locked */
1302 static int shm_try_destroy_current(int id, void *p, void *data)
1303 {
1304 struct ipc_namespace *ns = data;
1305 @@ -283,7 +295,7 @@ static int shm_try_destroy_current(int id, void *p, void *data)
1306 return 0;
1307 }
1308
1309 -/* Called with ns->shm_ids(ns).rw_mutex locked */
1310 +/* Called with ns->shm_ids(ns).rwsem locked */
1311 static int shm_try_destroy_orphaned(int id, void *p, void *data)
1312 {
1313 struct ipc_namespace *ns = data;
1314 @@ -294,7 +306,7 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
1315 * We want to destroy segments without users and with already
1316 * exit'ed originating process.
1317 *
1318 - * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
1319 + * As shp->* are changed under rwsem, it's safe to skip shp locking.
1320 */
1321 if (shp->shm_creator != NULL)
1322 return 0;
1323 @@ -308,10 +320,10 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
1324
1325 void shm_destroy_orphaned(struct ipc_namespace *ns)
1326 {
1327 - down_write(&shm_ids(ns).rw_mutex);
1328 + down_write(&shm_ids(ns).rwsem);
1329 if (shm_ids(ns).in_use)
1330 idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
1331 - up_write(&shm_ids(ns).rw_mutex);
1332 + up_write(&shm_ids(ns).rwsem);
1333 }
1334
1335
1336 @@ -323,10 +335,10 @@ void exit_shm(struct task_struct *task)
1337 return;
1338
1339 /* Destroy all already created segments, but not mapped yet */
1340 - down_write(&shm_ids(ns).rw_mutex);
1341 + down_write(&shm_ids(ns).rwsem);
1342 if (shm_ids(ns).in_use)
1343 idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
1344 - up_write(&shm_ids(ns).rw_mutex);
1345 + up_write(&shm_ids(ns).rwsem);
1346 }
1347
1348 static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1349 @@ -460,7 +472,7 @@ static const struct vm_operations_struct shm_vm_ops = {
1350 * @ns: namespace
1351 * @params: ptr to the structure that contains key, size and shmflg
1352 *
1353 - * Called with shm_ids.rw_mutex held as a writer.
1354 + * Called with shm_ids.rwsem held as a writer.
1355 */
1356
1357 static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
1358 @@ -567,7 +579,7 @@ no_file:
1359 }
1360
1361 /*
1362 - * Called with shm_ids.rw_mutex and ipcp locked.
1363 + * Called with shm_ids.rwsem and ipcp locked.
1364 */
1365 static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
1366 {
1367 @@ -578,7 +590,7 @@ static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
1368 }
1369
1370 /*
1371 - * Called with shm_ids.rw_mutex and ipcp locked.
1372 + * Called with shm_ids.rwsem and ipcp locked.
1373 */
1374 static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
1375 struct ipc_params *params)
1376 @@ -691,7 +703,7 @@ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminf
1377
1378 /*
1379 * Calculate and add used RSS and swap pages of a shm.
1380 - * Called with shm_ids.rw_mutex held as a reader
1381 + * Called with shm_ids.rwsem held as a reader
1382 */
1383 static void shm_add_rss_swap(struct shmid_kernel *shp,
1384 unsigned long *rss_add, unsigned long *swp_add)
1385 @@ -718,7 +730,7 @@ static void shm_add_rss_swap(struct shmid_kernel *shp,
1386 }
1387
1388 /*
1389 - * Called with shm_ids.rw_mutex held as a reader
1390 + * Called with shm_ids.rwsem held as a reader
1391 */
1392 static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
1393 unsigned long *swp)
1394 @@ -747,9 +759,9 @@ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
1395 }
1396
1397 /*
1398 - * This function handles some shmctl commands which require the rw_mutex
1399 + * This function handles some shmctl commands which require the rwsem
1400 * to be held in write mode.
1401 - * NOTE: no locks must be held, the rw_mutex is taken inside this function.
1402 + * NOTE: no locks must be held, the rwsem is taken inside this function.
1403 */
1404 static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
1405 struct shmid_ds __user *buf, int version)
1406 @@ -764,14 +776,13 @@ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
1407 return -EFAULT;
1408 }
1409
1410 - down_write(&shm_ids(ns).rw_mutex);
1411 + down_write(&shm_ids(ns).rwsem);
1412 rcu_read_lock();
1413
1414 - ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd,
1415 - &shmid64.shm_perm, 0);
1416 + ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
1417 + &shmid64.shm_perm, 0);
1418 if (IS_ERR(ipcp)) {
1419 err = PTR_ERR(ipcp);
1420 - /* the ipc lock is not held upon failure */
1421 goto out_unlock1;
1422 }
1423
1424 @@ -779,14 +790,16 @@ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
1425
1426 err = security_shm_shmctl(shp, cmd);
1427 if (err)
1428 - goto out_unlock0;
1429 + goto out_unlock1;
1430
1431 switch (cmd) {
1432 case IPC_RMID:
1433 + ipc_lock_object(&shp->shm_perm);
1434 /* do_shm_rmid unlocks the ipc object and rcu */
1435 do_shm_rmid(ns, ipcp);
1436 goto out_up;
1437 case IPC_SET:
1438 + ipc_lock_object(&shp->shm_perm);
1439 err = ipc_update_perm(&shmid64.shm_perm, ipcp);
1440 if (err)
1441 goto out_unlock0;
1442 @@ -794,6 +807,7 @@ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
1443 break;
1444 default:
1445 err = -EINVAL;
1446 + goto out_unlock1;
1447 }
1448
1449 out_unlock0:
1450 @@ -801,33 +815,28 @@ out_unlock0:
1451 out_unlock1:
1452 rcu_read_unlock();
1453 out_up:
1454 - up_write(&shm_ids(ns).rw_mutex);
1455 + up_write(&shm_ids(ns).rwsem);
1456 return err;
1457 }
1458
1459 -SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1460 +static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
1461 + int cmd, int version, void __user *buf)
1462 {
1463 + int err;
1464 struct shmid_kernel *shp;
1465 - int err, version;
1466 - struct ipc_namespace *ns;
1467
1468 - if (cmd < 0 || shmid < 0) {
1469 - err = -EINVAL;
1470 - goto out;
1471 + /* preliminary security checks for *_INFO */
1472 + if (cmd == IPC_INFO || cmd == SHM_INFO) {
1473 + err = security_shm_shmctl(NULL, cmd);
1474 + if (err)
1475 + return err;
1476 }
1477
1478 - version = ipc_parse_version(&cmd);
1479 - ns = current->nsproxy->ipc_ns;
1480 -
1481 - switch (cmd) { /* replace with proc interface ? */
1482 + switch (cmd) {
1483 case IPC_INFO:
1484 {
1485 struct shminfo64 shminfo;
1486
1487 - err = security_shm_shmctl(NULL, cmd);
1488 - if (err)
1489 - return err;
1490 -
1491 memset(&shminfo, 0, sizeof(shminfo));
1492 shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
1493 shminfo.shmmax = ns->shm_ctlmax;
1494 @@ -837,9 +846,9 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1495 if(copy_shminfo_to_user (buf, &shminfo, version))
1496 return -EFAULT;
1497
1498 - down_read(&shm_ids(ns).rw_mutex);
1499 + down_read(&shm_ids(ns).rwsem);
1500 err = ipc_get_maxid(&shm_ids(ns));
1501 - up_read(&shm_ids(ns).rw_mutex);
1502 + up_read(&shm_ids(ns).rwsem);
1503
1504 if(err<0)
1505 err = 0;
1506 @@ -849,19 +858,15 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1507 {
1508 struct shm_info shm_info;
1509
1510 - err = security_shm_shmctl(NULL, cmd);
1511 - if (err)
1512 - return err;
1513 -
1514 memset(&shm_info, 0, sizeof(shm_info));
1515 - down_read(&shm_ids(ns).rw_mutex);
1516 + down_read(&shm_ids(ns).rwsem);
1517 shm_info.used_ids = shm_ids(ns).in_use;
1518 shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
1519 shm_info.shm_tot = ns->shm_tot;
1520 shm_info.swap_attempts = 0;
1521 shm_info.swap_successes = 0;
1522 err = ipc_get_maxid(&shm_ids(ns));
1523 - up_read(&shm_ids(ns).rw_mutex);
1524 + up_read(&shm_ids(ns).rwsem);
1525 if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
1526 err = -EFAULT;
1527 goto out;
1528 @@ -876,27 +881,31 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1529 struct shmid64_ds tbuf;
1530 int result;
1531
1532 + rcu_read_lock();
1533 if (cmd == SHM_STAT) {
1534 - shp = shm_lock(ns, shmid);
1535 + shp = shm_obtain_object(ns, shmid);
1536 if (IS_ERR(shp)) {
1537 err = PTR_ERR(shp);
1538 - goto out;
1539 + goto out_unlock;
1540 }
1541 result = shp->shm_perm.id;
1542 } else {
1543 - shp = shm_lock_check(ns, shmid);
1544 + shp = shm_obtain_object_check(ns, shmid);
1545 if (IS_ERR(shp)) {
1546 err = PTR_ERR(shp);
1547 - goto out;
1548 + goto out_unlock;
1549 }
1550 result = 0;
1551 }
1552 +
1553 err = -EACCES;
1554 if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
1555 goto out_unlock;
1556 +
1557 err = security_shm_shmctl(shp, cmd);
1558 if (err)
1559 goto out_unlock;
1560 +
1561 memset(&tbuf, 0, sizeof(tbuf));
1562 kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
1563 tbuf.shm_segsz = shp->shm_segsz;
1564 @@ -906,43 +915,76 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1565 tbuf.shm_cpid = shp->shm_cprid;
1566 tbuf.shm_lpid = shp->shm_lprid;
1567 tbuf.shm_nattch = shp->shm_nattch;
1568 - shm_unlock(shp);
1569 - if(copy_shmid_to_user (buf, &tbuf, version))
1570 + rcu_read_unlock();
1571 +
1572 + if (copy_shmid_to_user(buf, &tbuf, version))
1573 err = -EFAULT;
1574 else
1575 err = result;
1576 goto out;
1577 }
1578 + default:
1579 + return -EINVAL;
1580 + }
1581 +
1582 +out_unlock:
1583 + rcu_read_unlock();
1584 +out:
1585 + return err;
1586 +}
1587 +
1588 +SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1589 +{
1590 + struct shmid_kernel *shp;
1591 + int err, version;
1592 + struct ipc_namespace *ns;
1593 +
1594 + if (cmd < 0 || shmid < 0)
1595 + return -EINVAL;
1596 +
1597 + version = ipc_parse_version(&cmd);
1598 + ns = current->nsproxy->ipc_ns;
1599 +
1600 + switch (cmd) {
1601 + case IPC_INFO:
1602 + case SHM_INFO:
1603 + case SHM_STAT:
1604 + case IPC_STAT:
1605 + return shmctl_nolock(ns, shmid, cmd, version, buf);
1606 + case IPC_RMID:
1607 + case IPC_SET:
1608 + return shmctl_down(ns, shmid, cmd, buf, version);
1609 case SHM_LOCK:
1610 case SHM_UNLOCK:
1611 {
1612 struct file *shm_file;
1613
1614 - shp = shm_lock_check(ns, shmid);
1615 + rcu_read_lock();
1616 + shp = shm_obtain_object_check(ns, shmid);
1617 if (IS_ERR(shp)) {
1618 err = PTR_ERR(shp);
1619 - goto out;
1620 + goto out_unlock1;
1621 }
1622
1623 audit_ipc_obj(&(shp->shm_perm));
1624 + err = security_shm_shmctl(shp, cmd);
1625 + if (err)
1626 + goto out_unlock1;
1627
1628 + ipc_lock_object(&shp->shm_perm);
1629 if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
1630 kuid_t euid = current_euid();
1631 err = -EPERM;
1632 if (!uid_eq(euid, shp->shm_perm.uid) &&
1633 !uid_eq(euid, shp->shm_perm.cuid))
1634 - goto out_unlock;
1635 + goto out_unlock0;
1636 if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
1637 - goto out_unlock;
1638 + goto out_unlock0;
1639 }
1640
1641 - err = security_shm_shmctl(shp, cmd);
1642 - if (err)
1643 - goto out_unlock;
1644 -
1645 shm_file = shp->shm_file;
1646 if (is_file_hugepages(shm_file))
1647 - goto out_unlock;
1648 + goto out_unlock0;
1649
1650 if (cmd == SHM_LOCK) {
1651 struct user_struct *user = current_user();
1652 @@ -951,32 +993,31 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1653 shp->shm_perm.mode |= SHM_LOCKED;
1654 shp->mlock_user = user;
1655 }
1656 - goto out_unlock;
1657 + goto out_unlock0;
1658 }
1659
1660 /* SHM_UNLOCK */
1661 if (!(shp->shm_perm.mode & SHM_LOCKED))
1662 - goto out_unlock;
1663 + goto out_unlock0;
1664 shmem_lock(shm_file, 0, shp->mlock_user);
1665 shp->shm_perm.mode &= ~SHM_LOCKED;
1666 shp->mlock_user = NULL;
1667 get_file(shm_file);
1668 - shm_unlock(shp);
1669 + ipc_unlock_object(&shp->shm_perm);
1670 + rcu_read_unlock();
1671 shmem_unlock_mapping(shm_file->f_mapping);
1672 +
1673 fput(shm_file);
1674 - goto out;
1675 - }
1676 - case IPC_RMID:
1677 - case IPC_SET:
1678 - err = shmctl_down(ns, shmid, cmd, buf, version);
1679 return err;
1680 + }
1681 default:
1682 return -EINVAL;
1683 }
1684
1685 -out_unlock:
1686 - shm_unlock(shp);
1687 -out:
1688 +out_unlock0:
1689 + ipc_unlock_object(&shp->shm_perm);
1690 +out_unlock1:
1691 + rcu_read_unlock();
1692 return err;
1693 }
1694
1695 @@ -1044,10 +1085,11 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
1696 * additional creator id...
1697 */
1698 ns = current->nsproxy->ipc_ns;
1699 - shp = shm_lock_check(ns, shmid);
1700 + rcu_read_lock();
1701 + shp = shm_obtain_object_check(ns, shmid);
1702 if (IS_ERR(shp)) {
1703 err = PTR_ERR(shp);
1704 - goto out;
1705 + goto out_unlock;
1706 }
1707
1708 err = -EACCES;
1709 @@ -1058,24 +1100,31 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
1710 if (err)
1711 goto out_unlock;
1712
1713 + ipc_lock_object(&shp->shm_perm);
1714 path = shp->shm_file->f_path;
1715 path_get(&path);
1716 shp->shm_nattch++;
1717 size = i_size_read(path.dentry->d_inode);
1718 - shm_unlock(shp);
1719 + ipc_unlock_object(&shp->shm_perm);
1720 + rcu_read_unlock();
1721
1722 err = -ENOMEM;
1723 sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1724 - if (!sfd)
1725 - goto out_put_dentry;
1726 + if (!sfd) {
1727 + path_put(&path);
1728 + goto out_nattch;
1729 + }
1730
1731 file = alloc_file(&path, f_mode,
1732 is_file_hugepages(shp->shm_file) ?
1733 &shm_file_operations_huge :
1734 &shm_file_operations);
1735 err = PTR_ERR(file);
1736 - if (IS_ERR(file))
1737 - goto out_free;
1738 + if (IS_ERR(file)) {
1739 + kfree(sfd);
1740 + path_put(&path);
1741 + goto out_nattch;
1742 + }
1743
1744 file->private_data = sfd;
1745 file->f_mapping = shp->shm_file->f_mapping;
1746 @@ -1101,7 +1150,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
1747 addr > current->mm->start_stack - size - PAGE_SIZE * 5)
1748 goto invalid;
1749 }
1750 -
1751 +
1752 addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
1753 *raddr = addr;
1754 err = 0;
1755 @@ -1116,7 +1165,7 @@ out_fput:
1756 fput(file);
1757
1758 out_nattch:
1759 - down_write(&shm_ids(ns).rw_mutex);
1760 + down_write(&shm_ids(ns).rwsem);
1761 shp = shm_lock(ns, shmid);
1762 BUG_ON(IS_ERR(shp));
1763 shp->shm_nattch--;
1764 @@ -1124,20 +1173,13 @@ out_nattch:
1765 shm_destroy(ns, shp);
1766 else
1767 shm_unlock(shp);
1768 - up_write(&shm_ids(ns).rw_mutex);
1769 -
1770 -out:
1771 + up_write(&shm_ids(ns).rwsem);
1772 return err;
1773
1774 out_unlock:
1775 - shm_unlock(shp);
1776 - goto out;
1777 -
1778 -out_free:
1779 - kfree(sfd);
1780 -out_put_dentry:
1781 - path_put(&path);
1782 - goto out_nattch;
1783 + rcu_read_unlock();
1784 +out:
1785 + return err;
1786 }
1787
1788 SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1789 @@ -1242,8 +1284,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1790 #else /* CONFIG_MMU */
1791 /* under NOMMU conditions, the exact address to be destroyed must be
1792 * given */
1793 - retval = -EINVAL;
1794 - if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1795 + if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1796 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1797 retval = 0;
1798 }
1799 diff --git a/ipc/util.c b/ipc/util.c
1800 index 0c6566b..fdb8ae7 100644
1801 --- a/ipc/util.c
1802 +++ b/ipc/util.c
1803 @@ -15,6 +15,14 @@
1804 * Jun 2006 - namespaces ssupport
1805 * OpenVZ, SWsoft Inc.
1806 * Pavel Emelianov <xemul@openvz.org>
1807 + *
1808 + * General sysv ipc locking scheme:
1809 + * when doing ipc id lookups, take the ids->rwsem
1810 + * rcu_read_lock()
1811 + * obtain the ipc object (kern_ipc_perm)
1812 + * perform security, capabilities, auditing and permission checks, etc.
1813 + * acquire the ipc lock (kern_ipc_perm.lock) throught ipc_lock_object()
1814 + * perform data updates (ie: SET, RMID, LOCK/UNLOCK commands)
1815 */
1816
1817 #include <linux/mm.h>
1818 @@ -119,7 +127,7 @@ __initcall(ipc_init);
1819
1820 void ipc_init_ids(struct ipc_ids *ids)
1821 {
1822 - init_rwsem(&ids->rw_mutex);
1823 + init_rwsem(&ids->rwsem);
1824
1825 ids->in_use = 0;
1826 ids->seq = 0;
1827 @@ -174,7 +182,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
1828 * @ids: Identifier set
1829 * @key: The key to find
1830 *
1831 - * Requires ipc_ids.rw_mutex locked.
1832 + * Requires ipc_ids.rwsem locked.
1833 * Returns the LOCKED pointer to the ipc structure if found or NULL
1834 * if not.
1835 * If key is found ipc points to the owning ipc structure
1836 @@ -197,7 +205,8 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
1837 continue;
1838 }
1839
1840 - ipc_lock_by_ptr(ipc);
1841 + rcu_read_lock();
1842 + ipc_lock_object(ipc);
1843 return ipc;
1844 }
1845
1846 @@ -208,7 +217,7 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
1847 * ipc_get_maxid - get the last assigned id
1848 * @ids: IPC identifier set
1849 *
1850 - * Called with ipc_ids.rw_mutex held.
1851 + * Called with ipc_ids.rwsem held.
1852 */
1853
1854 int ipc_get_maxid(struct ipc_ids *ids)
1855 @@ -246,7 +255,7 @@ int ipc_get_maxid(struct ipc_ids *ids)
1856 * is returned. The 'new' entry is returned in a locked state on success.
1857 * On failure the entry is not locked and a negative err-code is returned.
1858 *
1859 - * Called with writer ipc_ids.rw_mutex held.
1860 + * Called with writer ipc_ids.rwsem held.
1861 */
1862 int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
1863 {
1864 @@ -312,9 +321,9 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
1865 {
1866 int err;
1867
1868 - down_write(&ids->rw_mutex);
1869 + down_write(&ids->rwsem);
1870 err = ops->getnew(ns, params);
1871 - up_write(&ids->rw_mutex);
1872 + up_write(&ids->rwsem);
1873 return err;
1874 }
1875
1876 @@ -331,7 +340,7 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
1877 *
1878 * On success, the IPC id is returned.
1879 *
1880 - * It is called with ipc_ids.rw_mutex and ipcp->lock held.
1881 + * It is called with ipc_ids.rwsem and ipcp->lock held.
1882 */
1883 static int ipc_check_perms(struct ipc_namespace *ns,
1884 struct kern_ipc_perm *ipcp,
1885 @@ -376,7 +385,7 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
1886 * Take the lock as a writer since we are potentially going to add
1887 * a new entry + read locks are not "upgradable"
1888 */
1889 - down_write(&ids->rw_mutex);
1890 + down_write(&ids->rwsem);
1891 ipcp = ipc_findkey(ids, params->key);
1892 if (ipcp == NULL) {
1893 /* key not used */
1894 @@ -402,7 +411,7 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
1895 }
1896 ipc_unlock(ipcp);
1897 }
1898 - up_write(&ids->rw_mutex);
1899 + up_write(&ids->rwsem);
1900
1901 return err;
1902 }
1903 @@ -413,7 +422,7 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
1904 * @ids: IPC identifier set
1905 * @ipcp: ipc perm structure containing the identifier to remove
1906 *
1907 - * ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held
1908 + * ipc_ids.rwsem (as a writer) and the spinlock for this ID are held
1909 * before this function is called, and remain locked on the exit.
1910 */
1911
1912 @@ -613,7 +622,7 @@ struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id)
1913 }
1914
1915 /**
1916 - * ipc_lock - Lock an ipc structure without rw_mutex held
1917 + * ipc_lock - Lock an ipc structure without rwsem held
1918 * @ids: IPC identifier set
1919 * @id: ipc id to look for
1920 *
1921 @@ -669,22 +678,6 @@ out:
1922 return out;
1923 }
1924
1925 -struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
1926 -{
1927 - struct kern_ipc_perm *out;
1928 -
1929 - out = ipc_lock(ids, id);
1930 - if (IS_ERR(out))
1931 - return out;
1932 -
1933 - if (ipc_checkid(out, id)) {
1934 - ipc_unlock(out);
1935 - return ERR_PTR(-EIDRM);
1936 - }
1937 -
1938 - return out;
1939 -}
1940 -
1941 /**
1942 * ipcget - Common sys_*get() code
1943 * @ns : namsepace
1944 @@ -725,7 +718,7 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
1945 }
1946
1947 /**
1948 - * ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd
1949 + * ipcctl_pre_down_nolock - retrieve an ipc and check permissions for some IPC_XXX cmd
1950 * @ns: the ipc namespace
1951 * @ids: the table of ids where to look for the ipc
1952 * @id: the id of the ipc to retrieve
1953 @@ -738,29 +731,13 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
1954 * It must be called without any lock held and
1955 * - retrieves the ipc with the given id in the given table.
1956 * - performs some audit and permission check, depending on the given cmd
1957 - * - returns the ipc with the ipc lock held in case of success
1958 - * or an err-code without any lock held otherwise.
1959 + * - returns a pointer to the ipc object or otherwise, the corresponding error.
1960 *
1961 - * Call holding the both the rw_mutex and the rcu read lock.
1962 + * Call holding both the rwsem and the rcu read lock.
1963 */
1964 -struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
1965 - struct ipc_ids *ids, int id, int cmd,
1966 - struct ipc64_perm *perm, int extra_perm)
1967 -{
1968 - struct kern_ipc_perm *ipcp;
1969 -
1970 - ipcp = ipcctl_pre_down_nolock(ns, ids, id, cmd, perm, extra_perm);
1971 - if (IS_ERR(ipcp))
1972 - goto out;
1973 -
1974 - spin_lock(&ipcp->lock);
1975 -out:
1976 - return ipcp;
1977 -}
1978 -
1979 struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
1980 - struct ipc_ids *ids, int id, int cmd,
1981 - struct ipc64_perm *perm, int extra_perm)
1982 + struct ipc_ids *ids, int id, int cmd,
1983 + struct ipc64_perm *perm, int extra_perm)
1984 {
1985 kuid_t euid;
1986 int err = -EPERM;
1987 @@ -838,7 +815,8 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
1988 ipc = idr_find(&ids->ipcs_idr, pos);
1989 if (ipc != NULL) {
1990 *new_pos = pos + 1;
1991 - ipc_lock_by_ptr(ipc);
1992 + rcu_read_lock();
1993 + ipc_lock_object(ipc);
1994 return ipc;
1995 }
1996 }
1997 @@ -876,7 +854,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
1998 * Take the lock - this will be released by the corresponding
1999 * call to stop().
2000 */
2001 - down_read(&ids->rw_mutex);
2002 + down_read(&ids->rwsem);
2003
2004 /* pos < 0 is invalid */
2005 if (*pos < 0)
2006 @@ -903,7 +881,7 @@ static void sysvipc_proc_stop(struct seq_file *s, void *it)
2007
2008 ids = &iter->ns->ids[iface->ids];
2009 /* Release the lock we took in start() */
2010 - up_read(&ids->rw_mutex);
2011 + up_read(&ids->rwsem);
2012 }
2013
2014 static int sysvipc_proc_show(struct seq_file *s, void *it)
2015 diff --git a/ipc/util.h b/ipc/util.h
2016 index 25299e7..f2f5036 100644
2017 --- a/ipc/util.h
2018 +++ b/ipc/util.h
2019 @@ -101,10 +101,10 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
2020 #define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER)
2021 #define ipcid_to_seqx(id) ((id) / SEQ_MULTIPLIER)
2022
2023 -/* must be called with ids->rw_mutex acquired for writing */
2024 +/* must be called with ids->rwsem acquired for writing */
2025 int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int);
2026
2027 -/* must be called with ids->rw_mutex acquired for reading */
2028 +/* must be called with ids->rwsem acquired for reading */
2029 int ipc_get_maxid(struct ipc_ids *);
2030
2031 /* must be called with both locks acquired. */
2032 @@ -139,9 +139,6 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out);
2033 struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
2034 struct ipc_ids *ids, int id, int cmd,
2035 struct ipc64_perm *perm, int extra_perm);
2036 -struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
2037 - struct ipc_ids *ids, int id, int cmd,
2038 - struct ipc64_perm *perm, int extra_perm);
2039
2040 #ifndef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
2041 /* On IA-64, we always use the "64-bit version" of the IPC structures. */
2042 @@ -182,19 +179,12 @@ static inline void ipc_assert_locked_object(struct kern_ipc_perm *perm)
2043 assert_spin_locked(&perm->lock);
2044 }
2045
2046 -static inline void ipc_lock_by_ptr(struct kern_ipc_perm *perm)
2047 -{
2048 - rcu_read_lock();
2049 - ipc_lock_object(perm);
2050 -}
2051 -
2052 static inline void ipc_unlock(struct kern_ipc_perm *perm)
2053 {
2054 ipc_unlock_object(perm);
2055 rcu_read_unlock();
2056 }
2057
2058 -struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id);
2059 struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id);
2060 int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
2061 struct ipc_ops *ops, struct ipc_params *params);
2062 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
2063 index 45850f6..4865756 100644
2064 --- a/sound/pci/hda/patch_hdmi.c
2065 +++ b/sound/pci/hda/patch_hdmi.c
2066 @@ -930,6 +930,14 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
2067 }
2068
2069 /*
2070 + * always configure channel mapping, it may have been changed by the
2071 + * user in the meantime
2072 + */
2073 + hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
2074 + channels, per_pin->chmap,
2075 + per_pin->chmap_set);
2076 +
2077 + /*
2078 * sizeof(ai) is used instead of sizeof(*hdmi_ai) or
2079 * sizeof(*dp_ai) to avoid partial match/update problems when
2080 * the user switches between HDMI/DP monitors.
2081 @@ -940,20 +948,10 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
2082 "pin=%d channels=%d\n",
2083 pin_nid,
2084 channels);
2085 - hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
2086 - channels, per_pin->chmap,
2087 - per_pin->chmap_set);
2088 hdmi_stop_infoframe_trans(codec, pin_nid);
2089 hdmi_fill_audio_infoframe(codec, pin_nid,
2090 ai.bytes, sizeof(ai));
2091 hdmi_start_infoframe_trans(codec, pin_nid);
2092 - } else {
2093 - /* For non-pcm audio switch, setup new channel mapping
2094 - * accordingly */
2095 - if (per_pin->non_pcm != non_pcm)
2096 - hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
2097 - channels, per_pin->chmap,
2098 - per_pin->chmap_set);
2099 }
2100
2101 per_pin->non_pcm = non_pcm;
2102 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2103 index 389db4c..1383f38 100644
2104 --- a/sound/pci/hda/patch_realtek.c
2105 +++ b/sound/pci/hda/patch_realtek.c
2106 @@ -3308,6 +3308,15 @@ static void alc269_fixup_limit_int_mic_boost(struct hda_codec *codec,
2107 }
2108 }
2109
2110 +static void alc290_fixup_mono_speakers(struct hda_codec *codec,
2111 + const struct hda_fixup *fix, int action)
2112 +{
2113 + if (action == HDA_FIXUP_ACT_PRE_PROBE)
2114 + /* Remove DAC node 0x03, as it seems to be
2115 + giving mono output */
2116 + snd_hda_override_wcaps(codec, 0x03, 0);
2117 +}
2118 +
2119 enum {
2120 ALC269_FIXUP_SONY_VAIO,
2121 ALC275_FIXUP_SONY_VAIO_GPIO2,
2122 @@ -3331,9 +3340,12 @@ enum {
2123 ALC269_FIXUP_HP_GPIO_LED,
2124 ALC269_FIXUP_INV_DMIC,
2125 ALC269_FIXUP_LENOVO_DOCK,
2126 + ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
2127 ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
2128 ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
2129 ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
2130 + ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
2131 + ALC290_FIXUP_MONO_SPEAKERS,
2132 ALC269_FIXUP_HEADSET_MODE,
2133 ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
2134 ALC269_FIXUP_ASUS_X101_FUNC,
2135 @@ -3521,6 +3533,15 @@ static const struct hda_fixup alc269_fixups[] = {
2136 .chained = true,
2137 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
2138 },
2139 + [ALC269_FIXUP_DELL3_MIC_NO_PRESENCE] = {
2140 + .type = HDA_FIXUP_PINS,
2141 + .v.pins = (const struct hda_pintbl[]) {
2142 + { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */
2143 + { }
2144 + },
2145 + .chained = true,
2146 + .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
2147 + },
2148 [ALC269_FIXUP_HEADSET_MODE] = {
2149 .type = HDA_FIXUP_FUNC,
2150 .v.func = alc_fixup_headset_mode,
2151 @@ -3529,6 +3550,13 @@ static const struct hda_fixup alc269_fixups[] = {
2152 .type = HDA_FIXUP_FUNC,
2153 .v.func = alc_fixup_headset_mode_no_hp_mic,
2154 },
2155 + [ALC286_FIXUP_SONY_MIC_NO_PRESENCE] = {
2156 + .type = HDA_FIXUP_PINS,
2157 + .v.pins = (const struct hda_pintbl[]) {
2158 + { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
2159 + { }
2160 + },
2161 + },
2162 [ALC269_FIXUP_ASUS_X101_FUNC] = {
2163 .type = HDA_FIXUP_FUNC,
2164 .v.func = alc269_fixup_x101_headset_mic,
2165 @@ -3595,6 +3623,12 @@ static const struct hda_fixup alc269_fixups[] = {
2166 { }
2167 },
2168 },
2169 + [ALC290_FIXUP_MONO_SPEAKERS] = {
2170 + .type = HDA_FIXUP_FUNC,
2171 + .v.func = alc290_fixup_mono_speakers,
2172 + .chained = true,
2173 + .chain_id = ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
2174 + },
2175 };
2176
2177 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2178 @@ -3631,6 +3665,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2179 SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
2180 SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
2181 SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
2182 + SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
2183 SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
2184 SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
2185 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
2186 @@ -3651,6 +3686,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2187 SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
2188 SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
2189 SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
2190 + SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
2191 SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
2192 SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
2193 SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
2194 @@ -4345,6 +4381,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
2195 SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
2196 SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
2197 SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
2198 + SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
2199 SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
2200 SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
2201 SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
2202 diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
2203 index 63fb521..6234a51 100644
2204 --- a/sound/usb/usx2y/usbusx2yaudio.c
2205 +++ b/sound/usb/usx2y/usbusx2yaudio.c
2206 @@ -299,19 +299,6 @@ static void usX2Y_error_urb_status(struct usX2Ydev *usX2Y,
2207 usX2Y_clients_stop(usX2Y);
2208 }
2209
2210 -static void usX2Y_error_sequence(struct usX2Ydev *usX2Y,
2211 - struct snd_usX2Y_substream *subs, struct urb *urb)
2212 -{
2213 - snd_printk(KERN_ERR
2214 -"Sequence Error!(hcd_frame=%i ep=%i%s;wait=%i,frame=%i).\n"
2215 -"Most probably some urb of usb-frame %i is still missing.\n"
2216 -"Cause could be too long delays in usb-hcd interrupt handling.\n",
2217 - usb_get_current_frame_number(usX2Y->dev),
2218 - subs->endpoint, usb_pipein(urb->pipe) ? "in" : "out",
2219 - usX2Y->wait_iso_frame, urb->start_frame, usX2Y->wait_iso_frame);
2220 - usX2Y_clients_stop(usX2Y);
2221 -}
2222 -
2223 static void i_usX2Y_urb_complete(struct urb *urb)
2224 {
2225 struct snd_usX2Y_substream *subs = urb->context;
2226 @@ -328,12 +315,9 @@ static void i_usX2Y_urb_complete(struct urb *urb)
2227 usX2Y_error_urb_status(usX2Y, subs, urb);
2228 return;
2229 }
2230 - if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
2231 - subs->completed_urb = urb;
2232 - else {
2233 - usX2Y_error_sequence(usX2Y, subs, urb);
2234 - return;
2235 - }
2236 +
2237 + subs->completed_urb = urb;
2238 +
2239 {
2240 struct snd_usX2Y_substream *capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE],
2241 *playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
2242 diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
2243 index f2a1acd..814d0e8 100644
2244 --- a/sound/usb/usx2y/usx2yhwdeppcm.c
2245 +++ b/sound/usb/usx2y/usx2yhwdeppcm.c
2246 @@ -244,13 +244,8 @@ static void i_usX2Y_usbpcm_urb_complete(struct urb *urb)
2247 usX2Y_error_urb_status(usX2Y, subs, urb);
2248 return;
2249 }
2250 - if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
2251 - subs->completed_urb = urb;
2252 - else {
2253 - usX2Y_error_sequence(usX2Y, subs, urb);
2254 - return;
2255 - }
2256
2257 + subs->completed_urb = urb;
2258 capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
2259 capsubs2 = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
2260 playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];