Magellan Linux

Annotation of /trunk/kernel-magellan/patches-3.9/0106-3.9.7-all-fixes.patch



Revision 2221
Mon Jul 1 09:43:31 2013 UTC by niro
File size: 94478 bytes
-linux-3.9.7
1 niro 2221 diff --git a/arch/arm/mach-kirkwood/mpp.c b/arch/arm/mach-kirkwood/mpp.c
2     index 827cde4..e96fd71 100644
3     --- a/arch/arm/mach-kirkwood/mpp.c
4     +++ b/arch/arm/mach-kirkwood/mpp.c
5     @@ -22,9 +22,10 @@ static unsigned int __init kirkwood_variant(void)
6    
7     kirkwood_pcie_id(&dev, &rev);
8    
9     - if ((dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0) ||
10     - (dev == MV88F6282_DEV_ID))
11     + if (dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0)
12     return MPP_F6281_MASK;
13     + if (dev == MV88F6282_DEV_ID)
14     + return MPP_F6282_MASK;
15     if (dev == MV88F6192_DEV_ID && rev >= MV88F6192_REV_A0)
16     return MPP_F6192_MASK;
17     if (dev == MV88F6180_DEV_ID)
18     diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
19     index 05e6d2e..0f271e3 100644
20     --- a/arch/powerpc/include/asm/exception-64s.h
21     +++ b/arch/powerpc/include/asm/exception-64s.h
22     @@ -513,7 +513,7 @@ label##_common: \
23     */
24     #define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \
25     EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \
26     - FINISH_NAP;RUNLATCH_ON;DISABLE_INTS)
27     + FINISH_NAP;DISABLE_INTS;RUNLATCH_ON)
28    
29     /*
30     * When the idle code in power4_idle puts the CPU into NAP mode,
31     diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
32     index 3bbe7ed..644378e 100644
33     --- a/arch/powerpc/kernel/exceptions-64s.S
34     +++ b/arch/powerpc/kernel/exceptions-64s.S
35     @@ -707,7 +707,7 @@ machine_check_common:
36     STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
37     STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
38     STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
39     - STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
40     + STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
41     STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
42     #ifdef CONFIG_PPC_DOORBELL
43     STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
44     diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
45     index 4f97fe3..523d1e2 100644
46     --- a/arch/powerpc/kernel/irq.c
47     +++ b/arch/powerpc/kernel/irq.c
48     @@ -162,7 +162,7 @@ notrace unsigned int __check_irq_replay(void)
49     * in case we also had a rollover while hard disabled
50     */
51     local_paca->irq_happened &= ~PACA_IRQ_DEC;
52     - if (decrementer_check_overflow())
53     + if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
54     return 0x900;
55    
56     /* Finally check if an external interrupt happened */
57     diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
58     index 9600c36..0d86c8a 100644
59     --- a/arch/powerpc/kernel/process.c
60     +++ b/arch/powerpc/kernel/process.c
61     @@ -1371,7 +1371,7 @@ EXPORT_SYMBOL(dump_stack);
62    
63     #ifdef CONFIG_PPC64
64     /* Called with hard IRQs off */
65     -void __ppc64_runlatch_on(void)
66     +void notrace __ppc64_runlatch_on(void)
67     {
68     struct thread_info *ti = current_thread_info();
69     unsigned long ctrl;
70     @@ -1384,7 +1384,7 @@ void __ppc64_runlatch_on(void)
71     }
72    
73     /* Called with hard IRQs off */
74     -void __ppc64_runlatch_off(void)
75     +void notrace __ppc64_runlatch_off(void)
76     {
77     struct thread_info *ti = current_thread_info();
78     unsigned long ctrl;
79     diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
80     index 29857c6..bf33ace 100644
81     --- a/arch/powerpc/kernel/traps.c
82     +++ b/arch/powerpc/kernel/traps.c
83     @@ -1142,6 +1142,16 @@ void __kprobes program_check_exception(struct pt_regs *regs)
84     _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
85     }
86    
87     +/*
88     + * This occurs when running in hypervisor mode on POWER6 or later
89     + * and an illegal instruction is encountered.
90     + */
91     +void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
92     +{
93     + regs->msr |= REASON_ILLEGAL;
94     + program_check_exception(regs);
95     +}
96     +
97     void alignment_exception(struct pt_regs *regs)
98     {
99     int sig, code, fixed = 0;
100     diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
101     index 35ee62f..c205035 100644
102     --- a/arch/x86/boot/compressed/eboot.c
103     +++ b/arch/x86/boot/compressed/eboot.c
104     @@ -251,51 +251,6 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
105     *size = len;
106     }
107    
108     -static efi_status_t setup_efi_vars(struct boot_params *params)
109     -{
110     - struct setup_data *data;
111     - struct efi_var_bootdata *efidata;
112     - u64 store_size, remaining_size, var_size;
113     - efi_status_t status;
114     -
115     - if (sys_table->runtime->hdr.revision < EFI_2_00_SYSTEM_TABLE_REVISION)
116     - return EFI_UNSUPPORTED;
117     -
118     - data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
119     -
120     - while (data && data->next)
121     - data = (struct setup_data *)(unsigned long)data->next;
122     -
123     - status = efi_call_phys4((void *)sys_table->runtime->query_variable_info,
124     - EFI_VARIABLE_NON_VOLATILE |
125     - EFI_VARIABLE_BOOTSERVICE_ACCESS |
126     - EFI_VARIABLE_RUNTIME_ACCESS, &store_size,
127     - &remaining_size, &var_size);
128     -
129     - if (status != EFI_SUCCESS)
130     - return status;
131     -
132     - status = efi_call_phys3(sys_table->boottime->allocate_pool,
133     - EFI_LOADER_DATA, sizeof(*efidata), &efidata);
134     -
135     - if (status != EFI_SUCCESS)
136     - return status;
137     -
138     - efidata->data.type = SETUP_EFI_VARS;
139     - efidata->data.len = sizeof(struct efi_var_bootdata) -
140     - sizeof(struct setup_data);
141     - efidata->data.next = 0;
142     - efidata->store_size = store_size;
143     - efidata->remaining_size = remaining_size;
144     - efidata->max_var_size = var_size;
145     -
146     - if (data)
147     - data->next = (unsigned long)efidata;
148     - else
149     - params->hdr.setup_data = (unsigned long)efidata;
150     -
151     -}
152     -
153     static efi_status_t setup_efi_pci(struct boot_params *params)
154     {
155     efi_pci_io_protocol *pci;
156     @@ -1202,8 +1157,6 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
157    
158     setup_graphics(boot_params);
159    
160     - setup_efi_vars(boot_params);
161     -
162     setup_efi_pci(boot_params);
163    
164     status = efi_call_phys3(sys_table->boottime->allocate_pool,
165     diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
166     index 2fb5d58..60c89f3 100644
167     --- a/arch/x86/include/asm/efi.h
168     +++ b/arch/x86/include/asm/efi.h
169     @@ -102,13 +102,6 @@ extern void efi_call_phys_epilog(void);
170     extern void efi_unmap_memmap(void);
171     extern void efi_memory_uc(u64 addr, unsigned long size);
172    
173     -struct efi_var_bootdata {
174     - struct setup_data data;
175     - u64 store_size;
176     - u64 remaining_size;
177     - u64 max_var_size;
178     -};
179     -
180     #ifdef CONFIG_EFI
181    
182     static inline bool efi_is_native(void)
183     diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
184     index 0874424..c15ddaf 100644
185     --- a/arch/x86/include/uapi/asm/bootparam.h
186     +++ b/arch/x86/include/uapi/asm/bootparam.h
187     @@ -6,7 +6,6 @@
188     #define SETUP_E820_EXT 1
189     #define SETUP_DTB 2
190     #define SETUP_PCI 3
191     -#define SETUP_EFI_VARS 4
192    
193     /* ram_size flags */
194     #define RAMDISK_IMAGE_START_MASK 0x07FF
195     diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
196     index 7a6f3b3..f2bb9c9 100644
197     --- a/arch/x86/kernel/relocate_kernel_64.S
198     +++ b/arch/x86/kernel/relocate_kernel_64.S
199     @@ -160,7 +160,7 @@ identity_mapped:
200     xorq %rbp, %rbp
201     xorq %r8, %r8
202     xorq %r9, %r9
203     - xorq %r10, %r9
204     + xorq %r10, %r10
205     xorq %r11, %r11
206     xorq %r12, %r12
207     xorq %r13, %r13
208     diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
209     index 59b7fc4..0c13708 100644
210     --- a/arch/x86/mm/init.c
211     +++ b/arch/x86/mm/init.c
212     @@ -277,6 +277,9 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
213     end_pfn = limit_pfn;
214     nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
215    
216     + if (!after_bootmem)
217     + adjust_range_page_size_mask(mr, nr_range);
218     +
219     /* try to merge same page size and continuous */
220     for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
221     unsigned long old_start;
222     @@ -291,9 +294,6 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
223     nr_range--;
224     }
225    
226     - if (!after_bootmem)
227     - adjust_range_page_size_mask(mr, nr_range);
228     -
229     for (i = 0; i < nr_range; i++)
230     printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
231     mr[i].start, mr[i].end - 1,
232     diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
233     index e4a86a6..90f3a52 100644
234     --- a/arch/x86/platform/efi/efi.c
235     +++ b/arch/x86/platform/efi/efi.c
236     @@ -41,7 +41,6 @@
237     #include <linux/io.h>
238     #include <linux/reboot.h>
239     #include <linux/bcd.h>
240     -#include <linux/ucs2_string.h>
241    
242     #include <asm/setup.h>
243     #include <asm/efi.h>
244     @@ -52,12 +51,12 @@
245    
246     #define EFI_DEBUG 1
247    
248     -/*
249     - * There's some additional metadata associated with each
250     - * variable. Intel's reference implementation is 60 bytes - bump that
251     - * to account for potential alignment constraints
252     - */
253     -#define VAR_METADATA_SIZE 64
254     +#define EFI_MIN_RESERVE 5120
255     +
256     +#define EFI_DUMMY_GUID \
257     + EFI_GUID(0x4424ac57, 0xbe4b, 0x47dd, 0x9e, 0x97, 0xed, 0x50, 0xf0, 0x9f, 0x92, 0xa9)
258     +
259     +static efi_char16_t efi_dummy_name[6] = { 'D', 'U', 'M', 'M', 'Y', 0 };
260    
261     struct efi __read_mostly efi = {
262     .mps = EFI_INVALID_TABLE_ADDR,
263     @@ -77,13 +76,6 @@ struct efi_memory_map memmap;
264     static struct efi efi_phys __initdata;
265     static efi_system_table_t efi_systab __initdata;
266    
267     -static u64 efi_var_store_size;
268     -static u64 efi_var_remaining_size;
269     -static u64 efi_var_max_var_size;
270     -static u64 boot_used_size;
271     -static u64 boot_var_size;
272     -static u64 active_size;
273     -
274     unsigned long x86_efi_facility;
275    
276     /*
277     @@ -186,53 +178,8 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
278     efi_char16_t *name,
279     efi_guid_t *vendor)
280     {
281     - efi_status_t status;
282     - static bool finished = false;
283     - static u64 var_size;
284     -
285     - status = efi_call_virt3(get_next_variable,
286     - name_size, name, vendor);
287     -
288     - if (status == EFI_NOT_FOUND) {
289     - finished = true;
290     - if (var_size < boot_used_size) {
291     - boot_var_size = boot_used_size - var_size;
292     - active_size += boot_var_size;
293     - } else {
294     - printk(KERN_WARNING FW_BUG "efi: Inconsistent initial sizes\n");
295     - }
296     - }
297     -
298     - if (boot_used_size && !finished) {
299     - unsigned long size;
300     - u32 attr;
301     - efi_status_t s;
302     - void *tmp;
303     -
304     - s = virt_efi_get_variable(name, vendor, &attr, &size, NULL);
305     -
306     - if (s != EFI_BUFFER_TOO_SMALL || !size)
307     - return status;
308     -
309     - tmp = kmalloc(size, GFP_ATOMIC);
310     -
311     - if (!tmp)
312     - return status;
313     -
314     - s = virt_efi_get_variable(name, vendor, &attr, &size, tmp);
315     -
316     - if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) {
317     - var_size += size;
318     - var_size += ucs2_strsize(name, 1024);
319     - active_size += size;
320     - active_size += VAR_METADATA_SIZE;
321     - active_size += ucs2_strsize(name, 1024);
322     - }
323     -
324     - kfree(tmp);
325     - }
326     -
327     - return status;
328     + return efi_call_virt3(get_next_variable,
329     + name_size, name, vendor);
330     }
331    
332     static efi_status_t virt_efi_set_variable(efi_char16_t *name,
333     @@ -241,34 +188,9 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
334     unsigned long data_size,
335     void *data)
336     {
337     - efi_status_t status;
338     - u32 orig_attr = 0;
339     - unsigned long orig_size = 0;
340     -
341     - status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size,
342     - NULL);
343     -
344     - if (status != EFI_BUFFER_TOO_SMALL)
345     - orig_size = 0;
346     -
347     - status = efi_call_virt5(set_variable,
348     - name, vendor, attr,
349     - data_size, data);
350     -
351     - if (status == EFI_SUCCESS) {
352     - if (orig_size) {
353     - active_size -= orig_size;
354     - active_size -= ucs2_strsize(name, 1024);
355     - active_size -= VAR_METADATA_SIZE;
356     - }
357     - if (data_size) {
358     - active_size += data_size;
359     - active_size += ucs2_strsize(name, 1024);
360     - active_size += VAR_METADATA_SIZE;
361     - }
362     - }
363     -
364     - return status;
365     + return efi_call_virt5(set_variable,
366     + name, vendor, attr,
367     + data_size, data);
368     }
369    
370     static efi_status_t virt_efi_query_variable_info(u32 attr,
371     @@ -776,9 +698,6 @@ void __init efi_init(void)
372     char vendor[100] = "unknown";
373     int i = 0;
374     void *tmp;
375     - struct setup_data *data;
376     - struct efi_var_bootdata *efi_var_data;
377     - u64 pa_data;
378    
379     #ifdef CONFIG_X86_32
380     if (boot_params.efi_info.efi_systab_hi ||
381     @@ -796,22 +715,6 @@ void __init efi_init(void)
382     if (efi_systab_init(efi_phys.systab))
383     return;
384    
385     - pa_data = boot_params.hdr.setup_data;
386     - while (pa_data) {
387     - data = early_ioremap(pa_data, sizeof(*efi_var_data));
388     - if (data->type == SETUP_EFI_VARS) {
389     - efi_var_data = (struct efi_var_bootdata *)data;
390     -
391     - efi_var_store_size = efi_var_data->store_size;
392     - efi_var_remaining_size = efi_var_data->remaining_size;
393     - efi_var_max_var_size = efi_var_data->max_var_size;
394     - }
395     - pa_data = data->next;
396     - early_iounmap(data, sizeof(*efi_var_data));
397     - }
398     -
399     - boot_used_size = efi_var_store_size - efi_var_remaining_size;
400     -
401     set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
402    
403     /*
404     @@ -1075,6 +978,13 @@ void __init efi_enter_virtual_mode(void)
405     runtime_code_page_mkexec();
406    
407     kfree(new_memmap);
408     +
409     + /* clean DUMMY object */
410     + efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
411     + EFI_VARIABLE_NON_VOLATILE |
412     + EFI_VARIABLE_BOOTSERVICE_ACCESS |
413     + EFI_VARIABLE_RUNTIME_ACCESS,
414     + 0, NULL);
415     }
416    
417     /*
418     @@ -1126,33 +1036,65 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
419     efi_status_t status;
420     u64 storage_size, remaining_size, max_size;
421    
422     + if (!(attributes & EFI_VARIABLE_NON_VOLATILE))
423     + return 0;
424     +
425     status = efi.query_variable_info(attributes, &storage_size,
426     &remaining_size, &max_size);
427     if (status != EFI_SUCCESS)
428     return status;
429    
430     - if (!max_size && remaining_size > size)
431     - printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
432     - " is returning MaxVariableSize=0\n");
433     /*
434     * Some firmware implementations refuse to boot if there's insufficient
435     * space in the variable store. We account for that by refusing the
436     * write if permitting it would reduce the available space to under
437     - * 50%. However, some firmware won't reclaim variable space until
438     - * after the used (not merely the actively used) space drops below
439     - * a threshold. We can approximate that case with the value calculated
440     - * above. If both the firmware and our calculations indicate that the
441     - * available space would drop below 50%, refuse the write.
442     + * 5KB. This figure was provided by Samsung, so should be safe.
443     */
444     + if ((remaining_size - size < EFI_MIN_RESERVE) &&
445     + !efi_no_storage_paranoia) {
446     +
447     + /*
448     + * Triggering garbage collection may require that the firmware
449     + * generate a real EFI_OUT_OF_RESOURCES error. We can force
450     + * that by attempting to use more space than is available.
451     + */
452     + unsigned long dummy_size = remaining_size + 1024;
453     + void *dummy = kmalloc(dummy_size, GFP_ATOMIC);
454     +
455     + status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
456     + EFI_VARIABLE_NON_VOLATILE |
457     + EFI_VARIABLE_BOOTSERVICE_ACCESS |
458     + EFI_VARIABLE_RUNTIME_ACCESS,
459     + dummy_size, dummy);
460     +
461     + if (status == EFI_SUCCESS) {
462     + /*
463     + * This should have failed, so if it didn't make sure
464     + * that we delete it...
465     + */
466     + efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
467     + EFI_VARIABLE_NON_VOLATILE |
468     + EFI_VARIABLE_BOOTSERVICE_ACCESS |
469     + EFI_VARIABLE_RUNTIME_ACCESS,
470     + 0, dummy);
471     + }
472    
473     - if (!storage_size || size > remaining_size ||
474     - (max_size && size > max_size))
475     - return EFI_OUT_OF_RESOURCES;
476     + /*
477     + * The runtime code may now have triggered a garbage collection
478     + * run, so check the variable info again
479     + */
480     + status = efi.query_variable_info(attributes, &storage_size,
481     + &remaining_size, &max_size);
482    
483     - if (!efi_no_storage_paranoia &&
484     - ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) &&
485     - (remaining_size - size < storage_size / 2)))
486     - return EFI_OUT_OF_RESOURCES;
487     + if (status != EFI_SUCCESS)
488     + return status;
489     +
490     + /*
491     + * There still isn't enough room, so return an error
492     + */
493     + if (remaining_size - size < EFI_MIN_RESERVE)
494     + return EFI_OUT_OF_RESOURCES;
495     + }
496    
497     return EFI_SUCCESS;
498     }
499     diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
500     index 5e7e991..b32fc76 100644
501     --- a/drivers/acpi/scan.c
502     +++ b/drivers/acpi/scan.c
503     @@ -830,11 +830,8 @@ acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
504     return -ENOSYS;
505    
506     result = driver->ops.add(device);
507     - if (result) {
508     - device->driver = NULL;
509     - device->driver_data = NULL;
510     + if (result)
511     return result;
512     - }
513    
514     device->driver = driver;
515    
516     diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
517     index 81a9335..c397f3f 100644
518     --- a/drivers/acpi/video.c
519     +++ b/drivers/acpi/video.c
520     @@ -1646,6 +1646,9 @@ static int acpi_video_bus_add(struct acpi_device *device)
521     int error;
522     acpi_status status;
523    
524     + if (device->handler)
525     + return -EINVAL;
526     +
527     status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
528     device->parent->handle, 1,
529     acpi_video_bus_match, NULL,
530     diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
531     index 1c1b8e5..dadea48 100644
532     --- a/drivers/block/cciss.c
533     +++ b/drivers/block/cciss.c
534     @@ -162,8 +162,6 @@ static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
535     static int cciss_open(struct block_device *bdev, fmode_t mode);
536     static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
537     static int cciss_release(struct gendisk *disk, fmode_t mode);
538     -static int do_ioctl(struct block_device *bdev, fmode_t mode,
539     - unsigned int cmd, unsigned long arg);
540     static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
541     unsigned int cmd, unsigned long arg);
542     static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
543     @@ -229,7 +227,7 @@ static const struct block_device_operations cciss_fops = {
544     .owner = THIS_MODULE,
545     .open = cciss_unlocked_open,
546     .release = cciss_release,
547     - .ioctl = do_ioctl,
548     + .ioctl = cciss_ioctl,
549     .getgeo = cciss_getgeo,
550     #ifdef CONFIG_COMPAT
551     .compat_ioctl = cciss_compat_ioctl,
552     @@ -1138,16 +1136,6 @@ static int cciss_release(struct gendisk *disk, fmode_t mode)
553     return 0;
554     }
555    
556     -static int do_ioctl(struct block_device *bdev, fmode_t mode,
557     - unsigned cmd, unsigned long arg)
558     -{
559     - int ret;
560     - mutex_lock(&cciss_mutex);
561     - ret = cciss_ioctl(bdev, mode, cmd, arg);
562     - mutex_unlock(&cciss_mutex);
563     - return ret;
564     -}
565     -
566     #ifdef CONFIG_COMPAT
567    
568     static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
569     @@ -1174,7 +1162,7 @@ static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
570     case CCISS_REGNEWD:
571     case CCISS_RESCANDISK:
572     case CCISS_GETLUNINFO:
573     - return do_ioctl(bdev, mode, cmd, arg);
574     + return cciss_ioctl(bdev, mode, cmd, arg);
575    
576     case CCISS_PASSTHRU32:
577     return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
578     @@ -1214,7 +1202,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
579     if (err)
580     return -EFAULT;
581    
582     - err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
583     + err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
584     if (err)
585     return err;
586     err |=
587     @@ -1256,7 +1244,7 @@ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
588     if (err)
589     return -EFAULT;
590    
591     - err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
592     + err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
593     if (err)
594     return err;
595     err |=
596     @@ -1306,11 +1294,14 @@ static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
597     static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
598     {
599     cciss_coalint_struct intinfo;
600     + unsigned long flags;
601    
602     if (!argp)
603     return -EINVAL;
604     + spin_lock_irqsave(&h->lock, flags);
605     intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
606     intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
607     + spin_unlock_irqrestore(&h->lock, flags);
608     if (copy_to_user
609     (argp, &intinfo, sizeof(cciss_coalint_struct)))
610     return -EFAULT;
611     @@ -1351,12 +1342,15 @@ static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
612     static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
613     {
614     NodeName_type NodeName;
615     + unsigned long flags;
616     int i;
617    
618     if (!argp)
619     return -EINVAL;
620     + spin_lock_irqsave(&h->lock, flags);
621     for (i = 0; i < 16; i++)
622     NodeName[i] = readb(&h->cfgtable->ServerName[i]);
623     + spin_unlock_irqrestore(&h->lock, flags);
624     if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
625     return -EFAULT;
626     return 0;
627     @@ -1393,10 +1387,13 @@ static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
628     static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
629     {
630     Heartbeat_type heartbeat;
631     + unsigned long flags;
632    
633     if (!argp)
634     return -EINVAL;
635     + spin_lock_irqsave(&h->lock, flags);
636     heartbeat = readl(&h->cfgtable->HeartBeat);
637     + spin_unlock_irqrestore(&h->lock, flags);
638     if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
639     return -EFAULT;
640     return 0;
641     @@ -1405,10 +1402,13 @@ static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
642     static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
643     {
644     BusTypes_type BusTypes;
645     + unsigned long flags;
646    
647     if (!argp)
648     return -EINVAL;
649     + spin_lock_irqsave(&h->lock, flags);
650     BusTypes = readl(&h->cfgtable->BusTypes);
651     + spin_unlock_irqrestore(&h->lock, flags);
652     if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
653     return -EFAULT;
654     return 0;
655     diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
656     index b7b7a88..fe333e4 100644
657     --- a/drivers/block/rbd.c
658     +++ b/drivers/block/rbd.c
659     @@ -435,8 +435,8 @@ static const struct block_device_operations rbd_bd_ops = {
660     };
661    
662     /*
663     - * Initialize an rbd client instance.
664     - * We own *ceph_opts.
665     + * Initialize an rbd client instance. Success or not, this function
666     + * consumes ceph_opts.
667     */
668     static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
669     {
670     @@ -583,7 +583,8 @@ static int parse_rbd_opts_token(char *c, void *private)
671    
672     /*
673     * Get a ceph client with specific addr and configuration, if one does
674     - * not exist create it.
675     + * not exist create it. Either way, ceph_opts is consumed by this
676     + * function.
677     */
678     static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
679     {
680     @@ -4104,7 +4105,6 @@ static ssize_t rbd_add(struct bus_type *bus,
681     rc = PTR_ERR(rbdc);
682     goto err_out_args;
683     }
684     - ceph_opts = NULL; /* rbd_dev client now owns this */
685    
686     /* pick the pool */
687     osdc = &rbdc->client->osdc;
688     @@ -4140,8 +4140,6 @@ err_out_rbd_dev:
689     err_out_client:
690     rbd_put_client(rbdc);
691     err_out_args:
692     - if (ceph_opts)
693     - ceph_destroy_options(ceph_opts);
694     kfree(rbd_opts);
695     rbd_spec_put(spec);
696     err_out_module:
697     diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
698     index 3cfd093..d6742dc 100644
699     --- a/drivers/gpu/drm/gma500/cdv_intel_display.c
700     +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
701     @@ -1750,6 +1750,19 @@ static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
702     kfree(psb_intel_crtc);
703     }
704    
705     +static void cdv_intel_crtc_disable(struct drm_crtc *crtc)
706     +{
707     + struct gtt_range *gt;
708     + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
709     +
710     + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
711     +
712     + if (crtc->fb) {
713     + gt = to_psb_fb(crtc->fb)->gtt;
714     + psb_gtt_unpin(gt);
715     + }
716     +}
717     +
718     const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
719     .dpms = cdv_intel_crtc_dpms,
720     .mode_fixup = cdv_intel_crtc_mode_fixup,
721     @@ -1757,6 +1770,7 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
722     .mode_set_base = cdv_intel_pipe_set_base,
723     .prepare = cdv_intel_crtc_prepare,
724     .commit = cdv_intel_crtc_commit,
725     + .disable = cdv_intel_crtc_disable,
726     };
727    
728     const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
729     diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
730     index 9edb190..1c01b9a 100644
731     --- a/drivers/gpu/drm/gma500/psb_intel_display.c
732     +++ b/drivers/gpu/drm/gma500/psb_intel_display.c
733     @@ -1246,6 +1246,19 @@ void psb_intel_crtc_destroy(struct drm_crtc *crtc)
734     kfree(psb_intel_crtc);
735     }
736    
737     +static void psb_intel_crtc_disable(struct drm_crtc *crtc)
738     +{
739     + struct gtt_range *gt;
740     + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
741     +
742     + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
743     +
744     + if (crtc->fb) {
745     + gt = to_psb_fb(crtc->fb)->gtt;
746     + psb_gtt_unpin(gt);
747     + }
748     +}
749     +
750     const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
751     .dpms = psb_intel_crtc_dpms,
752     .mode_fixup = psb_intel_crtc_mode_fixup,
753     @@ -1253,6 +1266,7 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
754     .mode_set_base = psb_intel_pipe_set_base,
755     .prepare = psb_intel_crtc_prepare,
756     .commit = psb_intel_crtc_commit,
757     + .disable = psb_intel_crtc_disable,
758     };
759    
760     const struct drm_crtc_funcs psb_intel_crtc_funcs = {
761     diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
762     index cdd78ca..859d468 100644
763     --- a/drivers/gpu/drm/i915/intel_sdvo.c
764     +++ b/drivers/gpu/drm/i915/intel_sdvo.c
765     @@ -1771,10 +1771,13 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
766     * arranged in priority order.
767     */
768     intel_ddc_get_modes(connector, &intel_sdvo->ddc);
769     - if (list_empty(&connector->probed_modes) == false)
770     - goto end;
771    
772     - /* Fetch modes from VBT */
773     + /*
774     + * Fetch modes from VBT. For SDVO prefer the VBT mode since some
775     + * SDVO->LVDS transcoders can't cope with the EDID mode. Since
776     + * drm_mode_probed_add adds the mode at the head of the list we add it
777     + * last.
778     + */
779     if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
780     newmode = drm_mode_duplicate(connector->dev,
781     dev_priv->sdvo_lvds_vbt_mode);
782     @@ -1786,7 +1789,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
783     }
784     }
785    
786     -end:
787     list_for_each_entry(newmode, &connector->probed_modes, head) {
788     if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
789     intel_sdvo->sdvo_lvds_fixed_mode =
790     diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
791     index d0817d9..a60a5ac 100644
792     --- a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
793     +++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
794     @@ -50,11 +50,15 @@ nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
795     {
796     const u32 doff = (or * 0x800);
797     int load = -EINVAL;
798     + nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
799     + nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
800     nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
801     udelay(9500);
802     nv_wr32(priv, 0x61a00c + doff, 0x80000000);
803     load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
804     nv_wr32(priv, 0x61a00c + doff, 0x00000000);
805     + nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
806     + nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
807     return load;
808     }
809    
810     diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
811     index 92d3ab1..dd74ced 100644
812     --- a/drivers/gpu/drm/nouveau/core/include/core/class.h
813     +++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
814     @@ -216,7 +216,7 @@ struct nv04_display_class {
815     #define NV50_DISP_DAC_PWR_STATE 0x00000040
816     #define NV50_DISP_DAC_PWR_STATE_ON 0x00000000
817     #define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040
818     -#define NV50_DISP_DAC_LOAD 0x0002000c
819     +#define NV50_DISP_DAC_LOAD 0x00020100
820     #define NV50_DISP_DAC_LOAD_VALUE 0x00000007
821    
822     #define NV50_DISP_PIOR_MTHD 0x00030000
823     diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
824     index 1ddc03e..dfbb42b 100644
825     --- a/drivers/gpu/drm/nouveau/nv50_display.c
826     +++ b/drivers/gpu/drm/nouveau/nv50_display.c
827     @@ -1554,7 +1554,9 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
828     {
829     struct nv50_disp *disp = nv50_disp(encoder->dev);
830     int ret, or = nouveau_encoder(encoder)->or;
831     - u32 load = 0;
832     + u32 load = nouveau_drm(encoder->dev)->vbios.dactestval;
833     + if (load == 0)
834     + load = 340;
835    
836     ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
837     if (ret || load != 7)
838     diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
839     index 6af167f..7116798 100644
840     --- a/drivers/md/raid1.c
841     +++ b/drivers/md/raid1.c
842     @@ -427,7 +427,17 @@ static void raid1_end_write_request(struct bio *bio, int error)
843    
844     r1_bio->bios[mirror] = NULL;
845     to_put = bio;
846     - set_bit(R1BIO_Uptodate, &r1_bio->state);
847     + /*
848     + * Do not set R1BIO_Uptodate if the current device is
849     + * rebuilding or Faulty. This is because we cannot use
850     + * such device for properly reading the data back (we could
851     + * potentially use it, if the current write would have felt
852     + * before rdev->recovery_offset, but for simplicity we don't
853     + * check this here.
854     + */
855     + if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
856     + !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
857     + set_bit(R1BIO_Uptodate, &r1_bio->state);
858    
859     /* Maybe we can clear some bad blocks. */
860     if (is_badblock(conf->mirrors[mirror].rdev,
861     @@ -880,17 +890,17 @@ static void allow_barrier(struct r1conf *conf)
862     wake_up(&conf->wait_barrier);
863     }
864    
865     -static void freeze_array(struct r1conf *conf)
866     +static void freeze_array(struct r1conf *conf, int extra)
867     {
868     /* stop syncio and normal IO and wait for everything to
869     * go quite.
870     * We increment barrier and nr_waiting, and then
871     - * wait until nr_pending match nr_queued+1
872     + * wait until nr_pending match nr_queued+extra
873     * This is called in the context of one normal IO request
874     * that has failed. Thus any sync request that might be pending
875     * will be blocked by nr_pending, and we need to wait for
876     * pending IO requests to complete or be queued for re-try.
877     - * Thus the number queued (nr_queued) plus this request (1)
878     + * Thus the number queued (nr_queued) plus this request (extra)
879     * must match the number of pending IOs (nr_pending) before
880     * we continue.
881     */
882     @@ -898,7 +908,7 @@ static void freeze_array(struct r1conf *conf)
883     conf->barrier++;
884     conf->nr_waiting++;
885     wait_event_lock_irq_cmd(conf->wait_barrier,
886     - conf->nr_pending == conf->nr_queued+1,
887     + conf->nr_pending == conf->nr_queued+extra,
888     conf->resync_lock,
889     flush_pending_writes(conf));
890     spin_unlock_irq(&conf->resync_lock);
891     @@ -1558,8 +1568,8 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
892     * we wait for all outstanding requests to complete.
893     */
894     synchronize_sched();
895     - raise_barrier(conf);
896     - lower_barrier(conf);
897     + freeze_array(conf, 0);
898     + unfreeze_array(conf);
899     clear_bit(Unmerged, &rdev->flags);
900     }
901     md_integrity_add_rdev(rdev, mddev);
902     @@ -1609,11 +1619,11 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
903     */
904     struct md_rdev *repl =
905     conf->mirrors[conf->raid_disks + number].rdev;
906     - raise_barrier(conf);
907     + freeze_array(conf, 0);
908     clear_bit(Replacement, &repl->flags);
909     p->rdev = repl;
910     conf->mirrors[conf->raid_disks + number].rdev = NULL;
911     - lower_barrier(conf);
912     + unfreeze_array(conf);
913     clear_bit(WantReplacement, &rdev->flags);
914     } else
915     clear_bit(WantReplacement, &rdev->flags);
916     @@ -2230,7 +2240,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
917     * frozen
918     */
919     if (mddev->ro == 0) {
920     - freeze_array(conf);
921     + freeze_array(conf, 1);
922     fix_read_error(conf, r1_bio->read_disk,
923     r1_bio->sector, r1_bio->sectors);
924     unfreeze_array(conf);
925     @@ -2827,8 +2837,8 @@ static int run(struct mddev *mddev)
926     return PTR_ERR(conf);
927    
928     if (mddev->queue)
929     - blk_queue_max_write_same_sectors(mddev->queue,
930     - mddev->chunk_sectors);
931     + blk_queue_max_write_same_sectors(mddev->queue, 0);
932     +
933     rdev_for_each(rdev, mddev) {
934     if (!mddev->gendisk)
935     continue;
936     @@ -3009,7 +3019,7 @@ static int raid1_reshape(struct mddev *mddev)
937     return -ENOMEM;
938     }
939    
940     - raise_barrier(conf);
941     + freeze_array(conf, 0);
942    
943     /* ok, everything is stopped */
944     oldpool = conf->r1bio_pool;
945     @@ -3040,7 +3050,7 @@ static int raid1_reshape(struct mddev *mddev)
946     conf->raid_disks = mddev->raid_disks = raid_disks;
947     mddev->delta_disks = 0;
948    
949     - lower_barrier(conf);
950     + unfreeze_array(conf);
951    
952     set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
953     md_wakeup_thread(mddev->thread);
954     diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
955     index 46c14e5..e4ea992 100644
956     --- a/drivers/md/raid10.c
957     +++ b/drivers/md/raid10.c
958     @@ -490,7 +490,17 @@ static void raid10_end_write_request(struct bio *bio, int error)
959     sector_t first_bad;
960     int bad_sectors;
961    
962     - set_bit(R10BIO_Uptodate, &r10_bio->state);
963     + /*
964     + * Do not set R10BIO_Uptodate if the current device is
965     + * rebuilding or Faulty. This is because we cannot use
966     + * such device for properly reading the data back (we could
967     + * potentially use it, if the current write would have felt
968     + * before rdev->recovery_offset, but for simplicity we don't
969     + * check this here.
970     + */
971     + if (test_bit(In_sync, &rdev->flags) &&
972     + !test_bit(Faulty, &rdev->flags))
973     + set_bit(R10BIO_Uptodate, &r10_bio->state);
974    
975     /* Maybe we can clear some bad blocks. */
976     if (is_badblock(rdev,
977     @@ -1055,17 +1065,17 @@ static void allow_barrier(struct r10conf *conf)
978     wake_up(&conf->wait_barrier);
979     }
980    
981     -static void freeze_array(struct r10conf *conf)
982     +static void freeze_array(struct r10conf *conf, int extra)
983     {
984     /* stop syncio and normal IO and wait for everything to
985     * go quiet.
986     * We increment barrier and nr_waiting, and then
987     - * wait until nr_pending match nr_queued+1
988     + * wait until nr_pending match nr_queued+extra
989     * This is called in the context of one normal IO request
990     * that has failed. Thus any sync request that might be pending
991     * will be blocked by nr_pending, and we need to wait for
992     * pending IO requests to complete or be queued for re-try.
993     - * Thus the number queued (nr_queued) plus this request (1)
994     + * Thus the number queued (nr_queued) plus this request (extra)
995     * must match the number of pending IOs (nr_pending) before
996     * we continue.
997     */
998     @@ -1073,7 +1083,7 @@ static void freeze_array(struct r10conf *conf)
999     conf->barrier++;
1000     conf->nr_waiting++;
1001     wait_event_lock_irq_cmd(conf->wait_barrier,
1002     - conf->nr_pending == conf->nr_queued+1,
1003     + conf->nr_pending == conf->nr_queued+extra,
1004     conf->resync_lock,
1005     flush_pending_writes(conf));
1006    
1007     @@ -1839,8 +1849,8 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1008     * we wait for all outstanding requests to complete.
1009     */
1010     synchronize_sched();
1011     - raise_barrier(conf, 0);
1012     - lower_barrier(conf);
1013     + freeze_array(conf, 0);
1014     + unfreeze_array(conf);
1015     clear_bit(Unmerged, &rdev->flags);
1016     }
1017     md_integrity_add_rdev(rdev, mddev);
1018     @@ -2636,7 +2646,7 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
1019     r10_bio->devs[slot].bio = NULL;
1020    
1021     if (mddev->ro == 0) {
1022     - freeze_array(conf);
1023     + freeze_array(conf, 1);
1024     fix_read_error(conf, mddev, r10_bio);
1025     unfreeze_array(conf);
1026     } else
1027     @@ -3625,8 +3635,7 @@ static int run(struct mddev *mddev)
1028     if (mddev->queue) {
1029     blk_queue_max_discard_sectors(mddev->queue,
1030     mddev->chunk_sectors);
1031     - blk_queue_max_write_same_sectors(mddev->queue,
1032     - mddev->chunk_sectors);
1033     + blk_queue_max_write_same_sectors(mddev->queue, 0);
1034     blk_queue_io_min(mddev->queue, chunk_size);
1035     if (conf->geo.raid_disks % conf->geo.near_copies)
1036     blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
1037     diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1038     index f4e87bf..251ab64 100644
1039     --- a/drivers/md/raid5.c
1040     +++ b/drivers/md/raid5.c
1041     @@ -5457,7 +5457,7 @@ static int run(struct mddev *mddev)
1042     if (mddev->major_version == 0 &&
1043     mddev->minor_version > 90)
1044     rdev->recovery_offset = reshape_offset;
1045     -
1046     +
1047     if (rdev->recovery_offset < reshape_offset) {
1048     /* We need to check old and new layout */
1049     if (!only_parity(rdev->raid_disk,
1050     @@ -5580,6 +5580,8 @@ static int run(struct mddev *mddev)
1051     */
1052     mddev->queue->limits.discard_zeroes_data = 0;
1053    
1054     + blk_queue_max_write_same_sectors(mddev->queue, 0);
1055     +
1056     rdev_for_each(rdev, mddev) {
1057     disk_stack_limits(mddev->gendisk, rdev->bdev,
1058     rdev->data_offset << 9);
1059     diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
1060     index 6f42e57..e313c71 100644
1061     --- a/drivers/net/ethernet/broadcom/tg3.c
1062     +++ b/drivers/net/ethernet/broadcom/tg3.c
1063     @@ -1799,6 +1799,9 @@ static int tg3_poll_fw(struct tg3 *tp)
1064     int i;
1065     u32 val;
1066    
1067     + if (tg3_flag(tp, NO_FWARE_REPORTED))
1068     + return 0;
1069     +
1070     if (tg3_flag(tp, IS_SSB_CORE)) {
1071     /* We don't use firmware. */
1072     return 0;
1073     @@ -10016,6 +10019,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1074     */
1075     static int tg3_init_hw(struct tg3 *tp, int reset_phy)
1076     {
1077     + /* Chip may have been just powered on. If so, the boot code may still
1078     + * be running initialization. Wait for it to finish to avoid races in
1079     + * accessing the hardware.
1080     + */
1081     + tg3_enable_register_access(tp);
1082     + tg3_poll_fw(tp);
1083     +
1084     tg3_switch_clocks(tp);
1085    
1086     tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
1087     diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
1088     index 17507dc..cc1fd78 100644
1089     --- a/drivers/net/wireless/ath/ath9k/Kconfig
1090     +++ b/drivers/net/wireless/ath/ath9k/Kconfig
1091     @@ -92,13 +92,17 @@ config ATH9K_MAC_DEBUG
1092     This option enables collection of statistics for Rx/Tx status
1093     data and some other MAC related statistics
1094    
1095     -config ATH9K_RATE_CONTROL
1096     +config ATH9K_LEGACY_RATE_CONTROL
1097     bool "Atheros ath9k rate control"
1098     depends on ATH9K
1099     - default y
1100     + default n
1101     ---help---
1102     Say Y, if you want to use the ath9k specific rate control
1103     - module instead of minstrel_ht.
1104     + module instead of minstrel_ht. Be warned that there are various
1105     + issues with the ath9k RC and minstrel is a more robust algorithm.
1106     + Note that even if this option is selected, "ath9k_rate_control"
1107     + has to be passed to mac80211 using the module parameter,
1108     + ieee80211_default_rc_algo.
1109    
1110     config ATH9K_HTC
1111     tristate "Atheros HTC based wireless cards support"
1112     diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
1113     index 2ad8f94..75ee9e7 100644
1114     --- a/drivers/net/wireless/ath/ath9k/Makefile
1115     +++ b/drivers/net/wireless/ath/ath9k/Makefile
1116     @@ -8,7 +8,7 @@ ath9k-y += beacon.o \
1117     antenna.o
1118    
1119     ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
1120     -ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
1121     +ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o
1122     ath9k-$(CONFIG_ATH9K_PCI) += pci.o
1123     ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
1124     ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
1125     diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
1126     index db5ffad..7546b9a 100644
1127     --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
1128     +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
1129     @@ -958,11 +958,11 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
1130     {0x0000a074, 0x00000000},
1131     {0x0000a078, 0x00000000},
1132     {0x0000a07c, 0x00000000},
1133     - {0x0000a080, 0x1a1a1a1a},
1134     - {0x0000a084, 0x1a1a1a1a},
1135     - {0x0000a088, 0x1a1a1a1a},
1136     - {0x0000a08c, 0x1a1a1a1a},
1137     - {0x0000a090, 0x171a1a1a},
1138     + {0x0000a080, 0x22222229},
1139     + {0x0000a084, 0x1d1d1d1d},
1140     + {0x0000a088, 0x1d1d1d1d},
1141     + {0x0000a08c, 0x1d1d1d1d},
1142     + {0x0000a090, 0x171d1d1d},
1143     {0x0000a094, 0x11111717},
1144     {0x0000a098, 0x00030311},
1145     {0x0000a09c, 0x00000000},
1146     diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
1147     index 26db547..4157126 100644
1148     --- a/drivers/net/wireless/ath/ath9k/init.c
1149     +++ b/drivers/net/wireless/ath/ath9k/init.c
1150     @@ -766,8 +766,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1151     hw->wiphy->iface_combinations = &if_comb;
1152     hw->wiphy->n_iface_combinations = 1;
1153    
1154     - if (AR_SREV_5416(sc->sc_ah))
1155     - hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1156     + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1157    
1158     hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
1159     hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
1160     @@ -809,10 +808,6 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1161     sc->ant_rx = hw->wiphy->available_antennas_rx;
1162     sc->ant_tx = hw->wiphy->available_antennas_tx;
1163    
1164     -#ifdef CONFIG_ATH9K_RATE_CONTROL
1165     - hw->rate_control_algorithm = "ath9k_rate_control";
1166     -#endif
1167     -
1168     if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
1169     hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1170     &sc->sbands[IEEE80211_BAND_2GHZ];
1171     diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
1172     index 267dbfc..b9a8738 100644
1173     --- a/drivers/net/wireless/ath/ath9k/rc.h
1174     +++ b/drivers/net/wireless/ath/ath9k/rc.h
1175     @@ -231,7 +231,7 @@ static inline void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
1176     }
1177     #endif
1178    
1179     -#ifdef CONFIG_ATH9K_RATE_CONTROL
1180     +#ifdef CONFIG_ATH9K_LEGACY_RATE_CONTROL
1181     int ath_rate_control_register(void);
1182     void ath_rate_control_unregister(void);
1183     #else
1184     diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
1185     index 64b637a..911c4c0 100644
1186     --- a/drivers/net/wireless/b43/main.c
1187     +++ b/drivers/net/wireless/b43/main.c
1188     @@ -2451,7 +2451,7 @@ static void b43_request_firmware(struct work_struct *work)
1189     for (i = 0; i < B43_NR_FWTYPES; i++) {
1190     errmsg = ctx->errors[i];
1191     if (strlen(errmsg))
1192     - b43err(dev->wl, errmsg);
1193     + b43err(dev->wl, "%s", errmsg);
1194     }
1195     b43_print_fw_helptext(dev->wl, 1);
1196     goto out;
1197     diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
1198     index d455285..66b3eee 100644
1199     --- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
1200     +++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
1201     @@ -41,7 +41,7 @@
1202     #define WL127X_IFTYPE_MR_VER 5
1203     #define WL127X_MAJOR_MR_VER 7
1204     #define WL127X_SUBTYPE_MR_VER WLCORE_FW_VER_IGNORE
1205     -#define WL127X_MINOR_MR_VER 115
1206     +#define WL127X_MINOR_MR_VER 42
1207    
1208     /* FW chip version for wl128x */
1209     #define WL128X_CHIP_VER 7
1210     diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
1211     index 8bc6c80..c48ee4d 100644
1212     --- a/drivers/rtc/rtc-twl.c
1213     +++ b/drivers/rtc/rtc-twl.c
1214     @@ -524,6 +524,7 @@ static int twl_rtc_probe(struct platform_device *pdev)
1215     }
1216    
1217     platform_set_drvdata(pdev, rtc);
1218     + device_init_wakeup(&pdev->dev, 1);
1219     return 0;
1220    
1221     out2:
1222     diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
1223     index 57cae1f..246c191 100644
1224     --- a/drivers/usb/chipidea/core.c
1225     +++ b/drivers/usb/chipidea/core.c
1226     @@ -279,8 +279,9 @@ static void ci_role_work(struct work_struct *work)
1227    
1228     ci_role_stop(ci);
1229     ci_role_start(ci, role);
1230     - enable_irq(ci->irq);
1231     }
1232     +
1233     + enable_irq(ci->irq);
1234     }
1235    
1236     static ssize_t show_role(struct device *dev, struct device_attribute *attr,
1237     diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
1238     index a172ad5..0c65a81 100644
1239     --- a/drivers/usb/serial/f81232.c
1240     +++ b/drivers/usb/serial/f81232.c
1241     @@ -165,11 +165,12 @@ static void f81232_set_termios(struct tty_struct *tty,
1242     /* FIXME - Stubbed out for now */
1243    
1244     /* Don't change anything if nothing has changed */
1245     - if (!tty_termios_hw_change(&tty->termios, old_termios))
1246     + if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
1247     return;
1248    
1249     /* Do the real work here... */
1250     - tty_termios_copy_hw(&tty->termios, old_termios);
1251     + if (old_termios)
1252     + tty_termios_copy_hw(&tty->termios, old_termios);
1253     }
1254    
1255     static int f81232_tiocmget(struct tty_struct *tty)
1256     @@ -187,12 +188,11 @@ static int f81232_tiocmset(struct tty_struct *tty,
1257    
1258     static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port)
1259     {
1260     - struct ktermios tmp_termios;
1261     int result;
1262    
1263     /* Setup termios */
1264     if (tty)
1265     - f81232_set_termios(tty, port, &tmp_termios);
1266     + f81232_set_termios(tty, port, NULL);
1267    
1268     result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
1269     if (result) {
1270     diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
1271     index 3b10018..3b5ba4f 100644
1272     --- a/drivers/usb/serial/pl2303.c
1273     +++ b/drivers/usb/serial/pl2303.c
1274     @@ -283,7 +283,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
1275     serial settings even to the same values as before. Thus
1276     we actually need to filter in this specific case */
1277    
1278     - if (!tty_termios_hw_change(&tty->termios, old_termios))
1279     + if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
1280     return;
1281    
1282     cflag = tty->termios.c_cflag;
1283     @@ -292,7 +292,8 @@ static void pl2303_set_termios(struct tty_struct *tty,
1284     if (!buf) {
1285     dev_err(&port->dev, "%s - out of memory.\n", __func__);
1286     /* Report back no change occurred */
1287     - tty->termios = *old_termios;
1288     + if (old_termios)
1289     + tty->termios = *old_termios;
1290     return;
1291     }
1292    
1293     @@ -432,7 +433,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
1294     control = priv->line_control;
1295     if ((cflag & CBAUD) == B0)
1296     priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
1297     - else if ((old_termios->c_cflag & CBAUD) == B0)
1298     + else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
1299     priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
1300     if (control != priv->line_control) {
1301     control = priv->line_control;
1302     @@ -491,7 +492,6 @@ static void pl2303_close(struct usb_serial_port *port)
1303    
1304     static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
1305     {
1306     - struct ktermios tmp_termios;
1307     struct usb_serial *serial = port->serial;
1308     struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
1309     int result;
1310     @@ -507,7 +507,7 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
1311    
1312     /* Setup termios */
1313     if (tty)
1314     - pl2303_set_termios(tty, port, &tmp_termios);
1315     + pl2303_set_termios(tty, port, NULL);
1316    
1317     result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
1318     if (result) {
1319     diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
1320     index 549ef68..6607379 100644
1321     --- a/drivers/usb/serial/spcp8x5.c
1322     +++ b/drivers/usb/serial/spcp8x5.c
1323     @@ -314,7 +314,6 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
1324     struct spcp8x5_private *priv = usb_get_serial_port_data(port);
1325     unsigned long flags;
1326     unsigned int cflag = tty->termios.c_cflag;
1327     - unsigned int old_cflag = old_termios->c_cflag;
1328     unsigned short uartdata;
1329     unsigned char buf[2] = {0, 0};
1330     int baud;
1331     @@ -323,15 +322,15 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
1332    
1333    
1334     /* check that they really want us to change something */
1335     - if (!tty_termios_hw_change(&tty->termios, old_termios))
1336     + if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
1337     return;
1338    
1339     /* set DTR/RTS active */
1340     spin_lock_irqsave(&priv->lock, flags);
1341     control = priv->line_control;
1342     - if ((old_cflag & CBAUD) == B0) {
1343     + if (old_termios && (old_termios->c_cflag & CBAUD) == B0) {
1344     priv->line_control |= MCR_DTR;
1345     - if (!(old_cflag & CRTSCTS))
1346     + if (!(old_termios->c_cflag & CRTSCTS))
1347     priv->line_control |= MCR_RTS;
1348     }
1349     if (control != priv->line_control) {
1350     @@ -421,7 +420,6 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
1351     * status of the device. */
1352     static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
1353     {
1354     - struct ktermios tmp_termios;
1355     struct usb_serial *serial = port->serial;
1356     struct spcp8x5_private *priv = usb_get_serial_port_data(port);
1357     int ret;
1358     @@ -442,7 +440,7 @@ static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
1359    
1360     /* Setup termios */
1361     if (tty)
1362     - spcp8x5_set_termios(tty, port, &tmp_termios);
1363     + spcp8x5_set_termios(tty, port, NULL);
1364    
1365     spcp8x5_get_msr(serial->dev, &status, priv->type);
1366    
1367     diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
1368     index 202dd3d..ebbf680 100644
1369     --- a/fs/ceph/locks.c
1370     +++ b/fs/ceph/locks.c
1371     @@ -191,27 +191,23 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
1372     }
1373    
1374     /**
1375     - * Encode the flock and fcntl locks for the given inode into the pagelist.
1376     - * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
1377     - * sequential flock locks.
1378     - * Must be called with lock_flocks() already held.
1379     - * If we encounter more of a specific lock type than expected,
1380     - * we return the value 1.
1381     + * Encode the flock and fcntl locks for the given inode into the ceph_filelock
1382     + * array. Must be called with lock_flocks() already held.
1383     + * If we encounter more of a specific lock type than expected, return -ENOSPC.
1384     */
1385     -int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
1386     - int num_fcntl_locks, int num_flock_locks)
1387     +int ceph_encode_locks_to_buffer(struct inode *inode,
1388     + struct ceph_filelock *flocks,
1389     + int num_fcntl_locks, int num_flock_locks)
1390     {
1391     struct file_lock *lock;
1392     - struct ceph_filelock cephlock;
1393     int err = 0;
1394     int seen_fcntl = 0;
1395     int seen_flock = 0;
1396     + int l = 0;
1397    
1398     dout("encoding %d flock and %d fcntl locks", num_flock_locks,
1399     num_fcntl_locks);
1400     - err = ceph_pagelist_append(pagelist, &num_fcntl_locks, sizeof(u32));
1401     - if (err)
1402     - goto fail;
1403     +
1404     for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
1405     if (lock->fl_flags & FL_POSIX) {
1406     ++seen_fcntl;
1407     @@ -219,19 +215,12 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
1408     err = -ENOSPC;
1409     goto fail;
1410     }
1411     - err = lock_to_ceph_filelock(lock, &cephlock);
1412     + err = lock_to_ceph_filelock(lock, &flocks[l]);
1413     if (err)
1414     goto fail;
1415     - err = ceph_pagelist_append(pagelist, &cephlock,
1416     - sizeof(struct ceph_filelock));
1417     + ++l;
1418     }
1419     - if (err)
1420     - goto fail;
1421     }
1422     -
1423     - err = ceph_pagelist_append(pagelist, &num_flock_locks, sizeof(u32));
1424     - if (err)
1425     - goto fail;
1426     for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
1427     if (lock->fl_flags & FL_FLOCK) {
1428     ++seen_flock;
1429     @@ -239,19 +228,51 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
1430     err = -ENOSPC;
1431     goto fail;
1432     }
1433     - err = lock_to_ceph_filelock(lock, &cephlock);
1434     + err = lock_to_ceph_filelock(lock, &flocks[l]);
1435     if (err)
1436     goto fail;
1437     - err = ceph_pagelist_append(pagelist, &cephlock,
1438     - sizeof(struct ceph_filelock));
1439     + ++l;
1440     }
1441     - if (err)
1442     - goto fail;
1443     }
1444     fail:
1445     return err;
1446     }
1447    
1448     +/**
1449     + * Copy the encoded flock and fcntl locks into the pagelist.
1450     + * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
1451     + * sequential flock locks.
1452     + * Returns zero on success.
1453     + */
1454     +int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
1455     + struct ceph_pagelist *pagelist,
1456     + int num_fcntl_locks, int num_flock_locks)
1457     +{
1458     + int err = 0;
1459     + __le32 nlocks;
1460     +
1461     + nlocks = cpu_to_le32(num_fcntl_locks);
1462     + err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
1463     + if (err)
1464     + goto out_fail;
1465     +
1466     + err = ceph_pagelist_append(pagelist, flocks,
1467     + num_fcntl_locks * sizeof(*flocks));
1468     + if (err)
1469     + goto out_fail;
1470     +
1471     + nlocks = cpu_to_le32(num_flock_locks);
1472     + err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
1473     + if (err)
1474     + goto out_fail;
1475     +
1476     + err = ceph_pagelist_append(pagelist,
1477     + &flocks[num_fcntl_locks],
1478     + num_flock_locks * sizeof(*flocks));
1479     +out_fail:
1480     + return err;
1481     +}
1482     +
1483     /*
1484     * Given a pointer to a lock, convert it to a ceph filelock
1485     */
1486     diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
1487     index 442880d..90903a7 100644
1488     --- a/fs/ceph/mds_client.c
1489     +++ b/fs/ceph/mds_client.c
1490     @@ -364,9 +364,9 @@ void ceph_put_mds_session(struct ceph_mds_session *s)
1491     atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
1492     if (atomic_dec_and_test(&s->s_ref)) {
1493     if (s->s_auth.authorizer)
1494     - s->s_mdsc->fsc->client->monc.auth->ops->destroy_authorizer(
1495     - s->s_mdsc->fsc->client->monc.auth,
1496     - s->s_auth.authorizer);
1497     + ceph_auth_destroy_authorizer(
1498     + s->s_mdsc->fsc->client->monc.auth,
1499     + s->s_auth.authorizer);
1500     kfree(s);
1501     }
1502     }
1503     @@ -2474,39 +2474,44 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
1504    
1505     if (recon_state->flock) {
1506     int num_fcntl_locks, num_flock_locks;
1507     - struct ceph_pagelist_cursor trunc_point;
1508     -
1509     - ceph_pagelist_set_cursor(pagelist, &trunc_point);
1510     - do {
1511     - lock_flocks();
1512     - ceph_count_locks(inode, &num_fcntl_locks,
1513     - &num_flock_locks);
1514     - rec.v2.flock_len = (2*sizeof(u32) +
1515     - (num_fcntl_locks+num_flock_locks) *
1516     - sizeof(struct ceph_filelock));
1517     - unlock_flocks();
1518     -
1519     - /* pre-alloc pagelist */
1520     - ceph_pagelist_truncate(pagelist, &trunc_point);
1521     - err = ceph_pagelist_append(pagelist, &rec, reclen);
1522     - if (!err)
1523     - err = ceph_pagelist_reserve(pagelist,
1524     - rec.v2.flock_len);
1525     -
1526     - /* encode locks */
1527     - if (!err) {
1528     - lock_flocks();
1529     - err = ceph_encode_locks(inode,
1530     - pagelist,
1531     - num_fcntl_locks,
1532     - num_flock_locks);
1533     - unlock_flocks();
1534     - }
1535     - } while (err == -ENOSPC);
1536     + struct ceph_filelock *flocks;
1537     +
1538     +encode_again:
1539     + lock_flocks();
1540     + ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
1541     + unlock_flocks();
1542     + flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
1543     + sizeof(struct ceph_filelock), GFP_NOFS);
1544     + if (!flocks) {
1545     + err = -ENOMEM;
1546     + goto out_free;
1547     + }
1548     + lock_flocks();
1549     + err = ceph_encode_locks_to_buffer(inode, flocks,
1550     + num_fcntl_locks,
1551     + num_flock_locks);
1552     + unlock_flocks();
1553     + if (err) {
1554     + kfree(flocks);
1555     + if (err == -ENOSPC)
1556     + goto encode_again;
1557     + goto out_free;
1558     + }
1559     + /*
1560     + * number of encoded locks is stable, so copy to pagelist
1561     + */
1562     + rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) +
1563     + (num_fcntl_locks+num_flock_locks) *
1564     + sizeof(struct ceph_filelock));
1565     + err = ceph_pagelist_append(pagelist, &rec, reclen);
1566     + if (!err)
1567     + err = ceph_locks_to_pagelist(flocks, pagelist,
1568     + num_fcntl_locks,
1569     + num_flock_locks);
1570     + kfree(flocks);
1571     } else {
1572     err = ceph_pagelist_append(pagelist, &rec, reclen);
1573     }
1574     -
1575     out_free:
1576     kfree(path);
1577     out_dput:
1578     @@ -3433,13 +3438,17 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
1579     struct ceph_auth_handshake *auth = &s->s_auth;
1580    
1581     if (force_new && auth->authorizer) {
1582     - if (ac->ops && ac->ops->destroy_authorizer)
1583     - ac->ops->destroy_authorizer(ac, auth->authorizer);
1584     + ceph_auth_destroy_authorizer(ac, auth->authorizer);
1585     auth->authorizer = NULL;
1586     }
1587     - if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) {
1588     - int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
1589     - auth);
1590     + if (!auth->authorizer) {
1591     + int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
1592     + auth);
1593     + if (ret)
1594     + return ERR_PTR(ret);
1595     + } else {
1596     + int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
1597     + auth);
1598     if (ret)
1599     return ERR_PTR(ret);
1600     }
1601     @@ -3455,7 +3464,7 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len)
1602     struct ceph_mds_client *mdsc = s->s_mdsc;
1603     struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
1604    
1605     - return ac->ops->verify_authorizer_reply(ac, s->s_auth.authorizer, len);
1606     + return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer, len);
1607     }
1608    
1609     static int invalidate_authorizer(struct ceph_connection *con)
1610     @@ -3464,8 +3473,7 @@ static int invalidate_authorizer(struct ceph_connection *con)
1611     struct ceph_mds_client *mdsc = s->s_mdsc;
1612     struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
1613    
1614     - if (ac->ops->invalidate_authorizer)
1615     - ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
1616     + ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
1617    
1618     return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
1619     }
1620     diff --git a/fs/ceph/super.h b/fs/ceph/super.h
1621     index c7b3097..907f214 100644
1622     --- a/fs/ceph/super.h
1623     +++ b/fs/ceph/super.h
1624     @@ -841,8 +841,13 @@ extern const struct export_operations ceph_export_ops;
1625     extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl);
1626     extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl);
1627     extern void ceph_count_locks(struct inode *inode, int *p_num, int *f_num);
1628     -extern int ceph_encode_locks(struct inode *i, struct ceph_pagelist *p,
1629     - int p_locks, int f_locks);
1630     +extern int ceph_encode_locks_to_buffer(struct inode *inode,
1631     + struct ceph_filelock *flocks,
1632     + int num_fcntl_locks,
1633     + int num_flock_locks);
1634     +extern int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
1635     + struct ceph_pagelist *pagelist,
1636     + int num_fcntl_locks, int num_flock_locks);
1637     extern int lock_to_ceph_filelock(struct file_lock *fl, struct ceph_filelock *c);
1638    
1639     /* debugfs.c */
1640     diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
1641     index bd4b5a7..bdfabda 100644
1642     --- a/fs/proc/kmsg.c
1643     +++ b/fs/proc/kmsg.c
1644     @@ -21,12 +21,12 @@ extern wait_queue_head_t log_wait;
1645    
1646     static int kmsg_open(struct inode * inode, struct file * file)
1647     {
1648     - return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
1649     + return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_PROC);
1650     }
1651    
1652     static int kmsg_release(struct inode * inode, struct file * file)
1653     {
1654     - (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
1655     + (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_PROC);
1656     return 0;
1657     }
1658    
1659     @@ -34,15 +34,15 @@ static ssize_t kmsg_read(struct file *file, char __user *buf,
1660     size_t count, loff_t *ppos)
1661     {
1662     if ((file->f_flags & O_NONBLOCK) &&
1663     - !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
1664     + !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC))
1665     return -EAGAIN;
1666     - return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
1667     + return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_PROC);
1668     }
1669    
1670     static unsigned int kmsg_poll(struct file *file, poll_table *wait)
1671     {
1672     poll_wait(file, &log_wait, wait);
1673     - if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
1674     + if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC))
1675     return POLLIN | POLLRDNORM;
1676     return 0;
1677     }
1678     diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
1679     index d4080f3..5f33868 100644
1680     --- a/include/linux/ceph/auth.h
1681     +++ b/include/linux/ceph/auth.h
1682     @@ -52,6 +52,9 @@ struct ceph_auth_client_ops {
1683     */
1684     int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type,
1685     struct ceph_auth_handshake *auth);
1686     + /* ensure that an existing authorizer is up to date */
1687     + int (*update_authorizer)(struct ceph_auth_client *ac, int peer_type,
1688     + struct ceph_auth_handshake *auth);
1689     int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
1690     struct ceph_authorizer *a, size_t len);
1691     void (*destroy_authorizer)(struct ceph_auth_client *ac,
1692     @@ -75,6 +78,8 @@ struct ceph_auth_client {
1693     u64 global_id; /* our unique id in system */
1694     const struct ceph_crypto_key *key; /* our secret key */
1695     unsigned want_keys; /* which services we want */
1696     +
1697     + struct mutex mutex;
1698     };
1699    
1700     extern struct ceph_auth_client *ceph_auth_init(const char *name,
1701     @@ -94,5 +99,18 @@ extern int ceph_build_auth(struct ceph_auth_client *ac,
1702     void *msg_buf, size_t msg_len);
1703    
1704     extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac);
1705     +extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
1706     + int peer_type,
1707     + struct ceph_auth_handshake *auth);
1708     +extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac,
1709     + struct ceph_authorizer *a);
1710     +extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
1711     + int peer_type,
1712     + struct ceph_auth_handshake *a);
1713     +extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
1714     + struct ceph_authorizer *a,
1715     + size_t len);
1716     +extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac,
1717     + int peer_type);
1718    
1719     #endif
1720     diff --git a/include/linux/cpu.h b/include/linux/cpu.h
1721     index ce7a074..714e792 100644
1722     --- a/include/linux/cpu.h
1723     +++ b/include/linux/cpu.h
1724     @@ -175,6 +175,8 @@ extern struct bus_type cpu_subsys;
1725    
1726     extern void get_online_cpus(void);
1727     extern void put_online_cpus(void);
1728     +extern void cpu_hotplug_disable(void);
1729     +extern void cpu_hotplug_enable(void);
1730     #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
1731     #define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
1732     #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
1733     @@ -198,6 +200,8 @@ static inline void cpu_hotplug_driver_unlock(void)
1734    
1735     #define get_online_cpus() do { } while (0)
1736     #define put_online_cpus() do { } while (0)
1737     +#define cpu_hotplug_disable() do { } while (0)
1738     +#define cpu_hotplug_enable() do { } while (0)
1739     #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
1740     /* These aren't inline functions due to a GCC bug. */
1741     #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
1742     diff --git a/include/linux/swapops.h b/include/linux/swapops.h
1743     index 47ead51..c5fd30d 100644
1744     --- a/include/linux/swapops.h
1745     +++ b/include/linux/swapops.h
1746     @@ -137,6 +137,7 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
1747    
1748     extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
1749     unsigned long address);
1750     +extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte);
1751     #else
1752    
1753     #define make_migration_entry(page, write) swp_entry(0, 0)
1754     @@ -148,6 +149,8 @@ static inline int is_migration_entry(swp_entry_t swp)
1755     static inline void make_migration_entry_read(swp_entry_t *entryp) { }
1756     static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
1757     unsigned long address) { }
1758     +static inline void migration_entry_wait_huge(struct mm_struct *mm,
1759     + pte_t *pte) { }
1760     static inline int is_write_migration_entry(swp_entry_t entry)
1761     {
1762     return 0;
1763     diff --git a/include/linux/syslog.h b/include/linux/syslog.h
1764     index 3891139..98a3153 100644
1765     --- a/include/linux/syslog.h
1766     +++ b/include/linux/syslog.h
1767     @@ -44,8 +44,8 @@
1768     /* Return size of the log buffer */
1769     #define SYSLOG_ACTION_SIZE_BUFFER 10
1770    
1771     -#define SYSLOG_FROM_CALL 0
1772     -#define SYSLOG_FROM_FILE 1
1773     +#define SYSLOG_FROM_READER 0
1774     +#define SYSLOG_FROM_PROC 1
1775    
1776     int do_syslog(int type, char __user *buf, int count, bool from_file);
1777    
1778     diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
1779     index 90cf75a..05bf874 100644
1780     --- a/include/net/bluetooth/hci_core.h
1781     +++ b/include/net/bluetooth/hci_core.h
1782     @@ -1065,6 +1065,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
1783     int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
1784     int mgmt_index_added(struct hci_dev *hdev);
1785     int mgmt_index_removed(struct hci_dev *hdev);
1786     +int mgmt_set_powered_failed(struct hci_dev *hdev, int err);
1787     int mgmt_powered(struct hci_dev *hdev, u8 powered);
1788     int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
1789     int mgmt_connectable(struct hci_dev *hdev, u8 connectable);
1790     diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
1791     index 22980a7..9944c3e 100644
1792     --- a/include/net/bluetooth/mgmt.h
1793     +++ b/include/net/bluetooth/mgmt.h
1794     @@ -42,6 +42,7 @@
1795     #define MGMT_STATUS_NOT_POWERED 0x0f
1796     #define MGMT_STATUS_CANCELLED 0x10
1797     #define MGMT_STATUS_INVALID_INDEX 0x11
1798     +#define MGMT_STATUS_RFKILLED 0x12
1799    
1800     struct mgmt_hdr {
1801     __le16 opcode;
1802     diff --git a/kernel/audit.c b/kernel/audit.c
1803     index d596e53..8a667f10 100644
1804     --- a/kernel/audit.c
1805     +++ b/kernel/audit.c
1806     @@ -1107,7 +1107,7 @@ static inline void audit_get_stamp(struct audit_context *ctx,
1807     static void wait_for_auditd(unsigned long sleep_time)
1808     {
1809     DECLARE_WAITQUEUE(wait, current);
1810     - set_current_state(TASK_INTERRUPTIBLE);
1811     + set_current_state(TASK_UNINTERRUPTIBLE);
1812     add_wait_queue(&audit_backlog_wait, &wait);
1813    
1814     if (audit_backlog_limit &&
1815     diff --git a/kernel/cpu.c b/kernel/cpu.c
1816     index b5e4ab2..198a388 100644
1817     --- a/kernel/cpu.c
1818     +++ b/kernel/cpu.c
1819     @@ -133,6 +133,27 @@ static void cpu_hotplug_done(void)
1820     mutex_unlock(&cpu_hotplug.lock);
1821     }
1822    
1823     +/*
1824     + * Wait for currently running CPU hotplug operations to complete (if any) and
1825     + * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
1826     + * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
1827     + * hotplug path before performing hotplug operations. So acquiring that lock
1828     + * guarantees mutual exclusion from any currently running hotplug operations.
1829     + */
1830     +void cpu_hotplug_disable(void)
1831     +{
1832     + cpu_maps_update_begin();
1833     + cpu_hotplug_disabled = 1;
1834     + cpu_maps_update_done();
1835     +}
1836     +
1837     +void cpu_hotplug_enable(void)
1838     +{
1839     + cpu_maps_update_begin();
1840     + cpu_hotplug_disabled = 0;
1841     + cpu_maps_update_done();
1842     +}
1843     +
1844     #else /* #if CONFIG_HOTPLUG_CPU */
1845     static void cpu_hotplug_begin(void) {}
1846     static void cpu_hotplug_done(void) {}
1847     @@ -541,36 +562,6 @@ static int __init alloc_frozen_cpus(void)
1848     core_initcall(alloc_frozen_cpus);
1849    
1850     /*
1851     - * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
1852     - * hotplug when tasks are about to be frozen. Also, don't allow the freezer
1853     - * to continue until any currently running CPU hotplug operation gets
1854     - * completed.
1855     - * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
1856     - * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
1857     - * CPU hotplug path and released only after it is complete. Thus, we
1858     - * (and hence the freezer) will block here until any currently running CPU
1859     - * hotplug operation gets completed.
1860     - */
1861     -void cpu_hotplug_disable_before_freeze(void)
1862     -{
1863     - cpu_maps_update_begin();
1864     - cpu_hotplug_disabled = 1;
1865     - cpu_maps_update_done();
1866     -}
1867     -
1868     -
1869     -/*
1870     - * When tasks have been thawed, re-enable regular CPU hotplug (which had been
1871     - * disabled while beginning to freeze tasks).
1872     - */
1873     -void cpu_hotplug_enable_after_thaw(void)
1874     -{
1875     - cpu_maps_update_begin();
1876     - cpu_hotplug_disabled = 0;
1877     - cpu_maps_update_done();
1878     -}
1879     -
1880     -/*
1881     * When callbacks for CPU hotplug notifications are being executed, we must
1882     * ensure that the state of the system with respect to the tasks being frozen
1883     * or not, as reported by the notification, remains unchanged *throughout the
1884     @@ -589,12 +580,12 @@ cpu_hotplug_pm_callback(struct notifier_block *nb,
1885    
1886     case PM_SUSPEND_PREPARE:
1887     case PM_HIBERNATION_PREPARE:
1888     - cpu_hotplug_disable_before_freeze();
1889     + cpu_hotplug_disable();
1890     break;
1891    
1892     case PM_POST_SUSPEND:
1893     case PM_POST_HIBERNATION:
1894     - cpu_hotplug_enable_after_thaw();
1895     + cpu_hotplug_enable();
1896     break;
1897    
1898     default:
1899     diff --git a/kernel/printk.c b/kernel/printk.c
1900     index abbdd9e..0e4eba6a 100644
1901     --- a/kernel/printk.c
1902     +++ b/kernel/printk.c
1903     @@ -368,6 +368,53 @@ static void log_store(int facility, int level,
1904     log_next_seq++;
1905     }
1906    
1907     +#ifdef CONFIG_SECURITY_DMESG_RESTRICT
1908     +int dmesg_restrict = 1;
1909     +#else
1910     +int dmesg_restrict;
1911     +#endif
1912     +
1913     +static int syslog_action_restricted(int type)
1914     +{
1915     + if (dmesg_restrict)
1916     + return 1;
1917     + /*
1918     + * Unless restricted, we allow "read all" and "get buffer size"
1919     + * for everybody.
1920     + */
1921     + return type != SYSLOG_ACTION_READ_ALL &&
1922     + type != SYSLOG_ACTION_SIZE_BUFFER;
1923     +}
1924     +
1925     +static int check_syslog_permissions(int type, bool from_file)
1926     +{
1927     + /*
1928     + * If this is from /proc/kmsg and we've already opened it, then we've
1929     + * already done the capabilities checks at open time.
1930     + */
1931     + if (from_file && type != SYSLOG_ACTION_OPEN)
1932     + return 0;
1933     +
1934     + if (syslog_action_restricted(type)) {
1935     + if (capable(CAP_SYSLOG))
1936     + return 0;
1937     + /*
1938     + * For historical reasons, accept CAP_SYS_ADMIN too, with
1939     + * a warning.
1940     + */
1941     + if (capable(CAP_SYS_ADMIN)) {
1942     + pr_warn_once("%s (%d): Attempt to access syslog with "
1943     + "CAP_SYS_ADMIN but no CAP_SYSLOG "
1944     + "(deprecated).\n",
1945     + current->comm, task_pid_nr(current));
1946     + return 0;
1947     + }
1948     + return -EPERM;
1949     + }
1950     + return security_syslog(type);
1951     +}
1952     +
1953     +
1954     /* /dev/kmsg - userspace message inject/listen interface */
1955     struct devkmsg_user {
1956     u64 seq;
1957     @@ -624,7 +671,8 @@ static int devkmsg_open(struct inode *inode, struct file *file)
1958     if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1959     return 0;
1960    
1961     - err = security_syslog(SYSLOG_ACTION_READ_ALL);
1962     + err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
1963     + SYSLOG_FROM_READER);
1964     if (err)
1965     return err;
1966    
1967     @@ -817,45 +865,6 @@ static inline void boot_delay_msec(int level)
1968     }
1969     #endif
1970    
1971     -#ifdef CONFIG_SECURITY_DMESG_RESTRICT
1972     -int dmesg_restrict = 1;
1973     -#else
1974     -int dmesg_restrict;
1975     -#endif
1976     -
1977     -static int syslog_action_restricted(int type)
1978     -{
1979     - if (dmesg_restrict)
1980     - return 1;
1981     - /* Unless restricted, we allow "read all" and "get buffer size" for everybody */
1982     - return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
1983     -}
1984     -
1985     -static int check_syslog_permissions(int type, bool from_file)
1986     -{
1987     - /*
1988     - * If this is from /proc/kmsg and we've already opened it, then we've
1989     - * already done the capabilities checks at open time.
1990     - */
1991     - if (from_file && type != SYSLOG_ACTION_OPEN)
1992     - return 0;
1993     -
1994     - if (syslog_action_restricted(type)) {
1995     - if (capable(CAP_SYSLOG))
1996     - return 0;
1997     - /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
1998     - if (capable(CAP_SYS_ADMIN)) {
1999     - printk_once(KERN_WARNING "%s (%d): "
2000     - "Attempt to access syslog with CAP_SYS_ADMIN "
2001     - "but no CAP_SYSLOG (deprecated).\n",
2002     - current->comm, task_pid_nr(current));
2003     - return 0;
2004     - }
2005     - return -EPERM;
2006     - }
2007     - return 0;
2008     -}
2009     -
2010     #if defined(CONFIG_PRINTK_TIME)
2011     static bool printk_time = 1;
2012     #else
2013     @@ -1253,7 +1262,7 @@ out:
2014    
2015     SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
2016     {
2017     - return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
2018     + return do_syslog(type, buf, len, SYSLOG_FROM_READER);
2019     }
2020    
2021     /*
2022     diff --git a/kernel/sys.c b/kernel/sys.c
2023     index 0da73cf..e5f0aca 100644
2024     --- a/kernel/sys.c
2025     +++ b/kernel/sys.c
2026     @@ -357,6 +357,29 @@ int unregister_reboot_notifier(struct notifier_block *nb)
2027     }
2028     EXPORT_SYMBOL(unregister_reboot_notifier);
2029    
2030     +/* Add backwards compatibility for stable trees. */
2031     +#ifndef PF_NO_SETAFFINITY
2032     +#define PF_NO_SETAFFINITY PF_THREAD_BOUND
2033     +#endif
2034     +
2035     +static void migrate_to_reboot_cpu(void)
2036     +{
2037     + /* The boot cpu is always logical cpu 0 */
2038     + int cpu = 0;
2039     +
2040     + cpu_hotplug_disable();
2041     +
2042     + /* Make certain the cpu I'm about to reboot on is online */
2043     + if (!cpu_online(cpu))
2044     + cpu = cpumask_first(cpu_online_mask);
2045     +
2046     + /* Prevent races with other tasks migrating this task */
2047     + current->flags |= PF_NO_SETAFFINITY;
2048     +
2049     + /* Make certain I only run on the appropriate processor */
2050     + set_cpus_allowed_ptr(current, cpumask_of(cpu));
2051     +}
2052     +
2053     /**
2054     * kernel_restart - reboot the system
2055     * @cmd: pointer to buffer containing command to execute for restart
2056     @@ -368,7 +391,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier);
2057     void kernel_restart(char *cmd)
2058     {
2059     kernel_restart_prepare(cmd);
2060     - disable_nonboot_cpus();
2061     + migrate_to_reboot_cpu();
2062     syscore_shutdown();
2063     if (!cmd)
2064     printk(KERN_EMERG "Restarting system.\n");
2065     @@ -395,7 +418,7 @@ static void kernel_shutdown_prepare(enum system_states state)
2066     void kernel_halt(void)
2067     {
2068     kernel_shutdown_prepare(SYSTEM_HALT);
2069     - disable_nonboot_cpus();
2070     + migrate_to_reboot_cpu();
2071     syscore_shutdown();
2072     printk(KERN_EMERG "System halted.\n");
2073     kmsg_dump(KMSG_DUMP_HALT);
2074     @@ -414,7 +437,7 @@ void kernel_power_off(void)
2075     kernel_shutdown_prepare(SYSTEM_POWER_OFF);
2076     if (pm_power_off_prepare)
2077     pm_power_off_prepare();
2078     - disable_nonboot_cpus();
2079     + migrate_to_reboot_cpu();
2080     syscore_shutdown();
2081     printk(KERN_EMERG "Power down.\n");
2082     kmsg_dump(KMSG_DUMP_POWEROFF);
2083     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2084     index 1a12f5b..ce4cb19 100644
2085     --- a/mm/hugetlb.c
2086     +++ b/mm/hugetlb.c
2087     @@ -2823,7 +2823,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2088     if (ptep) {
2089     entry = huge_ptep_get(ptep);
2090     if (unlikely(is_hugetlb_entry_migration(entry))) {
2091     - migration_entry_wait(mm, (pmd_t *)ptep, address);
2092     + migration_entry_wait_huge(mm, ptep);
2093     return 0;
2094     } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2095     return VM_FAULT_HWPOISON_LARGE |
2096     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2097     index 9630d58..f10c112 100644
2098     --- a/mm/memcontrol.c
2099     +++ b/mm/memcontrol.c
2100     @@ -3033,8 +3033,6 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
2101     return -ENOMEM;
2102     }
2103    
2104     - INIT_WORK(&s->memcg_params->destroy,
2105     - kmem_cache_destroy_work_func);
2106     s->memcg_params->is_root_cache = true;
2107    
2108     /*
2109     diff --git a/mm/migrate.c b/mm/migrate.c
2110     index 22ed5c1..c04d9af 100644
2111     --- a/mm/migrate.c
2112     +++ b/mm/migrate.c
2113     @@ -200,15 +200,14 @@ static void remove_migration_ptes(struct page *old, struct page *new)
2114     * get to the page and wait until migration is finished.
2115     * When we return from this function the fault will be retried.
2116     */
2117     -void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
2118     - unsigned long address)
2119     +static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
2120     + spinlock_t *ptl)
2121     {
2122     - pte_t *ptep, pte;
2123     - spinlock_t *ptl;
2124     + pte_t pte;
2125     swp_entry_t entry;
2126     struct page *page;
2127    
2128     - ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
2129     + spin_lock(ptl);
2130     pte = *ptep;
2131     if (!is_swap_pte(pte))
2132     goto out;
2133     @@ -236,6 +235,20 @@ out:
2134     pte_unmap_unlock(ptep, ptl);
2135     }
2136    
2137     +void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
2138     + unsigned long address)
2139     +{
2140     + spinlock_t *ptl = pte_lockptr(mm, pmd);
2141     + pte_t *ptep = pte_offset_map(pmd, address);
2142     + __migration_entry_wait(mm, ptep, ptl);
2143     +}
2144     +
2145     +void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
2146     +{
2147     + spinlock_t *ptl = &(mm)->page_table_lock;
2148     + __migration_entry_wait(mm, pte, ptl);
2149     +}
2150     +
2151     #ifdef CONFIG_BLOCK
2152     /* Returns true if all buffers are successfully locked */
2153     static bool buffer_migrate_lock_buffers(struct buffer_head *head,
2154     diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2155     index 8fcced7..0d4fef2 100644
2156     --- a/mm/page_alloc.c
2157     +++ b/mm/page_alloc.c
2158     @@ -1626,6 +1626,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
2159     long min = mark;
2160     long lowmem_reserve = z->lowmem_reserve[classzone_idx];
2161     int o;
2162     + long free_cma = 0;
2163    
2164     free_pages -= (1 << order) - 1;
2165     if (alloc_flags & ALLOC_HIGH)
2166     @@ -1635,9 +1636,10 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
2167     #ifdef CONFIG_CMA
2168     /* If allocation can't use CMA areas don't use free CMA pages */
2169     if (!(alloc_flags & ALLOC_CMA))
2170     - free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
2171     + free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
2172     #endif
2173     - if (free_pages <= min + lowmem_reserve)
2174     +
2175     + if (free_pages - free_cma <= min + lowmem_reserve)
2176     return false;
2177     for (o = 0; o < order; o++) {
2178     /* At the next order, this order's pages become unavailable */
2179     diff --git a/mm/swap_state.c b/mm/swap_state.c
2180     index 7efcf15..44574ce 100644
2181     --- a/mm/swap_state.c
2182     +++ b/mm/swap_state.c
2183     @@ -336,8 +336,24 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
2184     * Swap entry may have been freed since our caller observed it.
2185     */
2186     err = swapcache_prepare(entry);
2187     - if (err == -EEXIST) { /* seems racy */
2188     + if (err == -EEXIST) {
2189     radix_tree_preload_end();
2190     + /*
2191     + * We might race against get_swap_page() and stumble
2192     + * across a SWAP_HAS_CACHE swap_map entry whose page
2193     + * has not been brought into the swapcache yet, while
2194     + * the other end is scheduled away waiting on discard
2195     + * I/O completion at scan_swap_map().
2196     + *
2197     + * In order to avoid turning this transitory state
2198     + * into a permanent loop around this -EEXIST case
2199     + * if !CONFIG_PREEMPT and the I/O completion happens
2200     + * to be waiting on the CPU waitqueue where we are now
2201     + * busy looping, we just conditionally invoke the
2202     + * scheduler here, if there are some more important
2203     + * tasks to run.
2204     + */
2205     + cond_resched();
2206     continue;
2207     }
2208     if (err) { /* swp entry is obsolete ? */
2209     diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
2210     index 60793e7..b88605f 100644
2211     --- a/net/bluetooth/hci_core.c
2212     +++ b/net/bluetooth/hci_core.c
2213     @@ -1139,11 +1139,15 @@ static const struct rfkill_ops hci_rfkill_ops = {
2214     static void hci_power_on(struct work_struct *work)
2215     {
2216     struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2217     + int err;
2218    
2219     BT_DBG("%s", hdev->name);
2220    
2221     - if (hci_dev_open(hdev->id) < 0)
2222     + err = hci_dev_open(hdev->id);
2223     + if (err < 0) {
2224     + mgmt_set_powered_failed(hdev, err);
2225     return;
2226     + }
2227    
2228     if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2229     queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2230     diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
2231     index 7c7e932..c5f9cd6 100644
2232     --- a/net/bluetooth/l2cap_core.c
2233     +++ b/net/bluetooth/l2cap_core.c
2234     @@ -3568,10 +3568,14 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2235     }
2236    
2237     static inline int l2cap_command_rej(struct l2cap_conn *conn,
2238     - struct l2cap_cmd_hdr *cmd, u8 *data)
2239     + struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2240     + u8 *data)
2241     {
2242     struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2243    
2244     + if (cmd_len < sizeof(*rej))
2245     + return -EPROTO;
2246     +
2247     if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2248     return 0;
2249    
2250     @@ -3720,11 +3724,14 @@ sendresp:
2251     }
2252    
2253     static int l2cap_connect_req(struct l2cap_conn *conn,
2254     - struct l2cap_cmd_hdr *cmd, u8 *data)
2255     + struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2256     {
2257     struct hci_dev *hdev = conn->hcon->hdev;
2258     struct hci_conn *hcon = conn->hcon;
2259    
2260     + if (cmd_len < sizeof(struct l2cap_conn_req))
2261     + return -EPROTO;
2262     +
2263     hci_dev_lock(hdev);
2264     if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2265     !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
2266     @@ -3738,7 +3745,8 @@ static int l2cap_connect_req(struct l2cap_conn *conn,
2267     }
2268    
2269     static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
2270     - struct l2cap_cmd_hdr *cmd, u8 *data)
2271     + struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2272     + u8 *data)
2273     {
2274     struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2275     u16 scid, dcid, result, status;
2276     @@ -3746,6 +3754,9 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
2277     u8 req[128];
2278     int err;
2279    
2280     + if (cmd_len < sizeof(*rsp))
2281     + return -EPROTO;
2282     +
2283     scid = __le16_to_cpu(rsp->scid);
2284     dcid = __le16_to_cpu(rsp->dcid);
2285     result = __le16_to_cpu(rsp->result);
2286     @@ -3843,6 +3854,9 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
2287     struct l2cap_chan *chan;
2288     int len, err = 0;
2289    
2290     + if (cmd_len < sizeof(*req))
2291     + return -EPROTO;
2292     +
2293     dcid = __le16_to_cpu(req->dcid);
2294     flags = __le16_to_cpu(req->flags);
2295    
2296     @@ -3866,7 +3880,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
2297    
2298     /* Reject if config buffer is too small. */
2299     len = cmd_len - sizeof(*req);
2300     - if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2301     + if (chan->conf_len + len > sizeof(chan->conf_req)) {
2302     l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2303     l2cap_build_conf_rsp(chan, rsp,
2304     L2CAP_CONF_REJECT, flags), rsp);
2305     @@ -3944,14 +3958,18 @@ unlock:
2306     }
2307    
2308     static inline int l2cap_config_rsp(struct l2cap_conn *conn,
2309     - struct l2cap_cmd_hdr *cmd, u8 *data)
2310     + struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2311     + u8 *data)
2312     {
2313     struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2314     u16 scid, flags, result;
2315     struct l2cap_chan *chan;
2316     - int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
2317     + int len = cmd_len - sizeof(*rsp);
2318     int err = 0;
2319    
2320     + if (cmd_len < sizeof(*rsp))
2321     + return -EPROTO;
2322     +
2323     scid = __le16_to_cpu(rsp->scid);
2324     flags = __le16_to_cpu(rsp->flags);
2325     result = __le16_to_cpu(rsp->result);
2326     @@ -4052,7 +4070,8 @@ done:
2327     }
2328    
2329     static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
2330     - struct l2cap_cmd_hdr *cmd, u8 *data)
2331     + struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2332     + u8 *data)
2333     {
2334     struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2335     struct l2cap_disconn_rsp rsp;
2336     @@ -4060,6 +4079,9 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
2337     struct l2cap_chan *chan;
2338     struct sock *sk;
2339    
2340     + if (cmd_len != sizeof(*req))
2341     + return -EPROTO;
2342     +
2343     scid = __le16_to_cpu(req->scid);
2344     dcid = __le16_to_cpu(req->dcid);
2345    
2346     @@ -4099,12 +4121,16 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
2347     }
2348    
2349     static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
2350     - struct l2cap_cmd_hdr *cmd, u8 *data)
2351     + struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2352     + u8 *data)
2353     {
2354     struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2355     u16 dcid, scid;
2356     struct l2cap_chan *chan;
2357    
2358     + if (cmd_len != sizeof(*rsp))
2359     + return -EPROTO;
2360     +
2361     scid = __le16_to_cpu(rsp->scid);
2362     dcid = __le16_to_cpu(rsp->dcid);
2363    
2364     @@ -4134,11 +4160,15 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
2365     }
2366    
2367     static inline int l2cap_information_req(struct l2cap_conn *conn,
2368     - struct l2cap_cmd_hdr *cmd, u8 *data)
2369     + struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2370     + u8 *data)
2371     {
2372     struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2373     u16 type;
2374    
2375     + if (cmd_len != sizeof(*req))
2376     + return -EPROTO;
2377     +
2378     type = __le16_to_cpu(req->type);
2379    
2380     BT_DBG("type 0x%4.4x", type);
2381     @@ -4185,11 +4215,15 @@ static inline int l2cap_information_req(struct l2cap_conn *conn,
2382     }
2383    
2384     static inline int l2cap_information_rsp(struct l2cap_conn *conn,
2385     - struct l2cap_cmd_hdr *cmd, u8 *data)
2386     + struct l2cap_cmd_hdr *cmd, u16 cmd_len,
2387     + u8 *data)
2388     {
2389     struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2390     u16 type, result;
2391    
2392     + if (cmd_len != sizeof(*rsp))
2393     + return -EPROTO;
2394     +
2395     type = __le16_to_cpu(rsp->type);
2396     result = __le16_to_cpu(rsp->result);
2397    
2398     @@ -5055,16 +5089,16 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2399    
2400     switch (cmd->code) {
2401     case L2CAP_COMMAND_REJ:
2402     - l2cap_command_rej(conn, cmd, data);
2403     + l2cap_command_rej(conn, cmd, cmd_len, data);
2404     break;
2405    
2406     case L2CAP_CONN_REQ:
2407     - err = l2cap_connect_req(conn, cmd, data);
2408     + err = l2cap_connect_req(conn, cmd, cmd_len, data);
2409     break;
2410    
2411     case L2CAP_CONN_RSP:
2412     case L2CAP_CREATE_CHAN_RSP:
2413     - err = l2cap_connect_create_rsp(conn, cmd, data);
2414     + err = l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
2415     break;
2416    
2417     case L2CAP_CONF_REQ:
2418     @@ -5072,15 +5106,15 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2419     break;
2420    
2421     case L2CAP_CONF_RSP:
2422     - err = l2cap_config_rsp(conn, cmd, data);
2423     + err = l2cap_config_rsp(conn, cmd, cmd_len, data);
2424     break;
2425    
2426     case L2CAP_DISCONN_REQ:
2427     - err = l2cap_disconnect_req(conn, cmd, data);
2428     + err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
2429     break;
2430    
2431     case L2CAP_DISCONN_RSP:
2432     - err = l2cap_disconnect_rsp(conn, cmd, data);
2433     + err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
2434     break;
2435    
2436     case L2CAP_ECHO_REQ:
2437     @@ -5091,11 +5125,11 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2438     break;
2439    
2440     case L2CAP_INFO_REQ:
2441     - err = l2cap_information_req(conn, cmd, data);
2442     + err = l2cap_information_req(conn, cmd, cmd_len, data);
2443     break;
2444    
2445     case L2CAP_INFO_RSP:
2446     - err = l2cap_information_rsp(conn, cmd, data);
2447     + err = l2cap_information_rsp(conn, cmd, cmd_len, data);
2448     break;
2449    
2450     case L2CAP_CREATE_CHAN_REQ:
2451     diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
2452     index 39395c7..8b649d9 100644
2453     --- a/net/bluetooth/mgmt.c
2454     +++ b/net/bluetooth/mgmt.c
2455     @@ -3124,6 +3124,27 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
2456     return err;
2457     }
2458    
2459     +int mgmt_set_powered_failed(struct hci_dev *hdev, int err)
2460     +{
2461     + struct pending_cmd *cmd;
2462     + u8 status;
2463     +
2464     + cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
2465     + if (!cmd)
2466     + return -ENOENT;
2467     +
2468     + if (err == -ERFKILL)
2469     + status = MGMT_STATUS_RFKILLED;
2470     + else
2471     + status = MGMT_STATUS_FAILED;
2472     +
2473     + err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
2474     +
2475     + mgmt_pending_remove(cmd);
2476     +
2477     + return err;
2478     +}
2479     +
2480     int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
2481     {
2482     struct cmd_lookup match = { NULL, hdev };
2483     diff --git a/net/ceph/auth.c b/net/ceph/auth.c
2484     index b4bf4ac..6b923bc 100644
2485     --- a/net/ceph/auth.c
2486     +++ b/net/ceph/auth.c
2487     @@ -47,6 +47,7 @@ struct ceph_auth_client *ceph_auth_init(const char *name, const struct ceph_cryp
2488     if (!ac)
2489     goto out;
2490    
2491     + mutex_init(&ac->mutex);
2492     ac->negotiating = true;
2493     if (name)
2494     ac->name = name;
2495     @@ -73,10 +74,12 @@ void ceph_auth_destroy(struct ceph_auth_client *ac)
2496     */
2497     void ceph_auth_reset(struct ceph_auth_client *ac)
2498     {
2499     + mutex_lock(&ac->mutex);
2500     dout("auth_reset %p\n", ac);
2501     if (ac->ops && !ac->negotiating)
2502     ac->ops->reset(ac);
2503     ac->negotiating = true;
2504     + mutex_unlock(&ac->mutex);
2505     }
2506    
2507     int ceph_entity_name_encode(const char *name, void **p, void *end)
2508     @@ -102,6 +105,7 @@ int ceph_auth_build_hello(struct ceph_auth_client *ac, void *buf, size_t len)
2509     int i, num;
2510     int ret;
2511    
2512     + mutex_lock(&ac->mutex);
2513     dout("auth_build_hello\n");
2514     monhdr->have_version = 0;
2515     monhdr->session_mon = cpu_to_le16(-1);
2516     @@ -122,15 +126,19 @@ int ceph_auth_build_hello(struct ceph_auth_client *ac, void *buf, size_t len)
2517    
2518     ret = ceph_entity_name_encode(ac->name, &p, end);
2519     if (ret < 0)
2520     - return ret;
2521     + goto out;
2522     ceph_decode_need(&p, end, sizeof(u64), bad);
2523     ceph_encode_64(&p, ac->global_id);
2524    
2525     ceph_encode_32(&lenp, p - lenp - sizeof(u32));
2526     - return p - buf;
2527     + ret = p - buf;
2528     +out:
2529     + mutex_unlock(&ac->mutex);
2530     + return ret;
2531    
2532     bad:
2533     - return -ERANGE;
2534     + ret = -ERANGE;
2535     + goto out;
2536     }
2537    
2538     static int ceph_build_auth_request(struct ceph_auth_client *ac,
2539     @@ -151,11 +159,13 @@ static int ceph_build_auth_request(struct ceph_auth_client *ac,
2540     if (ret < 0) {
2541     pr_err("error %d building auth method %s request\n", ret,
2542     ac->ops->name);
2543     - return ret;
2544     + goto out;
2545     }
2546     dout(" built request %d bytes\n", ret);
2547     ceph_encode_32(&p, ret);
2548     - return p + ret - msg_buf;
2549     + ret = p + ret - msg_buf;
2550     +out:
2551     + return ret;
2552     }
2553    
2554     /*
2555     @@ -176,6 +186,7 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac,
2556     int result_msg_len;
2557     int ret = -EINVAL;
2558    
2559     + mutex_lock(&ac->mutex);
2560     dout("handle_auth_reply %p %p\n", p, end);
2561     ceph_decode_need(&p, end, sizeof(u32) * 3 + sizeof(u64), bad);
2562     protocol = ceph_decode_32(&p);
2563     @@ -227,33 +238,103 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac,
2564    
2565     ret = ac->ops->handle_reply(ac, result, payload, payload_end);
2566     if (ret == -EAGAIN) {
2567     - return ceph_build_auth_request(ac, reply_buf, reply_len);
2568     + ret = ceph_build_auth_request(ac, reply_buf, reply_len);
2569     } else if (ret) {
2570     pr_err("auth method '%s' error %d\n", ac->ops->name, ret);
2571     - return ret;
2572     }
2573     - return 0;
2574    
2575     -bad:
2576     - pr_err("failed to decode auth msg\n");
2577     out:
2578     + mutex_unlock(&ac->mutex);
2579     return ret;
2580     +
2581     +bad:
2582     + pr_err("failed to decode auth msg\n");
2583     + ret = -EINVAL;
2584     + goto out;
2585     }
2586    
2587     int ceph_build_auth(struct ceph_auth_client *ac,
2588     void *msg_buf, size_t msg_len)
2589     {
2590     + int ret = 0;
2591     +
2592     + mutex_lock(&ac->mutex);
2593     if (!ac->protocol)
2594     - return ceph_auth_build_hello(ac, msg_buf, msg_len);
2595     - BUG_ON(!ac->ops);
2596     - if (ac->ops->should_authenticate(ac))
2597     - return ceph_build_auth_request(ac, msg_buf, msg_len);
2598     - return 0;
2599     + ret = ceph_auth_build_hello(ac, msg_buf, msg_len);
2600     + else if (ac->ops->should_authenticate(ac))
2601     + ret = ceph_build_auth_request(ac, msg_buf, msg_len);
2602     + mutex_unlock(&ac->mutex);
2603     + return ret;
2604     }
2605    
2606     int ceph_auth_is_authenticated(struct ceph_auth_client *ac)
2607     {
2608     - if (!ac->ops)
2609     - return 0;
2610     - return ac->ops->is_authenticated(ac);
2611     + int ret = 0;
2612     +
2613     + mutex_lock(&ac->mutex);
2614     + if (ac->ops)
2615     + ret = ac->ops->is_authenticated(ac);
2616     + mutex_unlock(&ac->mutex);
2617     + return ret;
2618     +}
2619     +EXPORT_SYMBOL(ceph_auth_is_authenticated);
2620     +
2621     +int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
2622     + int peer_type,
2623     + struct ceph_auth_handshake *auth)
2624     +{
2625     + int ret = 0;
2626     +
2627     + mutex_lock(&ac->mutex);
2628     + if (ac->ops && ac->ops->create_authorizer)
2629     + ret = ac->ops->create_authorizer(ac, peer_type, auth);
2630     + mutex_unlock(&ac->mutex);
2631     + return ret;
2632     +}
2633     +EXPORT_SYMBOL(ceph_auth_create_authorizer);
2634     +
2635     +void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac,
2636     + struct ceph_authorizer *a)
2637     +{
2638     + mutex_lock(&ac->mutex);
2639     + if (ac->ops && ac->ops->destroy_authorizer)
2640     + ac->ops->destroy_authorizer(ac, a);
2641     + mutex_unlock(&ac->mutex);
2642     +}
2643     +EXPORT_SYMBOL(ceph_auth_destroy_authorizer);
2644     +
2645     +int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
2646     + int peer_type,
2647     + struct ceph_auth_handshake *a)
2648     +{
2649     + int ret = 0;
2650     +
2651     + mutex_lock(&ac->mutex);
2652     + if (ac->ops && ac->ops->update_authorizer)
2653     + ret = ac->ops->update_authorizer(ac, peer_type, a);
2654     + mutex_unlock(&ac->mutex);
2655     + return ret;
2656     +}
2657     +EXPORT_SYMBOL(ceph_auth_update_authorizer);
2658     +
2659     +int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
2660     + struct ceph_authorizer *a, size_t len)
2661     +{
2662     + int ret = 0;
2663     +
2664     + mutex_lock(&ac->mutex);
2665     + if (ac->ops && ac->ops->verify_authorizer_reply)
2666     + ret = ac->ops->verify_authorizer_reply(ac, a, len);
2667     + mutex_unlock(&ac->mutex);
2668     + return ret;
2669     +}
2670     +EXPORT_SYMBOL(ceph_auth_verify_authorizer_reply);
2671     +
2672     +void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, int peer_type)
2673     +{
2674     + mutex_lock(&ac->mutex);
2675     + if (ac->ops && ac->ops->invalidate_authorizer)
2676     + ac->ops->invalidate_authorizer(ac, peer_type);
2677     + mutex_unlock(&ac->mutex);
2678     }
2679     +EXPORT_SYMBOL(ceph_auth_invalidate_authorizer);
2680     diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
2681     index a16bf14..96238ba 100644
2682     --- a/net/ceph/auth_x.c
2683     +++ b/net/ceph/auth_x.c
2684     @@ -298,6 +298,7 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
2685     return -ENOMEM;
2686     }
2687     au->service = th->service;
2688     + au->secret_id = th->secret_id;
2689    
2690     msg_a = au->buf->vec.iov_base;
2691     msg_a->struct_v = 1;
2692     @@ -555,6 +556,26 @@ static int ceph_x_create_authorizer(
2693     return 0;
2694     }
2695    
2696     +static int ceph_x_update_authorizer(
2697     + struct ceph_auth_client *ac, int peer_type,
2698     + struct ceph_auth_handshake *auth)
2699     +{
2700     + struct ceph_x_authorizer *au;
2701     + struct ceph_x_ticket_handler *th;
2702     +
2703     + th = get_ticket_handler(ac, peer_type);
2704     + if (IS_ERR(th))
2705     + return PTR_ERR(th);
2706     +
2707     + au = (struct ceph_x_authorizer *)auth->authorizer;
2708     + if (au->secret_id < th->secret_id) {
2709     + dout("ceph_x_update_authorizer service %u secret %llu < %llu\n",
2710     + au->service, au->secret_id, th->secret_id);
2711     + return ceph_x_build_authorizer(ac, th, au);
2712     + }
2713     + return 0;
2714     +}
2715     +
2716     static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
2717     struct ceph_authorizer *a, size_t len)
2718     {
2719     @@ -630,7 +651,7 @@ static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac,
2720    
2721     th = get_ticket_handler(ac, peer_type);
2722     if (!IS_ERR(th))
2723     - remove_ticket_handler(ac, th);
2724     + memset(&th->validity, 0, sizeof(th->validity));
2725     }
2726    
2727    
2728     @@ -641,6 +662,7 @@ static const struct ceph_auth_client_ops ceph_x_ops = {
2729     .build_request = ceph_x_build_request,
2730     .handle_reply = ceph_x_handle_reply,
2731     .create_authorizer = ceph_x_create_authorizer,
2732     + .update_authorizer = ceph_x_update_authorizer,
2733     .verify_authorizer_reply = ceph_x_verify_authorizer_reply,
2734     .destroy_authorizer = ceph_x_destroy_authorizer,
2735     .invalidate_authorizer = ceph_x_invalidate_authorizer,
2736     diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
2737     index f459e93..c5a058d 100644
2738     --- a/net/ceph/auth_x.h
2739     +++ b/net/ceph/auth_x.h
2740     @@ -29,6 +29,7 @@ struct ceph_x_authorizer {
2741     struct ceph_buffer *buf;
2742     unsigned int service;
2743     u64 nonce;
2744     + u64 secret_id;
2745     char reply_buf[128]; /* big enough for encrypted blob */
2746     };
2747    
2748     diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
2749     index 2c0669f..096e09f 100644
2750     --- a/net/ceph/messenger.c
2751     +++ b/net/ceph/messenger.c
2752     @@ -1597,7 +1597,6 @@ static int process_connect(struct ceph_connection *con)
2753     con->error_msg = "connect authorization failure";
2754     return -1;
2755     }
2756     - con->auth_retry = 1;
2757     con_out_kvec_reset(con);
2758     ret = prepare_write_connect(con);
2759     if (ret < 0)
2760     @@ -1682,7 +1681,7 @@ static int process_connect(struct ceph_connection *con)
2761    
2762     WARN_ON(con->state != CON_STATE_NEGOTIATING);
2763     con->state = CON_STATE_OPEN;
2764     -
2765     + con->auth_retry = 0; /* we authenticated; clear flag */
2766     con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
2767     con->connect_seq++;
2768     con->peer_features = server_feat;
2769     diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
2770     index aef5b10..1fe25cd 100644
2771     --- a/net/ceph/mon_client.c
2772     +++ b/net/ceph/mon_client.c
2773     @@ -737,7 +737,7 @@ static void delayed_work(struct work_struct *work)
2774    
2775     __validate_auth(monc);
2776    
2777     - if (monc->auth->ops->is_authenticated(monc->auth))
2778     + if (ceph_auth_is_authenticated(monc->auth))
2779     __send_subscribe(monc);
2780     }
2781     __schedule_delayed(monc);
2782     @@ -892,8 +892,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
2783    
2784     mutex_lock(&monc->mutex);
2785     had_debugfs_info = have_debugfs_info(monc);
2786     - if (monc->auth->ops)
2787     - was_auth = monc->auth->ops->is_authenticated(monc->auth);
2788     + was_auth = ceph_auth_is_authenticated(monc->auth);
2789     monc->pending_auth = 0;
2790     ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
2791     msg->front.iov_len,
2792     @@ -904,7 +903,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
2793     wake_up_all(&monc->client->auth_wq);
2794     } else if (ret > 0) {
2795     __send_prepared_auth_request(monc, ret);
2796     - } else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) {
2797     + } else if (!was_auth && ceph_auth_is_authenticated(monc->auth)) {
2798     dout("authenticated, starting session\n");
2799    
2800     monc->client->msgr.inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
2801     diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
2802     index d730dd4..366e70e 100644
2803     --- a/net/ceph/osd_client.c
2804     +++ b/net/ceph/osd_client.c
2805     @@ -654,8 +654,7 @@ static void put_osd(struct ceph_osd *osd)
2806     if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
2807     struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
2808    
2809     - if (ac->ops && ac->ops->destroy_authorizer)
2810     - ac->ops->destroy_authorizer(ac, osd->o_auth.authorizer);
2811     + ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
2812     kfree(osd);
2813     }
2814     }
2815     @@ -1399,13 +1398,13 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
2816     __register_request(osdc, req);
2817     __unregister_linger_request(osdc, req);
2818     }
2819     + reset_changed_osds(osdc);
2820     mutex_unlock(&osdc->request_mutex);
2821    
2822     if (needmap) {
2823     dout("%d requests for down osds, need new map\n", needmap);
2824     ceph_monc_request_next_osdmap(&osdc->client->monc);
2825     }
2826     - reset_changed_osds(osdc);
2827     }
2828    
2829    
2830     @@ -2168,13 +2167,17 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
2831     struct ceph_auth_handshake *auth = &o->o_auth;
2832    
2833     if (force_new && auth->authorizer) {
2834     - if (ac->ops && ac->ops->destroy_authorizer)
2835     - ac->ops->destroy_authorizer(ac, auth->authorizer);
2836     + ceph_auth_destroy_authorizer(ac, auth->authorizer);
2837     auth->authorizer = NULL;
2838     }
2839     - if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) {
2840     - int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
2841     - auth);
2842     + if (!auth->authorizer) {
2843     + int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
2844     + auth);
2845     + if (ret)
2846     + return ERR_PTR(ret);
2847     + } else {
2848     + int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
2849     + auth);
2850     if (ret)
2851     return ERR_PTR(ret);
2852     }
2853     @@ -2190,11 +2193,7 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len)
2854     struct ceph_osd_client *osdc = o->o_osdc;
2855     struct ceph_auth_client *ac = osdc->client->monc.auth;
2856    
2857     - /*
2858     - * XXX If ac->ops or ac->ops->verify_authorizer_reply is null,
2859     - * XXX which do we do: succeed or fail?
2860     - */
2861     - return ac->ops->verify_authorizer_reply(ac, o->o_auth.authorizer, len);
2862     + return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len);
2863     }
2864    
2865     static int invalidate_authorizer(struct ceph_connection *con)
2866     @@ -2203,9 +2202,7 @@ static int invalidate_authorizer(struct ceph_connection *con)
2867     struct ceph_osd_client *osdc = o->o_osdc;
2868     struct ceph_auth_client *ac = osdc->client->monc.auth;
2869    
2870     - if (ac->ops && ac->ops->invalidate_authorizer)
2871     - ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
2872     -
2873     + ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
2874     return ceph_monc_validate_auth(&osdc->client->monc);
2875     }
2876