Magellan Linux

Annotation of /trunk/kernel-magellan/patches-3.2/0101-3.2.2-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1644 - (hide annotations) (download)
Thu Feb 16 12:24:52 2012 UTC (12 years, 3 months ago) by niro
File size: 218002 byte(s)
-3.2.6-magellan-r1
1 niro 1644 diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
2     index bfb4d01..5207035 100644
3     --- a/arch/ia64/kernel/acpi.c
4     +++ b/arch/ia64/kernel/acpi.c
5     @@ -429,22 +429,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
6     static struct acpi_table_slit __initdata *slit_table;
7     cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
8    
9     -static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
10     +static int __init
11     +get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
12     {
13     int pxm;
14    
15     pxm = pa->proximity_domain_lo;
16     - if (ia64_platform_is("sn2"))
17     + if (ia64_platform_is("sn2") || acpi_srat_revision >= 2)
18     pxm += pa->proximity_domain_hi[0] << 8;
19     return pxm;
20     }
21    
22     -static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
23     +static int __init
24     +get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
25     {
26     int pxm;
27    
28     pxm = ma->proximity_domain;
29     - if (!ia64_platform_is("sn2"))
30     + if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1)
31     pxm &= 0xff;
32    
33     return pxm;
34     diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S
35     index 577abba..83bb960 100644
36     --- a/arch/score/kernel/entry.S
37     +++ b/arch/score/kernel/entry.S
38     @@ -408,7 +408,7 @@ ENTRY(handle_sys)
39     sw r9, [r0, PT_EPC]
40    
41     cmpi.c r27, __NR_syscalls # check syscall number
42     - bgtu illegal_syscall
43     + bgeu illegal_syscall
44    
45     slli r8, r27, 2 # get syscall routine
46     la r11, sys_call_table
47     diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
48     index 8e41071..49ad773 100644
49     --- a/arch/x86/include/asm/amd_nb.h
50     +++ b/arch/x86/include/asm/amd_nb.h
51     @@ -1,6 +1,7 @@
52     #ifndef _ASM_X86_AMD_NB_H
53     #define _ASM_X86_AMD_NB_H
54    
55     +#include <linux/ioport.h>
56     #include <linux/pci.h>
57    
58     struct amd_nb_bus_dev_range {
59     @@ -13,6 +14,7 @@ extern const struct pci_device_id amd_nb_misc_ids[];
60     extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
61    
62     extern bool early_is_amd_nb(u32 value);
63     +extern struct resource *amd_get_mmconfig_range(struct resource *res);
64     extern int amd_cache_northbridges(void);
65     extern void amd_flush_garts(void);
66     extern int amd_numa_init(void);
67     diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
68     index 8e862aa..1b82f7e 100644
69     --- a/arch/x86/include/asm/uv/uv_bau.h
70     +++ b/arch/x86/include/asm/uv/uv_bau.h
71     @@ -65,7 +65,7 @@
72     * UV2: Bit 19 selects between
73     * (0): 10 microsecond timebase and
74     * (1): 80 microseconds
75     - * we're using 655us, similar to UV1: 65 units of 10us
76     + * we're using 560us, similar to UV1: 65 units of 10us
77     */
78     #define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
79     #define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
80     @@ -167,6 +167,7 @@
81     #define FLUSH_RETRY_TIMEOUT 2
82     #define FLUSH_GIVEUP 3
83     #define FLUSH_COMPLETE 4
84     +#define FLUSH_RETRY_BUSYBUG 5
85    
86     /*
87     * tuning the action when the numalink network is extremely delayed
88     @@ -235,10 +236,10 @@ struct bau_msg_payload {
89    
90    
91     /*
92     - * Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
93     + * UV1 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
94     * see table 4.2.3.0.1 in broacast_assist spec.
95     */
96     -struct bau_msg_header {
97     +struct uv1_bau_msg_header {
98     unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
99     /* bits 5:0 */
100     unsigned int base_dest_nasid:15; /* nasid of the first bit */
101     @@ -318,19 +319,87 @@ struct bau_msg_header {
102     };
103    
104     /*
105     + * UV2 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
106     + * see figure 9-2 of harp_sys.pdf
107     + */
108     +struct uv2_bau_msg_header {
109     + unsigned int base_dest_nasid:15; /* nasid of the first bit */
110     + /* bits 14:0 */ /* in uvhub map */
111     + unsigned int dest_subnodeid:5; /* must be 0x10, for the LB */
112     + /* bits 19:15 */
113     + unsigned int rsvd_1:1; /* must be zero */
114     + /* bit 20 */
115     + /* Address bits 59:21 */
116     + /* bits 25:2 of address (44:21) are payload */
117     + /* these next 24 bits become bytes 12-14 of msg */
118     + /* bits 28:21 land in byte 12 */
119     + unsigned int replied_to:1; /* sent as 0 by the source to
120     + byte 12 */
121     + /* bit 21 */
122     + unsigned int msg_type:3; /* software type of the
123     + message */
124     + /* bits 24:22 */
125     + unsigned int canceled:1; /* message canceled, resource
126     + is to be freed*/
127     + /* bit 25 */
128     + unsigned int payload_1:3; /* not currently used */
129     + /* bits 28:26 */
130     +
131     + /* bits 36:29 land in byte 13 */
132     + unsigned int payload_2a:3; /* not currently used */
133     + unsigned int payload_2b:5; /* not currently used */
134     + /* bits 36:29 */
135     +
136     + /* bits 44:37 land in byte 14 */
137     + unsigned int payload_3:8; /* not currently used */
138     + /* bits 44:37 */
139     +
140     + unsigned int rsvd_2:7; /* reserved */
141     + /* bits 51:45 */
142     + unsigned int swack_flag:1; /* software acknowledge flag */
143     + /* bit 52 */
144     + unsigned int rsvd_3a:3; /* must be zero */
145     + unsigned int rsvd_3b:8; /* must be zero */
146     + unsigned int rsvd_3c:8; /* must be zero */
147     + unsigned int rsvd_3d:3; /* must be zero */
148     + /* bits 74:53 */
149     + unsigned int fairness:3; /* usually zero */
150     + /* bits 77:75 */
151     +
152     + unsigned int sequence:16; /* message sequence number */
153     + /* bits 93:78 Suppl_A */
154     + unsigned int chaining:1; /* next descriptor is part of
155     + this activation*/
156     + /* bit 94 */
157     + unsigned int multilevel:1; /* multi-level multicast
158     + format */
159     + /* bit 95 */
160     + unsigned int rsvd_4:24; /* ordered / source node /
161     + source subnode / aging
162     + must be zero */
163     + /* bits 119:96 */
164     + unsigned int command:8; /* message type */
165     + /* bits 127:120 */
166     +};
167     +
168     +/*
169     * The activation descriptor:
170     * The format of the message to send, plus all accompanying control
171     * Should be 64 bytes
172     */
173     struct bau_desc {
174     - struct pnmask distribution;
175     + struct pnmask distribution;
176     /*
177     * message template, consisting of header and payload:
178     */
179     - struct bau_msg_header header;
180     - struct bau_msg_payload payload;
181     + union bau_msg_header {
182     + struct uv1_bau_msg_header uv1_hdr;
183     + struct uv2_bau_msg_header uv2_hdr;
184     + } header;
185     +
186     + struct bau_msg_payload payload;
187     };
188     -/*
189     +/* UV1:
190     * -payload-- ---------header------
191     * bytes 0-11 bits 41-56 bits 58-81
192     * A B (2) C (3)
193     @@ -340,6 +409,16 @@ struct bau_desc {
194     * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
195     * ------------payload queue-----------
196     */
197     +/* UV2:
198     + * -payload-- ---------header------
199     + * bytes 0-11 bits 70-78 bits 21-44
200     + * A B (2) C (3)
201     + *
202     + * A/B/C are moved to:
203     + * A C B
204     + * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
205     + * ------------payload queue-----------
206     + */
207    
208     /*
209     * The payload queue on the destination side is an array of these.
210     @@ -385,7 +464,6 @@ struct bau_pq_entry {
211     struct msg_desc {
212     struct bau_pq_entry *msg;
213     int msg_slot;
214     - int swack_slot;
215     struct bau_pq_entry *queue_first;
216     struct bau_pq_entry *queue_last;
217     };
218     @@ -439,6 +517,9 @@ struct ptc_stats {
219     unsigned long s_retry_messages; /* retry broadcasts */
220     unsigned long s_bau_reenabled; /* for bau enable/disable */
221     unsigned long s_bau_disabled; /* for bau enable/disable */
222     + unsigned long s_uv2_wars; /* uv2 workaround, perm. busy */
223     + unsigned long s_uv2_wars_hw; /* uv2 workaround, hiwater */
224     + unsigned long s_uv2_war_waits; /* uv2 workaround, long waits */
225     /* destination statistics */
226     unsigned long d_alltlb; /* times all tlb's on this
227     cpu were flushed */
228     @@ -511,9 +592,12 @@ struct bau_control {
229     short osnode;
230     short uvhub_cpu;
231     short uvhub;
232     + short uvhub_version;
233     short cpus_in_socket;
234     short cpus_in_uvhub;
235     short partition_base_pnode;
236     + short using_desc; /* an index, like uvhub_cpu */
237     + unsigned int inuse_map;
238     unsigned short message_number;
239     unsigned short uvhub_quiesce;
240     short socket_acknowledge_count[DEST_Q_SIZE];
241     @@ -531,6 +615,7 @@ struct bau_control {
242     int cong_response_us;
243     int cong_reps;
244     int cong_period;
245     + unsigned long clocks_per_100_usec;
246     cycles_t period_time;
247     long period_requests;
248     struct hub_and_pnode *thp;
249     @@ -591,6 +676,11 @@ static inline void write_mmr_sw_ack(unsigned long mr)
250     uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
251     }
252    
253     +static inline void write_gmmr_sw_ack(int pnode, unsigned long mr)
254     +{
255     + write_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
256     +}
257     +
258     static inline unsigned long read_mmr_sw_ack(void)
259     {
260     return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
261     diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
262     index 4c39baa..bae1efe 100644
263     --- a/arch/x86/kernel/amd_nb.c
264     +++ b/arch/x86/kernel/amd_nb.c
265     @@ -119,6 +119,37 @@ bool __init early_is_amd_nb(u32 device)
266     return false;
267     }
268    
269     +struct resource *amd_get_mmconfig_range(struct resource *res)
270     +{
271     + u32 address;
272     + u64 base, msr;
273     + unsigned segn_busn_bits;
274     +
275     + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
276     + return NULL;
277     +
278     + /* assume all cpus from fam10h have mmconfig */
279     + if (boot_cpu_data.x86 < 0x10)
280     + return NULL;
281     +
282     + address = MSR_FAM10H_MMIO_CONF_BASE;
283     + rdmsrl(address, msr);
284     +
285     + /* mmconfig is not enabled */
286     + if (!(msr & FAM10H_MMIO_CONF_ENABLE))
287     + return NULL;
288     +
289     + base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
290     +
291     + segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
292     + FAM10H_MMIO_CONF_BUSRANGE_MASK;
293     +
294     + res->flags = IORESOURCE_MEM;
295     + res->start = base;
296     + res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
297     + return res;
298     +}
299     +
300     int amd_get_subcaches(int cpu)
301     {
302     struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
303     diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
304     index 9d59bba..79b05b8 100644
305     --- a/arch/x86/kernel/apic/x2apic_uv_x.c
306     +++ b/arch/x86/kernel/apic/x2apic_uv_x.c
307     @@ -769,7 +769,12 @@ void __init uv_system_init(void)
308     for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
309     uv_possible_blades +=
310     hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
311     - printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
312     +
313     + /* uv_num_possible_blades() is really the hub count */
314     + printk(KERN_INFO "UV: Found %d blades, %d hubs\n",
315     + is_uv1_hub() ? uv_num_possible_blades() :
316     + (uv_num_possible_blades() + 1) / 2,
317     + uv_num_possible_blades());
318    
319     bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
320     uv_blade_info = kzalloc(bytes, GFP_KERNEL);
321     diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
322     index 4b5ba85..845df68 100644
323     --- a/arch/x86/mm/mmap.c
324     +++ b/arch/x86/mm/mmap.c
325     @@ -75,9 +75,9 @@ static unsigned long mmap_rnd(void)
326     */
327     if (current->flags & PF_RANDOMIZE) {
328     if (mmap_is_ia32())
329     - rnd = (long)get_random_int() % (1<<8);
330     + rnd = get_random_int() % (1<<8);
331     else
332     - rnd = (long)(get_random_int() % (1<<28));
333     + rnd = get_random_int() % (1<<28);
334     }
335     return rnd << PAGE_SHIFT;
336     }
337     diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
338     index 81dbfde..7efd0c6 100644
339     --- a/arch/x86/mm/srat.c
340     +++ b/arch/x86/mm/srat.c
341     @@ -104,6 +104,8 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
342     if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
343     return;
344     pxm = pa->proximity_domain_lo;
345     + if (acpi_srat_revision >= 2)
346     + pxm |= *((unsigned int*)pa->proximity_domain_hi) << 8;
347     node = setup_node(pxm);
348     if (node < 0) {
349     printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
350     @@ -155,6 +157,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
351     start = ma->base_address;
352     end = start + ma->length;
353     pxm = ma->proximity_domain;
354     + if (acpi_srat_revision <= 1)
355     + pxm &= 0xff;
356     node = setup_node(pxm);
357     if (node < 0) {
358     printk(KERN_ERR "SRAT: Too many proximity domains.\n");
359     diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
360     index 6b8759f..d24d3da 100644
361     --- a/arch/x86/pci/Makefile
362     +++ b/arch/x86/pci/Makefile
363     @@ -18,8 +18,9 @@ obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
364     obj-$(CONFIG_X86_MRST) += mrst.o
365    
366     obj-y += common.o early.o
367     -obj-y += amd_bus.o bus_numa.o
368     +obj-y += bus_numa.o
369    
370     +obj-$(CONFIG_AMD_NB) += amd_bus.o
371     obj-$(CONFIG_PCI_CNB20LE_QUIRK) += broadcom_bus.o
372    
373     ifeq ($(CONFIG_PCI_DEBUG),y)
374     diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
375     index 404f21a..f8348ab 100644
376     --- a/arch/x86/pci/acpi.c
377     +++ b/arch/x86/pci/acpi.c
378     @@ -149,7 +149,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
379     struct acpi_resource_address64 addr;
380     acpi_status status;
381     unsigned long flags;
382     - u64 start, end;
383     + u64 start, orig_end, end;
384    
385     status = resource_to_addr(acpi_res, &addr);
386     if (!ACPI_SUCCESS(status))
387     @@ -165,7 +165,21 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
388     return AE_OK;
389    
390     start = addr.minimum + addr.translation_offset;
391     - end = addr.maximum + addr.translation_offset;
392     + orig_end = end = addr.maximum + addr.translation_offset;
393     +
394     + /* Exclude non-addressable range or non-addressable portion of range */
395     + end = min(end, (u64)iomem_resource.end);
396     + if (end <= start) {
397     + dev_info(&info->bridge->dev,
398     + "host bridge window [%#llx-%#llx] "
399     + "(ignored, not CPU addressable)\n", start, orig_end);
400     + return AE_OK;
401     + } else if (orig_end != end) {
402     + dev_info(&info->bridge->dev,
403     + "host bridge window [%#llx-%#llx] "
404     + "([%#llx-%#llx] ignored, not CPU addressable)\n",
405     + start, orig_end, end + 1, orig_end);
406     + }
407    
408     res = &info->res[info->res_num];
409     res->name = info->name;
410     diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
411     index 026e493..385a940 100644
412     --- a/arch/x86/pci/amd_bus.c
413     +++ b/arch/x86/pci/amd_bus.c
414     @@ -30,34 +30,6 @@ static struct pci_hostbridge_probe pci_probes[] __initdata = {
415     { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1300 },
416     };
417    
418     -static u64 __initdata fam10h_mmconf_start;
419     -static u64 __initdata fam10h_mmconf_end;
420     -static void __init get_pci_mmcfg_amd_fam10h_range(void)
421     -{
422     - u32 address;
423     - u64 base, msr;
424     - unsigned segn_busn_bits;
425     -
426     - /* assume all cpus from fam10h have mmconf */
427     - if (boot_cpu_data.x86 < 0x10)
428     - return;
429     -
430     - address = MSR_FAM10H_MMIO_CONF_BASE;
431     - rdmsrl(address, msr);
432     -
433     - /* mmconfig is not enable */
434     - if (!(msr & FAM10H_MMIO_CONF_ENABLE))
435     - return;
436     -
437     - base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
438     -
439     - segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
440     - FAM10H_MMIO_CONF_BUSRANGE_MASK;
441     -
442     - fam10h_mmconf_start = base;
443     - fam10h_mmconf_end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
444     -}
445     -
446     #define RANGE_NUM 16
447    
448     /**
449     @@ -85,6 +57,9 @@ static int __init early_fill_mp_bus_info(void)
450     u64 val;
451     u32 address;
452     bool found;
453     + struct resource fam10h_mmconf_res, *fam10h_mmconf;
454     + u64 fam10h_mmconf_start;
455     + u64 fam10h_mmconf_end;
456    
457     if (!early_pci_allowed())
458     return -1;
459     @@ -211,12 +186,17 @@ static int __init early_fill_mp_bus_info(void)
460     subtract_range(range, RANGE_NUM, 0, end);
461    
462     /* get mmconfig */
463     - get_pci_mmcfg_amd_fam10h_range();
464     + fam10h_mmconf = amd_get_mmconfig_range(&fam10h_mmconf_res);
465     /* need to take out mmconf range */
466     - if (fam10h_mmconf_end) {
467     - printk(KERN_DEBUG "Fam 10h mmconf [%llx, %llx]\n", fam10h_mmconf_start, fam10h_mmconf_end);
468     + if (fam10h_mmconf) {
469     + printk(KERN_DEBUG "Fam 10h mmconf %pR\n", fam10h_mmconf);
470     + fam10h_mmconf_start = fam10h_mmconf->start;
471     + fam10h_mmconf_end = fam10h_mmconf->end;
472     subtract_range(range, RANGE_NUM, fam10h_mmconf_start,
473     fam10h_mmconf_end + 1);
474     + } else {
475     + fam10h_mmconf_start = 0;
476     + fam10h_mmconf_end = 0;
477     }
478    
479     /* mmio resource */
480     diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
481     index 5b55219..9010ca7 100644
482     --- a/arch/x86/platform/uv/tlb_uv.c
483     +++ b/arch/x86/platform/uv/tlb_uv.c
484     @@ -157,13 +157,14 @@ static int __init uvhub_to_first_apicid(int uvhub)
485     * clear of the Timeout bit (as well) will free the resource. No reply will
486     * be sent (the hardware will only do one reply per message).
487     */
488     -static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp)
489     +static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
490     + int do_acknowledge)
491     {
492     unsigned long dw;
493     struct bau_pq_entry *msg;
494    
495     msg = mdp->msg;
496     - if (!msg->canceled) {
497     + if (!msg->canceled && do_acknowledge) {
498     dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
499     write_mmr_sw_ack(dw);
500     }
501     @@ -212,8 +213,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
502     if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
503     unsigned long mr;
504     /*
505     - * is the resource timed out?
506     - * make everyone ignore the cancelled message.
507     + * Is the resource timed out?
508     + * Make everyone ignore the cancelled message.
509     */
510     msg2->canceled = 1;
511     stat->d_canceled++;
512     @@ -231,8 +232,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
513     * Do all the things a cpu should do for a TLB shootdown message.
514     * Other cpu's may come here at the same time for this message.
515     */
516     -static void bau_process_message(struct msg_desc *mdp,
517     - struct bau_control *bcp)
518     +static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
519     + int do_acknowledge)
520     {
521     short socket_ack_count = 0;
522     short *sp;
523     @@ -284,8 +285,9 @@ static void bau_process_message(struct msg_desc *mdp,
524     if (msg_ack_count == bcp->cpus_in_uvhub) {
525     /*
526     * All cpus in uvhub saw it; reply
527     + * (unless we are in the UV2 workaround)
528     */
529     - reply_to_message(mdp, bcp);
530     + reply_to_message(mdp, bcp, do_acknowledge);
531     }
532     }
533    
534     @@ -491,27 +493,138 @@ static int uv1_wait_completion(struct bau_desc *bau_desc,
535     /*
536     * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
537     */
538     -static unsigned long uv2_read_status(unsigned long offset, int rshft, int cpu)
539     +static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
540     {
541     unsigned long descriptor_status;
542     unsigned long descriptor_status2;
543    
544     descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
545     - descriptor_status2 = (read_mmr_uv2_status() >> cpu) & 0x1UL;
546     + descriptor_status2 = (read_mmr_uv2_status() >> desc) & 0x1UL;
547     descriptor_status = (descriptor_status << 1) | descriptor_status2;
548     return descriptor_status;
549     }
550    
551     +/*
552     + * Return whether the status of the descriptor that is normally used for this
553     + * cpu (the one indexed by its hub-relative cpu number) is busy.
554     + * The status of the original 32 descriptors is always reflected in the 64
555     + * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
556     + * The bit provided by the activation_status_2 register is irrelevant to
557     + * the status if it is only being tested for busy or not busy.
558     + */
559     +int normal_busy(struct bau_control *bcp)
560     +{
561     + int cpu = bcp->uvhub_cpu;
562     + int mmr_offset;
563     + int right_shift;
564     +
565     + mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
566     + right_shift = cpu * UV_ACT_STATUS_SIZE;
567     + return (((((read_lmmr(mmr_offset) >> right_shift) &
568     + UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
569     +}
570     +
571     +/*
572     + * Entered when a bau descriptor has gone into a permanent busy wait because
573     + * of a hardware bug.
574     + * Workaround the bug.
575     + */
576     +int handle_uv2_busy(struct bau_control *bcp)
577     +{
578     + int busy_one = bcp->using_desc;
579     + int normal = bcp->uvhub_cpu;
580     + int selected = -1;
581     + int i;
582     + unsigned long descriptor_status;
583     + unsigned long status;
584     + int mmr_offset;
585     + struct bau_desc *bau_desc_old;
586     + struct bau_desc *bau_desc_new;
587     + struct bau_control *hmaster = bcp->uvhub_master;
588     + struct ptc_stats *stat = bcp->statp;
589     + cycles_t ttm;
590     +
591     + stat->s_uv2_wars++;
592     + spin_lock(&hmaster->uvhub_lock);
593     + /* try for the original first */
594     + if (busy_one != normal) {
595     + if (!normal_busy(bcp))
596     + selected = normal;
597     + }
598     + if (selected < 0) {
599     + /* can't use the normal, select an alternate */
600     + mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
601     + descriptor_status = read_lmmr(mmr_offset);
602     +
603     + /* scan available descriptors 32-63 */
604     + for (i = 0; i < UV_CPUS_PER_AS; i++) {
605     + if ((hmaster->inuse_map & (1 << i)) == 0) {
606     + status = ((descriptor_status >>
607     + (i * UV_ACT_STATUS_SIZE)) &
608     + UV_ACT_STATUS_MASK) << 1;
609     + if (status != UV2H_DESC_BUSY) {
610     + selected = i + UV_CPUS_PER_AS;
611     + break;
612     + }
613     + }
614     + }
615     + }
616     +
617     + if (busy_one != normal)
618     + /* mark the busy alternate as not in-use */
619     + hmaster->inuse_map &= ~(1 << (busy_one - UV_CPUS_PER_AS));
620     +
621     + if (selected >= 0) {
622     + /* switch to the selected descriptor */
623     + if (selected != normal) {
624     + /* set the selected alternate as in-use */
625     + hmaster->inuse_map |=
626     + (1 << (selected - UV_CPUS_PER_AS));
627     + if (selected > stat->s_uv2_wars_hw)
628     + stat->s_uv2_wars_hw = selected;
629     + }
630     + bau_desc_old = bcp->descriptor_base;
631     + bau_desc_old += (ITEMS_PER_DESC * busy_one);
632     + bcp->using_desc = selected;
633     + bau_desc_new = bcp->descriptor_base;
634     + bau_desc_new += (ITEMS_PER_DESC * selected);
635     + *bau_desc_new = *bau_desc_old;
636     + } else {
637     + /*
638     + * All are busy. Wait for the normal one for this cpu to
639     + * free up.
640     + */
641     + stat->s_uv2_war_waits++;
642     + spin_unlock(&hmaster->uvhub_lock);
643     + ttm = get_cycles();
644     + do {
645     + cpu_relax();
646     + } while (normal_busy(bcp));
647     + spin_lock(&hmaster->uvhub_lock);
648     + /* switch to the original descriptor */
649     + bcp->using_desc = normal;
650     + bau_desc_old = bcp->descriptor_base;
651     + bau_desc_old += (ITEMS_PER_DESC * bcp->using_desc);
652     + bcp->using_desc = (ITEMS_PER_DESC * normal);
653     + bau_desc_new = bcp->descriptor_base;
654     + bau_desc_new += (ITEMS_PER_DESC * normal);
655     + *bau_desc_new = *bau_desc_old; /* copy the entire descriptor */
656     + }
657     + spin_unlock(&hmaster->uvhub_lock);
658     + return FLUSH_RETRY_BUSYBUG;
659     +}
660     +
661     static int uv2_wait_completion(struct bau_desc *bau_desc,
662     unsigned long mmr_offset, int right_shift,
663     struct bau_control *bcp, long try)
664     {
665     unsigned long descriptor_stat;
666     cycles_t ttm;
667     - int cpu = bcp->uvhub_cpu;
668     + int desc = bcp->using_desc;
669     + long busy_reps = 0;
670     struct ptc_stats *stat = bcp->statp;
671    
672     - descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
673     + descriptor_stat = uv2_read_status(mmr_offset, right_shift, desc);
674    
675     /* spin on the status MMR, waiting for it to go idle */
676     while (descriptor_stat != UV2H_DESC_IDLE) {
677     @@ -542,12 +655,23 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
678     bcp->conseccompletes = 0;
679     return FLUSH_RETRY_TIMEOUT;
680     } else {
681     + busy_reps++;
682     + if (busy_reps > 1000000) {
683     + /* not to hammer on the clock */
684     + busy_reps = 0;
685     + ttm = get_cycles();
686     + if ((ttm - bcp->send_message) >
687     + (bcp->clocks_per_100_usec)) {
688     + return handle_uv2_busy(bcp);
689     + }
690     + }
691     /*
692     * descriptor_stat is still BUSY
693     */
694     cpu_relax();
695     }
696     - descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
697     + descriptor_stat = uv2_read_status(mmr_offset, right_shift,
698     + desc);
699     }
700     bcp->conseccompletes++;
701     return FLUSH_COMPLETE;
702     @@ -563,17 +687,17 @@ static int wait_completion(struct bau_desc *bau_desc,
703     {
704     int right_shift;
705     unsigned long mmr_offset;
706     - int cpu = bcp->uvhub_cpu;
707     + int desc = bcp->using_desc;
708    
709     - if (cpu < UV_CPUS_PER_AS) {
710     + if (desc < UV_CPUS_PER_AS) {
711     mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
712     - right_shift = cpu * UV_ACT_STATUS_SIZE;
713     + right_shift = desc * UV_ACT_STATUS_SIZE;
714     } else {
715     mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
716     - right_shift = ((cpu - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
717     + right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
718     }
719    
720     - if (is_uv1_hub())
721     + if (bcp->uvhub_version == 1)
722     return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
723     bcp, try);
724     else
725     @@ -752,19 +876,22 @@ static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
726     * Returns 1 if it gives up entirely and the original cpu mask is to be
727     * returned to the kernel.
728     */
729     -int uv_flush_send_and_wait(struct bau_desc *bau_desc,
730     - struct cpumask *flush_mask, struct bau_control *bcp)
731     +int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
732     {
733     int seq_number = 0;
734     int completion_stat = 0;
735     + int uv1 = 0;
736     long try = 0;
737     unsigned long index;
738     cycles_t time1;
739     cycles_t time2;
740     struct ptc_stats *stat = bcp->statp;
741     struct bau_control *hmaster = bcp->uvhub_master;
742     + struct uv1_bau_msg_header *uv1_hdr = NULL;
743     + struct uv2_bau_msg_header *uv2_hdr = NULL;
744     + struct bau_desc *bau_desc;
745    
746     - if (is_uv1_hub())
747     + if (bcp->uvhub_version == 1)
748     uv1_throttle(hmaster, stat);
749    
750     while (hmaster->uvhub_quiesce)
751     @@ -772,22 +899,39 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
752    
753     time1 = get_cycles();
754     do {
755     - if (try == 0) {
756     - bau_desc->header.msg_type = MSG_REGULAR;
757     + bau_desc = bcp->descriptor_base;
758     + bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
759     + if (bcp->uvhub_version == 1) {
760     + uv1 = 1;
761     + uv1_hdr = &bau_desc->header.uv1_hdr;
762     + } else
763     + uv2_hdr = &bau_desc->header.uv2_hdr;
764     + if ((try == 0) || (completion_stat == FLUSH_RETRY_BUSYBUG)) {
765     + if (uv1)
766     + uv1_hdr->msg_type = MSG_REGULAR;
767     + else
768     + uv2_hdr->msg_type = MSG_REGULAR;
769     seq_number = bcp->message_number++;
770     } else {
771     - bau_desc->header.msg_type = MSG_RETRY;
772     + if (uv1)
773     + uv1_hdr->msg_type = MSG_RETRY;
774     + else
775     + uv2_hdr->msg_type = MSG_RETRY;
776     stat->s_retry_messages++;
777     }
778    
779     - bau_desc->header.sequence = seq_number;
780     - index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
781     + if (uv1)
782     + uv1_hdr->sequence = seq_number;
783     + else
784     + uv2_hdr->sequence = seq_number;
785     + index = (1UL << AS_PUSH_SHIFT) | bcp->using_desc;
786     bcp->send_message = get_cycles();
787    
788     write_mmr_activation(index);
789    
790     try++;
791     completion_stat = wait_completion(bau_desc, bcp, try);
792     + /* UV2: wait_completion() may change the bcp->using_desc */
793    
794     handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
795    
796     @@ -798,6 +942,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
797     }
798     cpu_relax();
799     } while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
800     + (completion_stat == FLUSH_RETRY_BUSYBUG) ||
801     (completion_stat == FLUSH_RETRY_TIMEOUT));
802    
803     time2 = get_cycles();
804     @@ -812,6 +957,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
805     record_send_stats(time1, time2, bcp, stat, completion_stat, try);
806    
807     if (completion_stat == FLUSH_GIVEUP)
808     + /* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
809     return 1;
810     return 0;
811     }
812     @@ -967,7 +1113,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
813     stat->s_ntargself++;
814    
815     bau_desc = bcp->descriptor_base;
816     - bau_desc += ITEMS_PER_DESC * bcp->uvhub_cpu;
817     + bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
818     bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
819     if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
820     return NULL;
821     @@ -980,13 +1126,86 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
822     * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
823     * or 1 if it gave up and the original cpumask should be returned.
824     */
825     - if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
826     + if (!uv_flush_send_and_wait(flush_mask, bcp))
827     return NULL;
828     else
829     return cpumask;
830     }
831    
832     /*
833     + * Search the message queue for any 'other' message with the same software
834     + * acknowledge resource bit vector.
835     + */
836     +struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
837     + struct bau_control *bcp, unsigned char swack_vec)
838     +{
839     + struct bau_pq_entry *msg_next = msg + 1;
840     +
841     + if (msg_next > bcp->queue_last)
842     + msg_next = bcp->queue_first;
843     + while ((msg_next->swack_vec != 0) && (msg_next != msg)) {
844     + if (msg_next->swack_vec == swack_vec)
845     + return msg_next;
846     + msg_next++;
847     + if (msg_next > bcp->queue_last)
848     + msg_next = bcp->queue_first;
849     + }
850     + return NULL;
851     +}
852     +
853     +/*
854     + * UV2 needs to work around a bug in which an arriving message has not
855     + * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
856     + * Such a message must be ignored.
857     + */
858     +void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
859     +{
860     + unsigned long mmr_image;
861     + unsigned char swack_vec;
862     + struct bau_pq_entry *msg = mdp->msg;
863     + struct bau_pq_entry *other_msg;
864     +
865     + mmr_image = read_mmr_sw_ack();
866     + swack_vec = msg->swack_vec;
867     +
868     + if ((swack_vec & mmr_image) == 0) {
869     + /*
870     + * This message was assigned a swack resource, but no
871     + * reserved acknowlegment is pending.
872     + * The bug has prevented this message from setting the MMR.
873     + * And no other message has used the same sw_ack resource.
874     + * Do the requested shootdown but do not reply to the msg.
875     + * (the 0 means make no acknowledge)
876     + */
877     + bau_process_message(mdp, bcp, 0);
878     + return;
879     + }
880     +
881     + /*
882     + * Some message has set the MMR 'pending' bit; it might have been
883     + * another message. Look for that message.
884     + */
885     + other_msg = find_another_by_swack(msg, bcp, msg->swack_vec);
886     + if (other_msg) {
887     + /* There is another. Do not ack the current one. */
888     + bau_process_message(mdp, bcp, 0);
889     + /*
890     + * Let the natural processing of that message acknowledge
891     + * it. Don't get the processing of sw_ack's out of order.
892     + */
893     + return;
894     + }
895     +
896     + /*
897     + * There is no other message using this sw_ack, so it is safe to
898     + * acknowledge it.
899     + */
900     + bau_process_message(mdp, bcp, 1);
901     +
902     + return;
903     +}
904     +
905     +/*
906     * The BAU message interrupt comes here. (registered by set_intr_gate)
907     * See entry_64.S
908     *
909     @@ -1022,9 +1241,11 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
910     count++;
911    
912     msgdesc.msg_slot = msg - msgdesc.queue_first;
913     - msgdesc.swack_slot = ffs(msg->swack_vec) - 1;
914     msgdesc.msg = msg;
915     - bau_process_message(&msgdesc, bcp);
916     + if (bcp->uvhub_version == 2)
917     + process_uv2_message(&msgdesc, bcp);
918     + else
919     + bau_process_message(&msgdesc, bcp, 1);
920    
921     msg++;
922     if (msg > msgdesc.queue_last)
923     @@ -1083,7 +1304,7 @@ static void __init enable_timeouts(void)
924     */
925     mmr_image |= (1L << SOFTACK_MSHIFT);
926     if (is_uv2_hub()) {
927     - mmr_image |= (1L << UV2_LEG_SHFT);
928     + mmr_image &= ~(1L << UV2_LEG_SHFT);
929     mmr_image |= (1L << UV2_EXT_SHFT);
930     }
931     write_mmr_misc_control(pnode, mmr_image);
932     @@ -1142,7 +1363,7 @@ static int ptc_seq_show(struct seq_file *file, void *data)
933     seq_printf(file,
934     "all one mult none retry canc nocan reset rcan ");
935     seq_printf(file,
936     - "disable enable\n");
937     + "disable enable wars warshw warwaits\n");
938     }
939     if (cpu < num_possible_cpus() && cpu_online(cpu)) {
940     stat = &per_cpu(ptcstats, cpu);
941     @@ -1173,8 +1394,10 @@ static int ptc_seq_show(struct seq_file *file, void *data)
942     stat->d_nomsg, stat->d_retries, stat->d_canceled,
943     stat->d_nocanceled, stat->d_resets,
944     stat->d_rcanceled);
945     - seq_printf(file, "%ld %ld\n",
946     - stat->s_bau_disabled, stat->s_bau_reenabled);
947     + seq_printf(file, "%ld %ld %ld %ld %ld\n",
948     + stat->s_bau_disabled, stat->s_bau_reenabled,
949     + stat->s_uv2_wars, stat->s_uv2_wars_hw,
950     + stat->s_uv2_war_waits);
951     }
952     return 0;
953     }
954     @@ -1432,12 +1655,15 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
955     {
956     int i;
957     int cpu;
958     + int uv1 = 0;
959     unsigned long gpa;
960     unsigned long m;
961     unsigned long n;
962     size_t dsize;
963     struct bau_desc *bau_desc;
964     struct bau_desc *bd2;
965     + struct uv1_bau_msg_header *uv1_hdr;
966     + struct uv2_bau_msg_header *uv2_hdr;
967     struct bau_control *bcp;
968    
969     /*
970     @@ -1451,6 +1677,8 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
971     gpa = uv_gpa(bau_desc);
972     n = uv_gpa_to_gnode(gpa);
973     m = uv_gpa_to_offset(gpa);
974     + if (is_uv1_hub())
975     + uv1 = 1;
976    
977     /* the 14-bit pnode */
978     write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
979     @@ -1461,21 +1689,33 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
980     */
981     for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
982     memset(bd2, 0, sizeof(struct bau_desc));
983     - bd2->header.swack_flag = 1;
984     - /*
985     - * The base_dest_nasid set in the message header is the nasid
986     - * of the first uvhub in the partition. The bit map will
987     - * indicate destination pnode numbers relative to that base.
988     - * They may not be consecutive if nasid striding is being used.
989     - */
990     - bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
991     - bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
992     - bd2->header.command = UV_NET_ENDPOINT_INTD;
993     - bd2->header.int_both = 1;
994     - /*
995     - * all others need to be set to zero:
996     - * fairness chaining multilevel count replied_to
997     - */
998     + if (uv1) {
999     + uv1_hdr = &bd2->header.uv1_hdr;
1000     + uv1_hdr->swack_flag = 1;
1001     + /*
1002     + * The base_dest_nasid set in the message header
1003     + * is the nasid of the first uvhub in the partition.
1004     + * The bit map will indicate destination pnode numbers
1005     + * relative to that base. They may not be consecutive
1006     + * if nasid striding is being used.
1007     + */
1008     + uv1_hdr->base_dest_nasid =
1009     + UV_PNODE_TO_NASID(base_pnode);
1010     + uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1011     + uv1_hdr->command = UV_NET_ENDPOINT_INTD;
1012     + uv1_hdr->int_both = 1;
1013     + /*
1014     + * all others need to be set to zero:
1015     + * fairness chaining multilevel count replied_to
1016     + */
1017     + } else {
1018     + uv2_hdr = &bd2->header.uv2_hdr;
1019     + uv2_hdr->swack_flag = 1;
1020     + uv2_hdr->base_dest_nasid =
1021     + UV_PNODE_TO_NASID(base_pnode);
1022     + uv2_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1023     + uv2_hdr->command = UV_NET_ENDPOINT_INTD;
1024     + }
1025     }
1026     for_each_present_cpu(cpu) {
1027     if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
1028     @@ -1531,6 +1771,7 @@ static void pq_init(int node, int pnode)
1029     write_mmr_payload_first(pnode, pn_first);
1030     write_mmr_payload_tail(pnode, first);
1031     write_mmr_payload_last(pnode, last);
1032     + write_gmmr_sw_ack(pnode, 0xffffUL);
1033    
1034     /* in effect, all msg_type's are set to MSG_NOOP */
1035     memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
1036     @@ -1584,14 +1825,14 @@ static int calculate_destination_timeout(void)
1037     ts_ns = base * mult1 * mult2;
1038     ret = ts_ns / 1000;
1039     } else {
1040     - /* 4 bits 0/1 for 10/80us, 3 bits of multiplier */
1041     - mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
1042     + /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
1043     + mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
1044     mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
1045     if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
1046     - mult1 = 80;
1047     + base = 80;
1048     else
1049     - mult1 = 10;
1050     - base = mmr_image & UV2_ACK_MASK;
1051     + base = 10;
1052     + mult1 = mmr_image & UV2_ACK_MASK;
1053     ret = mult1 * base;
1054     }
1055     return ret;
1056     @@ -1618,6 +1859,7 @@ static void __init init_per_cpu_tunables(void)
1057     bcp->cong_response_us = congested_respns_us;
1058     bcp->cong_reps = congested_reps;
1059     bcp->cong_period = congested_period;
1060     + bcp->clocks_per_100_usec = usec_2_cycles(100);
1061     }
1062     }
1063    
1064     @@ -1728,8 +1970,17 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
1065     bcp->cpus_in_socket = sdp->num_cpus;
1066     bcp->socket_master = *smasterp;
1067     bcp->uvhub = bdp->uvhub;
1068     + if (is_uv1_hub())
1069     + bcp->uvhub_version = 1;
1070     + else if (is_uv2_hub())
1071     + bcp->uvhub_version = 2;
1072     + else {
1073     + printk(KERN_EMERG "uvhub version not 1 or 2\n");
1074     + return 1;
1075     + }
1076     bcp->uvhub_master = *hmasterp;
1077     bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
1078     + bcp->using_desc = bcp->uvhub_cpu;
1079     if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
1080     printk(KERN_EMERG "%d cpus per uvhub invalid\n",
1081     bcp->uvhub_cpu);
1082     @@ -1845,6 +2096,8 @@ static int __init uv_bau_init(void)
1083     uv_base_pnode = uv_blade_to_pnode(uvhub);
1084     }
1085    
1086     + enable_timeouts();
1087     +
1088     if (init_per_cpu(nuvhubs, uv_base_pnode)) {
1089     nobau = 1;
1090     return 0;
1091     @@ -1855,7 +2108,6 @@ static int __init uv_bau_init(void)
1092     if (uv_blade_nr_possible_cpus(uvhub))
1093     init_uvhub(uvhub, vector, uv_base_pnode);
1094    
1095     - enable_timeouts();
1096     alloc_intr_gate(vector, uv_bau_message_intr1);
1097    
1098     for_each_possible_blade(uvhub) {
1099     @@ -1867,7 +2119,8 @@ static int __init uv_bau_init(void)
1100     val = 1L << 63;
1101     write_gmmr_activation(pnode, val);
1102     mmr = 1; /* should be 1 to broadcast to both sockets */
1103     - write_mmr_data_broadcast(pnode, mmr);
1104     + if (!is_uv1_hub())
1105     + write_mmr_data_broadcast(pnode, mmr);
1106     }
1107     }
1108    
1109     diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
1110     index fbdf0d8..688be8a 100644
1111     --- a/block/scsi_ioctl.c
1112     +++ b/block/scsi_ioctl.c
1113     @@ -24,6 +24,7 @@
1114     #include <linux/capability.h>
1115     #include <linux/completion.h>
1116     #include <linux/cdrom.h>
1117     +#include <linux/ratelimit.h>
1118     #include <linux/slab.h>
1119     #include <linux/times.h>
1120     #include <asm/uaccess.h>
1121     @@ -690,6 +691,57 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
1122     }
1123     EXPORT_SYMBOL(scsi_cmd_ioctl);
1124    
1125     +int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
1126     +{
1127     + if (bd && bd == bd->bd_contains)
1128     + return 0;
1129     +
1130     + /* Actually none of these is particularly useful on a partition,
1131     + * but they are safe.
1132     + */
1133     + switch (cmd) {
1134     + case SCSI_IOCTL_GET_IDLUN:
1135     + case SCSI_IOCTL_GET_BUS_NUMBER:
1136     + case SCSI_IOCTL_GET_PCI:
1137     + case SCSI_IOCTL_PROBE_HOST:
1138     + case SG_GET_VERSION_NUM:
1139     + case SG_SET_TIMEOUT:
1140     + case SG_GET_TIMEOUT:
1141     + case SG_GET_RESERVED_SIZE:
1142     + case SG_SET_RESERVED_SIZE:
1143     + case SG_EMULATED_HOST:
1144     + return 0;
1145     + case CDROM_GET_CAPABILITY:
1146     + /* Keep this until we remove the printk below. udev sends it
1147     + * and we do not want to spam dmesg about it. CD-ROMs do
1148     + * not have partitions, so we get here only for disks.
1149     + */
1150     + return -ENOTTY;
1151     + default:
1152     + break;
1153     + }
1154     +
1155     + /* In particular, rule out all resets and host-specific ioctls. */
1156     + printk_ratelimited(KERN_WARNING
1157     + "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
1158     +
1159     + return capable(CAP_SYS_RAWIO) ? 0 : -ENOTTY;
1160     +}
1161     +EXPORT_SYMBOL(scsi_verify_blk_ioctl);
1162     +
1163     +int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
1164     + unsigned int cmd, void __user *arg)
1165     +{
1166     + int ret;
1167     +
1168     + ret = scsi_verify_blk_ioctl(bd, cmd);
1169     + if (ret < 0)
1170     + return ret;
1171     +
1172     + return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
1173     +}
1174     +EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
1175     +
1176     static int __init blk_scsi_ioctl_init(void)
1177     {
1178     blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
1179     diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
1180     index 8c7b997..42163d8 100644
1181     --- a/drivers/acpi/acpica/dsargs.c
1182     +++ b/drivers/acpi/acpica/dsargs.c
1183     @@ -387,5 +387,29 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
1184     status = acpi_ds_execute_arguments(node, node->parent,
1185     extra_desc->extra.aml_length,
1186     extra_desc->extra.aml_start);
1187     + if (ACPI_FAILURE(status)) {
1188     + return_ACPI_STATUS(status);
1189     + }
1190     +
1191     + /* Validate the region address/length via the host OS */
1192     +
1193     + status = acpi_os_validate_address(obj_desc->region.space_id,
1194     + obj_desc->region.address,
1195     + (acpi_size) obj_desc->region.length,
1196     + acpi_ut_get_node_name(node));
1197     +
1198     + if (ACPI_FAILURE(status)) {
1199     + /*
1200     + * Invalid address/length. We will emit an error message and mark
1201     + * the region as invalid, so that it will cause an additional error if
1202     + * it is ever used. Then return AE_OK.
1203     + */
1204     + ACPI_EXCEPTION((AE_INFO, status,
1205     + "During address validation of OpRegion [%4.4s]",
1206     + node->name.ascii));
1207     + obj_desc->common.flags |= AOPOBJ_INVALID;
1208     + status = AE_OK;
1209     + }
1210     +
1211     return_ACPI_STATUS(status);
1212     }
1213     diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
1214     index 3b5c318..e56f3be 100644
1215     --- a/drivers/acpi/numa.c
1216     +++ b/drivers/acpi/numa.c
1217     @@ -45,6 +45,8 @@ static int pxm_to_node_map[MAX_PXM_DOMAINS]
1218     static int node_to_pxm_map[MAX_NUMNODES]
1219     = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
1220    
1221     +unsigned char acpi_srat_revision __initdata;
1222     +
1223     int pxm_to_node(int pxm)
1224     {
1225     if (pxm < 0)
1226     @@ -255,9 +257,13 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
1227    
1228     static int __init acpi_parse_srat(struct acpi_table_header *table)
1229     {
1230     + struct acpi_table_srat *srat;
1231     if (!table)
1232     return -EINVAL;
1233    
1234     + srat = (struct acpi_table_srat *)table;
1235     + acpi_srat_revision = srat->header.revision;
1236     +
1237     /* Real work done in acpi_table_parse_srat below. */
1238    
1239     return 0;
1240     diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
1241     index 3a0428e..c850de4 100644
1242     --- a/drivers/acpi/processor_core.c
1243     +++ b/drivers/acpi/processor_core.c
1244     @@ -173,8 +173,30 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
1245     apic_id = map_mat_entry(handle, type, acpi_id);
1246     if (apic_id == -1)
1247     apic_id = map_madt_entry(type, acpi_id);
1248     - if (apic_id == -1)
1249     - return apic_id;
1250     + if (apic_id == -1) {
1251     + /*
1252     + * On UP processor, there is no _MAT or MADT table.
1253     + * So above apic_id is always set to -1.
1254     + *
1255     + * BIOS may define multiple CPU handles even for UP processor.
1256     + * For example,
1257     + *
1258     + * Scope (_PR)
1259     + * {
1260     + * Processor (CPU0, 0x00, 0x00000410, 0x06) {}
1261     + * Processor (CPU1, 0x01, 0x00000410, 0x06) {}
1262     + * Processor (CPU2, 0x02, 0x00000410, 0x06) {}
1263     + * Processor (CPU3, 0x03, 0x00000410, 0x06) {}
1264     + * }
1265     + *
1266     + * Ignores apic_id and always return 0 for CPU0's handle.
1267     + * Return -1 for other CPU's handle.
1268     + */
1269     + if (acpi_id == 0)
1270     + return acpi_id;
1271     + else
1272     + return apic_id;
1273     + }
1274    
1275     #ifdef CONFIG_SMP
1276     for_each_possible_cpu(i) {
1277     diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
1278     index 990f5a8..48e06be 100644
1279     --- a/drivers/bcma/host_pci.c
1280     +++ b/drivers/bcma/host_pci.c
1281     @@ -227,11 +227,14 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
1282     #ifdef CONFIG_PM
1283     static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state)
1284     {
1285     + struct bcma_bus *bus = pci_get_drvdata(dev);
1286     +
1287     /* Host specific */
1288     pci_save_state(dev);
1289     pci_disable_device(dev);
1290     pci_set_power_state(dev, pci_choose_state(dev, state));
1291    
1292     + bus->mapped_core = NULL;
1293     return 0;
1294     }
1295    
1296     diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
1297     index 587cce5..b0f553b 100644
1298     --- a/drivers/block/cciss.c
1299     +++ b/drivers/block/cciss.c
1300     @@ -1735,7 +1735,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
1301     case CCISS_BIG_PASSTHRU:
1302     return cciss_bigpassthru(h, argp);
1303    
1304     - /* scsi_cmd_ioctl handles these, below, though some are not */
1305     + /* scsi_cmd_blk_ioctl handles these, below, though some are not */
1306     /* very meaningful for cciss. SG_IO is the main one people want. */
1307    
1308     case SG_GET_VERSION_NUM:
1309     @@ -1746,9 +1746,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
1310     case SG_EMULATED_HOST:
1311     case SG_IO:
1312     case SCSI_IOCTL_SEND_COMMAND:
1313     - return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
1314     + return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
1315    
1316     - /* scsi_cmd_ioctl would normally handle these, below, but */
1317     + /* scsi_cmd_blk_ioctl would normally handle these, below, but */
1318     /* they aren't a good fit for cciss, as CD-ROMs are */
1319     /* not supported, and we don't have any bus/target/lun */
1320     /* which we present to the kernel. */
1321     diff --git a/drivers/block/ub.c b/drivers/block/ub.c
1322     index 0e376d4..7333b9e 100644
1323     --- a/drivers/block/ub.c
1324     +++ b/drivers/block/ub.c
1325     @@ -1744,12 +1744,11 @@ static int ub_bd_release(struct gendisk *disk, fmode_t mode)
1326     static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
1327     unsigned int cmd, unsigned long arg)
1328     {
1329     - struct gendisk *disk = bdev->bd_disk;
1330     void __user *usermem = (void __user *) arg;
1331     int ret;
1332    
1333     mutex_lock(&ub_mutex);
1334     - ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
1335     + ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, usermem);
1336     mutex_unlock(&ub_mutex);
1337    
1338     return ret;
1339     diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
1340     index 4d0b70a..e46f2f7 100644
1341     --- a/drivers/block/virtio_blk.c
1342     +++ b/drivers/block/virtio_blk.c
1343     @@ -243,8 +243,8 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
1344     if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
1345     return -ENOTTY;
1346    
1347     - return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
1348     - (void __user *)data);
1349     + return scsi_cmd_blk_ioctl(bdev, mode, cmd,
1350     + (void __user *)data);
1351     }
1352    
1353     /* We provide getgeo only to please some old bootloader/partitioning tools */
1354     diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
1355     index f997c27..cedb231 100644
1356     --- a/drivers/cdrom/cdrom.c
1357     +++ b/drivers/cdrom/cdrom.c
1358     @@ -2747,12 +2747,11 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
1359     {
1360     void __user *argp = (void __user *)arg;
1361     int ret;
1362     - struct gendisk *disk = bdev->bd_disk;
1363    
1364     /*
1365     * Try the generic SCSI command ioctl's first.
1366     */
1367     - ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
1368     + ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
1369     if (ret != -ENOTTY)
1370     return ret;
1371    
1372     diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
1373     index bfc08f6..31b0d1a 100644
1374     --- a/drivers/gpu/drm/radeon/r100.c
1375     +++ b/drivers/gpu/drm/radeon/r100.c
1376     @@ -2177,6 +2177,7 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev)
1377     void r100_bm_disable(struct radeon_device *rdev)
1378     {
1379     u32 tmp;
1380     + u16 tmp16;
1381    
1382     /* disable bus mastering */
1383     tmp = RREG32(R_000030_BUS_CNTL);
1384     @@ -2187,8 +2188,8 @@ void r100_bm_disable(struct radeon_device *rdev)
1385     WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
1386     tmp = RREG32(RADEON_BUS_CNTL);
1387     mdelay(1);
1388     - pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
1389     - pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
1390     + pci_read_config_word(rdev->pdev, 0x4, &tmp16);
1391     + pci_write_config_word(rdev->pdev, 0x4, tmp16 & 0xFFFB);
1392     mdelay(1);
1393     }
1394    
1395     diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
1396     index f5ac7e7..c45d921 100644
1397     --- a/drivers/gpu/drm/radeon/r600_hdmi.c
1398     +++ b/drivers/gpu/drm/radeon/r600_hdmi.c
1399     @@ -196,6 +196,13 @@ static void r600_hdmi_videoinfoframe(
1400     frame[0xD] = (right_bar >> 8);
1401    
1402     r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
1403     + /* Our header values (type, version, length) should be alright, Intel
1404     + * is using the same. Checksum function also seems to be OK, it works
1405     + * fine for audio infoframe. However calculated value is always lower
1406     + * by 2 in comparison to fglrx. It breaks displaying anything in case
1407     + * of TVs that strictly check the checksum. Hack it manually here to
1408     + * workaround this issue. */
1409     + frame[0x0] += 2;
1410    
1411     WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
1412     frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1413     diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
1414     index c4d00a1..9b39145 100644
1415     --- a/drivers/gpu/drm/radeon/radeon_device.c
1416     +++ b/drivers/gpu/drm/radeon/radeon_device.c
1417     @@ -224,8 +224,11 @@ int radeon_wb_init(struct radeon_device *rdev)
1418     if (radeon_no_wb == 1)
1419     rdev->wb.enabled = false;
1420     else {
1421     - /* often unreliable on AGP */
1422     if (rdev->flags & RADEON_IS_AGP) {
1423     + /* often unreliable on AGP */
1424     + rdev->wb.enabled = false;
1425     + } else if (rdev->family < CHIP_R300) {
1426     + /* often unreliable on pre-r300 */
1427     rdev->wb.enabled = false;
1428     } else {
1429     rdev->wb.enabled = true;
1430     diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
1431     index b1053d6..c259e21 100644
1432     --- a/drivers/gpu/drm/radeon/rs600.c
1433     +++ b/drivers/gpu/drm/radeon/rs600.c
1434     @@ -324,10 +324,10 @@ void rs600_hpd_fini(struct radeon_device *rdev)
1435    
1436     void rs600_bm_disable(struct radeon_device *rdev)
1437     {
1438     - u32 tmp;
1439     + u16 tmp;
1440    
1441     /* disable bus mastering */
1442     - pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
1443     + pci_read_config_word(rdev->pdev, 0x4, &tmp);
1444     pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
1445     mdelay(1);
1446     }
1447     diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
1448     index 22a4a05..d21f6d0 100644
1449     --- a/drivers/hid/Kconfig
1450     +++ b/drivers/hid/Kconfig
1451     @@ -335,6 +335,7 @@ config HID_MULTITOUCH
1452     Say Y here if you have one of the following devices:
1453     - 3M PCT touch screens
1454     - ActionStar dual touch panels
1455     + - Atmel panels
1456     - Cando dual touch panels
1457     - Chunghwa panels
1458     - CVTouch panels
1459     @@ -355,6 +356,7 @@ config HID_MULTITOUCH
1460     - Touch International Panels
1461     - Unitec Panels
1462     - XAT optical touch panels
1463     + - Xiroku optical touch panels
1464    
1465     If unsure, say N.
1466    
1467     @@ -620,6 +622,7 @@ config HID_WIIMOTE
1468     depends on BT_HIDP
1469     depends on LEDS_CLASS
1470     select POWER_SUPPLY
1471     + select INPUT_FF_MEMLESS
1472     ---help---
1473     Support for the Nintendo Wii Remote bluetooth device.
1474    
1475     diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1476     index af35384..bb656d8 100644
1477     --- a/drivers/hid/hid-core.c
1478     +++ b/drivers/hid/hid-core.c
1479     @@ -362,7 +362,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
1480    
1481     case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
1482     parser->global.report_size = item_udata(item);
1483     - if (parser->global.report_size > 32) {
1484     + if (parser->global.report_size > 96) {
1485     dbg_hid("invalid report_size %d\n",
1486     parser->global.report_size);
1487     return -1;
1488     @@ -1404,11 +1404,13 @@ static const struct hid_device_id hid_have_special_driver[] = {
1489     { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
1490     { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
1491     { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
1492     - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
1493     - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
1494     - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
1495     - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
1496     - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
1497     + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
1498     + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
1499     + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
1500     + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
1501     + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
1502     + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
1503     + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
1504     { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
1505     { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2515) },
1506     { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
1507     @@ -1423,6 +1425,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1508     { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
1509     { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
1510     { HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
1511     + { HID_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT, USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
1512     { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6650) },
1513     { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
1514     { HID_USB_DEVICE(USB_VENDOR_ID_ILITEK, USB_DEVICE_ID_ILITEK_MULTITOUCH) },
1515     @@ -1549,6 +1552,15 @@ static const struct hid_device_id hid_have_special_driver[] = {
1516     { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
1517     { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
1518     { HID_USB_DEVICE(USB_VENDOR_ID_XAT, USB_DEVICE_ID_XAT_CSR) },
1519     + { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX) },
1520     + { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX) },
1521     + { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR) },
1522     + { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX1) },
1523     + { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX1) },
1524     + { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR1) },
1525     + { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX2) },
1526     + { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX2) },
1527     + { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR2) },
1528     { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
1529     { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
1530     { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
1531     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1532     index 4a441a6..00cabb3 100644
1533     --- a/drivers/hid/hid-ids.h
1534     +++ b/drivers/hid/hid-ids.h
1535     @@ -21,6 +21,7 @@
1536     #define USB_VENDOR_ID_3M 0x0596
1537     #define USB_DEVICE_ID_3M1968 0x0500
1538     #define USB_DEVICE_ID_3M2256 0x0502
1539     +#define USB_DEVICE_ID_3M3266 0x0506
1540    
1541     #define USB_VENDOR_ID_A4TECH 0x09da
1542     #define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006
1543     @@ -145,6 +146,9 @@
1544     #define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
1545     #define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
1546    
1547     +#define USB_VENDOR_ID_ATMEL 0x03eb
1548     +#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
1549     +
1550     #define USB_VENDOR_ID_AVERMEDIA 0x07ca
1551     #define USB_DEVICE_ID_AVER_FM_MR800 0xb800
1552    
1553     @@ -230,11 +234,14 @@
1554    
1555     #define USB_VENDOR_ID_DWAV 0x0eef
1556     #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
1557     -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d
1558     -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1 0x720c
1559     -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2 0x72a1
1560     -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3 0x480e
1561     -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4 0x726b
1562     +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D 0x480d
1563     +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E 0x480e
1564     +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C 0x720c
1565     +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B 0x726b
1566     +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1 0x72a1
1567     +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA 0x72fa
1568     +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302 0x7302
1569     +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
1570    
1571     #define USB_VENDOR_ID_ELECOM 0x056e
1572     #define USB_DEVICE_ID_ELECOM_BM084 0x0061
1573     @@ -356,6 +363,9 @@
1574     #define USB_VENDOR_ID_HANVON 0x20b3
1575     #define USB_DEVICE_ID_HANVON_MULTITOUCH 0x0a18
1576    
1577     +#define USB_VENDOR_ID_HANVON_ALT 0x22ed
1578     +#define USB_DEVICE_ID_HANVON_ALT_MULTITOUCH 0x1010
1579     +
1580     #define USB_VENDOR_ID_HAPP 0x078b
1581     #define USB_DEVICE_ID_UGCI_DRIVING 0x0010
1582     #define USB_DEVICE_ID_UGCI_FLYING 0x0020
1583     @@ -707,6 +717,17 @@
1584     #define USB_VENDOR_ID_XAT 0x2505
1585     #define USB_DEVICE_ID_XAT_CSR 0x0220
1586    
1587     +#define USB_VENDOR_ID_XIROKU 0x1477
1588     +#define USB_DEVICE_ID_XIROKU_SPX 0x1006
1589     +#define USB_DEVICE_ID_XIROKU_MPX 0x1007
1590     +#define USB_DEVICE_ID_XIROKU_CSR 0x100e
1591     +#define USB_DEVICE_ID_XIROKU_SPX1 0x1021
1592     +#define USB_DEVICE_ID_XIROKU_CSR1 0x1022
1593     +#define USB_DEVICE_ID_XIROKU_MPX1 0x1023
1594     +#define USB_DEVICE_ID_XIROKU_SPX2 0x1024
1595     +#define USB_DEVICE_ID_XIROKU_CSR2 0x1025
1596     +#define USB_DEVICE_ID_XIROKU_MPX2 0x1026
1597     +
1598     #define USB_VENDOR_ID_YEALINK 0x6993
1599     #define USB_DEVICE_ID_YEALINK_P1K_P4K_B2K 0xb001
1600    
1601     diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
1602     index f1c909f..995fc4c 100644
1603     --- a/drivers/hid/hid-multitouch.c
1604     +++ b/drivers/hid/hid-multitouch.c
1605     @@ -609,12 +609,20 @@ static const struct hid_device_id mt_devices[] = {
1606     { .driver_data = MT_CLS_3M,
1607     HID_USB_DEVICE(USB_VENDOR_ID_3M,
1608     USB_DEVICE_ID_3M2256) },
1609     + { .driver_data = MT_CLS_3M,
1610     + HID_USB_DEVICE(USB_VENDOR_ID_3M,
1611     + USB_DEVICE_ID_3M3266) },
1612    
1613     /* ActionStar panels */
1614     { .driver_data = MT_CLS_DEFAULT,
1615     HID_USB_DEVICE(USB_VENDOR_ID_ACTIONSTAR,
1616     USB_DEVICE_ID_ACTIONSTAR_1011) },
1617    
1618     + /* Atmel panels */
1619     + { .driver_data = MT_CLS_SERIAL,
1620     + HID_USB_DEVICE(USB_VENDOR_ID_ATMEL,
1621     + USB_DEVICE_ID_ATMEL_MULTITOUCH) },
1622     +
1623     /* Cando panels */
1624     { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
1625     HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
1626     @@ -645,23 +653,32 @@ static const struct hid_device_id mt_devices[] = {
1627     USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
1628    
1629     /* eGalax devices (resistive) */
1630     - { .driver_data = MT_CLS_EGALAX,
1631     + { .driver_data = MT_CLS_EGALAX,
1632     HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1633     - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
1634     - { .driver_data = MT_CLS_EGALAX,
1635     + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
1636     + { .driver_data = MT_CLS_EGALAX,
1637     HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1638     - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
1639     + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
1640    
1641     /* eGalax devices (capacitive) */
1642     - { .driver_data = MT_CLS_EGALAX,
1643     + { .driver_data = MT_CLS_EGALAX,
1644     + HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1645     + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
1646     + { .driver_data = MT_CLS_EGALAX,
1647     HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1648     - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
1649     - { .driver_data = MT_CLS_EGALAX,
1650     + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
1651     + { .driver_data = MT_CLS_EGALAX,
1652     HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1653     - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
1654     - { .driver_data = MT_CLS_EGALAX,
1655     + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
1656     + { .driver_data = MT_CLS_EGALAX,
1657     HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1658     - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
1659     + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA) },
1660     + { .driver_data = MT_CLS_EGALAX,
1661     + HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1662     + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
1663     + { .driver_data = MT_CLS_EGALAX,
1664     + HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1665     + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
1666    
1667     /* Elo TouchSystems IntelliTouch Plus panel */
1668     { .driver_data = MT_CLS_DUAL_NSMU_CONTACTID,
1669     @@ -678,6 +695,11 @@ static const struct hid_device_id mt_devices[] = {
1670     HID_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
1671     USB_DEVICE_ID_GOODTOUCH_000f) },
1672    
1673     + /* Hanvon panels */
1674     + { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
1675     + HID_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT,
1676     + USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
1677     +
1678     /* Ideacom panel */
1679     { .driver_data = MT_CLS_SERIAL,
1680     HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM,
1681     @@ -758,6 +780,35 @@ static const struct hid_device_id mt_devices[] = {
1682     HID_USB_DEVICE(USB_VENDOR_ID_XAT,
1683     USB_DEVICE_ID_XAT_CSR) },
1684    
1685     + /* Xiroku */
1686     + { .driver_data = MT_CLS_DEFAULT,
1687     + HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1688     + USB_DEVICE_ID_XIROKU_SPX) },
1689     + { .driver_data = MT_CLS_DEFAULT,
1690     + HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1691     + USB_DEVICE_ID_XIROKU_MPX) },
1692     + { .driver_data = MT_CLS_DEFAULT,
1693     + HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1694     + USB_DEVICE_ID_XIROKU_CSR) },
1695     + { .driver_data = MT_CLS_DEFAULT,
1696     + HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1697     + USB_DEVICE_ID_XIROKU_SPX1) },
1698     + { .driver_data = MT_CLS_DEFAULT,
1699     + HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1700     + USB_DEVICE_ID_XIROKU_MPX1) },
1701     + { .driver_data = MT_CLS_DEFAULT,
1702     + HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1703     + USB_DEVICE_ID_XIROKU_CSR1) },
1704     + { .driver_data = MT_CLS_DEFAULT,
1705     + HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1706     + USB_DEVICE_ID_XIROKU_SPX2) },
1707     + { .driver_data = MT_CLS_DEFAULT,
1708     + HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1709     + USB_DEVICE_ID_XIROKU_MPX2) },
1710     + { .driver_data = MT_CLS_DEFAULT,
1711     + HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1712     + USB_DEVICE_ID_XIROKU_CSR2) },
1713     +
1714     { }
1715     };
1716     MODULE_DEVICE_TABLE(hid, mt_devices);
1717     diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
1718     index b6807db..5b667e5 100644
1719     --- a/drivers/i2c/busses/i2c-ali1535.c
1720     +++ b/drivers/i2c/busses/i2c-ali1535.c
1721     @@ -140,7 +140,7 @@ static unsigned short ali1535_smba;
1722     defined to make the transition easier. */
1723     static int __devinit ali1535_setup(struct pci_dev *dev)
1724     {
1725     - int retval = -ENODEV;
1726     + int retval;
1727     unsigned char temp;
1728    
1729     /* Check the following things:
1730     @@ -155,6 +155,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
1731     if (ali1535_smba == 0) {
1732     dev_warn(&dev->dev,
1733     "ALI1535_smb region uninitialized - upgrade BIOS?\n");
1734     + retval = -ENODEV;
1735     goto exit;
1736     }
1737    
1738     @@ -167,6 +168,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
1739     ali1535_driver.name)) {
1740     dev_err(&dev->dev, "ALI1535_smb region 0x%x already in use!\n",
1741     ali1535_smba);
1742     + retval = -EBUSY;
1743     goto exit;
1744     }
1745    
1746     @@ -174,6 +176,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
1747     pci_read_config_byte(dev, SMBCFG, &temp);
1748     if ((temp & ALI1535_SMBIO_EN) == 0) {
1749     dev_err(&dev->dev, "SMB device not enabled - upgrade BIOS?\n");
1750     + retval = -ENODEV;
1751     goto exit_free;
1752     }
1753    
1754     @@ -181,6 +184,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
1755     pci_read_config_byte(dev, SMBHSTCFG, &temp);
1756     if ((temp & 1) == 0) {
1757     dev_err(&dev->dev, "SMBus controller not enabled - upgrade BIOS?\n");
1758     + retval = -ENODEV;
1759     goto exit_free;
1760     }
1761    
1762     @@ -198,12 +202,11 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
1763     dev_dbg(&dev->dev, "SMBREV = 0x%X\n", temp);
1764     dev_dbg(&dev->dev, "ALI1535_smba = 0x%X\n", ali1535_smba);
1765    
1766     - retval = 0;
1767     -exit:
1768     - return retval;
1769     + return 0;
1770    
1771     exit_free:
1772     release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
1773     +exit:
1774     return retval;
1775     }
1776    
1777     diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
1778     index 18936ac..730215e 100644
1779     --- a/drivers/i2c/busses/i2c-eg20t.c
1780     +++ b/drivers/i2c/busses/i2c-eg20t.c
1781     @@ -243,7 +243,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
1782     if (pch_clk > PCH_MAX_CLK)
1783     pch_clk = 62500;
1784    
1785     - pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8;
1786     + pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8);
1787     /* Set transfer speed in I2CBC */
1788     iowrite32(pch_i2cbc, p + PCH_I2CBC);
1789    
1790     diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
1791     index ff1e127..4853b52 100644
1792     --- a/drivers/i2c/busses/i2c-nforce2.c
1793     +++ b/drivers/i2c/busses/i2c-nforce2.c
1794     @@ -356,7 +356,7 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
1795     error = acpi_check_region(smbus->base, smbus->size,
1796     nforce2_driver.name);
1797     if (error)
1798     - return -1;
1799     + return error;
1800    
1801     if (!request_region(smbus->base, smbus->size, nforce2_driver.name)) {
1802     dev_err(&smbus->adapter.dev, "Error requesting region %02x .. %02X for %s\n",
1803     diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
1804     index fa23faa..257c1a5 100644
1805     --- a/drivers/i2c/busses/i2c-omap.c
1806     +++ b/drivers/i2c/busses/i2c-omap.c
1807     @@ -235,7 +235,7 @@ static const u8 reg_map_ip_v2[] = {
1808     [OMAP_I2C_BUF_REG] = 0x94,
1809     [OMAP_I2C_CNT_REG] = 0x98,
1810     [OMAP_I2C_DATA_REG] = 0x9c,
1811     - [OMAP_I2C_SYSC_REG] = 0x20,
1812     + [OMAP_I2C_SYSC_REG] = 0x10,
1813     [OMAP_I2C_CON_REG] = 0xa4,
1814     [OMAP_I2C_OA_REG] = 0xa8,
1815     [OMAP_I2C_SA_REG] = 0xac,
1816     diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
1817     index 4375866..6d60284 100644
1818     --- a/drivers/i2c/busses/i2c-sis5595.c
1819     +++ b/drivers/i2c/busses/i2c-sis5595.c
1820     @@ -147,7 +147,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
1821     u16 a;
1822     u8 val;
1823     int *i;
1824     - int retval = -ENODEV;
1825     + int retval;
1826    
1827     /* Look for imposters */
1828     for (i = blacklist; *i != 0; i++) {
1829     @@ -223,7 +223,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
1830    
1831     error:
1832     release_region(sis5595_base + SMB_INDEX, 2);
1833     - return retval;
1834     + return -ENODEV;
1835     }
1836    
1837     static int sis5595_transaction(struct i2c_adapter *adap)
1838     diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
1839     index e6f539e..b617fd0 100644
1840     --- a/drivers/i2c/busses/i2c-sis630.c
1841     +++ b/drivers/i2c/busses/i2c-sis630.c
1842     @@ -393,7 +393,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
1843     {
1844     unsigned char b;
1845     struct pci_dev *dummy = NULL;
1846     - int retval = -ENODEV, i;
1847     + int retval, i;
1848    
1849     /* check for supported SiS devices */
1850     for (i=0; supported[i] > 0 ; i++) {
1851     @@ -418,18 +418,21 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
1852     */
1853     if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG,&b)) {
1854     dev_err(&sis630_dev->dev, "Error: Can't read bios ctl reg\n");
1855     + retval = -ENODEV;
1856     goto exit;
1857     }
1858     /* if ACPI already enabled , do nothing */
1859     if (!(b & 0x80) &&
1860     pci_write_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, b | 0x80)) {
1861     dev_err(&sis630_dev->dev, "Error: Can't enable ACPI\n");
1862     + retval = -ENODEV;
1863     goto exit;
1864     }
1865    
1866     /* Determine the ACPI base address */
1867     if (pci_read_config_word(sis630_dev,SIS630_ACPI_BASE_REG,&acpi_base)) {
1868     dev_err(&sis630_dev->dev, "Error: Can't determine ACPI base address\n");
1869     + retval = -ENODEV;
1870     goto exit;
1871     }
1872    
1873     @@ -445,6 +448,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
1874     sis630_driver.name)) {
1875     dev_err(&sis630_dev->dev, "SMBus registers 0x%04x-0x%04x already "
1876     "in use!\n", acpi_base + SMB_STS, acpi_base + SMB_SAA);
1877     + retval = -EBUSY;
1878     goto exit;
1879     }
1880    
1881     diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
1882     index 0b012f1..58261d4 100644
1883     --- a/drivers/i2c/busses/i2c-viapro.c
1884     +++ b/drivers/i2c/busses/i2c-viapro.c
1885     @@ -324,7 +324,7 @@ static int __devinit vt596_probe(struct pci_dev *pdev,
1886     const struct pci_device_id *id)
1887     {
1888     unsigned char temp;
1889     - int error = -ENODEV;
1890     + int error;
1891    
1892     /* Determine the address of the SMBus areas */
1893     if (force_addr) {
1894     @@ -390,6 +390,7 @@ found:
1895     dev_err(&pdev->dev, "SMBUS: Error: Host SMBus "
1896     "controller not enabled! - upgrade BIOS or "
1897     "use force=1\n");
1898     + error = -ENODEV;
1899     goto release_region;
1900     }
1901     }
1902     @@ -422,9 +423,11 @@ found:
1903     "SMBus Via Pro adapter at %04x", vt596_smba);
1904    
1905     vt596_pdev = pci_dev_get(pdev);
1906     - if (i2c_add_adapter(&vt596_adapter)) {
1907     + error = i2c_add_adapter(&vt596_adapter);
1908     + if (error) {
1909     pci_dev_put(vt596_pdev);
1910     vt596_pdev = NULL;
1911     + goto release_region;
1912     }
1913    
1914     /* Always return failure here. This is to allow other drivers to bind
1915     diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
1916     index d267b7a..a22ca84 100644
1917     --- a/drivers/ide/ide-floppy_ioctl.c
1918     +++ b/drivers/ide/ide-floppy_ioctl.c
1919     @@ -292,8 +292,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
1920     * and CDROM_SEND_PACKET (legacy) ioctls
1921     */
1922     if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
1923     - err = scsi_cmd_ioctl(bdev->bd_disk->queue, bdev->bd_disk,
1924     - mode, cmd, argp);
1925     + err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
1926    
1927     if (err == -ENOTTY)
1928     err = generic_ide_ioctl(drive, bdev, cmd, arg);
1929     diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
1930     index 5d2f8e1..5b39216 100644
1931     --- a/drivers/idle/intel_idle.c
1932     +++ b/drivers/idle/intel_idle.c
1933     @@ -348,7 +348,8 @@ static int intel_idle_probe(void)
1934     cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
1935    
1936     if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
1937     - !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
1938     + !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
1939     + !mwait_substates)
1940     return -ENODEV;
1941    
1942     pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
1943     @@ -394,7 +395,7 @@ static int intel_idle_probe(void)
1944     if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
1945     lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
1946     else {
1947     - smp_call_function(__setup_broadcast_timer, (void *)true, 1);
1948     + on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
1949     register_cpu_notifier(&setup_broadcast_notifier);
1950     }
1951    
1952     @@ -471,7 +472,7 @@ static int intel_idle_cpuidle_driver_init(void)
1953     }
1954    
1955     if (auto_demotion_disable_flags)
1956     - smp_call_function(auto_demotion_disable, NULL, 1);
1957     + on_each_cpu(auto_demotion_disable, NULL, 1);
1958    
1959     return 0;
1960     }
1961     @@ -568,7 +569,7 @@ static void __exit intel_idle_exit(void)
1962     cpuidle_unregister_driver(&intel_idle_driver);
1963    
1964     if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
1965     - smp_call_function(__setup_broadcast_timer, (void *)false, 1);
1966     + on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
1967     unregister_cpu_notifier(&setup_broadcast_notifier);
1968     }
1969    
1970     diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
1971     index f84c080..9fb18c1 100644
1972     --- a/drivers/md/dm-flakey.c
1973     +++ b/drivers/md/dm-flakey.c
1974     @@ -368,8 +368,17 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
1975     static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
1976     {
1977     struct flakey_c *fc = ti->private;
1978     + struct dm_dev *dev = fc->dev;
1979     + int r = 0;
1980    
1981     - return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg);
1982     + /*
1983     + * Only pass ioctls through if the device sizes match exactly.
1984     + */
1985     + if (fc->start ||
1986     + ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
1987     + r = scsi_verify_blk_ioctl(NULL, cmd);
1988     +
1989     + return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
1990     }
1991    
1992     static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
1993     diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
1994     index 3921e3b..9728839 100644
1995     --- a/drivers/md/dm-linear.c
1996     +++ b/drivers/md/dm-linear.c
1997     @@ -116,7 +116,17 @@ static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
1998     unsigned long arg)
1999     {
2000     struct linear_c *lc = (struct linear_c *) ti->private;
2001     - return __blkdev_driver_ioctl(lc->dev->bdev, lc->dev->mode, cmd, arg);
2002     + struct dm_dev *dev = lc->dev;
2003     + int r = 0;
2004     +
2005     + /*
2006     + * Only pass ioctls through if the device sizes match exactly.
2007     + */
2008     + if (lc->start ||
2009     + ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
2010     + r = scsi_verify_blk_ioctl(NULL, cmd);
2011     +
2012     + return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
2013     }
2014    
2015     static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2016     diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
2017     index 5e0090e..801d92d 100644
2018     --- a/drivers/md/dm-mpath.c
2019     +++ b/drivers/md/dm-mpath.c
2020     @@ -1520,6 +1520,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
2021    
2022     spin_unlock_irqrestore(&m->lock, flags);
2023    
2024     + /*
2025     + * Only pass ioctls through if the device sizes match exactly.
2026     + */
2027     + if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
2028     + r = scsi_verify_blk_ioctl(NULL, cmd);
2029     +
2030     return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
2031     }
2032    
2033     diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
2034     index ede2461..7d9e071 100644
2035     --- a/drivers/md/raid1.c
2036     +++ b/drivers/md/raid1.c
2037     @@ -525,8 +525,17 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
2038     if (test_bit(WriteMostly, &rdev->flags)) {
2039     /* Don't balance among write-mostly, just
2040     * use the first as a last resort */
2041     - if (best_disk < 0)
2042     + if (best_disk < 0) {
2043     + if (is_badblock(rdev, this_sector, sectors,
2044     + &first_bad, &bad_sectors)) {
2045     + if (first_bad < this_sector)
2046     + /* Cannot use this */
2047     + continue;
2048     + best_good_sectors = first_bad - this_sector;
2049     + } else
2050     + best_good_sectors = sectors;
2051     best_disk = disk;
2052     + }
2053     continue;
2054     }
2055     /* This is a reasonable device to use. It might
2056     diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
2057     index bcb45be..f0482b2 100644
2058     --- a/drivers/media/video/cx23885/cx23885-dvb.c
2059     +++ b/drivers/media/video/cx23885/cx23885-dvb.c
2060     @@ -940,6 +940,11 @@ static int dvb_register(struct cx23885_tsport *port)
2061    
2062     fe = dvb_attach(xc4000_attach, fe0->dvb.frontend,
2063     &dev->i2c_bus[1].i2c_adap, &cfg);
2064     + if (!fe) {
2065     + printk(KERN_ERR "%s/2: xc4000 attach failed\n",
2066     + dev->name);
2067     + goto frontend_detach;
2068     + }
2069     }
2070     break;
2071     case CX23885_BOARD_TBS_6920:
2072     diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
2073     index 0d719fa..3929d93 100644
2074     --- a/drivers/media/video/cx88/cx88-cards.c
2075     +++ b/drivers/media/video/cx88/cx88-cards.c
2076     @@ -1573,8 +1573,8 @@ static const struct cx88_board cx88_boards[] = {
2077     .name = "Pinnacle Hybrid PCTV",
2078     .tuner_type = TUNER_XC2028,
2079     .tuner_addr = 0x61,
2080     - .radio_type = TUNER_XC2028,
2081     - .radio_addr = 0x61,
2082     + .radio_type = UNSET,
2083     + .radio_addr = ADDR_UNSET,
2084     .input = { {
2085     .type = CX88_VMUX_TELEVISION,
2086     .vmux = 0,
2087     @@ -1611,8 +1611,8 @@ static const struct cx88_board cx88_boards[] = {
2088     .name = "Leadtek TV2000 XP Global",
2089     .tuner_type = TUNER_XC2028,
2090     .tuner_addr = 0x61,
2091     - .radio_type = TUNER_XC2028,
2092     - .radio_addr = 0x61,
2093     + .radio_type = UNSET,
2094     + .radio_addr = ADDR_UNSET,
2095     .input = { {
2096     .type = CX88_VMUX_TELEVISION,
2097     .vmux = 0,
2098     @@ -2043,8 +2043,8 @@ static const struct cx88_board cx88_boards[] = {
2099     .name = "Terratec Cinergy HT PCI MKII",
2100     .tuner_type = TUNER_XC2028,
2101     .tuner_addr = 0x61,
2102     - .radio_type = TUNER_XC2028,
2103     - .radio_addr = 0x61,
2104     + .radio_type = UNSET,
2105     + .radio_addr = ADDR_UNSET,
2106     .input = { {
2107     .type = CX88_VMUX_TELEVISION,
2108     .vmux = 0,
2109     @@ -2082,9 +2082,9 @@ static const struct cx88_board cx88_boards[] = {
2110     [CX88_BOARD_WINFAST_DTV1800H] = {
2111     .name = "Leadtek WinFast DTV1800 Hybrid",
2112     .tuner_type = TUNER_XC2028,
2113     - .radio_type = TUNER_XC2028,
2114     + .radio_type = UNSET,
2115     .tuner_addr = 0x61,
2116     - .radio_addr = 0x61,
2117     + .radio_addr = ADDR_UNSET,
2118     /*
2119     * GPIO setting
2120     *
2121     @@ -2123,9 +2123,9 @@ static const struct cx88_board cx88_boards[] = {
2122     [CX88_BOARD_WINFAST_DTV1800H_XC4000] = {
2123     .name = "Leadtek WinFast DTV1800 H (XC4000)",
2124     .tuner_type = TUNER_XC4000,
2125     - .radio_type = TUNER_XC4000,
2126     + .radio_type = UNSET,
2127     .tuner_addr = 0x61,
2128     - .radio_addr = 0x61,
2129     + .radio_addr = ADDR_UNSET,
2130     /*
2131     * GPIO setting
2132     *
2133     @@ -2164,9 +2164,9 @@ static const struct cx88_board cx88_boards[] = {
2134     [CX88_BOARD_WINFAST_DTV2000H_PLUS] = {
2135     .name = "Leadtek WinFast DTV2000 H PLUS",
2136     .tuner_type = TUNER_XC4000,
2137     - .radio_type = TUNER_XC4000,
2138     + .radio_type = UNSET,
2139     .tuner_addr = 0x61,
2140     - .radio_addr = 0x61,
2141     + .radio_addr = ADDR_UNSET,
2142     /*
2143     * GPIO
2144     * 2: 1: mute audio
2145     diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
2146     index dadf11f..cf7788f 100644
2147     --- a/drivers/media/video/uvc/uvc_v4l2.c
2148     +++ b/drivers/media/video/uvc/uvc_v4l2.c
2149     @@ -58,6 +58,15 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
2150     break;
2151    
2152     case V4L2_CTRL_TYPE_MENU:
2153     + /* Prevent excessive memory consumption, as well as integer
2154     + * overflows.
2155     + */
2156     + if (xmap->menu_count == 0 ||
2157     + xmap->menu_count > UVC_MAX_CONTROL_MENU_ENTRIES) {
2158     + ret = -EINVAL;
2159     + goto done;
2160     + }
2161     +
2162     size = xmap->menu_count * sizeof(*map->menu_info);
2163     map->menu_info = kmalloc(size, GFP_KERNEL);
2164     if (map->menu_info == NULL) {
2165     diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
2166     index 4c1392e..bc446ba 100644
2167     --- a/drivers/media/video/uvc/uvcvideo.h
2168     +++ b/drivers/media/video/uvc/uvcvideo.h
2169     @@ -113,6 +113,7 @@
2170    
2171     /* Maximum allowed number of control mappings per device */
2172     #define UVC_MAX_CONTROL_MAPPINGS 1024
2173     +#define UVC_MAX_CONTROL_MENU_ENTRIES 32
2174    
2175     /* Devices quirks */
2176     #define UVC_QUIRK_STATUS_INTERVAL 0x00000001
2177     diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
2178     index e1da8fc..639abee 100644
2179     --- a/drivers/media/video/v4l2-ioctl.c
2180     +++ b/drivers/media/video/v4l2-ioctl.c
2181     @@ -2226,6 +2226,10 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
2182     struct v4l2_ext_controls *ctrls = parg;
2183    
2184     if (ctrls->count != 0) {
2185     + if (ctrls->count > V4L2_CID_MAX_CTRLS) {
2186     + ret = -EINVAL;
2187     + break;
2188     + }
2189     *user_ptr = (void __user *)ctrls->controls;
2190     *kernel_ptr = (void *)&ctrls->controls;
2191     *array_size = sizeof(struct v4l2_ext_control)
2192     diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
2193     index d240427..fb7c27f 100644
2194     --- a/drivers/mmc/core/mmc.c
2195     +++ b/drivers/mmc/core/mmc.c
2196     @@ -1048,7 +1048,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
2197     *
2198     * WARNING: eMMC rules are NOT the same as SD DDR
2199     */
2200     - if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) {
2201     + if (ddr == MMC_1_2V_DDR_MODE) {
2202     err = mmc_set_signal_voltage(host,
2203     MMC_SIGNAL_VOLTAGE_120, 0);
2204     if (err)
2205     diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
2206     index 19ed580..6ce32a7 100644
2207     --- a/drivers/mmc/host/sdhci.c
2208     +++ b/drivers/mmc/host/sdhci.c
2209     @@ -1364,8 +1364,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
2210     if ((ios->timing == MMC_TIMING_UHS_SDR50) ||
2211     (ios->timing == MMC_TIMING_UHS_SDR104) ||
2212     (ios->timing == MMC_TIMING_UHS_DDR50) ||
2213     - (ios->timing == MMC_TIMING_UHS_SDR25) ||
2214     - (ios->timing == MMC_TIMING_UHS_SDR12))
2215     + (ios->timing == MMC_TIMING_UHS_SDR25))
2216     ctrl |= SDHCI_CTRL_HISPD;
2217    
2218     ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2219     @@ -2336,9 +2335,8 @@ int sdhci_suspend_host(struct sdhci_host *host)
2220     /* Disable tuning since we are suspending */
2221     if (host->version >= SDHCI_SPEC_300 && host->tuning_count &&
2222     host->tuning_mode == SDHCI_TUNING_MODE_1) {
2223     + del_timer_sync(&host->tuning_timer);
2224     host->flags &= ~SDHCI_NEEDS_RETUNING;
2225     - mod_timer(&host->tuning_timer, jiffies +
2226     - host->tuning_count * HZ);
2227     }
2228    
2229     ret = mmc_suspend_host(host->mmc);
2230     diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
2231     index ed8b5e7..424ca5f 100644
2232     --- a/drivers/mtd/mtd_blkdevs.c
2233     +++ b/drivers/mtd/mtd_blkdevs.c
2234     @@ -215,7 +215,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
2235    
2236     mutex_lock(&dev->lock);
2237    
2238     - if (dev->open++)
2239     + if (dev->open)
2240     goto unlock;
2241    
2242     kref_get(&dev->ref);
2243     @@ -235,6 +235,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
2244     goto error_release;
2245    
2246     unlock:
2247     + dev->open++;
2248     mutex_unlock(&dev->lock);
2249     blktrans_dev_put(dev);
2250     return ret;
2251     diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
2252     index 1e2fa62..f3cdce9 100644
2253     --- a/drivers/mtd/mtdoops.c
2254     +++ b/drivers/mtd/mtdoops.c
2255     @@ -253,6 +253,9 @@ static void find_next_position(struct mtdoops_context *cxt)
2256     size_t retlen;
2257    
2258     for (page = 0; page < cxt->oops_pages; page++) {
2259     + if (mtd->block_isbad &&
2260     + mtd->block_isbad(mtd, page * record_size))
2261     + continue;
2262     /* Assume the page is used */
2263     mark_page_used(cxt, page);
2264     ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
2265     @@ -369,7 +372,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
2266    
2267     /* oops_page_used is a bit field */
2268     cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
2269     - BITS_PER_LONG));
2270     + BITS_PER_LONG) * sizeof(unsigned long));
2271     if (!cxt->oops_page_used) {
2272     printk(KERN_ERR "mtdoops: could not allocate page array\n");
2273     return;
2274     diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
2275     index 52ffd91..811642f 100644
2276     --- a/drivers/mtd/tests/mtd_stresstest.c
2277     +++ b/drivers/mtd/tests/mtd_stresstest.c
2278     @@ -284,6 +284,12 @@ static int __init mtd_stresstest_init(void)
2279     (unsigned long long)mtd->size, mtd->erasesize,
2280     pgsize, ebcnt, pgcnt, mtd->oobsize);
2281    
2282     + if (ebcnt < 2) {
2283     + printk(PRINT_PREF "error: need at least 2 eraseblocks\n");
2284     + err = -ENOSPC;
2285     + goto out_put_mtd;
2286     + }
2287     +
2288     /* Read or write up 2 eraseblocks at a time */
2289     bufsize = mtd->erasesize * 2;
2290    
2291     @@ -322,6 +328,7 @@ out:
2292     kfree(bbt);
2293     vfree(writebuf);
2294     vfree(readbuf);
2295     +out_put_mtd:
2296     put_mtd_device(mtd);
2297     if (err)
2298     printk(PRINT_PREF "error %d occurred\n", err);
2299     diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
2300     index 3320a50..ad76592 100644
2301     --- a/drivers/mtd/ubi/cdev.c
2302     +++ b/drivers/mtd/ubi/cdev.c
2303     @@ -632,6 +632,9 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
2304     if (req->alignment != 1 && n)
2305     goto bad;
2306    
2307     + if (!req->name[0] || !req->name_len)
2308     + goto bad;
2309     +
2310     if (req->name_len > UBI_VOL_NAME_MAX) {
2311     err = -ENAMETOOLONG;
2312     goto bad;
2313     diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
2314     index 64fbb00..ead2cd1 100644
2315     --- a/drivers/mtd/ubi/debug.h
2316     +++ b/drivers/mtd/ubi/debug.h
2317     @@ -43,7 +43,10 @@
2318     pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__)
2319    
2320     /* Just a debugging messages not related to any specific UBI subsystem */
2321     -#define dbg_msg(fmt, ...) ubi_dbg_msg("msg", fmt, ##__VA_ARGS__)
2322     +#define dbg_msg(fmt, ...) \
2323     + printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
2324     + current->pid, __func__, ##__VA_ARGS__)
2325     +
2326     /* General debugging messages */
2327     #define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
2328     /* Messages from the eraseblock association sub-system */
2329     diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
2330     index fb7f19b..cd26da8 100644
2331     --- a/drivers/mtd/ubi/eba.c
2332     +++ b/drivers/mtd/ubi/eba.c
2333     @@ -1028,12 +1028,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
2334     * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
2335     * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
2336     * LEB is already locked, we just do not move it and return
2337     - * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later.
2338     + * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
2339     + * we do not know the reasons of the contention - it may be just a
2340     + * normal I/O on this LEB, so we want to re-try.
2341     */
2342     err = leb_write_trylock(ubi, vol_id, lnum);
2343     if (err) {
2344     dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
2345     - return MOVE_CANCEL_RACE;
2346     + return MOVE_RETRY;
2347     }
2348    
2349     /*
2350     diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
2351     index dc64c76..d51d75d 100644
2352     --- a/drivers/mtd/ubi/ubi.h
2353     +++ b/drivers/mtd/ubi/ubi.h
2354     @@ -120,6 +120,7 @@ enum {
2355     * PEB
2356     * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
2357     * target PEB
2358     + * MOVE_RETRY: retry scrubbing the PEB
2359     */
2360     enum {
2361     MOVE_CANCEL_RACE = 1,
2362     @@ -127,6 +128,7 @@ enum {
2363     MOVE_TARGET_RD_ERR,
2364     MOVE_TARGET_WR_ERR,
2365     MOVE_CANCEL_BITFLIPS,
2366     + MOVE_RETRY,
2367     };
2368    
2369     /**
2370     diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
2371     index 9ad18da..890754c 100644
2372     --- a/drivers/mtd/ubi/vtbl.c
2373     +++ b/drivers/mtd/ubi/vtbl.c
2374     @@ -306,7 +306,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
2375     int copy, void *vtbl)
2376     {
2377     int err, tries = 0;
2378     - static struct ubi_vid_hdr *vid_hdr;
2379     + struct ubi_vid_hdr *vid_hdr;
2380     struct ubi_scan_leb *new_seb;
2381    
2382     ubi_msg("create volume table (copy #%d)", copy + 1);
2383     diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
2384     index 42c684c..0696e36 100644
2385     --- a/drivers/mtd/ubi/wl.c
2386     +++ b/drivers/mtd/ubi/wl.c
2387     @@ -795,7 +795,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
2388     protect = 1;
2389     goto out_not_moved;
2390     }
2391     -
2392     + if (err == MOVE_RETRY) {
2393     + scrubbing = 1;
2394     + goto out_not_moved;
2395     + }
2396     if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
2397     err == MOVE_TARGET_RD_ERR) {
2398     /*
2399     @@ -1049,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
2400    
2401     ubi_err("failed to erase PEB %d, error %d", pnum, err);
2402     kfree(wl_wrk);
2403     - kmem_cache_free(ubi_wl_entry_slab, e);
2404    
2405     if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
2406     err == -EBUSY) {
2407     @@ -1062,14 +1064,16 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
2408     goto out_ro;
2409     }
2410     return err;
2411     - } else if (err != -EIO) {
2412     + }
2413     +
2414     + kmem_cache_free(ubi_wl_entry_slab, e);
2415     + if (err != -EIO)
2416     /*
2417     * If this is not %-EIO, we have no idea what to do. Scheduling
2418     * this physical eraseblock for erasure again would cause
2419     * errors again and again. Well, lets switch to R/O mode.
2420     */
2421     goto out_ro;
2422     - }
2423    
2424     /* It is %-EIO, the PEB went bad */
2425    
2426     diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
2427     index dd2625a..f5e063a 100644
2428     --- a/drivers/net/usb/asix.c
2429     +++ b/drivers/net/usb/asix.c
2430     @@ -974,6 +974,7 @@ static int ax88772_link_reset(struct usbnet *dev)
2431    
2432     static int ax88772_reset(struct usbnet *dev)
2433     {
2434     + struct asix_data *data = (struct asix_data *)&dev->data;
2435     int ret, embd_phy;
2436     u16 rx_ctl;
2437    
2438     @@ -1051,6 +1052,13 @@ static int ax88772_reset(struct usbnet *dev)
2439     goto out;
2440     }
2441    
2442     + /* Rewrite MAC address */
2443     + memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
2444     + ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
2445     + data->mac_addr);
2446     + if (ret < 0)
2447     + goto out;
2448     +
2449     /* Set RX_CTL to default values with 2k buffer, and enable cactus */
2450     ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
2451     if (ret < 0)
2452     @@ -1316,6 +1324,13 @@ static int ax88178_reset(struct usbnet *dev)
2453     if (ret < 0)
2454     return ret;
2455    
2456     + /* Rewrite MAC address */
2457     + memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
2458     + ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
2459     + data->mac_addr);
2460     + if (ret < 0)
2461     + return ret;
2462     +
2463     ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
2464     if (ret < 0)
2465     return ret;
2466     diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
2467     index ccde784..f5ae3c6 100644
2468     --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
2469     +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
2470     @@ -526,10 +526,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
2471     rxs->rs_status |= ATH9K_RXERR_DECRYPT;
2472     else if (rxsp->status11 & AR_MichaelErr)
2473     rxs->rs_status |= ATH9K_RXERR_MIC;
2474     - if (rxsp->status11 & AR_KeyMiss)
2475     - rxs->rs_status |= ATH9K_RXERR_KEYMISS;
2476     }
2477    
2478     + if (rxsp->status11 & AR_KeyMiss)
2479     + rxs->rs_status |= ATH9K_RXERR_KEYMISS;
2480     +
2481     return 0;
2482     }
2483     EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma);
2484     diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
2485     index 9953881..8ddef3e 100644
2486     --- a/drivers/net/wireless/ath/ath9k/calib.c
2487     +++ b/drivers/net/wireless/ath/ath9k/calib.c
2488     @@ -402,6 +402,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
2489     ah->noise = ath9k_hw_getchan_noise(ah, chan);
2490     return true;
2491     }
2492     +EXPORT_SYMBOL(ath9k_hw_getnf);
2493    
2494     void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
2495     struct ath9k_channel *chan)
2496     diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
2497     index ecdb6fd..bbcb777 100644
2498     --- a/drivers/net/wireless/ath/ath9k/mac.c
2499     +++ b/drivers/net/wireless/ath/ath9k/mac.c
2500     @@ -621,10 +621,11 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
2501     rs->rs_status |= ATH9K_RXERR_DECRYPT;
2502     else if (ads.ds_rxstatus8 & AR_MichaelErr)
2503     rs->rs_status |= ATH9K_RXERR_MIC;
2504     - if (ads.ds_rxstatus8 & AR_KeyMiss)
2505     - rs->rs_status |= ATH9K_RXERR_KEYMISS;
2506     }
2507    
2508     + if (ads.ds_rxstatus8 & AR_KeyMiss)
2509     + rs->rs_status |= ATH9K_RXERR_KEYMISS;
2510     +
2511     return 0;
2512     }
2513     EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
2514     diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
2515     index a9c5ae7..f76a814 100644
2516     --- a/drivers/net/wireless/ath/ath9k/main.c
2517     +++ b/drivers/net/wireless/ath/ath9k/main.c
2518     @@ -1667,7 +1667,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2519    
2520     if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2521     struct ieee80211_channel *curchan = hw->conf.channel;
2522     - struct ath9k_channel old_chan;
2523     int pos = curchan->hw_value;
2524     int old_pos = -1;
2525     unsigned long flags;
2526     @@ -1693,11 +1692,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2527     * Preserve the current channel values, before updating
2528     * the same channel
2529     */
2530     - if (old_pos == pos) {
2531     - memcpy(&old_chan, &sc->sc_ah->channels[pos],
2532     - sizeof(struct ath9k_channel));
2533     - ah->curchan = &old_chan;
2534     - }
2535     + if (ah->curchan && (old_pos == pos))
2536     + ath9k_hw_getnf(ah, ah->curchan);
2537    
2538     ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
2539     curchan, conf->channel_type);
2540     diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
2541     index b282d86..05f2ad1 100644
2542     --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
2543     +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
2544     @@ -2656,14 +2656,13 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2545     IWL_WARN(priv, "Invalid scan band\n");
2546     return -EIO;
2547     }
2548     -
2549     /*
2550     - * If active scaning is requested but a certain channel
2551     - * is marked passive, we can do active scanning if we
2552     - * detect transmissions.
2553     + * If active scaning is requested but a certain channel is marked
2554     + * passive, we can do active scanning if we detect transmissions. For
2555     + * passive only scanning disable switching to active on any channel.
2556     */
2557     scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
2558     - IWL_GOOD_CRC_TH_DISABLED;
2559     + IWL_GOOD_CRC_TH_NEVER;
2560    
2561     len = iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
2562     vif->addr, priv->scan_request->ie,
2563     diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
2564     index 1a52ed2..6465983 100644
2565     --- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
2566     +++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
2567     @@ -827,6 +827,7 @@ static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
2568     case IEEE80211_SMPS_STATIC:
2569     case IEEE80211_SMPS_DYNAMIC:
2570     return IWL_NUM_IDLE_CHAINS_SINGLE;
2571     + case IEEE80211_SMPS_AUTOMATIC:
2572     case IEEE80211_SMPS_OFF:
2573     return active_cnt;
2574     default:
2575     diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
2576     index 5c7c17c..d552fa3 100644
2577     --- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
2578     +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
2579     @@ -559,6 +559,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
2580    
2581     mutex_lock(&priv->shrd->mutex);
2582    
2583     + if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
2584     + goto out;
2585     +
2586     if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) {
2587     IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
2588     goto out;
2589     diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
2590     index da48c8a..837b460 100644
2591     --- a/drivers/net/wireless/rt2x00/rt2800pci.c
2592     +++ b/drivers/net/wireless/rt2x00/rt2800pci.c
2593     @@ -422,7 +422,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
2594     static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
2595     enum dev_state state)
2596     {
2597     - int mask = (state == STATE_RADIO_IRQ_ON);
2598     u32 reg;
2599     unsigned long flags;
2600    
2601     @@ -436,25 +435,14 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
2602     }
2603    
2604     spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
2605     - rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
2606     - rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
2607     - rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
2608     - rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
2609     - rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, 0);
2610     - rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, 0);
2611     - rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, 0);
2612     - rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, 0);
2613     - rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, 0);
2614     - rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, 0);
2615     - rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, 0);
2616     - rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, 0);
2617     - rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
2618     - rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
2619     - rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
2620     - rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
2621     - rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, 0);
2622     - rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
2623     - rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
2624     + reg = 0;
2625     + if (state == STATE_RADIO_IRQ_ON) {
2626     + rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
2627     + rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
2628     + rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
2629     + rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
2630     + rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
2631     + }
2632     rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
2633     spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
2634    
2635     diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
2636     index 6f91a14..3fda6b1 100644
2637     --- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
2638     +++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
2639     @@ -196,6 +196,8 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
2640     /* Allocate skb buffer to contain firmware */
2641     /* info and tx descriptor info. */
2642     skb = dev_alloc_skb(frag_length);
2643     + if (!skb)
2644     + return false;
2645     skb_reserve(skb, extra_descoffset);
2646     seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length -
2647     extra_descoffset));
2648     @@ -573,6 +575,8 @@ static bool _rtl92s_firmware_set_h2c_cmd(struct ieee80211_hw *hw, u8 h2c_cmd,
2649    
2650     len = _rtl92s_get_h2c_cmdlen(MAX_TRANSMIT_BUFFER_SIZE, 1, &cmd_len);
2651     skb = dev_alloc_skb(len);
2652     + if (!skb)
2653     + return false;
2654     cb_desc = (struct rtl_tcb_desc *)(skb->cb);
2655     cb_desc->queue_index = TXCMD_QUEUE;
2656     cb_desc->cmd_or_init = DESC_PACKET_TYPE_NORMAL;
2657     diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
2658     index 0e6d04d..e3efb43 100644
2659     --- a/drivers/pci/msi.c
2660     +++ b/drivers/pci/msi.c
2661     @@ -870,5 +870,15 @@ EXPORT_SYMBOL(pci_msi_enabled);
2662    
2663     void pci_msi_init_pci_dev(struct pci_dev *dev)
2664     {
2665     + int pos;
2666     INIT_LIST_HEAD(&dev->msi_list);
2667     +
2668     + /* Disable the msi hardware to avoid screaming interrupts
2669     + * during boot. This is the power on reset default so
2670     + * usually this should be a noop.
2671     + */
2672     + pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2673     + if (pos)
2674     + msi_set_enable(dev, pos, 0);
2675     + msix_set_enable(dev, 0);
2676     }
2677     diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
2678     index dfbd5a6..258fef2 100644
2679     --- a/drivers/pnp/quirks.c
2680     +++ b/drivers/pnp/quirks.c
2681     @@ -295,6 +295,45 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
2682     }
2683     }
2684    
2685     +#ifdef CONFIG_AMD_NB
2686     +
2687     +#include <asm/amd_nb.h>
2688     +
2689     +static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
2690     +{
2691     + resource_size_t start, end;
2692     + struct pnp_resource *pnp_res;
2693     + struct resource *res;
2694     + struct resource mmconfig_res, *mmconfig;
2695     +
2696     + mmconfig = amd_get_mmconfig_range(&mmconfig_res);
2697     + if (!mmconfig)
2698     + return;
2699     +
2700     + list_for_each_entry(pnp_res, &dev->resources, list) {
2701     + res = &pnp_res->res;
2702     + if (res->end < mmconfig->start || res->start > mmconfig->end ||
2703     + (res->start == mmconfig->start && res->end == mmconfig->end))
2704     + continue;
2705     +
2706     + dev_info(&dev->dev, FW_BUG
2707     + "%pR covers only part of AMD MMCONFIG area %pR; adding more reservations\n",
2708     + res, mmconfig);
2709     + if (mmconfig->start < res->start) {
2710     + start = mmconfig->start;
2711     + end = res->start - 1;
2712     + pnp_add_mem_resource(dev, start, end, 0);
2713     + }
2714     + if (mmconfig->end > res->end) {
2715     + start = res->end + 1;
2716     + end = mmconfig->end;
2717     + pnp_add_mem_resource(dev, start, end, 0);
2718     + }
2719     + break;
2720     + }
2721     +}
2722     +#endif
2723     +
2724     /*
2725     * PnP Quirks
2726     * Cards or devices that need some tweaking due to incomplete resource info
2727     @@ -322,6 +361,9 @@ static struct pnp_fixup pnp_fixups[] = {
2728     /* PnP resources that might overlap PCI BARs */
2729     {"PNP0c01", quirk_system_pci_resources},
2730     {"PNP0c02", quirk_system_pci_resources},
2731     +#ifdef CONFIG_AMD_NB
2732     + {"PNP0c01", quirk_amd_mmconfig_area},
2733     +#endif
2734     {""}
2735     };
2736    
2737     diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
2738     index 8e28625..8a1c031 100644
2739     --- a/drivers/rtc/interface.c
2740     +++ b/drivers/rtc/interface.c
2741     @@ -228,11 +228,11 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
2742     alarm->time.tm_hour = now.tm_hour;
2743    
2744     /* For simplicity, only support date rollover for now */
2745     - if (alarm->time.tm_mday == -1) {
2746     + if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
2747     alarm->time.tm_mday = now.tm_mday;
2748     missing = day;
2749     }
2750     - if (alarm->time.tm_mon == -1) {
2751     + if ((unsigned)alarm->time.tm_mon >= 12) {
2752     alarm->time.tm_mon = now.tm_mon;
2753     if (missing == none)
2754     missing = month;
2755     diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
2756     index beda04a..0794c72 100644
2757     --- a/drivers/scsi/mpt2sas/mpt2sas_base.c
2758     +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
2759     @@ -65,6 +65,8 @@ static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
2760    
2761     #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
2762    
2763     +#define MAX_HBA_QUEUE_DEPTH 30000
2764     +#define MAX_CHAIN_DEPTH 100000
2765     static int max_queue_depth = -1;
2766     module_param(max_queue_depth, int, 0);
2767     MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
2768     @@ -2311,8 +2313,6 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
2769     }
2770     if (ioc->chain_dma_pool)
2771     pci_pool_destroy(ioc->chain_dma_pool);
2772     - }
2773     - if (ioc->chain_lookup) {
2774     free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
2775     ioc->chain_lookup = NULL;
2776     }
2777     @@ -2330,9 +2330,7 @@ static int
2778     _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2779     {
2780     struct mpt2sas_facts *facts;
2781     - u32 queue_size, queue_diff;
2782     u16 max_sge_elements;
2783     - u16 num_of_reply_frames;
2784     u16 chains_needed_per_io;
2785     u32 sz, total_sz, reply_post_free_sz;
2786     u32 retry_sz;
2787     @@ -2359,7 +2357,8 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2788     max_request_credit = (max_queue_depth < facts->RequestCredit)
2789     ? max_queue_depth : facts->RequestCredit;
2790     else
2791     - max_request_credit = facts->RequestCredit;
2792     + max_request_credit = min_t(u16, facts->RequestCredit,
2793     + MAX_HBA_QUEUE_DEPTH);
2794    
2795     ioc->hba_queue_depth = max_request_credit;
2796     ioc->hi_priority_depth = facts->HighPriorityCredit;
2797     @@ -2400,50 +2399,25 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2798     }
2799     ioc->chains_needed_per_io = chains_needed_per_io;
2800    
2801     - /* reply free queue sizing - taking into account for events */
2802     - num_of_reply_frames = ioc->hba_queue_depth + 32;
2803     -
2804     - /* number of replies frames can't be a multiple of 16 */
2805     - /* decrease number of reply frames by 1 */
2806     - if (!(num_of_reply_frames % 16))
2807     - num_of_reply_frames--;
2808     -
2809     - /* calculate number of reply free queue entries
2810     - * (must be multiple of 16)
2811     - */
2812     -
2813     - /* (we know reply_free_queue_depth is not a multiple of 16) */
2814     - queue_size = num_of_reply_frames;
2815     - queue_size += 16 - (queue_size % 16);
2816     - ioc->reply_free_queue_depth = queue_size;
2817     -
2818     - /* reply descriptor post queue sizing */
2819     - /* this size should be the number of request frames + number of reply
2820     - * frames
2821     - */
2822     -
2823     - queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
2824     - /* round up to 16 byte boundary */
2825     - if (queue_size % 16)
2826     - queue_size += 16 - (queue_size % 16);
2827     -
2828     - /* check against IOC maximum reply post queue depth */
2829     - if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
2830     - queue_diff = queue_size -
2831     - facts->MaxReplyDescriptorPostQueueDepth;
2832     + /* reply free queue sizing - taking into account for 64 FW events */
2833     + ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
2834    
2835     - /* round queue_diff up to multiple of 16 */
2836     - if (queue_diff % 16)
2837     - queue_diff += 16 - (queue_diff % 16);
2838     -
2839     - /* adjust hba_queue_depth, reply_free_queue_depth,
2840     - * and queue_size
2841     - */
2842     - ioc->hba_queue_depth -= (queue_diff / 2);
2843     - ioc->reply_free_queue_depth -= (queue_diff / 2);
2844     - queue_size = facts->MaxReplyDescriptorPostQueueDepth;
2845     + /* align the reply post queue on the next 16 count boundary */
2846     + if (!ioc->reply_free_queue_depth % 16)
2847     + ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
2848     + else
2849     + ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
2850     + 32 - (ioc->reply_free_queue_depth % 16);
2851     + if (ioc->reply_post_queue_depth >
2852     + facts->MaxReplyDescriptorPostQueueDepth) {
2853     + ioc->reply_post_queue_depth = min_t(u16,
2854     + (facts->MaxReplyDescriptorPostQueueDepth -
2855     + (facts->MaxReplyDescriptorPostQueueDepth % 16)),
2856     + (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
2857     + ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
2858     + ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
2859     }
2860     - ioc->reply_post_queue_depth = queue_size;
2861     +
2862    
2863     dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
2864     "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
2865     @@ -2529,15 +2503,12 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2866     "depth(%d)\n", ioc->name, ioc->request,
2867     ioc->scsiio_depth));
2868    
2869     - /* loop till the allocation succeeds */
2870     - do {
2871     - sz = ioc->chain_depth * sizeof(struct chain_tracker);
2872     - ioc->chain_pages = get_order(sz);
2873     - ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
2874     - GFP_KERNEL, ioc->chain_pages);
2875     - if (ioc->chain_lookup == NULL)
2876     - ioc->chain_depth -= 100;
2877     - } while (ioc->chain_lookup == NULL);
2878     + ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
2879     + sz = ioc->chain_depth * sizeof(struct chain_tracker);
2880     + ioc->chain_pages = get_order(sz);
2881     +
2882     + ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
2883     + GFP_KERNEL, ioc->chain_pages);
2884     ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
2885     ioc->request_sz, 16, 0);
2886     if (!ioc->chain_dma_pool) {
2887     diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
2888     index d570573..9bc6fb2 100644
2889     --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
2890     +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
2891     @@ -1007,8 +1007,8 @@ _scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
2892     spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2893     if (list_empty(&ioc->free_chain_list)) {
2894     spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2895     - printk(MPT2SAS_WARN_FMT "chain buffers not available\n",
2896     - ioc->name);
2897     + dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "chain buffers not "
2898     + "available\n", ioc->name));
2899     return NULL;
2900     }
2901     chain_req = list_entry(ioc->free_chain_list.next,
2902     @@ -6714,6 +6714,7 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
2903     } else
2904     sas_target_priv_data = NULL;
2905     raid_device->responding = 1;
2906     + spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2907     starget_printk(KERN_INFO, raid_device->starget,
2908     "handle(0x%04x), wwid(0x%016llx)\n", handle,
2909     (unsigned long long)raid_device->wwid);
2910     @@ -6724,16 +6725,16 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
2911     */
2912     _scsih_init_warpdrive_properties(ioc, raid_device);
2913     if (raid_device->handle == handle)
2914     - goto out;
2915     + return;
2916     printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
2917     raid_device->handle);
2918     raid_device->handle = handle;
2919     if (sas_target_priv_data)
2920     sas_target_priv_data->handle = handle;
2921     - goto out;
2922     + return;
2923     }
2924     }
2925     - out:
2926     +
2927     spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2928     }
2929    
2930     diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2931     index fa3a591..4b63c73 100644
2932     --- a/drivers/scsi/sd.c
2933     +++ b/drivers/scsi/sd.c
2934     @@ -1074,6 +1074,10 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
2935     SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
2936     "cmd=0x%x\n", disk->disk_name, cmd));
2937    
2938     + error = scsi_verify_blk_ioctl(bdev, cmd);
2939     + if (error < 0)
2940     + return error;
2941     +
2942     /*
2943     * If we are in the middle of error recovery, don't let anyone
2944     * else try and use this device. Also, if error recovery fails, it
2945     @@ -1096,7 +1100,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
2946     error = scsi_ioctl(sdp, cmd, p);
2947     break;
2948     default:
2949     - error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p);
2950     + error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
2951     if (error != -ENOTTY)
2952     break;
2953     error = scsi_ioctl(sdp, cmd, p);
2954     @@ -1266,6 +1270,11 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
2955     unsigned int cmd, unsigned long arg)
2956     {
2957     struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
2958     + int ret;
2959     +
2960     + ret = scsi_verify_blk_ioctl(bdev, cmd);
2961     + if (ret < 0)
2962     + return -ENOIOCTLCMD;
2963    
2964     /*
2965     * If we are in the middle of error recovery, don't let anyone
2966     @@ -1277,8 +1286,6 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
2967     return -ENODEV;
2968    
2969     if (sdev->host->hostt->compat_ioctl) {
2970     - int ret;
2971     -
2972     ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
2973    
2974     return ret;
2975     diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
2976     index b4543f5..36d1ed7 100644
2977     --- a/drivers/scsi/sym53c8xx_2/sym_glue.c
2978     +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
2979     @@ -839,6 +839,10 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
2980     struct sym_lcb *lp = sym_lp(tp, sdev->lun);
2981     unsigned long flags;
2982    
2983     + /* if slave_alloc returned before allocating a sym_lcb, return */
2984     + if (!lp)
2985     + return;
2986     +
2987     spin_lock_irqsave(np->s.host->host_lock, flags);
2988    
2989     if (lp->busy_itlq || lp->busy_itl) {
2990     diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
2991     index 831468b..2e8c1be 100644
2992     --- a/drivers/target/target_core_cdb.c
2993     +++ b/drivers/target/target_core_cdb.c
2994     @@ -94,6 +94,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
2995     buf[2] = dev->transport->get_device_rev(dev);
2996    
2997     /*
2998     + * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
2999     + *
3000     + * SPC4 says:
3001     + * A RESPONSE DATA FORMAT field set to 2h indicates that the
3002     + * standard INQUIRY data is in the format defined in this
3003     + * standard. Response data format values less than 2h are
3004     + * obsolete. Response data format values greater than 2h are
3005     + * reserved.
3006     + */
3007     + buf[3] = 2;
3008     +
3009     + /*
3010     * Enable SCCS and TPGS fields for Emulated ALUA
3011     */
3012     if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
3013     diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
3014     index 0257658..e87d0eb 100644
3015     --- a/drivers/target/target_core_transport.c
3016     +++ b/drivers/target/target_core_transport.c
3017     @@ -4353,6 +4353,7 @@ int transport_send_check_condition_and_sense(
3018     case TCM_NON_EXISTENT_LUN:
3019     /* CURRENT ERROR */
3020     buffer[offset] = 0x70;
3021     + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3022     /* ILLEGAL REQUEST */
3023     buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
3024     /* LOGICAL UNIT NOT SUPPORTED */
3025     @@ -4362,6 +4363,7 @@ int transport_send_check_condition_and_sense(
3026     case TCM_SECTOR_COUNT_TOO_MANY:
3027     /* CURRENT ERROR */
3028     buffer[offset] = 0x70;
3029     + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3030     /* ILLEGAL REQUEST */
3031     buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
3032     /* INVALID COMMAND OPERATION CODE */
3033     @@ -4370,6 +4372,7 @@ int transport_send_check_condition_and_sense(
3034     case TCM_UNKNOWN_MODE_PAGE:
3035     /* CURRENT ERROR */
3036     buffer[offset] = 0x70;
3037     + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3038     /* ILLEGAL REQUEST */
3039     buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
3040     /* INVALID FIELD IN CDB */
3041     @@ -4378,6 +4381,7 @@ int transport_send_check_condition_and_sense(
3042     case TCM_CHECK_CONDITION_ABORT_CMD:
3043     /* CURRENT ERROR */
3044     buffer[offset] = 0x70;
3045     + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3046     /* ABORTED COMMAND */
3047     buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3048     /* BUS DEVICE RESET FUNCTION OCCURRED */
3049     @@ -4387,6 +4391,7 @@ int transport_send_check_condition_and_sense(
3050     case TCM_INCORRECT_AMOUNT_OF_DATA:
3051     /* CURRENT ERROR */
3052     buffer[offset] = 0x70;
3053     + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3054     /* ABORTED COMMAND */
3055     buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3056     /* WRITE ERROR */
3057     @@ -4397,6 +4402,7 @@ int transport_send_check_condition_and_sense(
3058     case TCM_INVALID_CDB_FIELD:
3059     /* CURRENT ERROR */
3060     buffer[offset] = 0x70;
3061     + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3062     /* ABORTED COMMAND */
3063     buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3064     /* INVALID FIELD IN CDB */
3065     @@ -4405,6 +4411,7 @@ int transport_send_check_condition_and_sense(
3066     case TCM_INVALID_PARAMETER_LIST:
3067     /* CURRENT ERROR */
3068     buffer[offset] = 0x70;
3069     + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3070     /* ABORTED COMMAND */
3071     buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3072     /* INVALID FIELD IN PARAMETER LIST */
3073     @@ -4413,6 +4420,7 @@ int transport_send_check_condition_and_sense(
3074     case TCM_UNEXPECTED_UNSOLICITED_DATA:
3075     /* CURRENT ERROR */
3076     buffer[offset] = 0x70;
3077     + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3078     /* ABORTED COMMAND */
3079     buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3080     /* WRITE ERROR */
3081     @@ -4423,6 +4431,7 @@ int transport_send_check_condition_and_sense(
3082     case TCM_SERVICE_CRC_ERROR:
3083     /* CURRENT ERROR */
3084     buffer[offset] = 0x70;
3085     + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3086     /* ABORTED COMMAND */
3087     buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3088     /* PROTOCOL SERVICE CRC ERROR */
3089     @@ -4433,6 +4442,7 @@ int transport_send_check_condition_and_sense(
3090     case TCM_SNACK_REJECTED:
3091     /* CURRENT ERROR */
3092     buffer[offset] = 0x70;
3093     + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3094     /* ABORTED COMMAND */
3095     buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3096     /* READ ERROR */
3097     @@ -4443,6 +4453,7 @@ int transport_send_check_condition_and_sense(
3098     case TCM_WRITE_PROTECTED:
3099     /* CURRENT ERROR */
3100     buffer[offset] = 0x70;
3101     + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3102     /* DATA PROTECT */
3103     buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
3104     /* WRITE PROTECTED */
3105     @@ -4451,6 +4462,7 @@ int transport_send_check_condition_and_sense(
3106     case TCM_CHECK_CONDITION_UNIT_ATTENTION:
3107     /* CURRENT ERROR */
3108     buffer[offset] = 0x70;
3109     + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3110     /* UNIT ATTENTION */
3111     buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
3112     core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
3113     @@ -4460,6 +4472,7 @@ int transport_send_check_condition_and_sense(
3114     case TCM_CHECK_CONDITION_NOT_READY:
3115     /* CURRENT ERROR */
3116     buffer[offset] = 0x70;
3117     + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3118     /* Not Ready */
3119     buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
3120     transport_get_sense_codes(cmd, &asc, &ascq);
3121     @@ -4470,6 +4483,7 @@ int transport_send_check_condition_and_sense(
3122     default:
3123     /* CURRENT ERROR */
3124     buffer[offset] = 0x70;
3125     + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3126     /* ILLEGAL REQUEST */
3127     buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
3128     /* LOGICAL UNIT COMMUNICATION FAILURE */
3129     diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
3130     index ede860f..a580b17 100644
3131     --- a/drivers/xen/xenbus/xenbus_xs.c
3132     +++ b/drivers/xen/xenbus/xenbus_xs.c
3133     @@ -801,6 +801,12 @@ static int process_msg(void)
3134     goto out;
3135     }
3136    
3137     + if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
3138     + kfree(msg);
3139     + err = -EINVAL;
3140     + goto out;
3141     + }
3142     +
3143     body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
3144     if (body == NULL) {
3145     kfree(msg);
3146     diff --git a/fs/aio.c b/fs/aio.c
3147     index 78c514c..969beb0 100644
3148     --- a/fs/aio.c
3149     +++ b/fs/aio.c
3150     @@ -476,14 +476,21 @@ static void kiocb_batch_init(struct kiocb_batch *batch, long total)
3151     batch->count = total;
3152     }
3153    
3154     -static void kiocb_batch_free(struct kiocb_batch *batch)
3155     +static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
3156     {
3157     struct kiocb *req, *n;
3158    
3159     + if (list_empty(&batch->head))
3160     + return;
3161     +
3162     + spin_lock_irq(&ctx->ctx_lock);
3163     list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
3164     list_del(&req->ki_batch);
3165     + list_del(&req->ki_list);
3166     kmem_cache_free(kiocb_cachep, req);
3167     + ctx->reqs_active--;
3168     }
3169     + spin_unlock_irq(&ctx->ctx_lock);
3170     }
3171    
3172     /*
3173     @@ -1742,7 +1749,7 @@ long do_io_submit(aio_context_t ctx_id, long nr,
3174     }
3175     blk_finish_plug(&plug);
3176    
3177     - kiocb_batch_free(&batch);
3178     + kiocb_batch_free(ctx, &batch);
3179     put_ioctx(ctx);
3180     return i ? i : ret;
3181     }
3182     diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3183     index f3670cf..63e4be4 100644
3184     --- a/fs/cifs/connect.c
3185     +++ b/fs/cifs/connect.c
3186     @@ -2914,18 +2914,33 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
3187     #define CIFS_DEFAULT_IOSIZE (1024 * 1024)
3188    
3189     /*
3190     - * Windows only supports a max of 60k reads. Default to that when posix
3191     - * extensions aren't in force.
3192     + * Windows only supports a max of 60kb reads and 65535 byte writes. Default to
3193     + * those values when posix extensions aren't in force. In actuality here, we
3194     + * use 65536 to allow for a write that is a multiple of 4k. Most servers seem
3195     + * to be ok with the extra byte even though Windows doesn't send writes that
3196     + * are that large.
3197     + *
3198     + * Citation:
3199     + *
3200     + * http://blogs.msdn.com/b/openspecification/archive/2009/04/10/smb-maximum-transmit-buffer-size-and-performance-tuning.aspx
3201     */
3202     #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
3203     +#define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
3204    
3205     static unsigned int
3206     cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
3207     {
3208     __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
3209     struct TCP_Server_Info *server = tcon->ses->server;
3210     - unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize :
3211     - CIFS_DEFAULT_IOSIZE;
3212     + unsigned int wsize;
3213     +
3214     + /* start with specified wsize, or default */
3215     + if (pvolume_info->wsize)
3216     + wsize = pvolume_info->wsize;
3217     + else if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
3218     + wsize = CIFS_DEFAULT_IOSIZE;
3219     + else
3220     + wsize = CIFS_DEFAULT_NON_POSIX_WSIZE;
3221    
3222     /* can server support 24-bit write sizes? (via UNIX extensions) */
3223     if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
3224     diff --git a/fs/dcache.c b/fs/dcache.c
3225     index 89509b5..f7908ae 100644
3226     --- a/fs/dcache.c
3227     +++ b/fs/dcache.c
3228     @@ -242,6 +242,7 @@ static void dentry_lru_add(struct dentry *dentry)
3229     static void __dentry_lru_del(struct dentry *dentry)
3230     {
3231     list_del_init(&dentry->d_lru);
3232     + dentry->d_flags &= ~DCACHE_SHRINK_LIST;
3233     dentry->d_sb->s_nr_dentry_unused--;
3234     dentry_stat.nr_unused--;
3235     }
3236     @@ -275,15 +276,15 @@ static void dentry_lru_prune(struct dentry *dentry)
3237     }
3238     }
3239    
3240     -static void dentry_lru_move_tail(struct dentry *dentry)
3241     +static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
3242     {
3243     spin_lock(&dcache_lru_lock);
3244     if (list_empty(&dentry->d_lru)) {
3245     - list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
3246     + list_add_tail(&dentry->d_lru, list);
3247     dentry->d_sb->s_nr_dentry_unused++;
3248     dentry_stat.nr_unused++;
3249     } else {
3250     - list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
3251     + list_move_tail(&dentry->d_lru, list);
3252     }
3253     spin_unlock(&dcache_lru_lock);
3254     }
3255     @@ -769,14 +770,18 @@ static void shrink_dentry_list(struct list_head *list)
3256     }
3257    
3258     /**
3259     - * __shrink_dcache_sb - shrink the dentry LRU on a given superblock
3260     - * @sb: superblock to shrink dentry LRU.
3261     - * @count: number of entries to prune
3262     - * @flags: flags to control the dentry processing
3263     + * prune_dcache_sb - shrink the dcache
3264     + * @sb: superblock
3265     + * @count: number of entries to try to free
3266     + *
3267     + * Attempt to shrink the superblock dcache LRU by @count entries. This is
3268     + * done when we need more memory an called from the superblock shrinker
3269     + * function.
3270     *
3271     - * If flags contains DCACHE_REFERENCED reference dentries will not be pruned.
3272     + * This function may fail to free any resources if all the dentries are in
3273     + * use.
3274     */
3275     -static void __shrink_dcache_sb(struct super_block *sb, int count, int flags)
3276     +void prune_dcache_sb(struct super_block *sb, int count)
3277     {
3278     struct dentry *dentry;
3279     LIST_HEAD(referenced);
3280     @@ -795,18 +800,13 @@ relock:
3281     goto relock;
3282     }
3283    
3284     - /*
3285     - * If we are honouring the DCACHE_REFERENCED flag and the
3286     - * dentry has this flag set, don't free it. Clear the flag
3287     - * and put it back on the LRU.
3288     - */
3289     - if (flags & DCACHE_REFERENCED &&
3290     - dentry->d_flags & DCACHE_REFERENCED) {
3291     + if (dentry->d_flags & DCACHE_REFERENCED) {
3292     dentry->d_flags &= ~DCACHE_REFERENCED;
3293     list_move(&dentry->d_lru, &referenced);
3294     spin_unlock(&dentry->d_lock);
3295     } else {
3296     list_move_tail(&dentry->d_lru, &tmp);
3297     + dentry->d_flags |= DCACHE_SHRINK_LIST;
3298     spin_unlock(&dentry->d_lock);
3299     if (!--count)
3300     break;
3301     @@ -821,23 +821,6 @@ relock:
3302     }
3303    
3304     /**
3305     - * prune_dcache_sb - shrink the dcache
3306     - * @sb: superblock
3307     - * @nr_to_scan: number of entries to try to free
3308     - *
3309     - * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
3310     - * done when we need more memory an called from the superblock shrinker
3311     - * function.
3312     - *
3313     - * This function may fail to free any resources if all the dentries are in
3314     - * use.
3315     - */
3316     -void prune_dcache_sb(struct super_block *sb, int nr_to_scan)
3317     -{
3318     - __shrink_dcache_sb(sb, nr_to_scan, DCACHE_REFERENCED);
3319     -}
3320     -
3321     -/**
3322     * shrink_dcache_sb - shrink dcache for a superblock
3323     * @sb: superblock
3324     *
3325     @@ -1091,7 +1074,7 @@ EXPORT_SYMBOL(have_submounts);
3326     * drop the lock and return early due to latency
3327     * constraints.
3328     */
3329     -static int select_parent(struct dentry * parent)
3330     +static int select_parent(struct dentry *parent, struct list_head *dispose)
3331     {
3332     struct dentry *this_parent;
3333     struct list_head *next;
3334     @@ -1113,17 +1096,21 @@ resume:
3335    
3336     spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3337    
3338     - /*
3339     - * move only zero ref count dentries to the end
3340     - * of the unused list for prune_dcache
3341     + /*
3342     + * move only zero ref count dentries to the dispose list.
3343     + *
3344     + * Those which are presently on the shrink list, being processed
3345     + * by shrink_dentry_list(), shouldn't be moved. Otherwise the
3346     + * loop in shrink_dcache_parent() might not make any progress
3347     + * and loop forever.
3348     */
3349     - if (!dentry->d_count) {
3350     - dentry_lru_move_tail(dentry);
3351     - found++;
3352     - } else {
3353     + if (dentry->d_count) {
3354     dentry_lru_del(dentry);
3355     + } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
3356     + dentry_lru_move_list(dentry, dispose);
3357     + dentry->d_flags |= DCACHE_SHRINK_LIST;
3358     + found++;
3359     }
3360     -
3361     /*
3362     * We can return to the caller if we have found some (this
3363     * ensures forward progress). We'll be coming back to find
3364     @@ -1180,14 +1167,13 @@ rename_retry:
3365     *
3366     * Prune the dcache to remove unused children of the parent dentry.
3367     */
3368     -
3369     void shrink_dcache_parent(struct dentry * parent)
3370     {
3371     - struct super_block *sb = parent->d_sb;
3372     + LIST_HEAD(dispose);
3373     int found;
3374    
3375     - while ((found = select_parent(parent)) != 0)
3376     - __shrink_dcache_sb(sb, found, 0);
3377     + while ((found = select_parent(parent, &dispose)) != 0)
3378     + shrink_dentry_list(&dispose);
3379     }
3380     EXPORT_SYMBOL(shrink_dcache_parent);
3381    
3382     diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
3383     index a567968..ab25f57 100644
3384     --- a/fs/ext4/ioctl.c
3385     +++ b/fs/ext4/ioctl.c
3386     @@ -182,19 +182,22 @@ setversion_out:
3387     if (err)
3388     return err;
3389    
3390     - if (get_user(n_blocks_count, (__u32 __user *)arg))
3391     - return -EFAULT;
3392     + if (get_user(n_blocks_count, (__u32 __user *)arg)) {
3393     + err = -EFAULT;
3394     + goto group_extend_out;
3395     + }
3396    
3397     if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3398     EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
3399     ext4_msg(sb, KERN_ERR,
3400     "Online resizing not supported with bigalloc");
3401     - return -EOPNOTSUPP;
3402     + err = -EOPNOTSUPP;
3403     + goto group_extend_out;
3404     }
3405    
3406     err = mnt_want_write(filp->f_path.mnt);
3407     if (err)
3408     - return err;
3409     + goto group_extend_out;
3410    
3411     err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
3412     if (EXT4_SB(sb)->s_journal) {
3413     @@ -204,9 +207,10 @@ setversion_out:
3414     }
3415     if (err == 0)
3416     err = err2;
3417     +
3418     mnt_drop_write(filp->f_path.mnt);
3419     +group_extend_out:
3420     ext4_resize_end(sb);
3421     -
3422     return err;
3423     }
3424    
3425     @@ -267,19 +271,22 @@ mext_out:
3426     return err;
3427    
3428     if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
3429     - sizeof(input)))
3430     - return -EFAULT;
3431     + sizeof(input))) {
3432     + err = -EFAULT;
3433     + goto group_add_out;
3434     + }
3435    
3436     if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3437     EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
3438     ext4_msg(sb, KERN_ERR,
3439     "Online resizing not supported with bigalloc");
3440     - return -EOPNOTSUPP;
3441     + err = -EOPNOTSUPP;
3442     + goto group_add_out;
3443     }
3444    
3445     err = mnt_want_write(filp->f_path.mnt);
3446     if (err)
3447     - return err;
3448     + goto group_add_out;
3449    
3450     err = ext4_group_add(sb, &input);
3451     if (EXT4_SB(sb)->s_journal) {
3452     @@ -289,9 +296,10 @@ mext_out:
3453     }
3454     if (err == 0)
3455     err = err2;
3456     +
3457     mnt_drop_write(filp->f_path.mnt);
3458     +group_add_out:
3459     ext4_resize_end(sb);
3460     -
3461     return err;
3462     }
3463    
3464     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3465     index 3e1329e..9281dbe 100644
3466     --- a/fs/ext4/super.c
3467     +++ b/fs/ext4/super.c
3468     @@ -2006,17 +2006,16 @@ static int ext4_fill_flex_info(struct super_block *sb)
3469     struct ext4_group_desc *gdp = NULL;
3470     ext4_group_t flex_group_count;
3471     ext4_group_t flex_group;
3472     - int groups_per_flex = 0;
3473     + unsigned int groups_per_flex = 0;
3474     size_t size;
3475     int i;
3476    
3477     sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
3478     - groups_per_flex = 1 << sbi->s_log_groups_per_flex;
3479     -
3480     - if (groups_per_flex < 2) {
3481     + if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
3482     sbi->s_log_groups_per_flex = 0;
3483     return 1;
3484     }
3485     + groups_per_flex = 1 << sbi->s_log_groups_per_flex;
3486    
3487     /* We allocate both existing and potentially added groups */
3488     flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
3489     diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
3490     index 281ae95..3db6b82 100644
3491     --- a/fs/nfs/blocklayout/blocklayout.c
3492     +++ b/fs/nfs/blocklayout/blocklayout.c
3493     @@ -146,14 +146,19 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
3494     {
3495     struct bio *bio;
3496    
3497     + npg = min(npg, BIO_MAX_PAGES);
3498     bio = bio_alloc(GFP_NOIO, npg);
3499     - if (!bio)
3500     - return NULL;
3501     + if (!bio && (current->flags & PF_MEMALLOC)) {
3502     + while (!bio && (npg /= 2))
3503     + bio = bio_alloc(GFP_NOIO, npg);
3504     + }
3505    
3506     - bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
3507     - bio->bi_bdev = be->be_mdev;
3508     - bio->bi_end_io = end_io;
3509     - bio->bi_private = par;
3510     + if (bio) {
3511     + bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
3512     + bio->bi_bdev = be->be_mdev;
3513     + bio->bi_end_io = end_io;
3514     + bio->bi_private = par;
3515     + }
3516     return bio;
3517     }
3518    
3519     @@ -779,16 +784,13 @@ bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
3520     static void free_blk_mountid(struct block_mount_id *mid)
3521     {
3522     if (mid) {
3523     - struct pnfs_block_dev *dev;
3524     - spin_lock(&mid->bm_lock);
3525     - while (!list_empty(&mid->bm_devlist)) {
3526     - dev = list_first_entry(&mid->bm_devlist,
3527     - struct pnfs_block_dev,
3528     - bm_node);
3529     + struct pnfs_block_dev *dev, *tmp;
3530     +
3531     + /* No need to take bm_lock as we are last user freeing bm_devlist */
3532     + list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
3533     list_del(&dev->bm_node);
3534     bl_free_block_dev(dev);
3535     }
3536     - spin_unlock(&mid->bm_lock);
3537     kfree(mid);
3538     }
3539     }
3540     diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
3541     index 19fa7b0..c69682a 100644
3542     --- a/fs/nfs/blocklayout/extents.c
3543     +++ b/fs/nfs/blocklayout/extents.c
3544     @@ -139,11 +139,13 @@ static int _set_range(struct my_tree *tree, int32_t tag, u64 s, u64 length)
3545     }
3546    
3547     /* Ensure that future operations on given range of tree will not malloc */
3548     -static int _preload_range(struct my_tree *tree, u64 offset, u64 length)
3549     +static int _preload_range(struct pnfs_inval_markings *marks,
3550     + u64 offset, u64 length)
3551     {
3552     u64 start, end, s;
3553     int count, i, used = 0, status = -ENOMEM;
3554     struct pnfs_inval_tracking **storage;
3555     + struct my_tree *tree = &marks->im_tree;
3556    
3557     dprintk("%s(%llu, %llu) enter\n", __func__, offset, length);
3558     start = normalize(offset, tree->mtt_step_size);
3559     @@ -161,12 +163,11 @@ static int _preload_range(struct my_tree *tree, u64 offset, u64 length)
3560     goto out_cleanup;
3561     }
3562    
3563     - /* Now need lock - HOW??? */
3564     -
3565     + spin_lock(&marks->im_lock);
3566     for (s = start; s < end; s += tree->mtt_step_size)
3567     used += _add_entry(tree, s, INTERNAL_EXISTS, storage[used]);
3568     + spin_unlock(&marks->im_lock);
3569    
3570     - /* Unlock - HOW??? */
3571     status = 0;
3572    
3573     out_cleanup:
3574     @@ -286,7 +287,7 @@ int bl_mark_sectors_init(struct pnfs_inval_markings *marks,
3575    
3576     start = normalize(offset, marks->im_block_size);
3577     end = normalize_up(offset + length, marks->im_block_size);
3578     - if (_preload_range(&marks->im_tree, start, end - start))
3579     + if (_preload_range(marks, start, end - start))
3580     goto outerr;
3581    
3582     spin_lock(&marks->im_lock);
3583     diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
3584     index 43926ad..54cea8a 100644
3585     --- a/fs/nfs/callback_proc.c
3586     +++ b/fs/nfs/callback_proc.c
3587     @@ -339,7 +339,7 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
3588     dprintk("%s enter. slotid %d seqid %d\n",
3589     __func__, args->csa_slotid, args->csa_sequenceid);
3590    
3591     - if (args->csa_slotid > NFS41_BC_MAX_CALLBACKS)
3592     + if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
3593     return htonl(NFS4ERR_BADSLOT);
3594    
3595     slot = tbl->slots + args->csa_slotid;
3596     diff --git a/fs/nfs/file.c b/fs/nfs/file.c
3597     index 606ef0f..c43a452 100644
3598     --- a/fs/nfs/file.c
3599     +++ b/fs/nfs/file.c
3600     @@ -272,13 +272,13 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
3601     datasync);
3602    
3603     ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
3604     - if (ret)
3605     - return ret;
3606     mutex_lock(&inode->i_mutex);
3607    
3608     nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
3609     have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
3610     status = nfs_commit_inode(inode, FLUSH_SYNC);
3611     + if (status >= 0 && ret < 0)
3612     + status = ret;
3613     have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
3614     if (have_error)
3615     ret = xchg(&ctx->error, 0);
3616     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3617     index d9f4d78..055d702 100644
3618     --- a/fs/nfs/nfs4proc.c
3619     +++ b/fs/nfs/nfs4proc.c
3620     @@ -3430,19 +3430,6 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server)
3621     */
3622     #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
3623    
3624     -static void buf_to_pages(const void *buf, size_t buflen,
3625     - struct page **pages, unsigned int *pgbase)
3626     -{
3627     - const void *p = buf;
3628     -
3629     - *pgbase = offset_in_page(buf);
3630     - p -= *pgbase;
3631     - while (p < buf + buflen) {
3632     - *(pages++) = virt_to_page(p);
3633     - p += PAGE_CACHE_SIZE;
3634     - }
3635     -}
3636     -
3637     static int buf_to_pages_noslab(const void *buf, size_t buflen,
3638     struct page **pages, unsigned int *pgbase)
3639     {
3640     @@ -3539,9 +3526,19 @@ out:
3641     nfs4_set_cached_acl(inode, acl);
3642     }
3643    
3644     +/*
3645     + * The getxattr API returns the required buffer length when called with a
3646     + * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
3647     + * the required buf. On a NULL buf, we send a page of data to the server
3648     + * guessing that the ACL request can be serviced by a page. If so, we cache
3649     + * up to the page of ACL data, and the 2nd call to getxattr is serviced by
3650     + * the cache. If not so, we throw away the page, and cache the required
3651     + * length. The next getxattr call will then produce another round trip to
3652     + * the server, this time with the input buf of the required size.
3653     + */
3654     static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3655     {
3656     - struct page *pages[NFS4ACL_MAXPAGES];
3657     + struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
3658     struct nfs_getaclargs args = {
3659     .fh = NFS_FH(inode),
3660     .acl_pages = pages,
3661     @@ -3556,41 +3553,60 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
3662     .rpc_argp = &args,
3663     .rpc_resp = &res,
3664     };
3665     - struct page *localpage = NULL;
3666     - int ret;
3667     + int ret = -ENOMEM, npages, i, acl_len = 0;
3668    
3669     - if (buflen < PAGE_SIZE) {
3670     - /* As long as we're doing a round trip to the server anyway,
3671     - * let's be prepared for a page of acl data. */
3672     - localpage = alloc_page(GFP_KERNEL);
3673     - resp_buf = page_address(localpage);
3674     - if (localpage == NULL)
3675     - return -ENOMEM;
3676     - args.acl_pages[0] = localpage;
3677     - args.acl_pgbase = 0;
3678     - args.acl_len = PAGE_SIZE;
3679     - } else {
3680     - resp_buf = buf;
3681     - buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
3682     + npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3683     + /* As long as we're doing a round trip to the server anyway,
3684     + * let's be prepared for a page of acl data. */
3685     + if (npages == 0)
3686     + npages = 1;
3687     +
3688     + for (i = 0; i < npages; i++) {
3689     + pages[i] = alloc_page(GFP_KERNEL);
3690     + if (!pages[i])
3691     + goto out_free;
3692     + }
3693     + if (npages > 1) {
3694     + /* for decoding across pages */
3695     + args.acl_scratch = alloc_page(GFP_KERNEL);
3696     + if (!args.acl_scratch)
3697     + goto out_free;
3698     }
3699     - ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
3700     + args.acl_len = npages * PAGE_SIZE;
3701     + args.acl_pgbase = 0;
3702     + /* Let decode_getfacl know not to fail if the ACL data is larger than
3703     + * the page we send as a guess */
3704     + if (buf == NULL)
3705     + res.acl_flags |= NFS4_ACL_LEN_REQUEST;
3706     + resp_buf = page_address(pages[0]);
3707     +
3708     + dprintk("%s buf %p buflen %ld npages %d args.acl_len %ld\n",
3709     + __func__, buf, buflen, npages, args.acl_len);
3710     + ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
3711     + &msg, &args.seq_args, &res.seq_res, 0);
3712     if (ret)
3713     goto out_free;
3714     - if (res.acl_len > args.acl_len)
3715     - nfs4_write_cached_acl(inode, NULL, res.acl_len);
3716     +
3717     + acl_len = res.acl_len - res.acl_data_offset;
3718     + if (acl_len > args.acl_len)
3719     + nfs4_write_cached_acl(inode, NULL, acl_len);
3720     else
3721     - nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
3722     + nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
3723     + acl_len);
3724     if (buf) {
3725     ret = -ERANGE;
3726     - if (res.acl_len > buflen)
3727     + if (acl_len > buflen)
3728     goto out_free;
3729     - if (localpage)
3730     - memcpy(buf, resp_buf, res.acl_len);
3731     + _copy_from_pages(buf, pages, res.acl_data_offset,
3732     + res.acl_len);
3733     }
3734     - ret = res.acl_len;
3735     + ret = acl_len;
3736     out_free:
3737     - if (localpage)
3738     - __free_page(localpage);
3739     + for (i = 0; i < npages; i++)
3740     + if (pages[i])
3741     + __free_page(pages[i]);
3742     + if (args.acl_scratch)
3743     + __free_page(args.acl_scratch);
3744     return ret;
3745     }
3746    
3747     @@ -3621,6 +3637,8 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
3748     nfs_zap_acl_cache(inode);
3749     ret = nfs4_read_cached_acl(inode, buf, buflen);
3750     if (ret != -ENOENT)
3751     + /* -ENOENT is returned if there is no ACL or if there is an ACL
3752     + * but no cached acl data, just the acl length */
3753     return ret;
3754     return nfs4_get_acl_uncached(inode, buf, buflen);
3755     }
3756     diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
3757     index e6161b2..dcaf693 100644
3758     --- a/fs/nfs/nfs4xdr.c
3759     +++ b/fs/nfs/nfs4xdr.c
3760     @@ -2517,11 +2517,13 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
3761     encode_compound_hdr(xdr, req, &hdr);
3762     encode_sequence(xdr, &args->seq_args, &hdr);
3763     encode_putfh(xdr, args->fh, &hdr);
3764     - replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
3765     + replen = hdr.replen + op_decode_hdr_maxsz + 1;
3766     encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
3767    
3768     xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
3769     args->acl_pages, args->acl_pgbase, args->acl_len);
3770     + xdr_set_scratch_buffer(xdr, page_address(args->acl_scratch), PAGE_SIZE);
3771     +
3772     encode_nops(&hdr);
3773     }
3774    
3775     @@ -4957,17 +4959,18 @@ decode_restorefh(struct xdr_stream *xdr)
3776     }
3777    
3778     static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
3779     - size_t *acl_len)
3780     + struct nfs_getaclres *res)
3781     {
3782     - __be32 *savep;
3783     + __be32 *savep, *bm_p;
3784     uint32_t attrlen,
3785     bitmap[3] = {0};
3786     struct kvec *iov = req->rq_rcv_buf.head;
3787     int status;
3788    
3789     - *acl_len = 0;
3790     + res->acl_len = 0;
3791     if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
3792     goto out;
3793     + bm_p = xdr->p;
3794     if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
3795     goto out;
3796     if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
3797     @@ -4979,18 +4982,30 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
3798     size_t hdrlen;
3799     u32 recvd;
3800    
3801     + /* The bitmap (xdr len + bitmaps) and the attr xdr len words
3802     + * are stored with the acl data to handle the problem of
3803     + * variable length bitmaps.*/
3804     + xdr->p = bm_p;
3805     + res->acl_data_offset = be32_to_cpup(bm_p) + 2;
3806     + res->acl_data_offset <<= 2;
3807     +
3808     /* We ignore &savep and don't do consistency checks on
3809     * the attr length. Let userspace figure it out.... */
3810     hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
3811     + attrlen += res->acl_data_offset;
3812     recvd = req->rq_rcv_buf.len - hdrlen;
3813     if (attrlen > recvd) {
3814     - dprintk("NFS: server cheating in getattr"
3815     - " acl reply: attrlen %u > recvd %u\n",
3816     + if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
3817     + /* getxattr interface called with a NULL buf */
3818     + res->acl_len = attrlen;
3819     + goto out;
3820     + }
3821     + dprintk("NFS: acl reply: attrlen %u > recvd %u\n",
3822     attrlen, recvd);
3823     return -EINVAL;
3824     }
3825     xdr_read_pages(xdr, attrlen);
3826     - *acl_len = attrlen;
3827     + res->acl_len = attrlen;
3828     } else
3829     status = -EOPNOTSUPP;
3830    
3831     @@ -6028,7 +6043,7 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
3832     status = decode_putfh(xdr);
3833     if (status)
3834     goto out;
3835     - status = decode_getacl(xdr, rqstp, &res->acl_len);
3836     + status = decode_getacl(xdr, rqstp, res);
3837    
3838     out:
3839     return status;
3840     diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
3841     index c807ab9..55d0128 100644
3842     --- a/fs/nfs/objlayout/objio_osd.c
3843     +++ b/fs/nfs/objlayout/objio_osd.c
3844     @@ -551,7 +551,8 @@ static const struct nfs_pageio_ops objio_pg_write_ops = {
3845     static struct pnfs_layoutdriver_type objlayout_type = {
3846     .id = LAYOUT_OSD2_OBJECTS,
3847     .name = "LAYOUT_OSD2_OBJECTS",
3848     - .flags = PNFS_LAYOUTRET_ON_SETATTR,
3849     + .flags = PNFS_LAYOUTRET_ON_SETATTR |
3850     + PNFS_LAYOUTRET_ON_ERROR,
3851    
3852     .alloc_layout_hdr = objlayout_alloc_layout_hdr,
3853     .free_layout_hdr = objlayout_free_layout_hdr,
3854     diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
3855     index 72074e3..b3c2903 100644
3856     --- a/fs/nfs/objlayout/objlayout.c
3857     +++ b/fs/nfs/objlayout/objlayout.c
3858     @@ -254,6 +254,8 @@ objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
3859     oir->status = rdata->task.tk_status = status;
3860     if (status >= 0)
3861     rdata->res.count = status;
3862     + else
3863     + rdata->pnfs_error = status;
3864     objlayout_iodone(oir);
3865     /* must not use oir after this point */
3866    
3867     @@ -334,6 +336,8 @@ objlayout_write_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
3868     if (status >= 0) {
3869     wdata->res.count = status;
3870     wdata->verf.committed = oir->committed;
3871     + } else {
3872     + wdata->pnfs_error = status;
3873     }
3874     objlayout_iodone(oir);
3875     /* must not use oir after this point */
3876     diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
3877     index 8e672a2..f881a63 100644
3878     --- a/fs/nfs/pnfs.c
3879     +++ b/fs/nfs/pnfs.c
3880     @@ -1178,6 +1178,15 @@ void pnfs_ld_write_done(struct nfs_write_data *data)
3881     put_lseg(data->lseg);
3882     data->lseg = NULL;
3883     dprintk("pnfs write error = %d\n", data->pnfs_error);
3884     + if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
3885     + PNFS_LAYOUTRET_ON_ERROR) {
3886     + /* Don't lo_commit on error, the server will need to
3887     + * perform a file recovery.
3888     + */
3889     + clear_bit(NFS_INO_LAYOUTCOMMIT,
3890     + &NFS_I(data->inode)->flags);
3891     + pnfs_return_layout(data->inode);
3892     + }
3893     }
3894     data->mds_ops->rpc_release(data);
3895     }
3896     @@ -1267,6 +1276,9 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
3897     put_lseg(data->lseg);
3898     data->lseg = NULL;
3899     dprintk("pnfs write error = %d\n", data->pnfs_error);
3900     + if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
3901     + PNFS_LAYOUTRET_ON_ERROR)
3902     + pnfs_return_layout(data->inode);
3903    
3904     nfs_pageio_init_read_mds(&pgio, data->inode);
3905    
3906     diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
3907     index 1509530..53d593a 100644
3908     --- a/fs/nfs/pnfs.h
3909     +++ b/fs/nfs/pnfs.h
3910     @@ -68,6 +68,7 @@ enum {
3911     enum layoutdriver_policy_flags {
3912     /* Should the pNFS client commit and return the layout upon a setattr */
3913     PNFS_LAYOUTRET_ON_SETATTR = 1 << 0,
3914     + PNFS_LAYOUTRET_ON_ERROR = 1 << 1,
3915     };
3916    
3917     struct nfs4_deviceid_node;
3918     diff --git a/fs/nfs/super.c b/fs/nfs/super.c
3919     index 1347774..3ada13c 100644
3920     --- a/fs/nfs/super.c
3921     +++ b/fs/nfs/super.c
3922     @@ -909,10 +909,24 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
3923     data->auth_flavor_len = 1;
3924     data->version = version;
3925     data->minorversion = 0;
3926     + security_init_mnt_opts(&data->lsm_opts);
3927     }
3928     return data;
3929     }
3930    
3931     +static void nfs_free_parsed_mount_data(struct nfs_parsed_mount_data *data)
3932     +{
3933     + if (data) {
3934     + kfree(data->client_address);
3935     + kfree(data->mount_server.hostname);
3936     + kfree(data->nfs_server.export_path);
3937     + kfree(data->nfs_server.hostname);
3938     + kfree(data->fscache_uniq);
3939     + security_free_mnt_opts(&data->lsm_opts);
3940     + kfree(data);
3941     + }
3942     +}
3943     +
3944     /*
3945     * Sanity-check a server address provided by the mount command.
3946     *
3947     @@ -2220,9 +2234,7 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
3948     data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
3949     mntfh = nfs_alloc_fhandle();
3950     if (data == NULL || mntfh == NULL)
3951     - goto out_free_fh;
3952     -
3953     - security_init_mnt_opts(&data->lsm_opts);
3954     + goto out;
3955    
3956     /* Validate the mount data */
3957     error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
3958     @@ -2234,8 +2246,6 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
3959     #ifdef CONFIG_NFS_V4
3960     if (data->version == 4) {
3961     mntroot = nfs4_try_mount(flags, dev_name, data);
3962     - kfree(data->client_address);
3963     - kfree(data->nfs_server.export_path);
3964     goto out;
3965     }
3966     #endif /* CONFIG_NFS_V4 */
3967     @@ -2290,13 +2300,8 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
3968     s->s_flags |= MS_ACTIVE;
3969    
3970     out:
3971     - kfree(data->nfs_server.hostname);
3972     - kfree(data->mount_server.hostname);
3973     - kfree(data->fscache_uniq);
3974     - security_free_mnt_opts(&data->lsm_opts);
3975     -out_free_fh:
3976     + nfs_free_parsed_mount_data(data);
3977     nfs_free_fhandle(mntfh);
3978     - kfree(data);
3979     return mntroot;
3980    
3981     out_err_nosb:
3982     @@ -2623,9 +2628,7 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
3983    
3984     mntfh = nfs_alloc_fhandle();
3985     if (data == NULL || mntfh == NULL)
3986     - goto out_free_fh;
3987     -
3988     - security_init_mnt_opts(&data->lsm_opts);
3989     + goto out;
3990    
3991     /* Get a volume representation */
3992     server = nfs4_create_server(data, mntfh);
3993     @@ -2677,13 +2680,10 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
3994    
3995     s->s_flags |= MS_ACTIVE;
3996    
3997     - security_free_mnt_opts(&data->lsm_opts);
3998     nfs_free_fhandle(mntfh);
3999     return mntroot;
4000    
4001     out:
4002     - security_free_mnt_opts(&data->lsm_opts);
4003     -out_free_fh:
4004     nfs_free_fhandle(mntfh);
4005     return ERR_PTR(error);
4006    
4007     @@ -2838,7 +2838,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
4008    
4009     data = nfs_alloc_parsed_mount_data(4);
4010     if (data == NULL)
4011     - goto out_free_data;
4012     + goto out;
4013    
4014     /* Validate the mount data */
4015     error = nfs4_validate_mount_data(raw_data, data, dev_name);
4016     @@ -2852,12 +2852,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
4017     error = PTR_ERR(res);
4018    
4019     out:
4020     - kfree(data->client_address);
4021     - kfree(data->nfs_server.export_path);
4022     - kfree(data->nfs_server.hostname);
4023     - kfree(data->fscache_uniq);
4024     -out_free_data:
4025     - kfree(data);
4026     + nfs_free_parsed_mount_data(data);
4027     dprintk("<-- nfs4_mount() = %d%s\n", error,
4028     error != 0 ? " [error]" : "");
4029     return res;
4030     diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
4031     index 62f3b90..5f312ab 100644
4032     --- a/fs/nfsd/export.c
4033     +++ b/fs/nfsd/export.c
4034     @@ -87,7 +87,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
4035     struct svc_expkey key;
4036     struct svc_expkey *ek = NULL;
4037    
4038     - if (mesg[mlen-1] != '\n')
4039     + if (mlen < 1 || mesg[mlen-1] != '\n')
4040     return -EINVAL;
4041     mesg[mlen-1] = 0;
4042    
4043     diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
4044     index 47e94e3..5abced7 100644
4045     --- a/fs/nfsd/nfs4state.c
4046     +++ b/fs/nfsd/nfs4state.c
4047     @@ -3809,16 +3809,29 @@ nevermind:
4048     deny->ld_type = NFS4_WRITE_LT;
4049     }
4050    
4051     +static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, clientid_t *clid, struct xdr_netobj *owner)
4052     +{
4053     + struct nfs4_ol_stateid *lst;
4054     +
4055     + if (!same_owner_str(&lo->lo_owner, owner, clid))
4056     + return false;
4057     + lst = list_first_entry(&lo->lo_owner.so_stateids,
4058     + struct nfs4_ol_stateid, st_perstateowner);
4059     + return lst->st_file->fi_inode == inode;
4060     +}
4061     +
4062     static struct nfs4_lockowner *
4063     find_lockowner_str(struct inode *inode, clientid_t *clid,
4064     struct xdr_netobj *owner)
4065     {
4066     unsigned int hashval = lock_ownerstr_hashval(inode, clid->cl_id, owner);
4067     + struct nfs4_lockowner *lo;
4068     struct nfs4_stateowner *op;
4069    
4070     list_for_each_entry(op, &lock_ownerstr_hashtbl[hashval], so_strhash) {
4071     - if (same_owner_str(op, owner, clid))
4072     - return lockowner(op);
4073     + lo = lockowner(op);
4074     + if (same_lockowner_ino(lo, inode, clid, owner))
4075     + return lo;
4076     }
4077     return NULL;
4078     }
4079     diff --git a/fs/notify/mark.c b/fs/notify/mark.c
4080     index e14587d..f104d56 100644
4081     --- a/fs/notify/mark.c
4082     +++ b/fs/notify/mark.c
4083     @@ -135,9 +135,6 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
4084    
4085     mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
4086    
4087     - /* 1 from caller and 1 for being on i_list/g_list */
4088     - BUG_ON(atomic_read(&mark->refcnt) < 2);
4089     -
4090     spin_lock(&group->mark_lock);
4091    
4092     if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
4093     @@ -182,6 +179,11 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
4094     iput(inode);
4095    
4096     /*
4097     + * We don't necessarily have a ref on mark from caller so the above iput
4098     + * may have already destroyed it. Don't touch from now on.
4099     + */
4100     +
4101     + /*
4102     * it's possible that this group tried to destroy itself, but this
4103     * this mark was simultaneously being freed by inode. If that's the
4104     * case, we finish freeing the group here.
4105     diff --git a/fs/proc/base.c b/fs/proc/base.c
4106     index 851ba3d..1fc1dca 100644
4107     --- a/fs/proc/base.c
4108     +++ b/fs/proc/base.c
4109     @@ -194,65 +194,7 @@ static int proc_root_link(struct inode *inode, struct path *path)
4110     return result;
4111     }
4112    
4113     -static struct mm_struct *__check_mem_permission(struct task_struct *task)
4114     -{
4115     - struct mm_struct *mm;
4116     -
4117     - mm = get_task_mm(task);
4118     - if (!mm)
4119     - return ERR_PTR(-EINVAL);
4120     -
4121     - /*
4122     - * A task can always look at itself, in case it chooses
4123     - * to use system calls instead of load instructions.
4124     - */
4125     - if (task == current)
4126     - return mm;
4127     -
4128     - /*
4129     - * If current is actively ptrace'ing, and would also be
4130     - * permitted to freshly attach with ptrace now, permit it.
4131     - */
4132     - if (task_is_stopped_or_traced(task)) {
4133     - int match;
4134     - rcu_read_lock();
4135     - match = (ptrace_parent(task) == current);
4136     - rcu_read_unlock();
4137     - if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
4138     - return mm;
4139     - }
4140     -
4141     - /*
4142     - * No one else is allowed.
4143     - */
4144     - mmput(mm);
4145     - return ERR_PTR(-EPERM);
4146     -}
4147     -
4148     -/*
4149     - * If current may access user memory in @task return a reference to the
4150     - * corresponding mm, otherwise ERR_PTR.
4151     - */
4152     -static struct mm_struct *check_mem_permission(struct task_struct *task)
4153     -{
4154     - struct mm_struct *mm;
4155     - int err;
4156     -
4157     - /*
4158     - * Avoid racing if task exec's as we might get a new mm but validate
4159     - * against old credentials.
4160     - */
4161     - err = mutex_lock_killable(&task->signal->cred_guard_mutex);
4162     - if (err)
4163     - return ERR_PTR(err);
4164     -
4165     - mm = __check_mem_permission(task);
4166     - mutex_unlock(&task->signal->cred_guard_mutex);
4167     -
4168     - return mm;
4169     -}
4170     -
4171     -struct mm_struct *mm_for_maps(struct task_struct *task)
4172     +static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
4173     {
4174     struct mm_struct *mm;
4175     int err;
4176     @@ -263,7 +205,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
4177    
4178     mm = get_task_mm(task);
4179     if (mm && mm != current->mm &&
4180     - !ptrace_may_access(task, PTRACE_MODE_READ)) {
4181     + !ptrace_may_access(task, mode)) {
4182     mmput(mm);
4183     mm = ERR_PTR(-EACCES);
4184     }
4185     @@ -272,6 +214,11 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
4186     return mm;
4187     }
4188    
4189     +struct mm_struct *mm_for_maps(struct task_struct *task)
4190     +{
4191     + return mm_access(task, PTRACE_MODE_READ);
4192     +}
4193     +
4194     static int proc_pid_cmdline(struct task_struct *task, char * buffer)
4195     {
4196     int res = 0;
4197     @@ -816,38 +763,39 @@ static const struct file_operations proc_single_file_operations = {
4198    
4199     static int mem_open(struct inode* inode, struct file* file)
4200     {
4201     - file->private_data = (void*)((long)current->self_exec_id);
4202     + struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
4203     + struct mm_struct *mm;
4204     +
4205     + if (!task)
4206     + return -ESRCH;
4207     +
4208     + mm = mm_access(task, PTRACE_MODE_ATTACH);
4209     + put_task_struct(task);
4210     +
4211     + if (IS_ERR(mm))
4212     + return PTR_ERR(mm);
4213     +
4214     /* OK to pass negative loff_t, we can catch out-of-range */
4215     file->f_mode |= FMODE_UNSIGNED_OFFSET;
4216     + file->private_data = mm;
4217     +
4218     return 0;
4219     }
4220    
4221     static ssize_t mem_read(struct file * file, char __user * buf,
4222     size_t count, loff_t *ppos)
4223     {
4224     - struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
4225     + int ret;
4226     char *page;
4227     unsigned long src = *ppos;
4228     - int ret = -ESRCH;
4229     - struct mm_struct *mm;
4230     + struct mm_struct *mm = file->private_data;
4231    
4232     - if (!task)
4233     - goto out_no_task;
4234     + if (!mm)
4235     + return 0;
4236    
4237     - ret = -ENOMEM;
4238     page = (char *)__get_free_page(GFP_TEMPORARY);
4239     if (!page)
4240     - goto out;
4241     -
4242     - mm = check_mem_permission(task);
4243     - ret = PTR_ERR(mm);
4244     - if (IS_ERR(mm))
4245     - goto out_free;
4246     -
4247     - ret = -EIO;
4248     -
4249     - if (file->private_data != (void*)((long)current->self_exec_id))
4250     - goto out_put;
4251     + return -ENOMEM;
4252    
4253     ret = 0;
4254    
4255     @@ -874,13 +822,7 @@ static ssize_t mem_read(struct file * file, char __user * buf,
4256     }
4257     *ppos = src;
4258    
4259     -out_put:
4260     - mmput(mm);
4261     -out_free:
4262     free_page((unsigned long) page);
4263     -out:
4264     - put_task_struct(task);
4265     -out_no_task:
4266     return ret;
4267     }
4268    
4269     @@ -889,27 +831,15 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
4270     {
4271     int copied;
4272     char *page;
4273     - struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
4274     unsigned long dst = *ppos;
4275     - struct mm_struct *mm;
4276     + struct mm_struct *mm = file->private_data;
4277    
4278     - copied = -ESRCH;
4279     - if (!task)
4280     - goto out_no_task;
4281     + if (!mm)
4282     + return 0;
4283    
4284     - copied = -ENOMEM;
4285     page = (char *)__get_free_page(GFP_TEMPORARY);
4286     if (!page)
4287     - goto out_task;
4288     -
4289     - mm = check_mem_permission(task);
4290     - copied = PTR_ERR(mm);
4291     - if (IS_ERR(mm))
4292     - goto out_free;
4293     -
4294     - copied = -EIO;
4295     - if (file->private_data != (void *)((long)current->self_exec_id))
4296     - goto out_mm;
4297     + return -ENOMEM;
4298    
4299     copied = 0;
4300     while (count > 0) {
4301     @@ -933,13 +863,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
4302     }
4303     *ppos = dst;
4304    
4305     -out_mm:
4306     - mmput(mm);
4307     -out_free:
4308     free_page((unsigned long) page);
4309     -out_task:
4310     - put_task_struct(task);
4311     -out_no_task:
4312     return copied;
4313     }
4314    
4315     @@ -959,11 +883,20 @@ loff_t mem_lseek(struct file *file, loff_t offset, int orig)
4316     return file->f_pos;
4317     }
4318    
4319     +static int mem_release(struct inode *inode, struct file *file)
4320     +{
4321     + struct mm_struct *mm = file->private_data;
4322     +
4323     + mmput(mm);
4324     + return 0;
4325     +}
4326     +
4327     static const struct file_operations proc_mem_operations = {
4328     .llseek = mem_lseek,
4329     .read = mem_read,
4330     .write = mem_write,
4331     .open = mem_open,
4332     + .release = mem_release,
4333     };
4334    
4335     static ssize_t environ_read(struct file *file, char __user *buf,
4336     diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
4337     index e418c5a..7dcd2a2 100644
4338     --- a/fs/proc/task_mmu.c
4339     +++ b/fs/proc/task_mmu.c
4340     @@ -518,6 +518,9 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
4341     if (!page)
4342     continue;
4343    
4344     + if (PageReserved(page))
4345     + continue;
4346     +
4347     /* Clear accessed and referenced bits. */
4348     ptep_test_and_clear_young(vma, addr, pte);
4349     ClearPageReferenced(page);
4350     diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c
4351     index 766b1d4..29166ec 100644
4352     --- a/fs/proc/uptime.c
4353     +++ b/fs/proc/uptime.c
4354     @@ -11,15 +11,20 @@ static int uptime_proc_show(struct seq_file *m, void *v)
4355     {
4356     struct timespec uptime;
4357     struct timespec idle;
4358     + cputime64_t idletime;
4359     + u64 nsec;
4360     + u32 rem;
4361     int i;
4362     - cputime_t idletime = cputime_zero;
4363    
4364     + idletime = 0;
4365     for_each_possible_cpu(i)
4366     idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle);
4367    
4368     do_posix_clock_monotonic_gettime(&uptime);
4369     monotonic_to_bootbased(&uptime);
4370     - cputime_to_timespec(idletime, &idle);
4371     + nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
4372     + idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
4373     + idle.tv_nsec = rem;
4374     seq_printf(m, "%lu.%02lu %lu.%02lu\n",
4375     (unsigned long) uptime.tv_sec,
4376     (uptime.tv_nsec / (NSEC_PER_SEC / 100)),
4377     diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
4378     index 8d9c468..c9d2941 100644
4379     --- a/fs/ubifs/debug.h
4380     +++ b/fs/ubifs/debug.h
4381     @@ -175,22 +175,23 @@ const char *dbg_key_str1(const struct ubifs_info *c,
4382     const union ubifs_key *key);
4383    
4384     /*
4385     - * DBGKEY macros require @dbg_lock to be held, which it is in the dbg message
4386     - * macros.
4387     + * TODO: these macros are now broken because there is no locking around them
4388     + * and we use a global buffer for the key string. This means that in case of
4389     + * concurrent execution we will end up with incorrect and messy key strings.
4390     */
4391     #define DBGKEY(key) dbg_key_str0(c, (key))
4392     #define DBGKEY1(key) dbg_key_str1(c, (key))
4393    
4394     extern spinlock_t dbg_lock;
4395    
4396     -#define ubifs_dbg_msg(type, fmt, ...) do { \
4397     - spin_lock(&dbg_lock); \
4398     - pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__); \
4399     - spin_unlock(&dbg_lock); \
4400     -} while (0)
4401     +#define ubifs_dbg_msg(type, fmt, ...) \
4402     + pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__)
4403    
4404     /* Just a debugging messages not related to any specific UBIFS subsystem */
4405     -#define dbg_msg(fmt, ...) ubifs_dbg_msg("msg", fmt, ##__VA_ARGS__)
4406     +#define dbg_msg(fmt, ...) \
4407     + printk(KERN_DEBUG "UBIFS DBG (pid %d): %s: " fmt "\n", current->pid, \
4408     + __func__, ##__VA_ARGS__)
4409     +
4410     /* General messages */
4411     #define dbg_gen(fmt, ...) ubifs_dbg_msg("gen", fmt, ##__VA_ARGS__)
4412     /* Additional journal messages */
4413     diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
4414     index 8a24f0c..286a051 100644
4415     --- a/fs/xfs/xfs_discard.c
4416     +++ b/fs/xfs/xfs_discard.c
4417     @@ -68,7 +68,7 @@ xfs_trim_extents(
4418     * Look up the longest btree in the AGF and start with it.
4419     */
4420     error = xfs_alloc_lookup_le(cur, 0,
4421     - XFS_BUF_TO_AGF(agbp)->agf_longest, &i);
4422     + be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest), &i);
4423     if (error)
4424     goto out_del_cursor;
4425    
4426     @@ -84,7 +84,7 @@ xfs_trim_extents(
4427     if (error)
4428     goto out_del_cursor;
4429     XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor);
4430     - ASSERT(flen <= XFS_BUF_TO_AGF(agbp)->agf_longest);
4431     + ASSERT(flen <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest));
4432    
4433     /*
4434     * Too small? Give up.
4435     diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
4436     index 1739726..451823c 100644
4437     --- a/include/acpi/acpi_numa.h
4438     +++ b/include/acpi/acpi_numa.h
4439     @@ -15,6 +15,7 @@ extern int pxm_to_node(int);
4440     extern int node_to_pxm(int);
4441     extern void __acpi_map_pxm_to_node(int, int);
4442     extern int acpi_map_pxm_to_node(int);
4443     +extern unsigned char acpi_srat_revision;
4444    
4445     #endif /* CONFIG_ACPI_NUMA */
4446     #endif /* __ACP_NUMA_H */
4447     diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
4448     index 94acd81..0ed1eb0 100644
4449     --- a/include/linux/blkdev.h
4450     +++ b/include/linux/blkdev.h
4451     @@ -675,6 +675,9 @@ extern int blk_insert_cloned_request(struct request_queue *q,
4452     struct request *rq);
4453     extern void blk_delay_queue(struct request_queue *, unsigned long);
4454     extern void blk_recount_segments(struct request_queue *, struct bio *);
4455     +extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
4456     +extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
4457     + unsigned int, void __user *);
4458     extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
4459     unsigned int, void __user *);
4460     extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
4461     diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
4462     index 5c4abce..b936763 100644
4463     --- a/include/linux/crash_dump.h
4464     +++ b/include/linux/crash_dump.h
4465     @@ -5,6 +5,7 @@
4466     #include <linux/kexec.h>
4467     #include <linux/device.h>
4468     #include <linux/proc_fs.h>
4469     +#include <linux/elf.h>
4470    
4471     #define ELFCORE_ADDR_MAX (-1ULL)
4472     #define ELFCORE_ADDR_ERR (-2ULL)
4473     diff --git a/include/linux/dcache.h b/include/linux/dcache.h
4474     index ed9f74f..4eb8c80 100644
4475     --- a/include/linux/dcache.h
4476     +++ b/include/linux/dcache.h
4477     @@ -203,6 +203,7 @@ struct dentry_operations {
4478    
4479     #define DCACHE_CANT_MOUNT 0x0100
4480     #define DCACHE_GENOCIDE 0x0200
4481     +#define DCACHE_SHRINK_LIST 0x0400
4482    
4483     #define DCACHE_NFSFS_RENAMED 0x1000
4484     /* this dentry has been "silly renamed" and has to be deleted on the last
4485     diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
4486     index b87068a..81572af 100644
4487     --- a/include/linux/memcontrol.h
4488     +++ b/include/linux/memcontrol.h
4489     @@ -119,6 +119,8 @@ struct zone_reclaim_stat*
4490     mem_cgroup_get_reclaim_stat_from_page(struct page *page);
4491     extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
4492     struct task_struct *p);
4493     +extern void mem_cgroup_replace_page_cache(struct page *oldpage,
4494     + struct page *newpage);
4495    
4496     #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4497     extern int do_swap_account;
4498     @@ -366,6 +368,10 @@ static inline
4499     void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
4500     {
4501     }
4502     +static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
4503     + struct page *newpage)
4504     +{
4505     +}
4506     #endif /* CONFIG_CGROUP_MEM_CONT */
4507    
4508     #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
4509     diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
4510     index 2a7c533..6c898af 100644
4511     --- a/include/linux/nfs_xdr.h
4512     +++ b/include/linux/nfs_xdr.h
4513     @@ -602,11 +602,16 @@ struct nfs_getaclargs {
4514     size_t acl_len;
4515     unsigned int acl_pgbase;
4516     struct page ** acl_pages;
4517     + struct page * acl_scratch;
4518     struct nfs4_sequence_args seq_args;
4519     };
4520    
4521     +/* getxattr ACL interface flags */
4522     +#define NFS4_ACL_LEN_REQUEST 0x0001 /* zero length getxattr buffer */
4523     struct nfs_getaclres {
4524     size_t acl_len;
4525     + size_t acl_data_offset;
4526     + int acl_flags;
4527     struct nfs4_sequence_res seq_res;
4528     };
4529    
4530     diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
4531     index b5d9657..411c412 100644
4532     --- a/include/linux/pci_regs.h
4533     +++ b/include/linux/pci_regs.h
4534     @@ -392,7 +392,7 @@
4535     #define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
4536     #define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */
4537     #define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
4538     -#define PCI_EXP_TYPE_RC_EC 0x10 /* Root Complex Event Collector */
4539     +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
4540     #define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
4541     #define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
4542     #define PCI_EXP_DEVCAP 4 /* Device capabilities */
4543     diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
4544     index 9291ac3..6f10c9c 100644
4545     --- a/include/linux/shmem_fs.h
4546     +++ b/include/linux/shmem_fs.h
4547     @@ -48,6 +48,7 @@ extern struct file *shmem_file_setup(const char *name,
4548     loff_t size, unsigned long flags);
4549     extern int shmem_zero_setup(struct vm_area_struct *);
4550     extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
4551     +extern void shmem_unlock_mapping(struct address_space *mapping);
4552     extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4553     pgoff_t index, gfp_t gfp_mask);
4554     extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
4555     diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
4556     index 85c50b4..c84e974 100644
4557     --- a/include/linux/sunrpc/svcsock.h
4558     +++ b/include/linux/sunrpc/svcsock.h
4559     @@ -34,7 +34,7 @@ struct svc_sock {
4560     /*
4561     * Function prototypes.
4562     */
4563     -void svc_close_all(struct list_head *);
4564     +void svc_close_all(struct svc_serv *);
4565     int svc_recv(struct svc_rqst *, long);
4566     int svc_send(struct svc_rqst *);
4567     void svc_drop(struct svc_rqst *);
4568     diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
4569     index a20970e..af70af3 100644
4570     --- a/include/linux/sunrpc/xdr.h
4571     +++ b/include/linux/sunrpc/xdr.h
4572     @@ -191,6 +191,8 @@ extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
4573     struct xdr_array2_desc *desc);
4574     extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
4575     struct xdr_array2_desc *desc);
4576     +extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
4577     + size_t len);
4578    
4579     /*
4580     * Provide some simple tools for XDR buffer overflow-checking etc.
4581     diff --git a/include/linux/swap.h b/include/linux/swap.h
4582     index 1e22e12..67b3fa3 100644
4583     --- a/include/linux/swap.h
4584     +++ b/include/linux/swap.h
4585     @@ -272,7 +272,7 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
4586     #endif
4587    
4588     extern int page_evictable(struct page *page, struct vm_area_struct *vma);
4589     -extern void scan_mapping_unevictable_pages(struct address_space *);
4590     +extern void check_move_unevictable_pages(struct page **, int nr_pages);
4591    
4592     extern unsigned long scan_unevictable_pages;
4593     extern int scan_unevictable_handler(struct ctl_table *, int,
4594     diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
4595     index 4b752d5..45a7698 100644
4596     --- a/include/linux/videodev2.h
4597     +++ b/include/linux/videodev2.h
4598     @@ -1131,6 +1131,7 @@ struct v4l2_querymenu {
4599     #define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000
4600    
4601     /* User-class control IDs defined by V4L2 */
4602     +#define V4L2_CID_MAX_CTRLS 1024
4603     #define V4L2_CID_BASE (V4L2_CTRL_CLASS_USER | 0x900)
4604     #define V4L2_CID_USER_BASE V4L2_CID_BASE
4605     /* IDs reserved for driver specific controls */
4606     diff --git a/include/media/tuner.h b/include/media/tuner.h
4607     index 89c290b..29e1920 100644
4608     --- a/include/media/tuner.h
4609     +++ b/include/media/tuner.h
4610     @@ -127,7 +127,6 @@
4611     #define TUNER_PHILIPS_FMD1216MEX_MK3 78
4612     #define TUNER_PHILIPS_FM1216MK5 79
4613     #define TUNER_PHILIPS_FQ1216LME_MK3 80 /* Active loopthrough, no FM */
4614     -#define TUNER_XC4000 81 /* Xceive Silicon Tuner */
4615    
4616     #define TUNER_PARTSNIC_PTI_5NF05 81
4617     #define TUNER_PHILIPS_CU1216L 82
4618     @@ -136,6 +135,8 @@
4619     #define TUNER_PHILIPS_FQ1236_MK5 85 /* NTSC, TDA9885, no FM radio */
4620     #define TUNER_TENA_TNF_5337 86
4621    
4622     +#define TUNER_XC4000 87 /* Xceive Silicon Tuner */
4623     +
4624     /* tv card specific */
4625     #define TDA9887_PRESENT (1<<0)
4626     #define TDA9887_PORT1_INACTIVE (1<<1)
4627     diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
4628     index 6873c7d..a79886c 100644
4629     --- a/include/target/target_core_base.h
4630     +++ b/include/target/target_core_base.h
4631     @@ -34,6 +34,7 @@
4632     #define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE
4633     /* Used by transport_send_check_condition_and_sense() */
4634     #define SPC_SENSE_KEY_OFFSET 2
4635     +#define SPC_ADD_SENSE_LEN_OFFSET 7
4636     #define SPC_ASC_KEY_OFFSET 12
4637     #define SPC_ASCQ_KEY_OFFSET 13
4638     #define TRANSPORT_IQN_LEN 224
4639     diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
4640     index f6f07aa..7cdfca2 100644
4641     --- a/include/xen/interface/io/xs_wire.h
4642     +++ b/include/xen/interface/io/xs_wire.h
4643     @@ -87,4 +87,7 @@ struct xenstore_domain_interface {
4644     XENSTORE_RING_IDX rsp_cons, rsp_prod;
4645     };
4646    
4647     +/* Violating this is very bad. See docs/misc/xenstore.txt. */
4648     +#define XENSTORE_PAYLOAD_MAX 4096
4649     +
4650     #endif /* _XS_WIRE_H */
4651     diff --git a/init/do_mounts.c b/init/do_mounts.c
4652     index 0f6e1d9..db6e5ee 100644
4653     --- a/init/do_mounts.c
4654     +++ b/init/do_mounts.c
4655     @@ -398,15 +398,42 @@ out:
4656     }
4657    
4658     #ifdef CONFIG_ROOT_NFS
4659     +
4660     +#define NFSROOT_TIMEOUT_MIN 5
4661     +#define NFSROOT_TIMEOUT_MAX 30
4662     +#define NFSROOT_RETRY_MAX 5
4663     +
4664     static int __init mount_nfs_root(void)
4665     {
4666     char *root_dev, *root_data;
4667     + unsigned int timeout;
4668     + int try, err;
4669    
4670     - if (nfs_root_data(&root_dev, &root_data) != 0)
4671     - return 0;
4672     - if (do_mount_root(root_dev, "nfs", root_mountflags, root_data) != 0)
4673     + err = nfs_root_data(&root_dev, &root_data);
4674     + if (err != 0)
4675     return 0;
4676     - return 1;
4677     +
4678     + /*
4679     + * The server or network may not be ready, so try several
4680     + * times. Stop after a few tries in case the client wants
4681     + * to fall back to other boot methods.
4682     + */
4683     + timeout = NFSROOT_TIMEOUT_MIN;
4684     + for (try = 1; ; try++) {
4685     + err = do_mount_root(root_dev, "nfs",
4686     + root_mountflags, root_data);
4687     + if (err == 0)
4688     + return 1;
4689     + if (try > NFSROOT_RETRY_MAX)
4690     + break;
4691     +
4692     + /* Wait, in case the server refused us immediately */
4693     + ssleep(timeout);
4694     + timeout <<= 1;
4695     + if (timeout > NFSROOT_TIMEOUT_MAX)
4696     + timeout = NFSROOT_TIMEOUT_MAX;
4697     + }
4698     + return 0;
4699     }
4700     #endif
4701    
4702     diff --git a/ipc/shm.c b/ipc/shm.c
4703     index 02ecf2c..b76be5b 100644
4704     --- a/ipc/shm.c
4705     +++ b/ipc/shm.c
4706     @@ -870,9 +870,7 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
4707     case SHM_LOCK:
4708     case SHM_UNLOCK:
4709     {
4710     - struct file *uninitialized_var(shm_file);
4711     -
4712     - lru_add_drain_all(); /* drain pagevecs to lru lists */
4713     + struct file *shm_file;
4714    
4715     shp = shm_lock_check(ns, shmid);
4716     if (IS_ERR(shp)) {
4717     @@ -895,22 +893,31 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
4718     err = security_shm_shmctl(shp, cmd);
4719     if (err)
4720     goto out_unlock;
4721     -
4722     - if(cmd==SHM_LOCK) {
4723     +
4724     + shm_file = shp->shm_file;
4725     + if (is_file_hugepages(shm_file))
4726     + goto out_unlock;
4727     +
4728     + if (cmd == SHM_LOCK) {
4729     struct user_struct *user = current_user();
4730     - if (!is_file_hugepages(shp->shm_file)) {
4731     - err = shmem_lock(shp->shm_file, 1, user);
4732     - if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){
4733     - shp->shm_perm.mode |= SHM_LOCKED;
4734     - shp->mlock_user = user;
4735     - }
4736     + err = shmem_lock(shm_file, 1, user);
4737     + if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
4738     + shp->shm_perm.mode |= SHM_LOCKED;
4739     + shp->mlock_user = user;
4740     }
4741     - } else if (!is_file_hugepages(shp->shm_file)) {
4742     - shmem_lock(shp->shm_file, 0, shp->mlock_user);
4743     - shp->shm_perm.mode &= ~SHM_LOCKED;
4744     - shp->mlock_user = NULL;
4745     + goto out_unlock;
4746     }
4747     +
4748     + /* SHM_UNLOCK */
4749     + if (!(shp->shm_perm.mode & SHM_LOCKED))
4750     + goto out_unlock;
4751     + shmem_lock(shm_file, 0, shp->mlock_user);
4752     + shp->shm_perm.mode &= ~SHM_LOCKED;
4753     + shp->mlock_user = NULL;
4754     + get_file(shm_file);
4755     shm_unlock(shp);
4756     + shmem_unlock_mapping(shm_file->f_mapping);
4757     + fput(shm_file);
4758     goto out;
4759     }
4760     case IPC_RMID:
4761     diff --git a/kernel/kprobes.c b/kernel/kprobes.c
4762     index e5d8464..52fd049 100644
4763     --- a/kernel/kprobes.c
4764     +++ b/kernel/kprobes.c
4765     @@ -1077,6 +1077,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
4766     /* Early boot. kretprobe_table_locks not yet initialized. */
4767     return;
4768    
4769     + INIT_HLIST_HEAD(&empty_rp);
4770     hash = hash_ptr(tk, KPROBE_HASH_BITS);
4771     head = &kretprobe_inst_table[hash];
4772     kretprobe_table_lock(hash, &flags);
4773     @@ -1085,7 +1086,6 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
4774     recycle_rp_inst(ri, &empty_rp);
4775     }
4776     kretprobe_table_unlock(hash, &flags);
4777     - INIT_HLIST_HEAD(&empty_rp);
4778     hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
4779     hlist_del(&ri->hlist);
4780     kfree(ri);
4781     diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
4782     index b1e8943..25b4f4d 100644
4783     --- a/kernel/trace/ftrace.c
4784     +++ b/kernel/trace/ftrace.c
4785     @@ -948,7 +948,7 @@ struct ftrace_func_probe {
4786     };
4787    
4788     enum {
4789     - FTRACE_ENABLE_CALLS = (1 << 0),
4790     + FTRACE_UPDATE_CALLS = (1 << 0),
4791     FTRACE_DISABLE_CALLS = (1 << 1),
4792     FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
4793     FTRACE_START_FUNC_RET = (1 << 3),
4794     @@ -1519,7 +1519,7 @@ int ftrace_text_reserved(void *start, void *end)
4795    
4796    
4797     static int
4798     -__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
4799     +__ftrace_replace_code(struct dyn_ftrace *rec, int update)
4800     {
4801     unsigned long ftrace_addr;
4802     unsigned long flag = 0UL;
4803     @@ -1527,17 +1527,17 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
4804     ftrace_addr = (unsigned long)FTRACE_ADDR;
4805    
4806     /*
4807     - * If we are enabling tracing:
4808     + * If we are updating calls:
4809     *
4810     * If the record has a ref count, then we need to enable it
4811     * because someone is using it.
4812     *
4813     * Otherwise we make sure its disabled.
4814     *
4815     - * If we are disabling tracing, then disable all records that
4816     + * If we are disabling calls, then disable all records that
4817     * are enabled.
4818     */
4819     - if (enable && (rec->flags & ~FTRACE_FL_MASK))
4820     + if (update && (rec->flags & ~FTRACE_FL_MASK))
4821     flag = FTRACE_FL_ENABLED;
4822    
4823     /* If the state of this record hasn't changed, then do nothing */
4824     @@ -1553,7 +1553,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
4825     return ftrace_make_nop(NULL, rec, ftrace_addr);
4826     }
4827    
4828     -static void ftrace_replace_code(int enable)
4829     +static void ftrace_replace_code(int update)
4830     {
4831     struct dyn_ftrace *rec;
4832     struct ftrace_page *pg;
4833     @@ -1567,7 +1567,7 @@ static void ftrace_replace_code(int enable)
4834     if (rec->flags & FTRACE_FL_FREE)
4835     continue;
4836    
4837     - failed = __ftrace_replace_code(rec, enable);
4838     + failed = __ftrace_replace_code(rec, update);
4839     if (failed) {
4840     ftrace_bug(failed, rec->ip);
4841     /* Stop processing */
4842     @@ -1623,7 +1623,7 @@ static int __ftrace_modify_code(void *data)
4843     */
4844     function_trace_stop++;
4845    
4846     - if (*command & FTRACE_ENABLE_CALLS)
4847     + if (*command & FTRACE_UPDATE_CALLS)
4848     ftrace_replace_code(1);
4849     else if (*command & FTRACE_DISABLE_CALLS)
4850     ftrace_replace_code(0);
4851     @@ -1691,7 +1691,7 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
4852     return -ENODEV;
4853    
4854     ftrace_start_up++;
4855     - command |= FTRACE_ENABLE_CALLS;
4856     + command |= FTRACE_UPDATE_CALLS;
4857    
4858     /* ops marked global share the filter hashes */
4859     if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
4860     @@ -1743,8 +1743,7 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
4861     if (ops != &global_ops || !global_start_up)
4862     ops->flags &= ~FTRACE_OPS_FL_ENABLED;
4863    
4864     - if (!ftrace_start_up)
4865     - command |= FTRACE_DISABLE_CALLS;
4866     + command |= FTRACE_UPDATE_CALLS;
4867    
4868     if (saved_ftrace_func != ftrace_trace_function) {
4869     saved_ftrace_func = ftrace_trace_function;
4870     @@ -1766,7 +1765,7 @@ static void ftrace_startup_sysctl(void)
4871     saved_ftrace_func = NULL;
4872     /* ftrace_start_up is true if we want ftrace running */
4873     if (ftrace_start_up)
4874     - ftrace_run_update_code(FTRACE_ENABLE_CALLS);
4875     + ftrace_run_update_code(FTRACE_UPDATE_CALLS);
4876     }
4877    
4878     static void ftrace_shutdown_sysctl(void)
4879     @@ -2919,7 +2918,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4880     ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4881     if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
4882     && ftrace_enabled)
4883     - ftrace_run_update_code(FTRACE_ENABLE_CALLS);
4884     + ftrace_run_update_code(FTRACE_UPDATE_CALLS);
4885    
4886     mutex_unlock(&ftrace_lock);
4887    
4888     @@ -3107,7 +3106,7 @@ ftrace_regex_release(struct inode *inode, struct file *file)
4889     orig_hash, iter->hash);
4890     if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
4891     && ftrace_enabled)
4892     - ftrace_run_update_code(FTRACE_ENABLE_CALLS);
4893     + ftrace_run_update_code(FTRACE_UPDATE_CALLS);
4894    
4895     mutex_unlock(&ftrace_lock);
4896     }
4897     diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
4898     index db110b8..f1539de 100644
4899     --- a/kernel/tracepoint.c
4900     +++ b/kernel/tracepoint.c
4901     @@ -634,10 +634,11 @@ static int tracepoint_module_coming(struct module *mod)
4902     int ret = 0;
4903    
4904     /*
4905     - * We skip modules that tain the kernel, especially those with different
4906     - * module header (for forced load), to make sure we don't cause a crash.
4907     + * We skip modules that taint the kernel, especially those with different
4908     + * module headers (for forced load), to make sure we don't cause a crash.
4909     + * Staging and out-of-tree GPL modules are fine.
4910     */
4911     - if (mod->taints)
4912     + if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
4913     return 0;
4914     mutex_lock(&tracepoints_mutex);
4915     tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
4916     diff --git a/mm/filemap.c b/mm/filemap.c
4917     index 5f0a3c9..90286a4 100644
4918     --- a/mm/filemap.c
4919     +++ b/mm/filemap.c
4920     @@ -393,24 +393,11 @@ EXPORT_SYMBOL(filemap_write_and_wait_range);
4921     int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
4922     {
4923     int error;
4924     - struct mem_cgroup *memcg = NULL;
4925    
4926     VM_BUG_ON(!PageLocked(old));
4927     VM_BUG_ON(!PageLocked(new));
4928     VM_BUG_ON(new->mapping);
4929    
4930     - /*
4931     - * This is not page migration, but prepare_migration and
4932     - * end_migration does enough work for charge replacement.
4933     - *
4934     - * In the longer term we probably want a specialized function
4935     - * for moving the charge from old to new in a more efficient
4936     - * manner.
4937     - */
4938     - error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
4939     - if (error)
4940     - return error;
4941     -
4942     error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
4943     if (!error) {
4944     struct address_space *mapping = old->mapping;
4945     @@ -432,13 +419,12 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
4946     if (PageSwapBacked(new))
4947     __inc_zone_page_state(new, NR_SHMEM);
4948     spin_unlock_irq(&mapping->tree_lock);
4949     + /* mem_cgroup codes must not be called under tree_lock */
4950     + mem_cgroup_replace_page_cache(old, new);
4951     radix_tree_preload_end();
4952     if (freepage)
4953     freepage(old);
4954     page_cache_release(old);
4955     - mem_cgroup_end_migration(memcg, old, new, true);
4956     - } else {
4957     - mem_cgroup_end_migration(memcg, old, new, false);
4958     }
4959    
4960     return error;
4961     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4962     index b63f5f7..f538e9b 100644
4963     --- a/mm/memcontrol.c
4964     +++ b/mm/memcontrol.c
4965     @@ -3366,6 +3366,50 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
4966     cgroup_release_and_wakeup_rmdir(&memcg->css);
4967     }
4968    
4969     +/*
4970     + * At replace page cache, newpage is not under any memcg but it's on
4971     + * LRU. So, this function doesn't touch res_counter but handles LRU
4972     + * in correct way. Both pages are locked so we cannot race with uncharge.
4973     + */
4974     +void mem_cgroup_replace_page_cache(struct page *oldpage,
4975     + struct page *newpage)
4976     +{
4977     + struct mem_cgroup *memcg;
4978     + struct page_cgroup *pc;
4979     + struct zone *zone;
4980     + enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
4981     + unsigned long flags;
4982     +
4983     + if (mem_cgroup_disabled())
4984     + return;
4985     +
4986     + pc = lookup_page_cgroup(oldpage);
4987     + /* fix accounting on old pages */
4988     + lock_page_cgroup(pc);
4989     + memcg = pc->mem_cgroup;
4990     + mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
4991     + ClearPageCgroupUsed(pc);
4992     + unlock_page_cgroup(pc);
4993     +
4994     + if (PageSwapBacked(oldpage))
4995     + type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
4996     +
4997     + zone = page_zone(newpage);
4998     + pc = lookup_page_cgroup(newpage);
4999     + /*
5000     + * Even if newpage->mapping was NULL before starting replacement,
5001     + * the newpage may be on LRU(or pagevec for LRU) already. We lock
5002     + * LRU while we overwrite pc->mem_cgroup.
5003     + */
5004     + spin_lock_irqsave(&zone->lru_lock, flags);
5005     + if (PageLRU(newpage))
5006     + del_page_from_lru_list(zone, newpage, page_lru(newpage));
5007     + __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type);
5008     + if (PageLRU(newpage))
5009     + add_page_to_lru_list(zone, newpage, page_lru(newpage));
5010     + spin_unlock_irqrestore(&zone->lru_lock, flags);
5011     +}
5012     +
5013     #ifdef CONFIG_DEBUG_VM
5014     static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
5015     {
5016     diff --git a/mm/page_alloc.c b/mm/page_alloc.c
5017     index 2b8ba3a..485be89 100644
5018     --- a/mm/page_alloc.c
5019     +++ b/mm/page_alloc.c
5020     @@ -5608,6 +5608,17 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
5021     bool is_pageblock_removable_nolock(struct page *page)
5022     {
5023     struct zone *zone = page_zone(page);
5024     + unsigned long pfn = page_to_pfn(page);
5025     +
5026     + /*
5027     + * We have to be careful here because we are iterating over memory
5028     + * sections which are not zone aware so we might end up outside of
5029     + * the zone but still within the section.
5030     + */
5031     + if (!zone || zone->zone_start_pfn > pfn ||
5032     + zone->zone_start_pfn + zone->spanned_pages <= pfn)
5033     + return false;
5034     +
5035     return __count_immobile_pages(zone, page, 0);
5036     }
5037    
5038     diff --git a/mm/shmem.c b/mm/shmem.c
5039     index d672250..6c253f7 100644
5040     --- a/mm/shmem.c
5041     +++ b/mm/shmem.c
5042     @@ -379,7 +379,7 @@ static int shmem_free_swap(struct address_space *mapping,
5043     /*
5044     * Pagevec may contain swap entries, so shuffle up pages before releasing.
5045     */
5046     -static void shmem_pagevec_release(struct pagevec *pvec)
5047     +static void shmem_deswap_pagevec(struct pagevec *pvec)
5048     {
5049     int i, j;
5050    
5051     @@ -389,7 +389,36 @@ static void shmem_pagevec_release(struct pagevec *pvec)
5052     pvec->pages[j++] = page;
5053     }
5054     pvec->nr = j;
5055     - pagevec_release(pvec);
5056     +}
5057     +
5058     +/*
5059     + * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
5060     + */
5061     +void shmem_unlock_mapping(struct address_space *mapping)
5062     +{
5063     + struct pagevec pvec;
5064     + pgoff_t indices[PAGEVEC_SIZE];
5065     + pgoff_t index = 0;
5066     +
5067     + pagevec_init(&pvec, 0);
5068     + /*
5069     + * Minor point, but we might as well stop if someone else SHM_LOCKs it.
5070     + */
5071     + while (!mapping_unevictable(mapping)) {
5072     + /*
5073     + * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
5074     + * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
5075     + */
5076     + pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
5077     + PAGEVEC_SIZE, pvec.pages, indices);
5078     + if (!pvec.nr)
5079     + break;
5080     + index = indices[pvec.nr - 1] + 1;
5081     + shmem_deswap_pagevec(&pvec);
5082     + check_move_unevictable_pages(pvec.pages, pvec.nr);
5083     + pagevec_release(&pvec);
5084     + cond_resched();
5085     + }
5086     }
5087    
5088     /*
5089     @@ -440,7 +469,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
5090     }
5091     unlock_page(page);
5092     }
5093     - shmem_pagevec_release(&pvec);
5094     + shmem_deswap_pagevec(&pvec);
5095     + pagevec_release(&pvec);
5096     mem_cgroup_uncharge_end();
5097     cond_resched();
5098     index++;
5099     @@ -470,7 +500,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
5100     continue;
5101     }
5102     if (index == start && indices[0] > end) {
5103     - shmem_pagevec_release(&pvec);
5104     + shmem_deswap_pagevec(&pvec);
5105     + pagevec_release(&pvec);
5106     break;
5107     }
5108     mem_cgroup_uncharge_start();
5109     @@ -494,7 +525,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
5110     }
5111     unlock_page(page);
5112     }
5113     - shmem_pagevec_release(&pvec);
5114     + shmem_deswap_pagevec(&pvec);
5115     + pagevec_release(&pvec);
5116     mem_cgroup_uncharge_end();
5117     index++;
5118     }
5119     @@ -1068,13 +1100,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
5120     user_shm_unlock(inode->i_size, user);
5121     info->flags &= ~VM_LOCKED;
5122     mapping_clear_unevictable(file->f_mapping);
5123     - /*
5124     - * Ensure that a racing putback_lru_page() can see
5125     - * the pages of this mapping are evictable when we
5126     - * skip them due to !PageLRU during the scan.
5127     - */
5128     - smp_mb__after_clear_bit();
5129     - scan_mapping_unevictable_pages(file->f_mapping);
5130     }
5131     retval = 0;
5132    
5133     @@ -2446,6 +2471,10 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
5134     return 0;
5135     }
5136    
5137     +void shmem_unlock_mapping(struct address_space *mapping)
5138     +{
5139     +}
5140     +
5141     void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
5142     {
5143     truncate_inode_pages_range(inode->i_mapping, lstart, lend);
5144     diff --git a/mm/slub.c b/mm/slub.c
5145     index ed3334d..1a919f0 100644
5146     --- a/mm/slub.c
5147     +++ b/mm/slub.c
5148     @@ -2166,6 +2166,11 @@ redo:
5149     goto new_slab;
5150     }
5151    
5152     + /* must check again c->freelist in case of cpu migration or IRQ */
5153     + object = c->freelist;
5154     + if (object)
5155     + goto load_freelist;
5156     +
5157     stat(s, ALLOC_SLOWPATH);
5158    
5159     do {
5160     diff --git a/mm/vmscan.c b/mm/vmscan.c
5161     index f54a05b..cb33d9c 100644
5162     --- a/mm/vmscan.c
5163     +++ b/mm/vmscan.c
5164     @@ -636,7 +636,7 @@ redo:
5165     * When racing with an mlock or AS_UNEVICTABLE clearing
5166     * (page is unlocked) make sure that if the other thread
5167     * does not observe our setting of PG_lru and fails
5168     - * isolation/check_move_unevictable_page,
5169     + * isolation/check_move_unevictable_pages,
5170     * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
5171     * the page back to the evictable list.
5172     *
5173     @@ -3353,97 +3353,59 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
5174     return 1;
5175     }
5176    
5177     +#ifdef CONFIG_SHMEM
5178     /**
5179     - * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
5180     - * @page: page to check evictability and move to appropriate lru list
5181     - * @zone: zone page is in
5182     + * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
5183     + * @pages: array of pages to check
5184     + * @nr_pages: number of pages to check
5185     *
5186     - * Checks a page for evictability and moves the page to the appropriate
5187     - * zone lru list.
5188     + * Checks pages for evictability and moves them to the appropriate lru list.
5189     *
5190     - * Restrictions: zone->lru_lock must be held, page must be on LRU and must
5191     - * have PageUnevictable set.
5192     + * This function is only used for SysV IPC SHM_UNLOCK.
5193     */
5194     -static void check_move_unevictable_page(struct page *page, struct zone *zone)
5195     +void check_move_unevictable_pages(struct page **pages, int nr_pages)
5196     {
5197     - VM_BUG_ON(PageActive(page));
5198     -
5199     -retry:
5200     - ClearPageUnevictable(page);
5201     - if (page_evictable(page, NULL)) {
5202     - enum lru_list l = page_lru_base_type(page);
5203     + struct zone *zone = NULL;
5204     + int pgscanned = 0;
5205     + int pgrescued = 0;
5206     + int i;
5207    
5208     - __dec_zone_state(zone, NR_UNEVICTABLE);
5209     - list_move(&page->lru, &zone->lru[l].list);
5210     - mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
5211     - __inc_zone_state(zone, NR_INACTIVE_ANON + l);
5212     - __count_vm_event(UNEVICTABLE_PGRESCUED);
5213     - } else {
5214     - /*
5215     - * rotate unevictable list
5216     - */
5217     - SetPageUnevictable(page);
5218     - list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
5219     - mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
5220     - if (page_evictable(page, NULL))
5221     - goto retry;
5222     - }
5223     -}
5224     + for (i = 0; i < nr_pages; i++) {
5225     + struct page *page = pages[i];
5226     + struct zone *pagezone;
5227    
5228     -/**
5229     - * scan_mapping_unevictable_pages - scan an address space for evictable pages
5230     - * @mapping: struct address_space to scan for evictable pages
5231     - *
5232     - * Scan all pages in mapping. Check unevictable pages for
5233     - * evictability and move them to the appropriate zone lru list.
5234     - */
5235     -void scan_mapping_unevictable_pages(struct address_space *mapping)
5236     -{
5237     - pgoff_t next = 0;
5238     - pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
5239     - PAGE_CACHE_SHIFT;
5240     - struct zone *zone;
5241     - struct pagevec pvec;
5242     + pgscanned++;
5243     + pagezone = page_zone(page);
5244     + if (pagezone != zone) {
5245     + if (zone)
5246     + spin_unlock_irq(&zone->lru_lock);
5247     + zone = pagezone;
5248     + spin_lock_irq(&zone->lru_lock);
5249     + }
5250    
5251     - if (mapping->nrpages == 0)
5252     - return;
5253     + if (!PageLRU(page) || !PageUnevictable(page))
5254     + continue;
5255    
5256     - pagevec_init(&pvec, 0);
5257     - while (next < end &&
5258     - pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
5259     - int i;
5260     - int pg_scanned = 0;
5261     -
5262     - zone = NULL;
5263     -
5264     - for (i = 0; i < pagevec_count(&pvec); i++) {
5265     - struct page *page = pvec.pages[i];
5266     - pgoff_t page_index = page->index;
5267     - struct zone *pagezone = page_zone(page);
5268     -
5269     - pg_scanned++;
5270     - if (page_index > next)
5271     - next = page_index;
5272     - next++;
5273     -
5274     - if (pagezone != zone) {
5275     - if (zone)
5276     - spin_unlock_irq(&zone->lru_lock);
5277     - zone = pagezone;
5278     - spin_lock_irq(&zone->lru_lock);
5279     - }
5280     + if (page_evictable(page, NULL)) {
5281     + enum lru_list lru = page_lru_base_type(page);
5282    
5283     - if (PageLRU(page) && PageUnevictable(page))
5284     - check_move_unevictable_page(page, zone);
5285     + VM_BUG_ON(PageActive(page));
5286     + ClearPageUnevictable(page);
5287     + __dec_zone_state(zone, NR_UNEVICTABLE);
5288     + list_move(&page->lru, &zone->lru[lru].list);
5289     + mem_cgroup_move_lists(page, LRU_UNEVICTABLE, lru);
5290     + __inc_zone_state(zone, NR_INACTIVE_ANON + lru);
5291     + pgrescued++;
5292     }
5293     - if (zone)
5294     - spin_unlock_irq(&zone->lru_lock);
5295     - pagevec_release(&pvec);
5296     -
5297     - count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
5298     }
5299    
5300     + if (zone) {
5301     + __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
5302     + __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
5303     + spin_unlock_irq(&zone->lru_lock);
5304     + }
5305     }
5306     +#endif /* CONFIG_SHMEM */
5307    
5308     static void warn_scan_unevictable_pages(void)
5309     {
5310     diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
5311     index ea10a51..73495f1 100644
5312     --- a/net/mac80211/ieee80211_i.h
5313     +++ b/net/mac80211/ieee80211_i.h
5314     @@ -702,6 +702,8 @@ struct tpt_led_trigger {
5315     * well be on the operating channel
5316     * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to
5317     * determine if we are on the operating channel or not
5318     + * @SCAN_OFF_CHANNEL: We're off our operating channel for scanning,
5319     + * gets only set in conjunction with SCAN_SW_SCANNING
5320     * @SCAN_COMPLETED: Set for our scan work function when the driver reported
5321     * that the scan completed.
5322     * @SCAN_ABORTED: Set for our scan work function when the driver reported
5323     @@ -710,6 +712,7 @@ struct tpt_led_trigger {
5324     enum {
5325     SCAN_SW_SCANNING,
5326     SCAN_HW_SCANNING,
5327     + SCAN_OFF_CHANNEL,
5328     SCAN_COMPLETED,
5329     SCAN_ABORTED,
5330     };
5331     @@ -1140,14 +1143,10 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
5332     void ieee80211_sched_scan_stopped_work(struct work_struct *work);
5333    
5334     /* off-channel helpers */
5335     -bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
5336     -void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
5337     - bool tell_ap);
5338     -void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
5339     - bool offchannel_ps_enable);
5340     +void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local);
5341     +void ieee80211_offchannel_stop_station(struct ieee80211_local *local);
5342     void ieee80211_offchannel_return(struct ieee80211_local *local,
5343     - bool enable_beaconing,
5344     - bool offchannel_ps_disable);
5345     + bool enable_beaconing);
5346     void ieee80211_hw_roc_setup(struct ieee80211_local *local);
5347    
5348     /* interface handling */
5349     diff --git a/net/mac80211/main.c b/net/mac80211/main.c
5350     index cae4435..a7536fd 100644
5351     --- a/net/mac80211/main.c
5352     +++ b/net/mac80211/main.c
5353     @@ -92,47 +92,6 @@ static void ieee80211_reconfig_filter(struct work_struct *work)
5354     ieee80211_configure_filter(local);
5355     }
5356    
5357     -/*
5358     - * Returns true if we are logically configured to be on
5359     - * the operating channel AND the hardware-conf is currently
5360     - * configured on the operating channel. Compares channel-type
5361     - * as well.
5362     - */
5363     -bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
5364     -{
5365     - struct ieee80211_channel *chan, *scan_chan;
5366     - enum nl80211_channel_type channel_type;
5367     -
5368     - /* This logic needs to match logic in ieee80211_hw_config */
5369     - if (local->scan_channel) {
5370     - chan = local->scan_channel;
5371     - /* If scanning on oper channel, use whatever channel-type
5372     - * is currently in use.
5373     - */
5374     - if (chan == local->oper_channel)
5375     - channel_type = local->_oper_channel_type;
5376     - else
5377     - channel_type = NL80211_CHAN_NO_HT;
5378     - } else if (local->tmp_channel) {
5379     - chan = scan_chan = local->tmp_channel;
5380     - channel_type = local->tmp_channel_type;
5381     - } else {
5382     - chan = local->oper_channel;
5383     - channel_type = local->_oper_channel_type;
5384     - }
5385     -
5386     - if (chan != local->oper_channel ||
5387     - channel_type != local->_oper_channel_type)
5388     - return false;
5389     -
5390     - /* Check current hardware-config against oper_channel. */
5391     - if ((local->oper_channel != local->hw.conf.channel) ||
5392     - (local->_oper_channel_type != local->hw.conf.channel_type))
5393     - return false;
5394     -
5395     - return true;
5396     -}
5397     -
5398     int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
5399     {
5400     struct ieee80211_channel *chan, *scan_chan;
5401     @@ -145,9 +104,6 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
5402    
5403     scan_chan = local->scan_channel;
5404    
5405     - /* If this off-channel logic ever changes, ieee80211_on_oper_channel
5406     - * may need to change as well.
5407     - */
5408     offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
5409     if (scan_chan) {
5410     chan = scan_chan;
5411     @@ -158,19 +114,17 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
5412     channel_type = local->_oper_channel_type;
5413     else
5414     channel_type = NL80211_CHAN_NO_HT;
5415     - } else if (local->tmp_channel) {
5416     + local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
5417     + } else if (local->tmp_channel &&
5418     + local->oper_channel != local->tmp_channel) {
5419     chan = scan_chan = local->tmp_channel;
5420     channel_type = local->tmp_channel_type;
5421     + local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
5422     } else {
5423     chan = local->oper_channel;
5424     channel_type = local->_oper_channel_type;
5425     - }
5426     -
5427     - if (chan != local->oper_channel ||
5428     - channel_type != local->_oper_channel_type)
5429     - local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
5430     - else
5431     local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
5432     + }
5433    
5434     offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
5435    
5436     @@ -279,7 +233,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
5437    
5438     if (changed & BSS_CHANGED_BEACON_ENABLED) {
5439     if (local->quiescing || !ieee80211_sdata_running(sdata) ||
5440     - test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) {
5441     + test_bit(SCAN_SW_SCANNING, &local->scanning)) {
5442     sdata->vif.bss_conf.enable_beacon = false;
5443     } else {
5444     /*
5445     diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
5446     index 3d41441..1b239be 100644
5447     --- a/net/mac80211/offchannel.c
5448     +++ b/net/mac80211/offchannel.c
5449     @@ -18,14 +18,10 @@
5450     #include "driver-trace.h"
5451    
5452     /*
5453     - * Tell our hardware to disable PS.
5454     - * Optionally inform AP that we will go to sleep so that it will buffer
5455     - * the frames while we are doing off-channel work. This is optional
5456     - * because we *may* be doing work on-operating channel, and want our
5457     - * hardware unconditionally awake, but still let the AP send us normal frames.
5458     + * inform AP that we will go to sleep so that it will buffer the frames
5459     + * while we scan
5460     */
5461     -static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
5462     - bool tell_ap)
5463     +static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
5464     {
5465     struct ieee80211_local *local = sdata->local;
5466     struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
5467     @@ -46,8 +42,8 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
5468     ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
5469     }
5470    
5471     - if (tell_ap && (!local->offchannel_ps_enabled ||
5472     - !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)))
5473     + if (!(local->offchannel_ps_enabled) ||
5474     + !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
5475     /*
5476     * If power save was enabled, no need to send a nullfunc
5477     * frame because AP knows that we are sleeping. But if the
5478     @@ -82,9 +78,6 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
5479     * we are sleeping, let's just enable power save mode in
5480     * hardware.
5481     */
5482     - /* TODO: Only set hardware if CONF_PS changed?
5483     - * TODO: Should we set offchannel_ps_enabled to false?
5484     - */
5485     local->hw.conf.flags |= IEEE80211_CONF_PS;
5486     ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
5487     } else if (local->hw.conf.dynamic_ps_timeout > 0) {
5488     @@ -103,61 +96,63 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
5489     ieee80211_sta_reset_conn_monitor(sdata);
5490     }
5491    
5492     -void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
5493     - bool offchannel_ps_enable)
5494     +void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
5495     {
5496     struct ieee80211_sub_if_data *sdata;
5497    
5498     - /*
5499     - * notify the AP about us leaving the channel and stop all
5500     - * STA interfaces.
5501     - */
5502     mutex_lock(&local->iflist_mtx);
5503     list_for_each_entry(sdata, &local->interfaces, list) {
5504     if (!ieee80211_sdata_running(sdata))
5505     continue;
5506    
5507     - if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
5508     - set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
5509     -
5510     - /* Check to see if we should disable beaconing. */
5511     + /* disable beaconing */
5512     if (sdata->vif.type == NL80211_IFTYPE_AP ||
5513     sdata->vif.type == NL80211_IFTYPE_ADHOC ||
5514     sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
5515     ieee80211_bss_info_change_notify(
5516     sdata, BSS_CHANGED_BEACON_ENABLED);
5517    
5518     - if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
5519     + /*
5520     + * only handle non-STA interfaces here, STA interfaces
5521     + * are handled in ieee80211_offchannel_stop_station(),
5522     + * e.g., from the background scan state machine.
5523     + *
5524     + * In addition, do not stop monitor interface to allow it to be
5525     + * used from user space controlled off-channel operations.
5526     + */
5527     + if (sdata->vif.type != NL80211_IFTYPE_STATION &&
5528     + sdata->vif.type != NL80211_IFTYPE_MONITOR) {
5529     + set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
5530     netif_tx_stop_all_queues(sdata->dev);
5531     - if (offchannel_ps_enable &&
5532     - (sdata->vif.type == NL80211_IFTYPE_STATION) &&
5533     - sdata->u.mgd.associated)
5534     - ieee80211_offchannel_ps_enable(sdata, true);
5535     }
5536     }
5537     mutex_unlock(&local->iflist_mtx);
5538     }
5539    
5540     -void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
5541     - bool tell_ap)
5542     +void ieee80211_offchannel_stop_station(struct ieee80211_local *local)
5543     {
5544     struct ieee80211_sub_if_data *sdata;
5545    
5546     + /*
5547     + * notify the AP about us leaving the channel and stop all STA interfaces
5548     + */
5549     mutex_lock(&local->iflist_mtx);
5550     list_for_each_entry(sdata, &local->interfaces, list) {
5551     if (!ieee80211_sdata_running(sdata))
5552     continue;
5553    
5554     - if (sdata->vif.type == NL80211_IFTYPE_STATION &&
5555     - sdata->u.mgd.associated)
5556     - ieee80211_offchannel_ps_enable(sdata, tell_ap);
5557     + if (sdata->vif.type == NL80211_IFTYPE_STATION) {
5558     + set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
5559     + netif_tx_stop_all_queues(sdata->dev);
5560     + if (sdata->u.mgd.associated)
5561     + ieee80211_offchannel_ps_enable(sdata);
5562     + }
5563     }
5564     mutex_unlock(&local->iflist_mtx);
5565     }
5566    
5567     void ieee80211_offchannel_return(struct ieee80211_local *local,
5568     - bool enable_beaconing,
5569     - bool offchannel_ps_disable)
5570     + bool enable_beaconing)
5571     {
5572     struct ieee80211_sub_if_data *sdata;
5573    
5574     @@ -167,8 +162,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
5575     continue;
5576    
5577     /* Tell AP we're back */
5578     - if (offchannel_ps_disable &&
5579     - sdata->vif.type == NL80211_IFTYPE_STATION) {
5580     + if (sdata->vif.type == NL80211_IFTYPE_STATION) {
5581     if (sdata->u.mgd.associated)
5582     ieee80211_offchannel_ps_disable(sdata);
5583     }
5584     @@ -188,7 +182,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
5585     netif_tx_wake_all_queues(sdata->dev);
5586     }
5587    
5588     - /* Check to see if we should re-enable beaconing */
5589     + /* re-enable beaconing */
5590     if (enable_beaconing &&
5591     (sdata->vif.type == NL80211_IFTYPE_AP ||
5592     sdata->vif.type == NL80211_IFTYPE_ADHOC ||
5593     diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
5594     index fb123e2..5c51607 100644
5595     --- a/net/mac80211/rx.c
5596     +++ b/net/mac80211/rx.c
5597     @@ -421,10 +421,16 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
5598     return RX_CONTINUE;
5599    
5600     if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
5601     - test_bit(SCAN_SW_SCANNING, &local->scanning) ||
5602     local->sched_scanning)
5603     return ieee80211_scan_rx(rx->sdata, skb);
5604    
5605     + if (test_bit(SCAN_SW_SCANNING, &local->scanning)) {
5606     + /* drop all the other packets during a software scan anyway */
5607     + if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
5608     + dev_kfree_skb(skb);
5609     + return RX_QUEUED;
5610     + }
5611     +
5612     /* scanning finished during invoking of handlers */
5613     I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
5614     return RX_DROP_UNUSABLE;
5615     @@ -2858,7 +2864,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
5616     local->dot11ReceivedFragmentCount++;
5617    
5618     if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
5619     - test_bit(SCAN_SW_SCANNING, &local->scanning)))
5620     + test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
5621     status->rx_flags |= IEEE80211_RX_IN_SCAN;
5622    
5623     if (ieee80211_is_mgmt(fc))
5624     diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
5625     index 105436d..5279300 100644
5626     --- a/net/mac80211/scan.c
5627     +++ b/net/mac80211/scan.c
5628     @@ -213,14 +213,6 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
5629     if (bss)
5630     ieee80211_rx_bss_put(sdata->local, bss);
5631    
5632     - /* If we are on-operating-channel, and this packet is for the
5633     - * current channel, pass the pkt on up the stack so that
5634     - * the rest of the stack can make use of it.
5635     - */
5636     - if (ieee80211_cfg_on_oper_channel(sdata->local)
5637     - && (channel == sdata->local->oper_channel))
5638     - return RX_CONTINUE;
5639     -
5640     dev_kfree_skb(skb);
5641     return RX_QUEUED;
5642     }
5643     @@ -264,8 +256,6 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
5644     bool was_hw_scan)
5645     {
5646     struct ieee80211_local *local = hw_to_local(hw);
5647     - bool on_oper_chan;
5648     - bool enable_beacons = false;
5649    
5650     lockdep_assert_held(&local->mtx);
5651    
5652     @@ -298,25 +288,11 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
5653     local->scanning = 0;
5654     local->scan_channel = NULL;
5655    
5656     - on_oper_chan = ieee80211_cfg_on_oper_channel(local);
5657     -
5658     - if (was_hw_scan || !on_oper_chan)
5659     - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
5660     - else
5661     - /* Set power back to normal operating levels. */
5662     - ieee80211_hw_config(local, 0);
5663     -
5664     + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
5665     if (!was_hw_scan) {
5666     - bool on_oper_chan2;
5667     ieee80211_configure_filter(local);
5668     drv_sw_scan_complete(local);
5669     - on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
5670     - /* We should always be on-channel at this point. */
5671     - WARN_ON(!on_oper_chan2);
5672     - if (on_oper_chan2 && (on_oper_chan != on_oper_chan2))
5673     - enable_beacons = true;
5674     -
5675     - ieee80211_offchannel_return(local, enable_beacons, true);
5676     + ieee80211_offchannel_return(local, true);
5677     }
5678    
5679     ieee80211_recalc_idle(local);
5680     @@ -357,15 +333,13 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
5681     */
5682     drv_sw_scan_start(local);
5683    
5684     + ieee80211_offchannel_stop_beaconing(local);
5685     +
5686     local->leave_oper_channel_time = 0;
5687     local->next_scan_state = SCAN_DECISION;
5688     local->scan_channel_idx = 0;
5689    
5690     - /* We always want to use off-channel PS, even if we
5691     - * are not really leaving oper-channel. Don't
5692     - * tell the AP though, as long as we are on-channel.
5693     - */
5694     - ieee80211_offchannel_enable_all_ps(local, false);
5695     + drv_flush(local, false);
5696    
5697     ieee80211_configure_filter(local);
5698    
5699     @@ -508,20 +482,7 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
5700     }
5701     mutex_unlock(&local->iflist_mtx);
5702    
5703     - next_chan = local->scan_req->channels[local->scan_channel_idx];
5704     -
5705     - if (ieee80211_cfg_on_oper_channel(local)) {
5706     - /* We're currently on operating channel. */
5707     - if (next_chan == local->oper_channel)
5708     - /* We don't need to move off of operating channel. */
5709     - local->next_scan_state = SCAN_SET_CHANNEL;
5710     - else
5711     - /*
5712     - * We do need to leave operating channel, as next
5713     - * scan is somewhere else.
5714     - */
5715     - local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
5716     - } else {
5717     + if (local->scan_channel) {
5718     /*
5719     * we're currently scanning a different channel, let's
5720     * see if we can scan another channel without interfering
5721     @@ -537,6 +498,7 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
5722     *
5723     * Otherwise switch back to the operating channel.
5724     */
5725     + next_chan = local->scan_req->channels[local->scan_channel_idx];
5726    
5727     bad_latency = time_after(jiffies +
5728     ieee80211_scan_get_channel_time(next_chan),
5729     @@ -554,6 +516,12 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
5730     local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
5731     else
5732     local->next_scan_state = SCAN_SET_CHANNEL;
5733     + } else {
5734     + /*
5735     + * we're on the operating channel currently, let's
5736     + * leave that channel now to scan another one
5737     + */
5738     + local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
5739     }
5740    
5741     *next_delay = 0;
5742     @@ -562,10 +530,9 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
5743     static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
5744     unsigned long *next_delay)
5745     {
5746     - /* PS will already be in off-channel mode,
5747     - * we do that once at the beginning of scanning.
5748     - */
5749     - ieee80211_offchannel_stop_vifs(local, false);
5750     + ieee80211_offchannel_stop_station(local);
5751     +
5752     + __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
5753    
5754     /*
5755     * What if the nullfunc frames didn't arrive?
5756     @@ -588,15 +555,15 @@ static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *loca
5757     {
5758     /* switch back to the operating channel */
5759     local->scan_channel = NULL;
5760     - if (!ieee80211_cfg_on_oper_channel(local))
5761     - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
5762     + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
5763    
5764     /*
5765     - * Re-enable vifs and beaconing. Leave PS
5766     - * in off-channel state..will put that back
5767     - * on-channel at the end of scanning.
5768     + * Only re-enable station mode interface now; beaconing will be
5769     + * re-enabled once the full scan has been completed.
5770     */
5771     - ieee80211_offchannel_return(local, true, false);
5772     + ieee80211_offchannel_return(local, false);
5773     +
5774     + __clear_bit(SCAN_OFF_CHANNEL, &local->scanning);
5775    
5776     *next_delay = HZ / 5;
5777     local->next_scan_state = SCAN_DECISION;
5778     diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
5779     index 1f8b120..eff1f4e 100644
5780     --- a/net/mac80211/tx.c
5781     +++ b/net/mac80211/tx.c
5782     @@ -259,8 +259,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
5783     if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
5784     return TX_CONTINUE;
5785    
5786     - if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
5787     - test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
5788     + if (unlikely(test_bit(SCAN_OFF_CHANNEL, &tx->local->scanning)) &&
5789     !ieee80211_is_probe_req(hdr->frame_control) &&
5790     !ieee80211_is_nullfunc(hdr->frame_control))
5791     /*
5792     diff --git a/net/mac80211/work.c b/net/mac80211/work.c
5793     index 6c53b6d..99165ef 100644
5794     --- a/net/mac80211/work.c
5795     +++ b/net/mac80211/work.c
5796     @@ -899,26 +899,6 @@ static bool ieee80211_work_ct_coexists(enum nl80211_channel_type wk_ct,
5797     return false;
5798     }
5799    
5800     -static enum nl80211_channel_type
5801     -ieee80211_calc_ct(enum nl80211_channel_type wk_ct,
5802     - enum nl80211_channel_type oper_ct)
5803     -{
5804     - switch (wk_ct) {
5805     - case NL80211_CHAN_NO_HT:
5806     - return oper_ct;
5807     - case NL80211_CHAN_HT20:
5808     - if (oper_ct != NL80211_CHAN_NO_HT)
5809     - return oper_ct;
5810     - return wk_ct;
5811     - case NL80211_CHAN_HT40MINUS:
5812     - case NL80211_CHAN_HT40PLUS:
5813     - return wk_ct;
5814     - }
5815     - WARN_ON(1); /* shouldn't get here */
5816     - return wk_ct;
5817     -}
5818     -
5819     -
5820     static void ieee80211_work_timer(unsigned long data)
5821     {
5822     struct ieee80211_local *local = (void *) data;
5823     @@ -969,52 +949,18 @@ static void ieee80211_work_work(struct work_struct *work)
5824     }
5825    
5826     if (!started && !local->tmp_channel) {
5827     - bool on_oper_chan;
5828     - bool tmp_chan_changed = false;
5829     - bool on_oper_chan2;
5830     - enum nl80211_channel_type wk_ct;
5831     - on_oper_chan = ieee80211_cfg_on_oper_channel(local);
5832     -
5833     - /* Work with existing channel type if possible. */
5834     - wk_ct = wk->chan_type;
5835     - if (wk->chan == local->hw.conf.channel)
5836     - wk_ct = ieee80211_calc_ct(wk->chan_type,
5837     - local->hw.conf.channel_type);
5838     -
5839     - if (local->tmp_channel)
5840     - if ((local->tmp_channel != wk->chan) ||
5841     - (local->tmp_channel_type != wk_ct))
5842     - tmp_chan_changed = true;
5843     -
5844     - local->tmp_channel = wk->chan;
5845     - local->tmp_channel_type = wk_ct;
5846     /*
5847     - * Leave the station vifs in awake mode if they
5848     - * happen to be on the same channel as
5849     - * the requested channel.
5850     + * TODO: could optimize this by leaving the
5851     + * station vifs in awake mode if they
5852     + * happen to be on the same channel as
5853     + * the requested channel
5854     */
5855     - on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
5856     - if (on_oper_chan != on_oper_chan2) {
5857     - if (on_oper_chan2) {
5858     - /* going off oper channel, PS too */
5859     - ieee80211_offchannel_stop_vifs(local,
5860     - true);
5861     - ieee80211_hw_config(local, 0);
5862     - } else {
5863     - /* going on channel, but leave PS
5864     - * off-channel. */
5865     - ieee80211_hw_config(local, 0);
5866     - ieee80211_offchannel_return(local,
5867     - true,
5868     - false);
5869     - }
5870     - } else if (tmp_chan_changed)
5871     - /* Still off-channel, but on some other
5872     - * channel, so update hardware.
5873     - * PS should already be off-channel.
5874     - */
5875     - ieee80211_hw_config(local, 0);
5876     + ieee80211_offchannel_stop_beaconing(local);
5877     + ieee80211_offchannel_stop_station(local);
5878    
5879     + local->tmp_channel = wk->chan;
5880     + local->tmp_channel_type = wk->chan_type;
5881     + ieee80211_hw_config(local, 0);
5882     started = true;
5883     wk->timeout = jiffies;
5884     }
5885     @@ -1100,8 +1046,7 @@ static void ieee80211_work_work(struct work_struct *work)
5886     * we still need to do a hardware config. Currently,
5887     * we cannot be here while scanning, however.
5888     */
5889     - if (!ieee80211_cfg_on_oper_channel(local))
5890     - ieee80211_hw_config(local, 0);
5891     + ieee80211_hw_config(local, 0);
5892    
5893     /* At the least, we need to disable offchannel_ps,
5894     * so just go ahead and run the entire offchannel
5895     @@ -1109,7 +1054,7 @@ static void ieee80211_work_work(struct work_struct *work)
5896     * beaconing if we were already on-oper-channel
5897     * as a future optimization.
5898     */
5899     - ieee80211_offchannel_return(local, true, true);
5900     + ieee80211_offchannel_return(local, true);
5901    
5902     /* give connection some time to breathe */
5903     run_again(local, jiffies + HZ/2);
5904     diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
5905     index f614ce7..28a39bb 100644
5906     --- a/net/mac80211/wpa.c
5907     +++ b/net/mac80211/wpa.c
5908     @@ -106,7 +106,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
5909     if (status->flag & RX_FLAG_MMIC_ERROR)
5910     goto mic_fail;
5911    
5912     - if (!(status->flag & RX_FLAG_IV_STRIPPED))
5913     + if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
5914     goto update_iv;
5915    
5916     return RX_CONTINUE;
5917     diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
5918     index 6e03888..d4ad50e 100644
5919     --- a/net/sunrpc/svc.c
5920     +++ b/net/sunrpc/svc.c
5921     @@ -167,6 +167,7 @@ svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
5922    
5923     fail_free:
5924     kfree(m->to_pool);
5925     + m->to_pool = NULL;
5926     fail:
5927     return -ENOMEM;
5928     }
5929     @@ -287,7 +288,9 @@ svc_pool_map_put(void)
5930     if (!--m->count) {
5931     m->mode = SVC_POOL_DEFAULT;
5932     kfree(m->to_pool);
5933     + m->to_pool = NULL;
5934     kfree(m->pool_to);
5935     + m->pool_to = NULL;
5936     m->npools = 0;
5937     }
5938    
5939     @@ -527,17 +530,20 @@ svc_destroy(struct svc_serv *serv)
5940     printk("svc_destroy: no threads for serv=%p!\n", serv);
5941    
5942     del_timer_sync(&serv->sv_temptimer);
5943     -
5944     - svc_close_all(&serv->sv_tempsocks);
5945     + /*
5946     + * The set of xprts (contained in the sv_tempsocks and
5947     + * sv_permsocks lists) is now constant, since it is modified
5948     + * only by accepting new sockets (done by service threads in
5949     + * svc_recv) or aging old ones (done by sv_temptimer), or
5950     + * configuration changes (excluded by whatever locking the
5951     + * caller is using--nfsd_mutex in the case of nfsd). So it's
5952     + * safe to traverse those lists and shut everything down:
5953     + */
5954     + svc_close_all(serv);
5955    
5956     if (serv->sv_shutdown)
5957     serv->sv_shutdown(serv);
5958    
5959     - svc_close_all(&serv->sv_permsocks);
5960     -
5961     - BUG_ON(!list_empty(&serv->sv_permsocks));
5962     - BUG_ON(!list_empty(&serv->sv_tempsocks));
5963     -
5964     cache_clean_deferred(serv);
5965    
5966     if (svc_serv_is_pooled(serv))
5967     diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
5968     index 447cd0e..9ed2cd0 100644
5969     --- a/net/sunrpc/svc_xprt.c
5970     +++ b/net/sunrpc/svc_xprt.c
5971     @@ -893,14 +893,7 @@ void svc_delete_xprt(struct svc_xprt *xprt)
5972     spin_lock_bh(&serv->sv_lock);
5973     if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
5974     list_del_init(&xprt->xpt_list);
5975     - /*
5976     - * The only time we're called while xpt_ready is still on a list
5977     - * is while the list itself is about to be destroyed (in
5978     - * svc_destroy). BUT svc_xprt_enqueue could still be attempting
5979     - * to add new entries to the sp_sockets list, so we can't leave
5980     - * a freed xprt on it.
5981     - */
5982     - list_del_init(&xprt->xpt_ready);
5983     + BUG_ON(!list_empty(&xprt->xpt_ready));
5984     if (test_bit(XPT_TEMP, &xprt->xpt_flags))
5985     serv->sv_tmpcnt--;
5986     spin_unlock_bh(&serv->sv_lock);
5987     @@ -928,22 +921,48 @@ void svc_close_xprt(struct svc_xprt *xprt)
5988     }
5989     EXPORT_SYMBOL_GPL(svc_close_xprt);
5990    
5991     -void svc_close_all(struct list_head *xprt_list)
5992     +static void svc_close_list(struct list_head *xprt_list)
5993     +{
5994     + struct svc_xprt *xprt;
5995     +
5996     + list_for_each_entry(xprt, xprt_list, xpt_list) {
5997     + set_bit(XPT_CLOSE, &xprt->xpt_flags);
5998     + set_bit(XPT_BUSY, &xprt->xpt_flags);
5999     + }
6000     +}
6001     +
6002     +void svc_close_all(struct svc_serv *serv)
6003     {
6004     + struct svc_pool *pool;
6005     struct svc_xprt *xprt;
6006     struct svc_xprt *tmp;
6007     + int i;
6008     +
6009     + svc_close_list(&serv->sv_tempsocks);
6010     + svc_close_list(&serv->sv_permsocks);
6011    
6012     + for (i = 0; i < serv->sv_nrpools; i++) {
6013     + pool = &serv->sv_pools[i];
6014     +
6015     + spin_lock_bh(&pool->sp_lock);
6016     + while (!list_empty(&pool->sp_sockets)) {
6017     + xprt = list_first_entry(&pool->sp_sockets, struct svc_xprt, xpt_ready);
6018     + list_del_init(&xprt->xpt_ready);
6019     + }
6020     + spin_unlock_bh(&pool->sp_lock);
6021     + }
6022     /*
6023     - * The server is shutting down, and no more threads are running.
6024     - * svc_xprt_enqueue() might still be running, but at worst it
6025     - * will re-add the xprt to sp_sockets, which will soon get
6026     - * freed. So we don't bother with any more locking, and don't
6027     - * leave the close to the (nonexistent) server threads:
6028     + * At this point the sp_sockets lists will stay empty, since
6029     + * svc_enqueue will not add new entries without taking the
6030     + * sp_lock and checking XPT_BUSY.
6031     */
6032     - list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
6033     - set_bit(XPT_CLOSE, &xprt->xpt_flags);
6034     + list_for_each_entry_safe(xprt, tmp, &serv->sv_tempsocks, xpt_list)
6035     svc_delete_xprt(xprt);
6036     - }
6037     + list_for_each_entry_safe(xprt, tmp, &serv->sv_permsocks, xpt_list)
6038     + svc_delete_xprt(xprt);
6039     +
6040     + BUG_ON(!list_empty(&serv->sv_permsocks));
6041     + BUG_ON(!list_empty(&serv->sv_tempsocks));
6042     }
6043    
6044     /*
6045     diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
6046     index 277ebd4..593f4c6 100644
6047     --- a/net/sunrpc/xdr.c
6048     +++ b/net/sunrpc/xdr.c
6049     @@ -296,7 +296,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
6050     * Copies data into an arbitrary memory location from an array of pages
6051     * The copy is assumed to be non-overlapping.
6052     */
6053     -static void
6054     +void
6055     _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
6056     {
6057     struct page **pgfrom;
6058     @@ -324,6 +324,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
6059    
6060     } while ((len -= copy) != 0);
6061     }
6062     +EXPORT_SYMBOL_GPL(_copy_from_pages);
6063    
6064     /*
6065     * xdr_shrink_bufhead
6066     diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
6067     index ec7afce..bccf07d 100644
6068     --- a/scripts/kconfig/streamline_config.pl
6069     +++ b/scripts/kconfig/streamline_config.pl
6070     @@ -250,33 +250,61 @@ if ($kconfig) {
6071     read_kconfig($kconfig);
6072     }
6073    
6074     +sub convert_vars {
6075     + my ($line, %vars) = @_;
6076     +
6077     + my $process = "";
6078     +
6079     + while ($line =~ s/^(.*?)(\$\((.*?)\))//) {
6080     + my $start = $1;
6081     + my $variable = $2;
6082     + my $var = $3;
6083     +
6084     + if (defined($vars{$var})) {
6085     + $process .= $start . $vars{$var};
6086     + } else {
6087     + $process .= $start . $variable;
6088     + }
6089     + }
6090     +
6091     + $process .= $line;
6092     +
6093     + return $process;
6094     +}
6095     +
6096     # Read all Makefiles to map the configs to the objects
6097     foreach my $makefile (@makefiles) {
6098    
6099     - my $cont = 0;
6100     + my $line = "";
6101     + my %make_vars;
6102    
6103     open(MIN,$makefile) || die "Can't open $makefile";
6104     while (<MIN>) {
6105     + # if this line ends with a backslash, continue
6106     + chomp;
6107     + if (/^(.*)\\$/) {
6108     + $line .= $1;
6109     + next;
6110     + }
6111     +
6112     + $line .= $_;
6113     + $_ = $line;
6114     + $line = "";
6115     +
6116     my $objs;
6117    
6118     - # is this a line after a line with a backslash?
6119     - if ($cont && /(\S.*)$/) {
6120     - $objs = $1;
6121     - }
6122     - $cont = 0;
6123     + $_ = convert_vars($_, %make_vars);
6124    
6125     # collect objects after obj-$(CONFIG_FOO_BAR)
6126     if (/obj-\$\((CONFIG_[^\)]*)\)\s*[+:]?=\s*(.*)/) {
6127     $var = $1;
6128     $objs = $2;
6129     +
6130     + # check if variables are set
6131     + } elsif (/^\s*(\S+)\s*[:]?=\s*(.*\S)/) {
6132     + $make_vars{$1} = $2;
6133     }
6134     if (defined($objs)) {
6135     - # test if the line ends with a backslash
6136     - if ($objs =~ m,(.*)\\$,) {
6137     - $objs = $1;
6138     - $cont = 1;
6139     - }
6140     -
6141     foreach my $obj (split /\s+/,$objs) {
6142     $obj =~ s/-/_/g;
6143     if ($obj =~ /(.*)\.o$/) {
6144     diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
6145     index f40a6af6..54e35c1 100644
6146     --- a/scripts/recordmcount.h
6147     +++ b/scripts/recordmcount.h
6148     @@ -462,7 +462,7 @@ __has_rel_mcount(Elf_Shdr const *const relhdr, /* is SHT_REL or SHT_RELA */
6149     succeed_file();
6150     }
6151     if (w(txthdr->sh_type) != SHT_PROGBITS ||
6152     - !(w(txthdr->sh_flags) & SHF_EXECINSTR))
6153     + !(_w(txthdr->sh_flags) & SHF_EXECINSTR))
6154     return NULL;
6155     return txtname;
6156     }
6157     diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
6158     index 0d50df0..88a2788 100644
6159     --- a/security/integrity/ima/ima_api.c
6160     +++ b/security/integrity/ima/ima_api.c
6161     @@ -178,8 +178,8 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
6162     strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
6163    
6164     result = ima_store_template(entry, violation, inode);
6165     - if (!result)
6166     + if (!result || result == -EEXIST)
6167     iint->flags |= IMA_MEASURED;
6168     - else
6169     + if (result < 0)
6170     kfree(entry);
6171     }
6172     diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
6173     index 8e28f04..55a6271 100644
6174     --- a/security/integrity/ima/ima_queue.c
6175     +++ b/security/integrity/ima/ima_queue.c
6176     @@ -23,6 +23,8 @@
6177     #include <linux/slab.h>
6178     #include "ima.h"
6179    
6180     +#define AUDIT_CAUSE_LEN_MAX 32
6181     +
6182     LIST_HEAD(ima_measurements); /* list of all measurements */
6183    
6184     /* key: inode (before secure-hashing a file) */
6185     @@ -94,7 +96,8 @@ static int ima_pcr_extend(const u8 *hash)
6186    
6187     result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash);
6188     if (result != 0)
6189     - pr_err("IMA: Error Communicating to TPM chip\n");
6190     + pr_err("IMA: Error Communicating to TPM chip, result: %d\n",
6191     + result);
6192     return result;
6193     }
6194    
6195     @@ -106,14 +109,16 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
6196     {
6197     u8 digest[IMA_DIGEST_SIZE];
6198     const char *audit_cause = "hash_added";
6199     + char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
6200     int audit_info = 1;
6201     - int result = 0;
6202     + int result = 0, tpmresult = 0;
6203    
6204     mutex_lock(&ima_extend_list_mutex);
6205     if (!violation) {
6206     memcpy(digest, entry->digest, sizeof digest);
6207     if (ima_lookup_digest_entry(digest)) {
6208     audit_cause = "hash_exists";
6209     + result = -EEXIST;
6210     goto out;
6211     }
6212     }
6213     @@ -128,9 +133,11 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
6214     if (violation) /* invalidate pcr */
6215     memset(digest, 0xff, sizeof digest);
6216    
6217     - result = ima_pcr_extend(digest);
6218     - if (result != 0) {
6219     - audit_cause = "TPM error";
6220     + tpmresult = ima_pcr_extend(digest);
6221     + if (tpmresult != 0) {
6222     + snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
6223     + tpmresult);
6224     + audit_cause = tpm_audit_cause;
6225     audit_info = 0;
6226     }
6227     out:
6228     diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
6229     index 4a9b4b2..867558c 100644
6230     --- a/security/tomoyo/util.c
6231     +++ b/security/tomoyo/util.c
6232     @@ -492,13 +492,13 @@ static bool tomoyo_correct_word2(const char *string, size_t len)
6233     if (d < '0' || d > '7' || e < '0' || e > '7')
6234     break;
6235     c = tomoyo_make_byte(c, d, e);
6236     - if (tomoyo_invalid(c))
6237     - continue; /* pattern is not \000 */
6238     + if (c <= ' ' || c >= 127)
6239     + continue;
6240     }
6241     goto out;
6242     } else if (in_repetition && c == '/') {
6243     goto out;
6244     - } else if (tomoyo_invalid(c)) {
6245     + } else if (c <= ' ' || c >= 127) {
6246     goto out;
6247     }
6248     }
6249     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
6250     index c2f79e6..5b2b75b 100644
6251     --- a/sound/pci/hda/hda_intel.c
6252     +++ b/sound/pci/hda/hda_intel.c
6253     @@ -2509,6 +2509,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
6254     SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
6255     SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS 1101HA", POS_FIX_LPIB),
6256     SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
6257     + SND_PCI_QUIRK(0x10de, 0xcb89, "Macbook Pro 7,1", POS_FIX_LPIB),
6258     SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
6259     SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
6260     SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
6261     diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
6262     index 618ddad..368f0c5 100644
6263     --- a/sound/pci/hda/hda_local.h
6264     +++ b/sound/pci/hda/hda_local.h
6265     @@ -487,7 +487,12 @@ static inline u32 get_wcaps(struct hda_codec *codec, hda_nid_t nid)
6266     }
6267    
6268     /* get the widget type from widget capability bits */
6269     -#define get_wcaps_type(wcaps) (((wcaps) & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT)
6270     +static inline int get_wcaps_type(unsigned int wcaps)
6271     +{
6272     + if (!wcaps)
6273     + return -1; /* invalid type */
6274     + return (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
6275     +}
6276    
6277     static inline unsigned int get_wcaps_channels(u32 wcaps)
6278     {
6279     diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
6280     index 2c981b5..254ab52 100644
6281     --- a/sound/pci/hda/hda_proc.c
6282     +++ b/sound/pci/hda/hda_proc.c
6283     @@ -54,6 +54,8 @@ static const char *get_wid_type_name(unsigned int wid_value)
6284     [AC_WID_BEEP] = "Beep Generator Widget",
6285     [AC_WID_VENDOR] = "Vendor Defined Widget",
6286     };
6287     + if (wid_value == -1)
6288     + return "UNKNOWN Widget";
6289     wid_value &= 0xf;
6290     if (names[wid_value])
6291     return names[wid_value];
6292     diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
6293     index 70a7abd..5b0a9bb 100644
6294     --- a/sound/pci/hda/patch_cirrus.c
6295     +++ b/sound/pci/hda/patch_cirrus.c
6296     @@ -920,16 +920,14 @@ static void cs_automute(struct hda_codec *codec)
6297    
6298     /* mute speakers if spdif or hp jack is plugged in */
6299     for (i = 0; i < cfg->speaker_outs; i++) {
6300     + int pin_ctl = hp_present ? 0 : PIN_OUT;
6301     + /* detect on spdif is specific to CS421x */
6302     + if (spdif_present && (spec->vendor_nid == CS421X_VENDOR_NID))
6303     + pin_ctl = 0;
6304     +
6305     nid = cfg->speaker_pins[i];
6306     snd_hda_codec_write(codec, nid, 0,
6307     - AC_VERB_SET_PIN_WIDGET_CONTROL,
6308     - hp_present ? 0 : PIN_OUT);
6309     - /* detect on spdif is specific to CS421x */
6310     - if (spec->vendor_nid == CS421X_VENDOR_NID) {
6311     - snd_hda_codec_write(codec, nid, 0,
6312     - AC_VERB_SET_PIN_WIDGET_CONTROL,
6313     - spdif_present ? 0 : PIN_OUT);
6314     - }
6315     + AC_VERB_SET_PIN_WIDGET_CONTROL, pin_ctl);
6316     }
6317     if (spec->gpio_eapd_hp) {
6318     unsigned int gpio = hp_present ?
6319     @@ -1771,30 +1769,19 @@ static int build_cs421x_output(struct hda_codec *codec)
6320     struct auto_pin_cfg *cfg = &spec->autocfg;
6321     struct snd_kcontrol *kctl;
6322     int err;
6323     - char *name = "HP/Speakers";
6324     + char *name = "Master";
6325    
6326     fix_volume_caps(codec, dac);
6327     - if (!spec->vmaster_sw) {
6328     - err = add_vmaster(codec, dac);
6329     - if (err < 0)
6330     - return err;
6331     - }
6332    
6333     err = add_mute(codec, name, 0,
6334     HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
6335     if (err < 0)
6336     return err;
6337     - err = snd_ctl_add_slave(spec->vmaster_sw, kctl);
6338     - if (err < 0)
6339     - return err;
6340    
6341     err = add_volume(codec, name, 0,
6342     HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
6343     if (err < 0)
6344     return err;
6345     - err = snd_ctl_add_slave(spec->vmaster_vol, kctl);
6346     - if (err < 0)
6347     - return err;
6348    
6349     if (cfg->speaker_outs) {
6350     err = snd_hda_ctl_add(codec, 0,
6351     diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
6352     index 0de2119..7072251 100644
6353     --- a/sound/pci/hda/patch_conexant.c
6354     +++ b/sound/pci/hda/patch_conexant.c
6355     @@ -1120,8 +1120,6 @@ static const char * const cxt5045_models[CXT5045_MODELS] = {
6356    
6357     static const struct snd_pci_quirk cxt5045_cfg_tbl[] = {
6358     SND_PCI_QUIRK(0x103c, 0x30d5, "HP 530", CXT5045_LAPTOP_HP530),
6359     - SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3000, "HP DV Series",
6360     - CXT5045_LAPTOP_HPSENSE),
6361     SND_PCI_QUIRK(0x1179, 0xff31, "Toshiba P105", CXT5045_LAPTOP_MICSENSE),
6362     SND_PCI_QUIRK(0x152d, 0x0753, "Benq R55E", CXT5045_BENQ),
6363     SND_PCI_QUIRK(0x1734, 0x10ad, "Fujitsu Si1520", CXT5045_LAPTOP_MICSENSE),
6364     diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
6365     index 616678f..f3c73a9 100644
6366     --- a/sound/pci/hda/patch_sigmatel.c
6367     +++ b/sound/pci/hda/patch_sigmatel.c
6368     @@ -1631,7 +1631,7 @@ static const struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
6369     SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
6370     "Dell Studio 1557", STAC_DELL_M6_DMIC),
6371     SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
6372     - "Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
6373     + "Dell Studio XPS 1645", STAC_DELL_M6_DMIC),
6374     SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
6375     "Dell Studio 1558", STAC_DELL_M6_DMIC),
6376     {} /* terminator */
6377     @@ -4326,6 +4326,27 @@ static void stac_store_hints(struct hda_codec *codec)
6378     }
6379     }
6380    
6381     +static void stac_issue_unsol_events(struct hda_codec *codec, int num_pins,
6382     + const hda_nid_t *pins)
6383     +{
6384     + while (num_pins--)
6385     + stac_issue_unsol_event(codec, *pins++);
6386     +}
6387     +
6388     +/* fake event to set up pins */
6389     +static void stac_fake_hp_events(struct hda_codec *codec)
6390     +{
6391     + struct sigmatel_spec *spec = codec->spec;
6392     +
6393     + if (spec->autocfg.hp_outs)
6394     + stac_issue_unsol_events(codec, spec->autocfg.hp_outs,
6395     + spec->autocfg.hp_pins);
6396     + if (spec->autocfg.line_outs &&
6397     + spec->autocfg.line_out_pins[0] != spec->autocfg.hp_pins[0])
6398     + stac_issue_unsol_events(codec, spec->autocfg.line_outs,
6399     + spec->autocfg.line_out_pins);
6400     +}
6401     +
6402     static int stac92xx_init(struct hda_codec *codec)
6403     {
6404     struct sigmatel_spec *spec = codec->spec;
6405     @@ -4376,10 +4397,7 @@ static int stac92xx_init(struct hda_codec *codec)
6406     stac92xx_auto_set_pinctl(codec, spec->autocfg.line_out_pins[0],
6407     AC_PINCTL_OUT_EN);
6408     /* fake event to set up pins */
6409     - if (cfg->hp_pins[0])
6410     - stac_issue_unsol_event(codec, cfg->hp_pins[0]);
6411     - else if (cfg->line_out_pins[0])
6412     - stac_issue_unsol_event(codec, cfg->line_out_pins[0]);
6413     + stac_fake_hp_events(codec);
6414     } else {
6415     stac92xx_auto_init_multi_out(codec);
6416     stac92xx_auto_init_hp_out(codec);
6417     @@ -5028,19 +5046,11 @@ static void stac927x_proc_hook(struct snd_info_buffer *buffer,
6418     #ifdef CONFIG_PM
6419     static int stac92xx_resume(struct hda_codec *codec)
6420     {
6421     - struct sigmatel_spec *spec = codec->spec;
6422     -
6423     stac92xx_init(codec);
6424     snd_hda_codec_resume_amp(codec);
6425     snd_hda_codec_resume_cache(codec);
6426     /* fake event to set up pins again to override cached values */
6427     - if (spec->hp_detect) {
6428     - if (spec->autocfg.hp_pins[0])
6429     - stac_issue_unsol_event(codec, spec->autocfg.hp_pins[0]);
6430     - else if (spec->autocfg.line_out_pins[0])
6431     - stac_issue_unsol_event(codec,
6432     - spec->autocfg.line_out_pins[0]);
6433     - }
6434     + stac_fake_hp_events(codec);
6435     return 0;
6436     }
6437    
6438     diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
6439     index b513762..8d69e59 100644
6440     --- a/sound/pci/hda/patch_via.c
6441     +++ b/sound/pci/hda/patch_via.c
6442     @@ -2200,7 +2200,10 @@ static int via_auto_create_loopback_switch(struct hda_codec *codec)
6443     {
6444     struct via_spec *spec = codec->spec;
6445    
6446     - if (!spec->aa_mix_nid || !spec->out_mix_path.depth)
6447     + if (!spec->aa_mix_nid)
6448     + return 0; /* no loopback switching available */
6449     + if (!(spec->out_mix_path.depth || spec->hp_mix_path.depth ||
6450     + spec->speaker_path.depth))
6451     return 0; /* no loopback switching available */
6452     if (!via_clone_control(spec, &via_aamix_ctl_enum))
6453     return -ENOMEM;
6454     diff --git a/sound/pci/ice1712/amp.c b/sound/pci/ice1712/amp.c
6455     index e328cfb..e525da2 100644
6456     --- a/sound/pci/ice1712/amp.c
6457     +++ b/sound/pci/ice1712/amp.c
6458     @@ -68,8 +68,11 @@ static int __devinit snd_vt1724_amp_init(struct snd_ice1712 *ice)
6459    
6460     static int __devinit snd_vt1724_amp_add_controls(struct snd_ice1712 *ice)
6461     {
6462     - /* we use pins 39 and 41 of the VT1616 for left and right read outputs */
6463     - snd_ac97_write_cache(ice->ac97, 0x5a, snd_ac97_read(ice->ac97, 0x5a) & ~0x8000);
6464     + if (ice->ac97)
6465     + /* we use pins 39 and 41 of the VT1616 for left and right
6466     + read outputs */
6467     + snd_ac97_write_cache(ice->ac97, 0x5a,
6468     + snd_ac97_read(ice->ac97, 0x5a) & ~0x8000);
6469     return 0;
6470     }
6471    
6472     diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c
6473     index 42d1ab1..915546a 100644
6474     --- a/sound/pci/oxygen/xonar_wm87x6.c
6475     +++ b/sound/pci/oxygen/xonar_wm87x6.c
6476     @@ -177,6 +177,7 @@ static void wm8776_registers_init(struct oxygen *chip)
6477     struct xonar_wm87x6 *data = chip->model_data;
6478    
6479     wm8776_write(chip, WM8776_RESET, 0);
6480     + wm8776_write(chip, WM8776_PHASESWAP, WM8776_PH_MASK);
6481     wm8776_write(chip, WM8776_DACCTRL1, WM8776_DZCEN |
6482     WM8776_PL_LEFT_LEFT | WM8776_PL_RIGHT_RIGHT);
6483     wm8776_write(chip, WM8776_DACMUTE, chip->dac_mute ? WM8776_DMUTE : 0);
6484     diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
6485     index 81c6ede..08dcce5 100644
6486     --- a/sound/usb/endpoint.c
6487     +++ b/sound/usb/endpoint.c
6488     @@ -17,6 +17,7 @@
6489    
6490     #include <linux/gfp.h>
6491     #include <linux/init.h>
6492     +#include <linux/ratelimit.h>
6493     #include <linux/usb.h>
6494     #include <linux/usb/audio.h>
6495    
6496     @@ -458,8 +459,8 @@ static int retire_capture_urb(struct snd_usb_substream *subs,
6497    
6498     for (i = 0; i < urb->number_of_packets; i++) {
6499     cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
6500     - if (urb->iso_frame_desc[i].status) {
6501     - snd_printd(KERN_ERR "frame %d active: %d\n", i, urb->iso_frame_desc[i].status);
6502     + if (urb->iso_frame_desc[i].status && printk_ratelimit()) {
6503     + snd_printdd("frame %d active: %d\n", i, urb->iso_frame_desc[i].status);
6504     // continue;
6505     }
6506     bytes = urb->iso_frame_desc[i].actual_length;
6507     diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
6508     index c400ade..1e7a47a 100644
6509     --- a/sound/usb/usx2y/usb_stream.c
6510     +++ b/sound/usb/usx2y/usb_stream.c
6511     @@ -674,7 +674,7 @@ dotry:
6512     inurb->transfer_buffer_length =
6513     inurb->number_of_packets *
6514     inurb->iso_frame_desc[0].length;
6515     - preempt_disable();
6516     +
6517     if (u == 0) {
6518     int now;
6519     struct usb_device *dev = inurb->dev;
6520     @@ -686,19 +686,17 @@ dotry:
6521     }
6522     err = usb_submit_urb(inurb, GFP_ATOMIC);
6523     if (err < 0) {
6524     - preempt_enable();
6525     snd_printk(KERN_ERR"usb_submit_urb(sk->inurb[%i])"
6526     " returned %i\n", u, err);
6527     return err;
6528     }
6529     err = usb_submit_urb(outurb, GFP_ATOMIC);
6530     if (err < 0) {
6531     - preempt_enable();
6532     snd_printk(KERN_ERR"usb_submit_urb(sk->outurb[%i])"
6533     " returned %i\n", u, err);
6534     return err;
6535     }
6536     - preempt_enable();
6537     +
6538     if (inurb->start_frame != outurb->start_frame) {
6539     snd_printd(KERN_DEBUG
6540     "u[%i] start_frames differ in:%u out:%u\n",