Magellan Linux

Annotation of /trunk/kernel-alx/patches-3.4/0130-3.4.31-all-fixes.patch


Revision 2110
Tue Mar 12 12:15:23 2013 UTC by niro
File size: 61527 byte(s)
-sync with upstream
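
Editorial note: the bulk of this patch replaces x86's single global "int efi_enabled" flag with a per-facility bitmask (x86_efi_facility), so callers test exactly the EFI capability they depend on (boot, config tables, runtime services, memory map). Below is a minimal userspace sketch of that pattern, using the facility constants and the efi_enabled() body from the patch itself; the set_bit()/test_bit() stand-ins are simplified substitutes for the kernel helpers, not the kernel implementation, and the boot scenario in main() is hypothetical.

#include <stdio.h>

#define EFI_BOOT             0  /* Were we booted from EFI? */
#define EFI_SYSTEM_TABLES    1  /* Can we use EFI system tables? */
#define EFI_CONFIG_TABLES    2  /* Can we use EFI config tables? */
#define EFI_RUNTIME_SERVICES 3  /* Can we use runtime services? */
#define EFI_MEMMAP           4  /* Can we use EFI memory map? */
#define EFI_64BIT            5  /* Is the firmware 64-bit? */

static unsigned long x86_efi_facility;

/* Simplified stand-ins for the kernel's set_bit()/test_bit() bitops. */
static void set_bit(int nr, unsigned long *addr)        { *addr |= 1UL << nr; }
static int  test_bit(int nr, const unsigned long *addr) { return (*addr >> nr) & 1; }

/* Returns 1 if 'facility' is enabled, 0 otherwise (as in the patch). */
int efi_enabled(int facility)
{
	return test_bit(facility, &x86_efi_facility) != 0;
}

int main(void)
{
	/* Hypothetical boot: EFI detected, but runtime services never came up. */
	set_bit(EFI_BOOT, &x86_efi_facility);
	set_bit(EFI_CONFIG_TABLES, &x86_efi_facility);

	printf("config tables: %d\n", efi_enabled(EFI_CONFIG_TABLES));    /* 1 */
	printf("runtime svcs:  %d\n", efi_enabled(EFI_RUNTIME_SERVICES)); /* 0 */
	return 0;
}

With this split, losing one facility (for example, runtime services failing to initialize) no longer forces the whole EFI path off, which is why the patch replaces the various "efi_enabled = 0" assignments with per-facility set_bit()/clear_bit() calls.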
diff --git a/MAINTAINERS b/MAINTAINERS
index a60009d..c744d9c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2627,7 +2627,7 @@ S: Maintained
F: drivers/net/ethernet/i825xx/eexpress.*

ETHERNET BRIDGE
-M: Stephen Hemminger <shemminger@vyatta.com>
+M: Stephen Hemminger <stephen@networkplumber.org>
L: bridge@lists.linux-foundation.org
L: netdev@vger.kernel.org
W: http://www.linuxfoundation.org/en/Net:Bridge
@@ -4312,7 +4312,7 @@ S: Maintained

MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
M: Mirko Lindner <mlindner@marvell.com>
-M: Stephen Hemminger <shemminger@vyatta.com>
+M: Stephen Hemminger <stephen@networkplumber.org>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/marvell/sk*
@@ -4563,7 +4563,7 @@ S: Supported
F: drivers/infiniband/hw/nes/

NETEM NETWORK EMULATOR
-M: Stephen Hemminger <shemminger@vyatta.com>
+M: Stephen Hemminger <stephen@networkplumber.org>
L: netem@lists.linux-foundation.org
S: Maintained
F: net/sched/sch_netem.c
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 029189d..da37433 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -94,6 +94,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
#endif /* CONFIG_X86_32 */

extern int add_efi_memmap;
+extern unsigned long x86_efi_facility;
extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int efi_memblock_x86_reserve_range(void);
extern void efi_call_phys_prelog(void);
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 3034ee5..df1b604 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -619,7 +619,7 @@ static void native_machine_emergency_restart(void)
break;

case BOOT_EFI:
- if (efi_enabled)
+ if (efi_enabled(EFI_RUNTIME_SERVICES))
efi.reset_system(reboot_mode ?
EFI_RESET_WARM :
EFI_RESET_COLD,
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b71e4a5..537dc03 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -818,15 +818,15 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_EFI
if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
"EL32", 4)) {
- efi_enabled = 1;
- efi_64bit = false;
+ set_bit(EFI_BOOT, &x86_efi_facility);
} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
"EL64", 4)) {
- efi_enabled = 1;
- efi_64bit = true;
+ set_bit(EFI_BOOT, &x86_efi_facility);
+ set_bit(EFI_64BIT, &x86_efi_facility);
}
- if (efi_enabled && efi_memblock_x86_reserve_range())
- efi_enabled = 0;
+
+ if (efi_enabled(EFI_BOOT))
+ efi_memblock_x86_reserve_range();
#endif

x86_init.oem.arch_setup();
@@ -899,7 +899,7 @@ void __init setup_arch(char **cmdline_p)

finish_e820_parsing();

- if (efi_enabled)
+ if (efi_enabled(EFI_BOOT))
efi_init();

dmi_scan_machine();
@@ -982,7 +982,7 @@ void __init setup_arch(char **cmdline_p)
* The EFI specification says that boot service code won't be called
* after ExitBootServices(). This is, in fact, a lie.
*/
- if (efi_enabled)
+ if (efi_enabled(EFI_MEMMAP))
efi_reserve_boot_services();

/* preallocate 4k for mptable mpc */
@@ -1119,7 +1119,7 @@ void __init setup_arch(char **cmdline_p)

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
- if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
+ if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
conswitchp = &dummy_con;
@@ -1136,14 +1136,14 @@ void __init setup_arch(char **cmdline_p)
arch_init_ideal_nops();

#ifdef CONFIG_EFI
- /* Once setup is done above, disable efi_enabled on mismatched
- * firmware/kernel archtectures since there is no support for
- * runtime services.
+ /* Once setup is done above, unmap the EFI memory map on
+ * mismatched firmware/kernel archtectures since there is no
+ * support for runtime services.
*/
- if (efi_enabled && IS_ENABLED(CONFIG_X86_64) != efi_64bit) {
+ if (efi_enabled(EFI_BOOT) &&
+ IS_ENABLED(CONFIG_X86_64) != efi_enabled(EFI_64BIT)) {
pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
efi_unmap_memmap();
- efi_enabled = 0;
}
#endif
}
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 6825327..6fcd4ad 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -50,9 +50,6 @@

#define EFI_DEBUG 1

-int efi_enabled;
-EXPORT_SYMBOL(efi_enabled);
-
struct efi __read_mostly efi = {
.mps = EFI_INVALID_TABLE_ADDR,
.acpi = EFI_INVALID_TABLE_ADDR,
@@ -68,19 +65,28 @@ EXPORT_SYMBOL(efi);

struct efi_memory_map memmap;

-bool efi_64bit;
-
static struct efi efi_phys __initdata;
static efi_system_table_t efi_systab __initdata;

static inline bool efi_is_native(void)
{
- return IS_ENABLED(CONFIG_X86_64) == efi_64bit;
+ return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
+}
+
+unsigned long x86_efi_facility;
+
+/*
+ * Returns 1 if 'facility' is enabled, 0 otherwise.
+ */
+int efi_enabled(int facility)
+{
+ return test_bit(facility, &x86_efi_facility) != 0;
}
+EXPORT_SYMBOL(efi_enabled);

static int __init setup_noefi(char *arg)
{
- efi_enabled = 0;
+ clear_bit(EFI_BOOT, &x86_efi_facility);
return 0;
}
early_param("noefi", setup_noefi);
@@ -425,6 +431,7 @@ void __init efi_reserve_boot_services(void)

void __init efi_unmap_memmap(void)
{
+ clear_bit(EFI_MEMMAP, &x86_efi_facility);
if (memmap.map) {
early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
memmap.map = NULL;
@@ -459,7 +466,7 @@ void __init efi_free_boot_services(void)

static int __init efi_systab_init(void *phys)
{
- if (efi_64bit) {
+ if (efi_enabled(EFI_64BIT)) {
efi_system_table_64_t *systab64;
u64 tmp = 0;

@@ -551,7 +558,7 @@ static int __init efi_config_init(u64 tables, int nr_tables)
void *config_tables, *tablep;
int i, sz;

- if (efi_64bit)
+ if (efi_enabled(EFI_64BIT))
sz = sizeof(efi_config_table_64_t);
else
sz = sizeof(efi_config_table_32_t);
@@ -571,7 +578,7 @@ static int __init efi_config_init(u64 tables, int nr_tables)
efi_guid_t guid;
unsigned long table;

- if (efi_64bit) {
+ if (efi_enabled(EFI_64BIT)) {
u64 table64;
guid = ((efi_config_table_64_t *)tablep)->guid;
table64 = ((efi_config_table_64_t *)tablep)->table;
@@ -683,7 +690,6 @@ void __init efi_init(void)
if (boot_params.efi_info.efi_systab_hi ||
boot_params.efi_info.efi_memmap_hi) {
pr_info("Table located above 4GB, disabling EFI.\n");
- efi_enabled = 0;
return;
}
efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
@@ -693,10 +699,10 @@ void __init efi_init(void)
((__u64)boot_params.efi_info.efi_systab_hi<<32));
#endif

- if (efi_systab_init(efi_phys.systab)) {
- efi_enabled = 0;
+ if (efi_systab_init(efi_phys.systab))
return;
- }
+
+ set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);

/*
* Show what we know for posterity
@@ -714,10 +720,10 @@ void __init efi_init(void)
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff, vendor);

- if (efi_config_init(efi.systab->tables, efi.systab->nr_tables)) {
- efi_enabled = 0;
+ if (efi_config_init(efi.systab->tables, efi.systab->nr_tables))
return;
- }
+
+ set_bit(EFI_CONFIG_TABLES, &x86_efi_facility);

/*
* Note: We currently don't support runtime services on an EFI
@@ -726,15 +732,17 @@ void __init efi_init(void)

if (!efi_is_native())
pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
- else if (efi_runtime_init()) {
- efi_enabled = 0;
- return;
+ else {
+ if (efi_runtime_init())
+ return;
+ set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
}

- if (efi_memmap_init()) {
- efi_enabled = 0;
+ if (efi_memmap_init())
return;
- }
+
+ set_bit(EFI_MEMMAP, &x86_efi_facility);
+
#ifdef CONFIG_X86_32
if (efi_is_native()) {
x86_platform.get_wallclock = efi_get_time;
@@ -943,6 +951,9 @@ u64 efi_mem_attributes(unsigned long phys_addr)
efi_memory_desc_t *md;
void *p;

+ if (!efi_enabled(EFI_MEMMAP))
+ return 0;
+
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
if ((md->phys_addr <= phys_addr) &&
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c3881b2..f48720c 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -250,7 +250,7 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
return acpi_rsdp;
#endif

- if (efi_enabled) {
+ if (efi_enabled(EFI_CONFIG_TABLES)) {
if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
return efi.acpi20;
else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
index 6a0955e..53ecac5 100644
--- a/drivers/atm/iphase.h
+++ b/drivers/atm/iphase.h
@@ -636,82 +636,82 @@ struct rx_buf_desc {
#define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE
#define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE

-typedef volatile u_int freg_t;
+typedef volatile u_int ffreg_t;
typedef u_int rreg_t;

typedef struct _ffredn_t {
- freg_t idlehead_high; /* Idle cell header (high) */
- freg_t idlehead_low; /* Idle cell header (low) */
- freg_t maxrate; /* Maximum rate */
- freg_t stparms; /* Traffic Management Parameters */
- freg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
- freg_t rm_type; /* */
- u_int filler5[0x17 - 0x06];
- freg_t cmd_reg; /* Command register */
- u_int filler18[0x20 - 0x18];
- freg_t cbr_base; /* CBR Pointer Base */
- freg_t vbr_base; /* VBR Pointer Base */
- freg_t abr_base; /* ABR Pointer Base */
- freg_t ubr_base; /* UBR Pointer Base */
- u_int filler24;
- freg_t vbrwq_base; /* VBR Wait Queue Base */
- freg_t abrwq_base; /* ABR Wait Queue Base */
- freg_t ubrwq_base; /* UBR Wait Queue Base */
- freg_t vct_base; /* Main VC Table Base */
- freg_t vcte_base; /* Extended Main VC Table Base */
- u_int filler2a[0x2C - 0x2A];
- freg_t cbr_tab_beg; /* CBR Table Begin */
- freg_t cbr_tab_end; /* CBR Table End */
- freg_t cbr_pointer; /* CBR Pointer */
- u_int filler2f[0x30 - 0x2F];
- freg_t prq_st_adr; /* Packet Ready Queue Start Address */
- freg_t prq_ed_adr; /* Packet Ready Queue End Address */
- freg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
- freg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
- freg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
- freg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
- freg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
- freg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
- u_int filler38[0x40 - 0x38];
- freg_t queue_base; /* Base address for PRQ and TCQ */
- freg_t desc_base; /* Base address of descriptor table */
- u_int filler42[0x45 - 0x42];
- freg_t mode_reg_0; /* Mode register 0 */
- freg_t mode_reg_1; /* Mode register 1 */
- freg_t intr_status_reg;/* Interrupt Status register */
- freg_t mask_reg; /* Mask Register */
- freg_t cell_ctr_high1; /* Total cell transfer count (high) */
- freg_t cell_ctr_lo1; /* Total cell transfer count (low) */
- freg_t state_reg; /* Status register */
- u_int filler4c[0x58 - 0x4c];
- freg_t curr_desc_num; /* Contains the current descriptor num */
- freg_t next_desc; /* Next descriptor */
- freg_t next_vc; /* Next VC */
- u_int filler5b[0x5d - 0x5b];
- freg_t present_slot_cnt;/* Present slot count */
- u_int filler5e[0x6a - 0x5e];
- freg_t new_desc_num; /* New descriptor number */
- freg_t new_vc; /* New VC */
- freg_t sched_tbl_ptr; /* Schedule table pointer */
- freg_t vbrwq_wptr; /* VBR wait queue write pointer */
- freg_t vbrwq_rptr; /* VBR wait queue read pointer */
- freg_t abrwq_wptr; /* ABR wait queue write pointer */
- freg_t abrwq_rptr; /* ABR wait queue read pointer */
- freg_t ubrwq_wptr; /* UBR wait queue write pointer */
- freg_t ubrwq_rptr; /* UBR wait queue read pointer */
- freg_t cbr_vc; /* CBR VC */
- freg_t vbr_sb_vc; /* VBR SB VC */
- freg_t abr_sb_vc; /* ABR SB VC */
- freg_t ubr_sb_vc; /* UBR SB VC */
- freg_t vbr_next_link; /* VBR next link */
- freg_t abr_next_link; /* ABR next link */
- freg_t ubr_next_link; /* UBR next link */
- u_int filler7a[0x7c-0x7a];
- freg_t out_rate_head; /* Out of rate head */
- u_int filler7d[0xca-0x7d]; /* pad out to full address space */
- freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
- freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
- u_int fillercc[0x100-0xcc]; /* pad out to full address space */
+ ffreg_t idlehead_high; /* Idle cell header (high) */
+ ffreg_t idlehead_low; /* Idle cell header (low) */
+ ffreg_t maxrate; /* Maximum rate */
+ ffreg_t stparms; /* Traffic Management Parameters */
+ ffreg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
+ ffreg_t rm_type; /* */
+ u_int filler5[0x17 - 0x06];
+ ffreg_t cmd_reg; /* Command register */
+ u_int filler18[0x20 - 0x18];
+ ffreg_t cbr_base; /* CBR Pointer Base */
+ ffreg_t vbr_base; /* VBR Pointer Base */
+ ffreg_t abr_base; /* ABR Pointer Base */
+ ffreg_t ubr_base; /* UBR Pointer Base */
+ u_int filler24;
+ ffreg_t vbrwq_base; /* VBR Wait Queue Base */
+ ffreg_t abrwq_base; /* ABR Wait Queue Base */
+ ffreg_t ubrwq_base; /* UBR Wait Queue Base */
+ ffreg_t vct_base; /* Main VC Table Base */
+ ffreg_t vcte_base; /* Extended Main VC Table Base */
+ u_int filler2a[0x2C - 0x2A];
+ ffreg_t cbr_tab_beg; /* CBR Table Begin */
+ ffreg_t cbr_tab_end; /* CBR Table End */
+ ffreg_t cbr_pointer; /* CBR Pointer */
+ u_int filler2f[0x30 - 0x2F];
+ ffreg_t prq_st_adr; /* Packet Ready Queue Start Address */
+ ffreg_t prq_ed_adr; /* Packet Ready Queue End Address */
+ ffreg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
+ ffreg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
+ ffreg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
+ ffreg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
+ ffreg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
+ ffreg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
+ u_int filler38[0x40 - 0x38];
+ ffreg_t queue_base; /* Base address for PRQ and TCQ */
+ ffreg_t desc_base; /* Base address of descriptor table */
+ u_int filler42[0x45 - 0x42];
+ ffreg_t mode_reg_0; /* Mode register 0 */
+ ffreg_t mode_reg_1; /* Mode register 1 */
+ ffreg_t intr_status_reg;/* Interrupt Status register */
+ ffreg_t mask_reg; /* Mask Register */
+ ffreg_t cell_ctr_high1; /* Total cell transfer count (high) */
+ ffreg_t cell_ctr_lo1; /* Total cell transfer count (low) */
+ ffreg_t state_reg; /* Status register */
+ u_int filler4c[0x58 - 0x4c];
+ ffreg_t curr_desc_num; /* Contains the current descriptor num */
+ ffreg_t next_desc; /* Next descriptor */
+ ffreg_t next_vc; /* Next VC */
+ u_int filler5b[0x5d - 0x5b];
+ ffreg_t present_slot_cnt;/* Present slot count */
+ u_int filler5e[0x6a - 0x5e];
+ ffreg_t new_desc_num; /* New descriptor number */
+ ffreg_t new_vc; /* New VC */
+ ffreg_t sched_tbl_ptr; /* Schedule table pointer */
+ ffreg_t vbrwq_wptr; /* VBR wait queue write pointer */
+ ffreg_t vbrwq_rptr; /* VBR wait queue read pointer */
+ ffreg_t abrwq_wptr; /* ABR wait queue write pointer */
+ ffreg_t abrwq_rptr; /* ABR wait queue read pointer */
+ ffreg_t ubrwq_wptr; /* UBR wait queue write pointer */
+ ffreg_t ubrwq_rptr; /* UBR wait queue read pointer */
+ ffreg_t cbr_vc; /* CBR VC */
+ ffreg_t vbr_sb_vc; /* VBR SB VC */
+ ffreg_t abr_sb_vc; /* ABR SB VC */
+ ffreg_t ubr_sb_vc; /* UBR SB VC */
+ ffreg_t vbr_next_link; /* VBR next link */
+ ffreg_t abr_next_link; /* ABR next link */
+ ffreg_t ubr_next_link; /* UBR next link */
+ u_int filler7a[0x7c-0x7a];
+ ffreg_t out_rate_head; /* Out of rate head */
+ u_int filler7d[0xca-0x7d]; /* pad out to full address space */
+ ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
+ ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
+ u_int fillercc[0x100-0xcc]; /* pad out to full address space */
} ffredn_t;

typedef struct _rfredn_t {
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index cdf2f54..f77e341 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1808,7 +1808,8 @@ static void virtcons_remove(struct virtio_device *vdev)
/* Disable interrupts for vqs */
vdev->config->reset(vdev);
/* Finish up work that's lined up */
- cancel_work_sync(&portdev->control_work);
+ if (use_multiport(portdev))
+ cancel_work_sync(&portdev->control_work);

list_for_each_entry_safe(port, port2, &portdev->ports, list)
unplug_port(port);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index fd3ae62..982f1f5 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -471,7 +471,7 @@ void __init dmi_scan_machine(void)
char __iomem *p, *q;
int rc;

- if (efi_enabled) {
+ if (efi_enabled(EFI_CONFIG_TABLES)) {
if (efi.smbios == EFI_INVALID_TABLE_ADDR)
goto error;

diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index d10c987..bfd8f43 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -1224,7 +1224,7 @@ efivars_init(void)
printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
EFIVARS_DATE);

- if (!efi_enabled)
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
return 0;

/* For now we'll register the efi directory at /sys/firmware/efi */
@@ -1262,7 +1262,7 @@ err_put:
static void __exit
efivars_exit(void)
{
- if (efi_enabled) {
+ if (efi_enabled(EFI_RUNTIME_SERVICES)) {
unregister_efivars(&__efivars);
kobject_put(efi_kobj);
}
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index 4da4eb9..2224f1d 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -99,7 +99,7 @@ unsigned long __init find_ibft_region(unsigned long *sizep)
/* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
* only use ACPI for this */

- if (!efi_enabled)
+ if (!efi_enabled(EFI_BOOT))
find_ibft_in_mem();

if (ibft_addr) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index de5e0b5..68c89db 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -358,7 +358,8 @@ bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;

- if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
+ if (efi_enabled(EFI_BOOT) &&
+ rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
return false;

/* first check CRTCs */
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 579aa02..be22d5e 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -264,6 +264,8 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag,
CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
CAPIMSG_CONTROL(data));
l -= 12;
+ if (l <= 0)
+ return;
dbgline = kmalloc(3 * l, GFP_ATOMIC);
if (!dbgline)
return;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 689d2a1..e143d8c 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1136,14 +1136,26 @@ static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

-#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
- MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
- MII_TG3_AUXCTL_ACTL_TX_6DB)
+static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
+{
+ u32 val;
+ int err;

-#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
- MII_TG3_AUXCTL_ACTL_TX_6DB);
+ err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
+
+ if (err)
+ return err;
+
+ if (enable)
+ val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
+ else
+ val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
+
+ err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
+ val | MII_TG3_AUXCTL_ACTL_TX_6DB);
+
+ return err;
+}

static int tg3_bmcr_reset(struct tg3 *tp)
{
@@ -2076,7 +2088,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)

otp = tp->phy_otp;

- if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
+ if (tg3_phy_toggle_auxctl_smdsp(tp, true))
return;

phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
@@ -2101,7 +2113,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}

static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
@@ -2137,9 +2149,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)

if (!tp->setlpicnt) {
if (current_link_up == 1 &&
- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}

val = tr32(TG3_CPMU_EEE_MODE);
@@ -2155,11 +2167,11 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
tg3_flag(tp, 57765_CLASS)) &&
- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
val = MII_TG3_DSP_TAP26_ALNOKO |
MII_TG3_DSP_TAP26_RMRXSTO;
tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}

val = tr32(TG3_CPMU_EEE_MODE);
@@ -2303,7 +2315,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
tg3_writephy(tp, MII_CTRL1000,
CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
if (err)
return err;

@@ -2324,7 +2336,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);

tg3_writephy(tp, MII_CTRL1000, phy9_orig);

@@ -2413,10 +2425,10 @@ static int tg3_phy_reset(struct tg3 *tp)

out:
if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_phydsp_write(tp, 0x201f, 0x2aaa);
tg3_phydsp_write(tp, 0x000a, 0x0323);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}

if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
@@ -2425,14 +2437,14 @@ out:
}

if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_phydsp_write(tp, 0x000a, 0x310b);
tg3_phydsp_write(tp, 0x201f, 0x9506);
tg3_phydsp_write(tp, 0x401f, 0x14e2);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
@@ -2441,7 +2453,7 @@ out:
} else
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
}

@@ -3858,7 +3870,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
tw32(TG3_CPMU_EEE_MODE,
tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
if (!err) {
u32 err2;

@@ -3891,7 +3903,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
MII_TG3_DSP_CH34TP2_HIBW01);
}

- err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
if (!err)
err = err2;
}
@@ -6574,6 +6586,9 @@ static void tg3_poll_controller(struct net_device *dev)
int i;
struct tg3 *tp = netdev_priv(dev);

+ if (tg3_irq_sync(tp))
+ return;
+
for (i = 0; i < tp->irq_cnt; i++)
tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
@@ -15529,6 +15544,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tp->pm_cap = pm_cap;
tp->rx_mode = TG3_DEF_RX_MODE;
tp->tx_mode = TG3_DEF_TX_MODE;
+ tp->irq_sync = 1;

if (tg3_debug > 0)
tp->msg_enable = tg3_debug;
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 11f667f..4ebbe6f 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -547,6 +547,10 @@ static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
return -1;
}

+ /* All frames should fit into a single buffer */
+ if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
+ return -1;
+
/* Check if packet has checksum already */
if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
!(ext_status & RXDESC_IP_PAYLOAD_MASK))
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 9576ac0..dcb02c2 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -536,6 +536,11 @@ static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
}

+static inline bool is_ipv4_pkt(struct sk_buff *skb)
+{
+ return skb->protocol == ntohs(ETH_P_IP) && ip_hdr(skb)->version == 4;
+}
+
static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
u32 addr;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 1bbf6b3..ef1f940 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -571,6 +571,11 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
return vlan_tag;
}

+static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
+{
+ return vlan_tx_tag_present(skb) || adapter->pvid;
+}
+
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
@@ -698,33 +703,56 @@ dma_err:
return 0;
}

+static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
+ struct sk_buff *skb)
+{
+ u16 vlan_tag = 0;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return skb;
+
+ if (vlan_tx_tag_present(skb)) {
+ vlan_tag = be_get_tx_vlan_tag(adapter, skb);
+ __vlan_put_tag(skb, vlan_tag);
+ skb->vlan_tci = 0;
+ }
+
+ return skb;
+}
+
static netdev_tx_t be_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
struct be_queue_info *txq = &txo->q;
+ struct iphdr *ip = NULL;
u32 wrb_cnt = 0, copied = 0;
- u32 start = txq->head;
+ u32 start = txq->head, eth_hdr_len;
bool dummy_wrb, stopped = false;

- /* For vlan tagged pkts, BE
- * 1) calculates checksum even when CSO is not requested
- * 2) calculates checksum wrongly for padded pkt less than
- * 60 bytes long.
- * As a workaround disable TX vlan offloading in such cases.
+ eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
+ VLAN_ETH_HLEN : ETH_HLEN;
+
+ /* HW has a bug which considers padding bytes as legal
+ * and modifies the IPv4 hdr's 'tot_len' field
*/
- if (unlikely(vlan_tx_tag_present(skb) &&
- (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (unlikely(!skb))
- goto tx_drop;
+ if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
+ is_ipv4_pkt(skb)) {
+ ip = (struct iphdr *)ip_hdr(skb);
+ pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
+ }

- skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
+ /* HW has a bug wherein it will calculate CSUM for VLAN
+ * pkts even though it is disabled.
+ * Manually insert VLAN in pkt.
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL &&
+ be_vlan_tag_chk(adapter, skb)) {
+ skb = be_insert_vlan_in_pkt(adapter, skb);
if (unlikely(!skb))
goto tx_drop;
-
- skb->vlan_tci = 0;
}

wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1796824..efa3a13 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -683,10 +683,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->tx_csum++;
}

- /* Copy dst mac address to wqe */
- ethh = (struct ethhdr *)skb->data;
- tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
- tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
+ if (mlx4_is_mfunc(mdev->dev) || priv->validate_loopback) {
+ /* Copy dst mac address to wqe. This allows loopback in eSwitch,
+ * so that VFs and PF can communicate with each other
+ */
+ ethh = (struct ethhdr *)skb->data;
+ tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
+ tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
+ }
+
/* Handle LSO (TSO) packets */
if (lso_header_size) {
/* Mark opcode as LSO */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 8bb05b4..1995cb0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1526,15 +1526,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
int i;

if (msi_x) {
- /* In multifunction mode each function gets 2 msi-X vectors
- * one for data path completions anf the other for asynch events
- * or command completions */
- if (mlx4_is_mfunc(dev)) {
- nreq = 2;
- } else {
- nreq = min_t(int, dev->caps.num_eqs -
- dev->caps.reserved_eqs, nreq);
- }
+ nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
+ nreq);

entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
if (!entries)
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 718b274..83538cc 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -144,7 +144,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
buffrag->length, PCI_DMA_TODEVICE);
buffrag->dma = 0ULL;
}
- for (j = 0; j < cmd_buf->frag_count; j++) {
+ for (j = 1; j < cmd_buf->frag_count; j++) {
buffrag++;
if (buffrag->dma) {
pci_unmap_page(adapter->pdev, buffrag->dma,
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 22b399a..7ee9c74 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1956,10 +1956,12 @@ unwind:
while (--i >= 0) {
nf = &pbuf->frag_array[i+1];
pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ nf->dma = 0ULL;
}

nf = &pbuf->frag_array[0];
pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+ nf->dma = 0ULL;

out_err:
return -ENOMEM;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 06ee243..df49ce2 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5450,13 +5450,6 @@ process_pkt:
tp->rx_stats.bytes += pkt_size;
u64_stats_update_end(&tp->rx_stats.syncp);
}
-
- /* Work around for AMD plateform. */
- if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
- (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
- desc->opts2 = 0;
- cur_rx++;
- }
}

count = cur_rx - tp->cur_rx;
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index fcfa01f..4c76db4 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1802,7 +1802,7 @@ static void rhine_tx(struct net_device *dev)
rp->tx_skbuff[entry]->len,
PCI_DMA_TODEVICE);
}
- dev_kfree_skb_irq(rp->tx_skbuff[entry]);
+ dev_kfree_skb(rp->tx_skbuff[entry]);
rp->tx_skbuff[entry] = NULL;
entry = (++rp->dirty_tx) % TX_RING_SIZE;
}
@@ -2011,11 +2011,7 @@ static void rhine_slow_event_task(struct work_struct *work)
if (intr_status & IntrPCIErr)
netif_warn(rp, hw, dev, "PCI error\n");

- napi_disable(&rp->napi);
- rhine_irq_disable(rp);
- /* Slow and safe. Consider __napi_schedule as a replacement ? */
- napi_enable(&rp->napi);
- napi_schedule(&rp->napi);
+ iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);

out_unlock:
mutex_unlock(&rp->task_lock);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 32eb94e..a3d4707 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -77,6 +77,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,

skb_orphan(skb);

+ /* Before queueing this packet to netif_rx(),
+ * make sure dst is refcounted.
+ */
+ skb_dst_force(skb);
+
skb->protocol = eth_type_trans(skb, dev);

/* it's OK to use per_cpu_ptr() because BHs are off */
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index e54488d..18d9eb3a 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -980,7 +980,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
is_tx ? "Tx" : "Rx");

if (is_tx) {
- rtl_lps_leave(hw);
+ schedule_work(&rtlpriv->
+ works.lps_leave_work);
ppsc->last_delaylps_stamp_jiffies =
jiffies;
}
@@ -990,7 +991,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
}
} else if (ETH_P_ARP == ether_type) {
if (is_tx) {
- rtl_lps_leave(hw);
+ schedule_work(&rtlpriv->works.lps_leave_work);
ppsc->last_delaylps_stamp_jiffies = jiffies;
}

@@ -1000,7 +1001,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
"802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");

if (is_tx) {
- rtl_lps_leave(hw);
+ schedule_work(&rtlpriv->works.lps_leave_work);
ppsc->last_delaylps_stamp_jiffies = jiffies;
}

diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 8fa144f..17cd028 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -542,8 +542,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
WARN_ON(skb_queue_empty(&rx_queue));
while (!skb_queue_empty(&rx_queue)) {
_skb = skb_dequeue(&rx_queue);
- _rtl_usb_rx_process_agg(hw, skb);
- ieee80211_rx_irqsafe(hw, skb);
+ _rtl_usb_rx_process_agg(hw, _skb);
+ ieee80211_rx_irqsafe(hw, _skb);
}
}

diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 94b79c3..9d7f172 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
/* Notify xenvif that ring now has space to send an skb to the frontend */
void xenvif_notify_tx_completion(struct xenvif *vif);

+/* Prevent the device from generating any further traffic. */
+void xenvif_carrier_off(struct xenvif *vif);
+
/* Returns number of ring slots required to send an skb to the frontend */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);

diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b7d41f8..b8c5193 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -343,17 +343,22 @@ err:
return err;
}

-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_carrier_off(struct xenvif *vif)
{
struct net_device *dev = vif->dev;
- if (netif_carrier_ok(dev)) {
- rtnl_lock();
- netif_carrier_off(dev); /* discard queued packets */
- if (netif_running(dev))
- xenvif_down(vif);
- rtnl_unlock();
- xenvif_put(vif);
- }
+
+ rtnl_lock();
+ netif_carrier_off(dev); /* discard queued packets */
+ if (netif_running(dev))
+ xenvif_down(vif);
+ rtnl_unlock();
+ xenvif_put(vif);
+}
+
+void xenvif_disconnect(struct xenvif *vif)
+{
+ if (netif_carrier_ok(vif->dev))
+ xenvif_carrier_off(vif);

atomic_dec(&vif->refcnt);
wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 2596401..e2793d0 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -146,7 +146,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
atomic_dec(&netbk->netfront_count);
}

-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+ u8 status);
static void make_tx_response(struct xenvif *vif,
struct xen_netif_tx_request *txp,
s8 st);
@@ -851,7 +852,7 @@ static void netbk_tx_err(struct xenvif *vif,

do {
make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
- if (cons >= end)
+ if (cons == end)
break;
txp = RING_GET_REQUEST(&vif->tx, cons++);
} while (1);
@@ -860,6 +861,13 @@ static void netbk_tx_err(struct xenvif *vif,
xenvif_put(vif);
}

+static void netbk_fatal_tx_err(struct xenvif *vif)
+{
+ netdev_err(vif->dev, "fatal error; disabling device\n");
+ xenvif_carrier_off(vif);
+ xenvif_put(vif);
+}
+
static int netbk_count_requests(struct xenvif *vif,
struct xen_netif_tx_request *first,
struct xen_netif_tx_request *txp,
@@ -873,19 +881,22 @@ static int netbk_count_requests(struct xenvif *vif,

do {
if (frags >= work_to_do) {
- netdev_dbg(vif->dev, "Need more frags\n");
+ netdev_err(vif->dev, "Need more frags\n");
+ netbk_fatal_tx_err(vif);
return -frags;
}

if (unlikely(frags >= MAX_SKB_FRAGS)) {
- netdev_dbg(vif->dev, "Too many frags\n");
+ netdev_err(vif->dev, "Too many frags\n");
+ netbk_fatal_tx_err(vif);
return -frags;
}

memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
sizeof(*txp));
if (txp->size > first->size) {
- netdev_dbg(vif->dev, "Frags galore\n");
+ netdev_err(vif->dev, "Frag is bigger than frame.\n");
+ netbk_fatal_tx_err(vif);
return -frags;
}

@@ -893,8 +904,9 @@ static int netbk_count_requests(struct xenvif *vif,
frags++;

if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
- netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
+ netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
txp->offset, txp->size);
+ netbk_fatal_tx_err(vif);
return -frags;
}
} while ((txp++)->flags & XEN_NETTXF_more_data);
@@ -938,7 +950,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
pending_idx = netbk->pending_ring[index];
page = xen_netbk_alloc_page(netbk, skb, pending_idx);
if (!page)
- return NULL;
+ goto err;

gop->source.u.ref = txp->gref;
gop->source.domid = vif->domid;
@@ -960,6 +972,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
}

return gop;
+err:
+ /* Unwind, freeing all pages and sending error responses. */
+ while (i-- > start) {
+ xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
+ XEN_NETIF_RSP_ERROR);
+ }
+ /* The head too, if necessary. */
+ if (start)
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+ return NULL;
}

static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
@@ -968,30 +991,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
{
struct gnttab_copy *gop = *gopp;
u16 pending_idx = *((u16 *)skb->data);
- struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
- struct xenvif *vif = pending_tx_info[pending_idx].vif;
- struct xen_netif_tx_request *txp;
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
int i, err, start;

/* Check status of header. */
err = gop->status;
- if (unlikely(err)) {
- pending_ring_idx_t index;
- index = pending_index(netbk->pending_prod++);
- txp = &pending_tx_info[pending_idx].req;
- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
- netbk->pending_ring[index] = pending_idx;
- xenvif_put(vif);
- }
+ if (unlikely(err))
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

/* Skip first skb fragment if it is on same page as header fragment. */
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

for (i = start; i < nr_frags; i++) {
int j, newerr;
- pending_ring_idx_t index;

pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

@@ -1000,16 +1013,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
if (likely(!newerr)) {
/* Had a previous error? Invalidate this fragment. */
if (unlikely(err))
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
continue;
}

/* Error on this fragment: respond to client with an error. */
- txp = &netbk->pending_tx_info[pending_idx].req;
- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
- index = pending_index(netbk->pending_prod++);
- netbk->pending_ring[index] = pending_idx;
- xenvif_put(vif);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

/* Not the first error? Preceding frags already invalidated. */
if (err)
@@ -1017,10 +1026,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,

/* First error: invalidate header and preceding fragments. */
pending_idx = *((u16 *)skb->data);
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
for (j = start; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
}

/* Remember the error: invalidate all subsequent fragments. */
@@ -1054,7 +1063,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)

/* Take an extra reference to offset xen_netbk_idx_release */
get_page(netbk->mmap_pages[pending_idx]);
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
}
}

@@ -1067,7 +1076,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,

do {
if (unlikely(work_to_do-- <= 0)) {
- netdev_dbg(vif->dev, "Missing extra info\n");
+ netdev_err(vif->dev, "Missing extra info\n");
+ netbk_fatal_tx_err(vif);
return -EBADR;
}

@@ -1076,8 +1086,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
vif->tx.req_cons = ++cons;
- netdev_dbg(vif->dev,
+ netdev_err(vif->dev,
"Invalid extra type: %d\n", extra.type);
+ netbk_fatal_tx_err(vif);
return -EINVAL;
}

@@ -1093,13 +1104,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
struct xen_netif_extra_info *gso)
{
if (!gso->u.gso.size) {
- netdev_dbg(vif->dev, "GSO size must not be zero.\n");
+ netdev_err(vif->dev, "GSO size must not be zero.\n");
+ netbk_fatal_tx_err(vif);
return -EINVAL;
}

/* Currently only TCPv4 S.O. is supported. */
if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
- netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+ netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+ netbk_fatal_tx_err(vif);
return -EINVAL;
}

@@ -1236,9 +1249,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)

/* Get a netif from the list with work to do. */
vif = poll_net_schedule_list(netbk);
+ /* This can sometimes happen because the test of
+ * list_empty(net_schedule_list) at the top of the
+ * loop is unlocked. Just go back and have another
+ * look.
+ */
if (!vif)
continue;

+ if (vif->tx.sring->req_prod - vif->tx.req_cons >
+ XEN_NETIF_TX_RING_SIZE) {
+ netdev_err(vif->dev,
+ "Impossible number of requests. "
+ "req_prod %d, req_cons %d, size %ld\n",
+ vif->tx.sring->req_prod, vif->tx.req_cons,
+ XEN_NETIF_TX_RING_SIZE);
+ netbk_fatal_tx_err(vif);
+ continue;
+ }
+
RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
if (!work_to_do) {
xenvif_put(vif);
@@ -1266,17 +1295,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
work_to_do = xen_netbk_get_extras(vif, extras,
work_to_do);
idx = vif->tx.req_cons;
- if (unlikely(work_to_do < 0)) {
- netbk_tx_err(vif, &txreq, idx);
+ if (unlikely(work_to_do < 0))
continue;
- }
}

ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
- if (unlikely(ret < 0)) {
- netbk_tx_err(vif, &txreq, idx - ret);
+ if (unlikely(ret < 0))
continue;
- }
+
idx += ret;

if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1288,11 +1314,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)

/* No crossing a page as the payload mustn't fragment. */
if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
- netdev_dbg(vif->dev,
+ netdev_err(vif->dev,
"txreq.offset: %x, size: %u, end: %lu\n",
txreq.offset, txreq.size,
(txreq.offset&~PAGE_MASK) + txreq.size);
- netbk_tx_err(vif, &txreq, idx);
+ netbk_fatal_tx_err(vif);
continue;
}

@@ -1320,8 +1346,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

if (netbk_set_skb_gso(vif, skb, gso)) {
+ /* Failure in netbk_set_skb_gso is fatal. */
kfree_skb(skb);
- netbk_tx_err(vif, &txreq, idx);
continue;
}
}
@@ -1420,7 +1446,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
txp->size -= data_len;
} else {
/* Schedule a response immediately. */
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
}

if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1475,7 +1501,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)

}

-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+ u8 status)
{
struct xenvif *vif;
struct pending_tx_info *pending_tx_info;
@@ -1489,7 +1516,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)

vif = pending_tx_info->vif;

- make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
+ make_tx_response(vif, &pending_tx_info->req, status);

index = pending_index(netbk->pending_prod++);
netbk->pending_ring[index] = pending_idx;
1384     diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
1385     index 7481146..97c2be1 100644
1386     --- a/drivers/platform/x86/ibm_rtl.c
1387     +++ b/drivers/platform/x86/ibm_rtl.c
1388     @@ -244,7 +244,7 @@ static int __init ibm_rtl_init(void) {
1389     if (force)
1390     pr_warn("module loaded by force\n");
1391     /* first ensure that we are running on IBM HW */
1392     - else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table))
1393     + else if (efi_enabled(EFI_BOOT) || !dmi_check_system(ibm_rtl_dmi_table))
1394     return -ENODEV;
1395    
1396     /* Get the address for the Extended BIOS Data Area */
1397     diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
1398     index 1afbe5e..de9f432 100644
1399     --- a/drivers/platform/x86/samsung-laptop.c
1400     +++ b/drivers/platform/x86/samsung-laptop.c
1401     @@ -26,6 +26,7 @@
1402     #include <linux/seq_file.h>
1403     #include <linux/debugfs.h>
1404     #include <linux/ctype.h>
1405     +#include <linux/efi.h>
1406     #if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
1407     #include <acpi/video.h>
1408     #endif
1409     @@ -1527,6 +1528,9 @@ static int __init samsung_init(void)
1410     struct samsung_laptop *samsung;
1411     int ret;
1412    
1413     + if (efi_enabled(EFI_BOOT))
1414     + return -ENODEV;
1415     +
1416     quirks = &samsung_unknown;
1417     if (!force && !dmi_check_system(samsung_dmi_table))
1418     return -ENODEV;
1419     diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
1420     index 4c150df..5a1bd0c 100644
1421     --- a/drivers/scsi/isci/init.c
1422     +++ b/drivers/scsi/isci/init.c
1423     @@ -470,7 +470,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
1424     return -ENOMEM;
1425     pci_set_drvdata(pdev, pci_info);
1426    
1427     - if (efi_enabled)
1428     + if (efi_enabled(EFI_RUNTIME_SERVICES))
1429     orom = isci_get_efi_var(pdev);
1430    
1431     if (!orom)
1432     diff --git a/include/linux/efi.h b/include/linux/efi.h
1433     index 5782114..eee8b0b 100644
1434     --- a/include/linux/efi.h
1435     +++ b/include/linux/efi.h
1436     @@ -539,18 +539,30 @@ extern int __init efi_setup_pcdp_console(char *);
1437     #endif
1438    
1439     /*
1440     - * We play games with efi_enabled so that the compiler will, if possible, remove
1441     - * EFI-related code altogether.
1442     + * We play games with efi_enabled so that the compiler will, if
1443     + * possible, remove EFI-related code altogether.
1444     */
1445     +#define EFI_BOOT 0 /* Were we booted from EFI? */
1446     +#define EFI_SYSTEM_TABLES 1 /* Can we use EFI system tables? */
1447     +#define EFI_CONFIG_TABLES 2 /* Can we use EFI config tables? */
1448     +#define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */
1449     +#define EFI_MEMMAP 4 /* Can we use EFI memory map? */
1450     +#define EFI_64BIT 5 /* Is the firmware 64-bit? */
1451     +
1452     #ifdef CONFIG_EFI
1453     # ifdef CONFIG_X86
1454     - extern int efi_enabled;
1455     - extern bool efi_64bit;
1456     +extern int efi_enabled(int facility);
1457     # else
1458     -# define efi_enabled 1
1459     +static inline int efi_enabled(int facility)
1460     +{
1461     + return 1;
1462     +}
1463     # endif
1464     #else
1465     -# define efi_enabled 0
1466     +static inline int efi_enabled(int facility)
1467     +{
1468     + return 0;
1469     +}
1470     #endif
1471    
1472     /*
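The efi.h hunk replaces the single efi_enabled flag (and the separate efi_64bit bool) with one bit per facility, so each caller can test exactly the capability it depends on -- reboot and isci ask for EFI_RUNTIME_SERVICES, while the DMI-quirk drivers above only care whether the machine booted via EFI at all. A user-space sketch of the same bitmask pattern; the helpers stand in for the kernel's set_bit()/test_bit() on x86_efi_facility:

    #include <stdio.h>

    /* Per-facility bits, mirroring the new efi.h constants. */
    #define EFI_BOOT             0
    #define EFI_RUNTIME_SERVICES 3
    #define EFI_64BIT            5

    static unsigned long efi_facility; /* stands in for x86_efi_facility */

    static void facility_set(int bit)  { efi_facility |= 1UL << bit; }
    static int  facility_test(int bit) { return (efi_facility >> bit) & 1; }

    int main(void)
    {
        /* Booted from 64-bit EFI, but runtime services never came up. */
        facility_set(EFI_BOOT);
        facility_set(EFI_64BIT);

        if (facility_test(EFI_BOOT))
            printf("EFI boot: yes\n");
        if (!facility_test(EFI_RUNTIME_SERVICES))
            printf("no runtime services, skipping efi.reset_system\n");
        return 0;
    }

The point of the split is visible in the setup.c hunk near the top of this patch: EFI_BOOT can be set even when reserving the memmap fails, instead of the old all-or-nothing efi_enabled = 0.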
1473     diff --git a/init/main.c b/init/main.c
1474     index c2178d2..02c1384 100644
1475     --- a/init/main.c
1476     +++ b/init/main.c
1477     @@ -602,7 +602,7 @@ asmlinkage void __init start_kernel(void)
1478     pidmap_init();
1479     anon_vma_init();
1480     #ifdef CONFIG_X86
1481     - if (efi_enabled)
1482     + if (efi_enabled(EFI_RUNTIME_SERVICES))
1483     efi_enter_virtual_mode();
1484     #endif
1485     thread_info_cache_init();
1486     @@ -630,7 +630,7 @@ asmlinkage void __init start_kernel(void)
1487     acpi_early_init(); /* before LAPIC and SMP init */
1488     sfi_init_late();
1489    
1490     - if (efi_enabled)
1491     + if (efi_enabled(EFI_RUNTIME_SERVICES))
1492     efi_free_boot_services();
1493    
1494     ftrace_init();
1495     diff --git a/kernel/resource.c b/kernel/resource.c
1496     index 7e8ea66..bfe96b8 100644
1497     --- a/kernel/resource.c
1498     +++ b/kernel/resource.c
1499     @@ -758,6 +758,7 @@ static void __init __reserve_region_with_split(struct resource *root,
1500     struct resource *parent = root;
1501     struct resource *conflict;
1502     struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);
1503     + struct resource *next_res = NULL;
1504    
1505     if (!res)
1506     return;
1507     @@ -767,21 +768,46 @@ static void __init __reserve_region_with_split(struct resource *root,
1508     res->end = end;
1509     res->flags = IORESOURCE_BUSY;
1510    
1511     - conflict = __request_resource(parent, res);
1512     - if (!conflict)
1513     - return;
1514     + while (1) {
1515    
1516     - /* failed, split and try again */
1517     - kfree(res);
1518     + conflict = __request_resource(parent, res);
1519     + if (!conflict) {
1520     + if (!next_res)
1521     + break;
1522     + res = next_res;
1523     + next_res = NULL;
1524     + continue;
1525     + }
1526    
1527     - /* conflict covered whole area */
1528     - if (conflict->start <= start && conflict->end >= end)
1529     - return;
1530     + /* conflict covered whole area */
1531     + if (conflict->start <= res->start &&
1532     + conflict->end >= res->end) {
1533     + kfree(res);
1534     + WARN_ON(next_res);
1535     + break;
1536     + }
1537     +
1538     + /* failed, split and try again */
1539     + if (conflict->start > res->start) {
1540     + end = res->end;
1541     + res->end = conflict->start - 1;
1542     + if (conflict->end < end) {
1543     + next_res = kzalloc(sizeof(*next_res),
1544     + GFP_ATOMIC);
1545     + if (!next_res) {
1546     + kfree(res);
1547     + break;
1548     + }
1549     + next_res->name = name;
1550     + next_res->start = conflict->end + 1;
1551     + next_res->end = end;
1552     + next_res->flags = IORESOURCE_BUSY;
1553     + }
1554     + } else {
1555     + res->start = conflict->end + 1;
1556     + }
1557     + }
1558    
1559     - if (conflict->start > start)
1560     - __reserve_region_with_split(root, start, conflict->start-1, name);
1561     - if (conflict->end < end)
1562     - __reserve_region_with_split(root, conflict->end+1, end, name);
1563     }
1564    
1565     void __init reserve_region_with_split(struct resource *root,
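The resource.c hunk converts __reserve_region_with_split() from self-recursion into a loop: when a conflict splits the region in two, the right-hand piece is parked in next_res and picked up on a later iteration, so stack use stays constant however fragmented the region is. A simplified user-space sketch of the same transformation, with a flat conflict list standing in for the resource tree; all names here are illustrative:

    #include <stdio.h>

    struct range { long start, end; };

    /* Existing reservations, sorted by start (stand-in for the tree). */
    static const struct range busy[] = { {10, 19}, {40, 49} };

    /* Return the first existing range overlapping [s, e], or NULL. */
    static const struct range *find_conflict(long s, long e)
    {
        for (unsigned i = 0; i < sizeof(busy) / sizeof(busy[0]); i++)
            if (busy[i].start <= e && busy[i].end >= s)
                return &busy[i];
        return NULL;
    }

    /* Iterative split: like the patched __reserve_region_with_split(),
     * at most one right-hand segment is deferred at a time, so there
     * is no recursion no matter how many conflicts exist. */
    static void reserve_with_split(long s, long e)
    {
        long next_s = -1, next_e = -1;    /* deferred segment, if any */

        for (;;) {
            const struct range *c = find_conflict(s, e);
            if (!c) {
                printf("reserved [%ld, %ld]\n", s, e);
                if (next_s < 0)
                    break;
                s = next_s;               /* resume deferred segment */
                e = next_e;
                next_s = -1;
                continue;
            }
            if (c->start <= s && c->end >= e)
                break;                    /* conflict covers everything */
            if (c->start > s) {
                if (c->end < e) {         /* conflict in the middle: */
                    next_s = c->end + 1;  /* defer the right part */
                    next_e = e;
                }
                e = c->start - 1;         /* keep trying the left part */
            } else {
                s = c->end + 1;           /* trim the left edge */
            }
        }
    }

    int main(void)
    {
        reserve_with_split(0, 59); /* expect [0,9], [20,39], [50,59] */
        return 0;
    }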
1566     diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
1567     index 208edc0..605156f 100644
1568     --- a/net/bluetooth/smp.c
1569     +++ b/net/bluetooth/smp.c
1570     @@ -852,6 +852,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1571    
1572     skb_pull(skb, sizeof(code));
1573    
1574     + /*
1575     + * The SMP context must be initialized for all other PDUs except
1576     + * pairing and security requests. If we get any other PDU when
1577     + * not initialized simply disconnect (done if this function
1578     + * returns an error).
1579     + */
1580     + if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
1581     + !conn->smp_chan) {
1582     + BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
1583     + kfree_skb(skb);
1584     + return -ENOTSUPP;
1585     + }
1586     +
1587     switch (code) {
1588     case SMP_CMD_PAIRING_REQ:
1589     reason = smp_cmd_pairing_req(conn, skb);
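The smp.c hunk closes a remotely triggerable hole: every SMP PDU except the two that open a pairing exchange requires conn->smp_chan to exist, so anything else arriving on a bare connection is now dropped and the link disconnected. The same gate in generic form; the command codes and struct here are illustrative:

    #include <stdio.h>
    #include <stddef.h>

    enum { CMD_PAIRING_REQ = 0x01, CMD_CONFIRM = 0x03, CMD_SECURITY_REQ = 0x0b };

    struct conn { void *smp_chan; /* NULL until pairing sets it up */ };

    /* Only the two session-opening commands may arrive without a
     * context; anything else on a bare connection is a protocol
     * violation and the caller should disconnect (nonzero return). */
    static int check_pdu(struct conn *c, int code)
    {
        if (code != CMD_PAIRING_REQ && code != CMD_SECURITY_REQ &&
            !c->smp_chan) {
            fprintf(stderr, "unexpected SMP command 0x%02x, disconnecting\n",
                    code);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct conn bare = { .smp_chan = NULL };
        printf("pairing req: %d\n", check_pdu(&bare, CMD_PAIRING_REQ)); /* 0 */
        printf("confirm:     %d\n", check_pdu(&bare, CMD_CONFIRM));     /* -1 */
        return 0;
    }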
1590     diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
1591     index d7f49b6..e54ef82 100644
1592     --- a/net/bridge/br_netfilter.c
1593     +++ b/net/bridge/br_netfilter.c
1594     @@ -254,6 +254,9 @@ static int br_parse_ip_options(struct sk_buff *skb)
1595     struct net_device *dev = skb->dev;
1596     u32 len;
1597    
1598     + if (!pskb_may_pull(skb, sizeof(struct iphdr)))
1599     + goto inhdr_error;
1600     +
1601     iph = ip_hdr(skb);
1602     opt = &(IPCB(skb)->opt);
1603    
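br_parse_ip_options() now calls pskb_may_pull() before ip_hdr(), guaranteeing the linear skb data really holds a full IPv4 header before any field is read. The underlying rule in plain C: validate the length before casting a raw buffer to a header type (the struct below is abbreviated, not the real 20-byte iphdr):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Abbreviated IPv4 header -- enough to show the bounds check. */
    struct iphdr_min {
        uint8_t  ver_ihl;
        uint8_t  tos;
        uint16_t tot_len;
    };

    /* Analogue of the pskb_may_pull() guard: never cast to a header
     * type until the buffer is known to contain that many bytes. */
    static const struct iphdr_min *parse_ip(const uint8_t *buf, size_t len)
    {
        if (len < sizeof(struct iphdr_min))
            return NULL; /* truncated packet: the inhdr_error path */
        return (const struct iphdr_min *)buf;
    }

    int main(void)
    {
        uint8_t pkt[64] = { 0x45 };
        printf("full: %s\n", parse_ip(pkt, sizeof(pkt)) ? "parsed" : "dropped");
        printf("runt: %s\n", parse_ip(pkt, 2)           ? "parsed" : "dropped");
        return 0;
    }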
1604     diff --git a/net/core/pktgen.c b/net/core/pktgen.c
1605     index 8dae76f..114d8a9 100644
1606     --- a/net/core/pktgen.c
1607     +++ b/net/core/pktgen.c
1608     @@ -1802,10 +1802,13 @@ static ssize_t pktgen_thread_write(struct file *file,
1609     return -EFAULT;
1610     i += len;
1611     mutex_lock(&pktgen_thread_lock);
1612     - pktgen_add_device(t, f);
1613     + ret = pktgen_add_device(t, f);
1614     mutex_unlock(&pktgen_thread_lock);
1615     - ret = count;
1616     - sprintf(pg_result, "OK: add_device=%s", f);
1617     + if (!ret) {
1618     + ret = count;
1619     + sprintf(pg_result, "OK: add_device=%s", f);
1620     + } else
1621     + sprintf(pg_result, "ERROR: can not add device %s", f);
1622     goto out;
1623     }
1624    
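The pktgen hunk stops throwing away pktgen_add_device()'s return value, so a failed add now reports an error instead of a bogus "OK". The capture-and-report shape of the fix, with a stand-in add_device():

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for pktgen_add_device(): fails on a duplicate name. */
    static int add_device(const char *name)
    {
        return strcmp(name, "eth0") == 0 ? -17 /* -EEXIST */ : 0;
    }

    static void handle_write(const char *name, char *result, size_t n)
    {
        int ret = add_device(name);   /* capture, don't discard */
        if (!ret)
            snprintf(result, n, "OK: add_device=%s", name);
        else
            snprintf(result, n, "ERROR: can not add device %s", name);
    }

    int main(void)
    {
        char buf[64];
        handle_write("eth1", buf, sizeof(buf)); printf("%s\n", buf);
        handle_write("eth0", buf, sizeof(buf)); printf("%s\n", buf);
        return 0;
    }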
1625     diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
1626     index 59ef40a..3748284 100644
1627     --- a/net/ipv4/ip_sockglue.c
1628     +++ b/net/ipv4/ip_sockglue.c
1629     @@ -589,7 +589,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
1630     case IP_TTL:
1631     if (optlen < 1)
1632     goto e_inval;
1633     - if (val != -1 && (val < 0 || val > 255))
1634     + if (val != -1 && (val < 1 || val > 255))
1635     goto e_inval;
1636     inet->uc_ttl = val;
1637     break;
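The IP_TTL change tightens the accepted range: -1 still means "use the system default", but an explicit TTL must now be 1..255, since a packet sent with TTL 0 could never be delivered. As a standalone predicate:

    #include <stdio.h>
    #include <stdbool.h>

    /* Valid IP_TTL values after the fix: -1 selects the system
     * default; otherwise 1..255 (0 is undeliverable). */
    static bool ip_ttl_valid(int val)
    {
        return val == -1 || (val >= 1 && val <= 255);
    }

    int main(void)
    {
        int tests[] = { -1, 0, 1, 64, 255, 256 };
        for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
            printf("%4d -> %s\n", tests[i],
                   ip_ttl_valid(tests[i]) ? "ok" : "EINVAL");
        return 0;
    }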
1638     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
1639     index 0e0b6d0..57b20b9 100644
1640     --- a/net/ipv4/tcp_input.c
1641     +++ b/net/ipv4/tcp_input.c
1642     @@ -3639,6 +3639,11 @@ static int tcp_process_frto(struct sock *sk, int flag)
1643     }
1644     } else {
1645     if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
1646     + if (!tcp_packets_in_flight(tp)) {
1647     + tcp_enter_frto_loss(sk, 2, flag);
1648     + return true;
1649     + }
1650     +
1651     /* Prevent sending of new data. */
1652     tp->snd_cwnd = min(tp->snd_cwnd,
1653     tcp_packets_in_flight(tp));
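The tcp_input.c hunk guards the F-RTO path: the min() just below it clamps snd_cwnd to the number of packets in flight, which is only safe when something is actually in flight -- with zero in flight the clamp would pin cwnd at 0 and stall the connection, so the fix bails out to tcp_enter_frto_loss() first. The logic in miniature:

    #include <stdio.h>

    /* Clamping cwnd to packets-in-flight is only safe when in_flight
     * is nonzero; otherwise fall back to conventional loss recovery
     * (the tcp_enter_frto_loss() path) instead of stalling at 0. */
    static int next_cwnd(int cwnd, int in_flight)
    {
        if (!in_flight)
            return -1;                              /* bail to loss recovery */
        return cwnd < in_flight ? cwnd : in_flight; /* the min() clamp */
    }

    int main(void)
    {
        printf("%d\n", next_cwnd(10, 4)); /* 4 */
        printf("%d\n", next_cwnd(10, 0)); /* -1: would have stuck at 0 */
        return 0;
    }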
1654     diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
1655     index 468a5ce..81e0ad2 100644
1656     --- a/net/ipv6/addrconf.c
1657     +++ b/net/ipv6/addrconf.c
1658     @@ -1736,7 +1736,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
1659     continue;
1660     if ((rt->rt6i_flags & flags) != flags)
1661     continue;
1662     - if ((noflags != 0) && ((rt->rt6i_flags & flags) != 0))
1663     + if ((rt->rt6i_flags & noflags) != 0)
1664     continue;
1665     dst_hold(&rt->dst);
1666     break;
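The addrconf.c fix repairs a copy-paste bug: the loop was meant to skip routes carrying any forbidden noflags bit, but the old test re-checked flags, so the noflags filter never rejected anything. The intended required-bits/forbidden-bits test in isolation, using the real RTF_* values:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define RTF_DEFAULT  0x010000
    #define RTF_ADDRCONF 0x040000

    /* Keep an entry only if every `flags` bit is set and no
     * `noflags` bit is set -- the corrected form of the test. */
    static bool route_matches(uint32_t rt_flags, uint32_t flags,
                              uint32_t noflags)
    {
        if ((rt_flags & flags) != flags)
            return false;          /* missing a required bit */
        if ((rt_flags & noflags) != 0)
            return false;          /* carries a forbidden bit */
        return true;
    }

    int main(void)
    {
        /* want RTF_ADDRCONF routes that are not default routes */
        printf("%d\n", route_matches(RTF_ADDRCONF,
                                     RTF_ADDRCONF, RTF_DEFAULT)); /* 1 */
        printf("%d\n", route_matches(RTF_ADDRCONF | RTF_DEFAULT,
                                     RTF_ADDRCONF, RTF_DEFAULT)); /* 0 */
        return 0;
    }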
1667     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
1668     index 13e5399..ce661ba 100644
1669     --- a/net/ipv6/ip6_output.c
1670     +++ b/net/ipv6/ip6_output.c
1671     @@ -1287,10 +1287,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1672     cork->length = 0;
1673     sk->sk_sndmsg_page = NULL;
1674     sk->sk_sndmsg_off = 0;
1675     - exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
1676     + exthdrlen = (opt ? opt->opt_flen : 0);
1677     length += exthdrlen;
1678     transhdrlen += exthdrlen;
1679     - dst_exthdrlen = rt->dst.header_len;
1680     + dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1681     } else {
1682     rt = (struct rt6_info *)cork->dst;
1683     fl6 = &inet->cork.fl.u.ip6;
1684     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
1685     index b84cba1..493490f 100644
1686     --- a/net/ipv6/route.c
1687     +++ b/net/ipv6/route.c
1688     @@ -846,7 +846,8 @@ restart:
1689     dst_hold(&rt->dst);
1690     read_unlock_bh(&table->tb6_lock);
1691    
1692     - if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
1693     + if (!dst_get_neighbour_noref_raw(&rt->dst) &&
1694     + !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))
1695     nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
1696     else if (!(rt->dst.flags & DST_HOST))
1697     nrt = rt6_alloc_clone(rt, &fl6->daddr);
1698     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1699     index 078fdff..38ca5e0 100644
1700     --- a/net/packet/af_packet.c
1701     +++ b/net/packet/af_packet.c
1702     @@ -2450,13 +2450,15 @@ static int packet_release(struct socket *sock)
1703    
1704     packet_flush_mclist(sk);
1705    
1706     - memset(&req_u, 0, sizeof(req_u));
1707     -
1708     - if (po->rx_ring.pg_vec)
1709     + if (po->rx_ring.pg_vec) {
1710     + memset(&req_u, 0, sizeof(req_u));
1711     packet_set_ring(sk, &req_u, 1, 0);
1712     + }
1713    
1714     - if (po->tx_ring.pg_vec)
1715     + if (po->tx_ring.pg_vec) {
1716     + memset(&req_u, 0, sizeof(req_u));
1717     packet_set_ring(sk, &req_u, 1, 1);
1718     + }
1719    
1720     fanout_release(sk);
1721    
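af_packet now re-zeroes req_u immediately before each packet_set_ring() call rather than once up front: the union can be written during the first (rx) teardown, and feeding that stale state to the tx teardown is evidently what the fix guards against. The hazard in miniature, with a made-up union:

    #include <stdio.h>
    #include <string.h>

    union ring_req {
        struct { unsigned block_nr; } v1;
        struct { unsigned block_nr; unsigned timeout; } v3;
    };

    /* Stand-in for packet_set_ring(..., closing=1): a zeroed request
     * means "tear the ring down"; it may also use the union as
     * scratch space, leaving it dirty afterwards. */
    static void set_ring(union ring_req *req, const char *which)
    {
        printf("%s: block_nr=%u (%s)\n", which, req->v3.block_nr,
               req->v3.block_nr ? "BUG: stale request" : "clean teardown");
        req->v3.block_nr = 42; /* scratch use dirties the union */
    }

    int main(void)
    {
        union ring_req req;

        memset(&req, 0, sizeof(req)); /* zero before EVERY use, */
        set_ring(&req, "rx");
        memset(&req, 0, sizeof(req)); /* ...not just once up front */
        set_ring(&req, "tx");
        return 0;
    }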
1722     diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
1723     index 68a385d..58cd035 100644
1724     --- a/net/sctp/endpointola.c
1725     +++ b/net/sctp/endpointola.c
1726     @@ -248,6 +248,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
1727     /* Final destructor for endpoint. */
1728     static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
1729     {
1730     + int i;
1731     +
1732     SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
1733    
1734     /* Free up the HMAC transform. */
1735     @@ -270,6 +272,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
1736     sctp_inq_free(&ep->base.inqueue);
1737     sctp_bind_addr_free(&ep->base.bind_addr);
1738    
1739     + for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i)
1740     + memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE);
1741     +
1742     /* Remove and free the port */
1743     if (sctp_sk(ep->base.sk)->bind_hash)
1744     sctp_put_port(ep->base.sk);
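sctp_endpoint_destroy() now wipes both generations of the endpoint's secret key before the structure is freed, so key material does not survive in memory handed back to the allocator. The same hygiene in user space, with sizes modeled on the SCTP code; note that outside the kernel a compiler may elide a plain memset before free, which is why explicit_bzero()-style helpers exist:

    #include <stdio.h>
    #include <string.h>
    #include <stdlib.h>

    #define HOW_MANY_SECRETS 2
    #define SECRET_SIZE 32

    struct endpoint {
        unsigned char secret_key[HOW_MANY_SECRETS][SECRET_SIZE];
    };

    static void endpoint_destroy(struct endpoint *ep)
    {
        /* Wipe key material before the memory goes back to the
         * allocator, where it could be handed to another user. */
        for (int i = 0; i < HOW_MANY_SECRETS; i++)
            memset(ep->secret_key[i], 0, SECRET_SIZE);
        free(ep);
    }

    int main(void)
    {
        struct endpoint *ep = malloc(sizeof(*ep));
        if (!ep)
            return 1;
        memset(ep->secret_key, 0xAA, sizeof(ep->secret_key)); /* fake keys */
        endpoint_destroy(ep);
        return 0;
    }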
1745     diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
1746     index cfeb1d4..96eb168 100644
1747     --- a/net/sctp/outqueue.c
1748     +++ b/net/sctp/outqueue.c
1749     @@ -223,7 +223,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
1750    
1751     /* Free the outqueue structure and any related pending chunks.
1752     */
1753     -void sctp_outq_teardown(struct sctp_outq *q)
1754     +static void __sctp_outq_teardown(struct sctp_outq *q)
1755     {
1756     struct sctp_transport *transport;
1757     struct list_head *lchunk, *temp;
1758     @@ -276,8 +276,6 @@ void sctp_outq_teardown(struct sctp_outq *q)
1759     sctp_chunk_free(chunk);
1760     }
1761    
1762     - q->error = 0;
1763     -
1764     /* Throw away any leftover control chunks. */
1765     list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
1766     list_del_init(&chunk->list);
1767     @@ -285,11 +283,17 @@ void sctp_outq_teardown(struct sctp_outq *q)
1768     }
1769     }
1770    
1771     +void sctp_outq_teardown(struct sctp_outq *q)
1772     +{
1773     + __sctp_outq_teardown(q);
1774     + sctp_outq_init(q->asoc, q);
1775     +}
1776     +
1777     /* Free the outqueue structure and any related pending chunks. */
1778     void sctp_outq_free(struct sctp_outq *q)
1779     {
1780     /* Throw away leftover chunks. */
1781     - sctp_outq_teardown(q);
1782     + __sctp_outq_teardown(q);
1783    
1784     /* If we were kmalloc()'d, free the memory. */
1785     if (q->malloced)
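The outqueue change splits the old teardown into an internal __sctp_outq_teardown() and a public sctp_outq_teardown() that re-initializes the queue afterwards, so a "reset" leaves the object usable while the final free path does not pay for a pointless re-init. The shape of that split, names abbreviated:

    #include <stdio.h>

    struct outq { int error; int chunks; };

    static void outq_init(struct outq *q)       { q->error = 0; q->chunks = 0; }

    /* Internal: release everything, leave the struct unspecified. */
    static void __outq_teardown(struct outq *q) { q->chunks = 0; }

    /* Public reset: teardown + init, so the queue is immediately
     * reusable by a restarted association. */
    static void outq_teardown(struct outq *q)
    {
        __outq_teardown(q);
        outq_init(q);
    }

    /* Final free path skips the re-init -- the object is going away. */
    static void outq_free(struct outq *q)       { __outq_teardown(q); }

    int main(void)
    {
        struct outq q;
        outq_init(&q);
        q.chunks = 5; q.error = -1;
        outq_teardown(&q);
        printf("after reset: error=%d chunks=%d\n", q.error, q.chunks);
        outq_free(&q);
        return 0;
    }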
1786     diff --git a/net/sctp/socket.c b/net/sctp/socket.c
1787     index 7405355..9fd05ed 100644
1788     --- a/net/sctp/socket.c
1789     +++ b/net/sctp/socket.c
1790     @@ -3375,7 +3375,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
1791    
1792     ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
1793     out:
1794     - kfree(authkey);
1795     + kzfree(authkey);
1796     return ret;
1797     }
1798
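The last hunk swaps kfree() for kzfree() on the copied-in authentication key: same lifetime, but the buffer is zeroed before release. A user-space analogue; unlike kzfree(), which gets the allocation size from ksize(), this sketch must be passed the length, and it uses a volatile store loop so the compiler cannot drop the wipe as a dead store:

    #include <stdlib.h>
    #include <string.h>

    /* User-space analogue of the kernel's kzfree(): zero a buffer of
     * sensitive bytes, then free it. */
    static void kzfree_like(void *p, size_t len)
    {
        volatile unsigned char *v = p;
        while (len--)
            *v++ = 0;
        free(p);
    }

    int main(void)
    {
        size_t keylen = 32;
        unsigned char *authkey = malloc(keylen);
        if (!authkey)
            return 1;
        memset(authkey, 0x5A, keylen); /* pretend: key copied from userspace */
        /* ... use the key ... */
        kzfree_like(authkey, keylen);  /* not plain free(): wipe first */
        return 0;
    }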