Magellan Linux

Annotation of /trunk/kernel-magellan/patches-3.9/0103-3.9.4-all-fixes.patch

Revision 2181
Fri May 31 06:40:38 2013 UTC by niro
File size: 23374 byte(s)
-linux-3.9.4
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 0c3ba9f..f4726dc 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -136,8 +136,6 @@ void disable_debug_monitors(enum debug_el el)
*/
static void clear_os_lock(void *unused)
{
- asm volatile("msr mdscr_el1, %0" : : "r" (0));
- isb();
asm volatile("msr oslar_el1, %0" : : "r" (0));
isb();
}
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index abe69b8..48a3860 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -52,7 +52,7 @@ loop1:
add x2, x2, #4 // add 4 (line length offset)
mov x4, #0x3ff
and x4, x4, x1, lsr #3 // find maximum number on the way size
- clz x5, x4 // find bit position of way size increment
+ clz w5, w4 // find bit position of way size increment
mov x7, #0x7fff
and x7, x7, x1, lsr #13 // extract max number of the index size
loop2:
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index f1d8b9b..a82ae88 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -119,8 +119,7 @@ ENTRY(__cpu_setup)

mov x0, #3 << 20
msr cpacr_el1, x0 // Enable FP/ASIMD
- mov x0, #1
- msr oslar_el1, x0 // Set the debug OS lock
+ msr mdscr_el1, xzr // Reset mdscr_el1
tlbi vmalle1is // invalidate I + D TLBs
/*
* Memory region attributes for LPAE:
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 6cf0a9c..5a0be0a 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -37,8 +37,8 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *);
unsigned long sys_sigreturn(void);

/* kernel/vm86_32.c */
-int sys_vm86old(struct vm86_struct __user *);
-int sys_vm86(unsigned long, unsigned long);
+asmlinkage long sys_vm86old(struct vm86_struct __user *);
+asmlinkage long sys_vm86(unsigned long, unsigned long);

#else /* CONFIG_X86_32 */

diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 1c68ccb..8f3201d 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -34,7 +34,7 @@
extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt = 2;
-pmdval_t __initdata early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
+pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 1cf5766..3dbdd9c 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -33,6 +33,7 @@
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
@@ -48,7 +49,6 @@
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
-#include <asm/syscalls.h>

/*
* Known problems:
@@ -202,17 +202,16 @@ out:
static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);

-int sys_vm86old(struct vm86_struct __user *v86)
+SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
{
struct kernel_vm86_struct info; /* declare this _on top_,
* this avoids wasting of stack space.
* This remains on the stack until we
* return to 32 bit user space.
*/
- struct task_struct *tsk;
+ struct task_struct *tsk = current;
int tmp, ret = -EPERM;

- tsk = current;
if (tsk->thread.saved_sp0)
goto out;
tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
@@ -227,11 +226,12 @@ int sys_vm86old(struct vm86_struct __user *v86)
do_sys_vm86(&info, tsk);
ret = 0; /* we never return here */
out:
+ asmlinkage_protect(1, ret, v86);
return ret;
}


-int sys_vm86(unsigned long cmd, unsigned long arg)
+SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
{
struct kernel_vm86_struct info; /* declare this _on top_,
* this avoids wasting of stack space.
@@ -278,6 +278,7 @@ int sys_vm86(unsigned long cmd, unsigned long arg)
do_sys_vm86(&info, tsk);
ret = 0; /* we never return here */
out:
+ asmlinkage_protect(2, ret, cmd, arg);
return ret;
}

diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 9c41b58..ad6335f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -1926,8 +1926,8 @@ init_zm_mask_add(struct nvbios_init *init)
trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add);
init->offset += 13;

- data = init_rd32(init, addr) & mask;
- data |= ((data + add) & ~mask);
+ data = init_rd32(init, addr);
+ data = (data & mask) | ((data + add) & ~mask);
init_wr32(init, addr, data);
}

diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index aeaa386..0ea6bdf 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2400,8 +2400,8 @@ int evergreen_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
} else {
/* size in MB on evergreen/cayman/tn */
- rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
- rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
}
rdev->mc.visible_vram_size = rdev->mc.aper_size;
r700_vram_gtt_location(rdev, &rdev->mc);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 93f760e..6c0ce89 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -726,7 +726,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
return r;
}
DRM_INFO("radeon: %uM of VRAM memory ready\n",
- (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
+ (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
rdev->mc.gtt_size >> PAGE_SHIFT);
if (r) {
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 3dd7ecc..287248c 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2644,8 +2644,8 @@ static int si_mc_init(struct radeon_device *rdev)
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* size in MB on si */
- rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
- rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
si_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index 6119ff8..f3b3488 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -1411,14 +1411,18 @@ static int abituguru_probe(struct platform_device *pdev)
pr_info("found Abit uGuru\n");

/* Register sysfs hooks */
- for (i = 0; i < sysfs_attr_i; i++)
- if (device_create_file(&pdev->dev,
- &data->sysfs_attr[i].dev_attr))
+ for (i = 0; i < sysfs_attr_i; i++) {
+ res = device_create_file(&pdev->dev,
+ &data->sysfs_attr[i].dev_attr);
+ if (res)
goto abituguru_probe_error;
- for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++)
- if (device_create_file(&pdev->dev,
- &abituguru_sysfs_attr[i].dev_attr))
+ }
+ for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) {
+ res = device_create_file(&pdev->dev,
+ &abituguru_sysfs_attr[i].dev_attr);
+ if (res)
goto abituguru_probe_error;
+ }

data->hwmon_dev = hwmon_device_register(&pdev->dev);
if (!IS_ERR(data->hwmon_dev))
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 94fd818..2db3628 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -361,7 +361,8 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
/* Enable the adapter */
dw_writel(dev, 1, DW_IC_ENABLE);

- /* Enable interrupts */
+ /* Clear and enable interrupts */
+ i2c_dw_clear_int(dev);
dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK);
}

@@ -426,8 +427,14 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
cmd |= BIT(9);

if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
+
+ /* avoid rx buffer overrun */
+ if (rx_limit - dev->rx_outstanding <= 0)
+ break;
+
dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD);
rx_limit--;
+ dev->rx_outstanding++;
} else
dw_writel(dev, cmd | *buf++, DW_IC_DATA_CMD);
tx_limit--; buf_len--;
@@ -480,8 +487,10 @@ i2c_dw_read(struct dw_i2c_dev *dev)

rx_valid = dw_readl(dev, DW_IC_RXFLR);

- for (; len > 0 && rx_valid > 0; len--, rx_valid--)
+ for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
*buf++ = dw_readl(dev, DW_IC_DATA_CMD);
+ dev->rx_outstanding--;
+ }

if (len > 0) {
dev->status |= STATUS_READ_IN_PROGRESS;
@@ -539,6 +548,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
dev->msg_err = 0;
dev->status = STATUS_IDLE;
dev->abort_source = 0;
+ dev->rx_outstanding = 0;

ret = i2c_dw_wait_bus_not_busy(dev);
if (ret < 0)
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 9c1840e..e761ad1 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -60,6 +60,7 @@
* @adapter: i2c subsystem adapter node
* @tx_fifo_depth: depth of the hardware tx fifo
* @rx_fifo_depth: depth of the hardware rx fifo
+ * @rx_outstanding: current master-rx elements in tx fifo
*/
struct dw_i2c_dev {
struct device *dev;
@@ -88,6 +89,7 @@ struct dw_i2c_dev {
u32 master_cfg;
unsigned int tx_fifo_depth;
unsigned int rx_fifo_depth;
+ int rx_outstanding;
};

#define ACCESS_SWAP 0x00000001
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index ed947dd..f3cdf64 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -375,6 +375,8 @@ static void ntb_netdev_remove(struct pci_dev *pdev)
if (dev == NULL)
return;

+ list_del(&dev->list);
+
ndev = dev->ndev;

unregister_netdev(ndev);
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
index f802e7c..2dacd19 100644
--- a/drivers/ntb/ntb_hw.c
+++ b/drivers/ntb/ntb_hw.c
@@ -345,7 +345,7 @@ int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
*/
void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
{
- if (mw > NTB_NUM_MW)
+ if (mw >= NTB_NUM_MW)
return NULL;

return ndev->mw[mw].vbase;
@@ -362,7 +362,7 @@ void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
*/
resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
{
- if (mw > NTB_NUM_MW)
+ if (mw >= NTB_NUM_MW)
return 0;

return ndev->mw[mw].bar_sz;
@@ -380,7 +380,7 @@ resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
*/
void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
{
- if (mw > NTB_NUM_MW)
+ if (mw >= NTB_NUM_MW)
return;

dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr,
@@ -1027,8 +1027,8 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ndev->mw[i].vbase =
ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)),
ndev->mw[i].bar_sz);
- dev_info(&pdev->dev, "MW %d size %d\n", i,
- (u32) pci_resource_len(pdev, MW_TO_BAR(i)));
+ dev_info(&pdev->dev, "MW %d size %llu\n", i,
+ pci_resource_len(pdev, MW_TO_BAR(i)));
if (!ndev->mw[i].vbase) {
dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
MW_TO_BAR(i));
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index e0bdfd7..f8d7081 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -58,7 +58,7 @@
#include <linux/ntb.h>
#include "ntb_hw.h"

-#define NTB_TRANSPORT_VERSION 2
+#define NTB_TRANSPORT_VERSION 3

static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
@@ -173,10 +173,13 @@ struct ntb_payload_header {

enum {
VERSION = 0,
- MW0_SZ,
- MW1_SZ,
- NUM_QPS,
QP_LINKS,
+ NUM_QPS,
+ NUM_MWS,
+ MW0_SZ_HIGH,
+ MW0_SZ_LOW,
+ MW1_SZ_HIGH,
+ MW1_SZ_LOW,
MAX_SPAD,
};

@@ -297,7 +300,7 @@ int ntb_register_client_dev(char *device_name)
{
struct ntb_transport_client_dev *client_dev;
struct ntb_transport *nt;
- int rc;
+ int rc, i = 0;

if (list_empty(&ntb_transport_list))
return -ENODEV;
@@ -315,7 +318,7 @@ int ntb_register_client_dev(char *device_name)
dev = &client_dev->dev;

/* setup and register client devices */
- dev_set_name(dev, "%s", device_name);
+ dev_set_name(dev, "%s%d", device_name, i);
dev->bus = &ntb_bus_type;
dev->release = ntb_client_release;
dev->parent = &ntb_query_pdev(nt->ndev)->dev;
@@ -327,6 +330,7 @@ int ntb_register_client_dev(char *device_name)
}

list_add_tail(&client_dev->entry, &nt->client_devs);
+ i++;
}

return 0;
@@ -486,12 +490,13 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
(qp_num / NTB_NUM_MW * rx_size);
rx_size -= sizeof(struct ntb_rx_info);

- qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info);
- qp->rx_max_frame = min(transport_mtu, rx_size);
+ qp->rx_buff = qp->remote_rx_info + 1;
+ /* Due to housekeeping, there must be atleast 2 buffs */
+ qp->rx_max_frame = min(transport_mtu, rx_size / 2);
qp->rx_max_entry = rx_size / qp->rx_max_frame;
qp->rx_index = 0;

- qp->remote_rx_info->entry = qp->rx_max_entry;
+ qp->remote_rx_info->entry = qp->rx_max_entry - 1;

/* setup the hdr offsets with 0's */
for (i = 0; i < qp->rx_max_entry; i++) {
@@ -502,6 +507,19 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,

qp->rx_pkts = 0;
qp->tx_pkts = 0;
+ qp->tx_index = 0;
+}
+
+static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
+{
+ struct ntb_transport_mw *mw = &nt->mw[num_mw];
+ struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+
+ if (!mw->virt_addr)
+ return;
+
+ dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
+ mw->virt_addr = NULL;
}

static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
@@ -509,12 +527,20 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
struct ntb_transport_mw *mw = &nt->mw[num_mw];
struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

+ /* No need to re-setup */
+ if (mw->size == ALIGN(size, 4096))
+ return 0;
+
+ if (mw->size != 0)
+ ntb_free_mw(nt, num_mw);
+
/* Alloc memory for receiving data. Must be 4k aligned */
mw->size = ALIGN(size, 4096);

mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
GFP_KERNEL);
if (!mw->virt_addr) {
+ mw->size = 0;
dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
(int) mw->size);
return -ENOMEM;
@@ -604,25 +630,31 @@ static void ntb_transport_link_work(struct work_struct *work)
u32 val;
int rc, i;

- /* send the local info */
- rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
- if (rc) {
- dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
- 0, VERSION);
- goto out;
- }
+ /* send the local info, in the opposite order of the way we read it */
+ for (i = 0; i < NTB_NUM_MW; i++) {
+ rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
+ ntb_get_mw_size(ndev, i) >> 32);
+ if (rc) {
+ dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
+ (u32)(ntb_get_mw_size(ndev, i) >> 32),
+ MW0_SZ_HIGH + (i * 2));
+ goto out;
+ }

- rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0));
- if (rc) {
- dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
- (u32) ntb_get_mw_size(ndev, 0), MW0_SZ);
- goto out;
+ rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
+ (u32) ntb_get_mw_size(ndev, i));
+ if (rc) {
+ dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
+ (u32) ntb_get_mw_size(ndev, i),
+ MW0_SZ_LOW + (i * 2));
+ goto out;
+ }
}

- rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1));
+ rc = ntb_write_remote_spad(ndev, NUM_MWS, NTB_NUM_MW);
if (rc) {
dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
- (u32) ntb_get_mw_size(ndev, 1), MW1_SZ);
+ NTB_NUM_MW, NUM_MWS);
goto out;
}

@@ -633,16 +665,10 @@ static void ntb_transport_link_work(struct work_struct *work)
goto out;
}

- rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
- if (rc) {
- dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
- goto out;
- }
-
- rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
+ rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
if (rc) {
dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
- val, QP_LINKS);
+ NTB_TRANSPORT_VERSION, VERSION);
goto out;
}

@@ -667,33 +693,43 @@ static void ntb_transport_link_work(struct work_struct *work)
goto out;
dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);

- rc = ntb_read_remote_spad(ndev, MW0_SZ, &val);
+ rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
if (rc) {
- dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ);
+ dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
goto out;
}

- if (!val)
+ if (val != NTB_NUM_MW)
goto out;
- dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val);
+ dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);

- rc = ntb_set_mw(nt, 0, val);
- if (rc)
- goto out;
+ for (i = 0; i < NTB_NUM_MW; i++) {
+ u64 val64;

- rc = ntb_read_remote_spad(ndev, MW1_SZ, &val);
- if (rc) {
- dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ);
- goto out;
- }
+ rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading remote spad %d\n",
+ MW0_SZ_HIGH + (i * 2));
+ goto out1;
+ }

- if (!val)
- goto out;
- dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);
+ val64 = (u64) val << 32;

- rc = ntb_set_mw(nt, 1, val);
- if (rc)
- goto out;
+ rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading remote spad %d\n",
+ MW0_SZ_LOW + (i * 2));
+ goto out1;
+ }
+
+ val64 |= val;
+
+ dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);
+
+ rc = ntb_set_mw(nt, i, val64);
+ if (rc)
+ goto out1;
+ }

nt->transport_link = NTB_LINK_UP;

@@ -708,6 +744,9 @@ static void ntb_transport_link_work(struct work_struct *work)

return;

+out1:
+ for (i = 0; i < NTB_NUM_MW; i++)
+ ntb_free_mw(nt, i);
out:
if (ntb_hw_link_status(ndev))
schedule_delayed_work(&nt->link_work,
@@ -780,10 +819,10 @@ static void ntb_transport_init_queue(struct ntb_transport *nt,
(qp_num / NTB_NUM_MW * tx_size);
tx_size -= sizeof(struct ntb_rx_info);

- qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info);
- qp->tx_max_frame = min(transport_mtu, tx_size);
+ qp->tx_mw = qp->rx_info + 1;
+ /* Due to housekeeping, there must be atleast 2 buffs */
+ qp->tx_max_frame = min(transport_mtu, tx_size / 2);
qp->tx_max_entry = tx_size / qp->tx_max_frame;
- qp->tx_index = 0;

if (nt->debugfs_dir) {
char debugfs_name[4];
@@ -897,10 +936,7 @@ void ntb_transport_free(void *transport)
pdev = ntb_query_pdev(nt->ndev);

for (i = 0; i < NTB_NUM_MW; i++)
- if (nt->mw[i].virt_addr)
- dma_free_coherent(&pdev->dev, nt->mw[i].size,
- nt->mw[i].virt_addr,
- nt->mw[i].dma_addr);
+ ntb_free_mw(nt, i);

kfree(nt->qps);
ntb_unregister_transport(nt->ndev);
@@ -999,11 +1035,16 @@ out:
static void ntb_transport_rx(unsigned long data)
{
struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
- int rc;
+ int rc, i;

- do {
+ /* Limit the number of packets processed in a single interrupt to
+ * provide fairness to others
+ */
+ for (i = 0; i < qp->rx_max_entry; i++) {
rc = ntb_process_rxc(qp);
- } while (!rc);
+ if (rc)
+ break;
+ }
}

static void ntb_transport_rxc_db(void *data, int db_num)
@@ -1210,12 +1251,14 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
*/
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
- struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+ struct pci_dev *pdev;
struct ntb_queue_entry *entry;

if (!qp)
return;

+ pdev = ntb_query_pdev(qp->ndev);
+
cancel_delayed_work_sync(&qp->link_work);

ntb_unregister_db_callback(qp->ndev, qp->qp_num);
@@ -1371,12 +1414,13 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up);
*/
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
- struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+ struct pci_dev *pdev;
int rc, val;

if (!qp)
return;

+ pdev = ntb_query_pdev(qp->ndev);
qp->client_ready = NTB_LINK_DOWN;

rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
@@ -1408,6 +1452,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_down);
*/
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
+ if (!qp)
+ return false;
+
return qp->qp_link == NTB_LINK_UP;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);
@@ -1422,6 +1469,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_query);
*/
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
+ if (!qp)
+ return 0;
+
return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
@@ -1436,6 +1486,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
*/
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
+ if (!qp)
+ return 0;
+
return qp->tx_max_frame - sizeof(struct ntb_payload_header);
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 2c02310..f49b62f 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1796,7 +1796,11 @@ static noinline int copy_to_sk(struct btrfs_root *root,
item_off = btrfs_item_ptr_offset(leaf, i);
item_len = btrfs_item_size_nr(leaf, i);

- if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
+ btrfs_item_key_to_cpu(leaf, key, i);
+ if (!key_in_sk(key, sk))
+ continue;
+
+ if (sizeof(sh) + item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
item_len = 0;

if (sizeof(sh) + item_len + *sk_offset >
@@ -1805,10 +1809,6 @@ static noinline int copy_to_sk(struct btrfs_root *root,
goto overflow;
}

- btrfs_item_key_to_cpu(leaf, key, i);
- if (!key_in_sk(key, sk))
- continue;
-
sh.objectid = key->objectid;
sh.offset = key->offset;
sh.type = key->type;
diff --git a/include/uapi/linux/virtio_console.h b/include/uapi/linux/virtio_console.h
index ee13ab6..c312f16 100644
--- a/include/uapi/linux/virtio_console.h
+++ b/include/uapi/linux/virtio_console.h
@@ -39,7 +39,7 @@
#define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */
#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */

-#define VIRTIO_CONSOLE_BAD_ID (~(u32)0)
+#define VIRTIO_CONSOLE_BAD_ID (~(__u32)0)

struct virtio_console_config {
/* colums of the screens */