Magellan Linux

Annotation of /trunk/kernel-alx/patches-3.4/0115-3.4.16-all-fixes.patch

Revision 1946
Wed Nov 14 15:25:09 2012 UTC by niro
File size: 59894 bytes
3.4.18-alx-r1
1 niro 1946 diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
2     index d80f79d..8e1fb82 100644
3     --- a/arch/s390/boot/compressed/vmlinux.lds.S
4     +++ b/arch/s390/boot/compressed/vmlinux.lds.S
5     @@ -5,7 +5,7 @@ OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
6     OUTPUT_ARCH(s390:64-bit)
7     #else
8     OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
9     -OUTPUT_ARCH(s390)
10     +OUTPUT_ARCH(s390:31-bit)
11     #endif
12    
13     ENTRY(startup)
14     diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
15     index 21109c6..1343d7c 100644
16     --- a/arch/s390/kernel/vmlinux.lds.S
17     +++ b/arch/s390/kernel/vmlinux.lds.S
18     @@ -8,7 +8,7 @@
19    
20     #ifndef CONFIG_64BIT
21     OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
22     -OUTPUT_ARCH(s390)
23     +OUTPUT_ARCH(s390:31-bit)
24     ENTRY(startup)
25     jiffies = jiffies_64 + 4;
26     #else
27     diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
28     index 28559ce..602eca8 100644
29     --- a/arch/sparc/kernel/perf_event.c
30     +++ b/arch/sparc/kernel/perf_event.c
31     @@ -557,11 +557,13 @@ static u64 nop_for_index(int idx)
32    
33     static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
34     {
35     - u64 val, mask = mask_for_index(idx);
36     + u64 enc, val, mask = mask_for_index(idx);
37     +
38     + enc = perf_event_get_enc(cpuc->events[idx]);
39    
40     val = cpuc->pcr;
41     val &= ~mask;
42     - val |= hwc->config;
43     + val |= event_encoding(enc, idx);
44     cpuc->pcr = val;
45    
46     pcr_ops->write(cpuc->pcr);
47     @@ -1428,8 +1430,6 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
48     {
49     unsigned long ufp;
50    
51     - perf_callchain_store(entry, regs->tpc);
52     -
53     ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
54     do {
55     struct sparc_stackf *usf, sf;
56     @@ -1450,8 +1450,6 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
57     {
58     unsigned long ufp;
59    
60     - perf_callchain_store(entry, regs->tpc);
61     -
62     ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
63     do {
64     struct sparc_stackf32 *usf, sf;
65     @@ -1470,6 +1468,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
66     void
67     perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
68     {
69     + perf_callchain_store(entry, regs->tpc);
70     +
71     + if (!current->mm)
72     + return;
73     +
74     flushw_user();
75     if (test_thread_flag(TIF_32BIT))
76     perf_callchain_user_32(entry, regs);
77     diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
78     index 3ee51f1..57b7cab 100644
79     --- a/arch/sparc/kernel/sys_sparc_64.c
80     +++ b/arch/sparc/kernel/sys_sparc_64.c
81     @@ -519,12 +519,12 @@ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
82     {
83     int ret;
84    
85     - if (current->personality == PER_LINUX32 &&
86     - personality == PER_LINUX)
87     - personality = PER_LINUX32;
88     + if (personality(current->personality) == PER_LINUX32 &&
89     + personality(personality) == PER_LINUX)
90     + personality |= PER_LINUX32;
91     ret = sys_personality(personality);
92     - if (ret == PER_LINUX32)
93     - ret = PER_LINUX;
94     + if (personality(ret) == PER_LINUX32)
95     + ret &= ~PER_LINUX32;
96    
97     return ret;
98     }
99     diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
100     index 1d7e274..7f5f65d 100644
101     --- a/arch/sparc/kernel/syscalls.S
102     +++ b/arch/sparc/kernel/syscalls.S
103     @@ -212,24 +212,20 @@ linux_sparc_syscall:
104     3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
105     ret_sys_call:
106     ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
107     - ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
108     sra %o0, 0, %o0
109     mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
110     sllx %g2, 32, %g2
111    
112     - /* Check if force_successful_syscall_return()
113     - * was invoked.
114     - */
115     - ldub [%g6 + TI_SYS_NOERROR], %l2
116     - brnz,a,pn %l2, 80f
117     - stb %g0, [%g6 + TI_SYS_NOERROR]
118     -
119     cmp %o0, -ERESTART_RESTARTBLOCK
120     bgeu,pn %xcc, 1f
121     - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
122     -80:
123     + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
124     + ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
125     +
126     +2:
127     + stb %g0, [%g6 + TI_SYS_NOERROR]
128     /* System call success, clear Carry condition code. */
129     andn %g3, %g2, %g3
130     +3:
131     stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
132     bne,pn %icc, linux_syscall_trace2
133     add %l1, 0x4, %l2 ! npc = npc+4
134     @@ -238,20 +234,20 @@ ret_sys_call:
135     stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
136    
137     1:
138     + /* Check if force_successful_syscall_return()
139     + * was invoked.
140     + */
141     + ldub [%g6 + TI_SYS_NOERROR], %l2
142     + brnz,pn %l2, 2b
143     + ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
144     /* System call failure, set Carry condition code.
145     * Also, get abs(errno) to return to the process.
146     */
147     - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
148     sub %g0, %o0, %o0
149     - or %g3, %g2, %g3
150     stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
151     - stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
152     - bne,pn %icc, linux_syscall_trace2
153     - add %l1, 0x4, %l2 ! npc = npc+4
154     - stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
155     + ba,pt %xcc, 3b
156     + or %g3, %g2, %g3
157    
158     - b,pt %xcc, rtrap
159     - stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
160     linux_syscall_trace2:
161     call syscall_trace_leave
162     add %sp, PTREGS_OFF, %o0
163     diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
164     index 21faaee..0791618 100644
165     --- a/arch/sparc/mm/init_64.c
166     +++ b/arch/sparc/mm/init_64.c
167     @@ -2099,6 +2099,9 @@ EXPORT_SYMBOL(_PAGE_CACHE);
168     #ifdef CONFIG_SPARSEMEM_VMEMMAP
169     unsigned long vmemmap_table[VMEMMAP_SIZE];
170    
171     +static long __meminitdata addr_start, addr_end;
172     +static int __meminitdata node_start;
173     +
174     int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
175     {
176     unsigned long vstart = (unsigned long) start;
177     @@ -2129,15 +2132,30 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
178    
179     *vmem_pp = pte_base | __pa(block);
180    
181     - printk(KERN_INFO "[%p-%p] page_structs=%lu "
182     - "node=%d entry=%lu/%lu\n", start, block, nr,
183     - node,
184     - addr >> VMEMMAP_CHUNK_SHIFT,
185     - VMEMMAP_SIZE);
186     + /* check to see if we have contiguous blocks */
187     + if (addr_end != addr || node_start != node) {
188     + if (addr_start)
189     + printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
190     + addr_start, addr_end-1, node_start);
191     + addr_start = addr;
192     + node_start = node;
193     + }
194     + addr_end = addr + VMEMMAP_CHUNK;
195     }
196     }
197     return 0;
198     }
199     +
200     +void __meminit vmemmap_populate_print_last(void)
201     +{
202     + if (addr_start) {
203     + printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
204     + addr_start, addr_end-1, node_start);
205     + addr_start = 0;
206     + addr_end = 0;
207     + node_start = 0;
208     + }
209     +}
210     #endif /* CONFIG_SPARSEMEM_VMEMMAP */
211    
212     static void prot_init_common(unsigned long page_none,
213     diff --git a/arch/tile/Makefile b/arch/tile/Makefile
214     index 9520bc5..99f461d 100644
215     --- a/arch/tile/Makefile
216     +++ b/arch/tile/Makefile
217     @@ -26,6 +26,10 @@ $(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH))
218     endif
219     endif
220    
221     +# The tile compiler may emit .eh_frame information for backtracing.
222     +# In kernel modules, this causes load failures due to unsupported relocations.
223     +KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
224     +
225     ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
226     KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS)
227     endif
228     diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
229     index 7b784f4..6d4f7ba 100644
230     --- a/arch/x86/kernel/entry_32.S
231     +++ b/arch/x86/kernel/entry_32.S
232     @@ -1025,7 +1025,7 @@ ENTRY(xen_sysenter_target)
233    
234     ENTRY(xen_hypervisor_callback)
235     CFI_STARTPROC
236     - pushl_cfi $0
237     + pushl_cfi $-1 /* orig_ax = -1 => not a system call */
238     SAVE_ALL
239     TRACE_IRQS_OFF
240    
241     @@ -1067,14 +1067,16 @@ ENTRY(xen_failsafe_callback)
242     2: mov 8(%esp),%es
243     3: mov 12(%esp),%fs
244     4: mov 16(%esp),%gs
245     + /* EAX == 0 => Category 1 (Bad segment)
246     + EAX != 0 => Category 2 (Bad IRET) */
247     testl %eax,%eax
248     popl_cfi %eax
249     lea 16(%esp),%esp
250     CFI_ADJUST_CFA_OFFSET -16
251     jz 5f
252     addl $16,%esp
253     - jmp iret_exc # EAX != 0 => Category 2 (Bad IRET)
254     -5: pushl_cfi $0 # EAX == 0 => Category 1 (Bad segment)
255     + jmp iret_exc
256     +5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */
257     SAVE_ALL
258     jmp ret_from_exception
259     CFI_ENDPROC
260     diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
261     index cdc79b5..bd6f592 100644
262     --- a/arch/x86/kernel/entry_64.S
263     +++ b/arch/x86/kernel/entry_64.S
264     @@ -1351,7 +1351,7 @@ ENTRY(xen_failsafe_callback)
265     CFI_RESTORE r11
266     addq $0x30,%rsp
267     CFI_ADJUST_CFA_OFFSET -0x30
268     - pushq_cfi $0
269     + pushq_cfi $-1 /* orig_ax = -1 => not a system call */
270     SAVE_ALL
271     jmp error_exit
272     CFI_ENDPROC
273     diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
274     index 1a29015..fbbd1eb 100644
275     --- a/arch/x86/kernel/setup.c
276     +++ b/arch/x86/kernel/setup.c
277     @@ -927,8 +927,21 @@ void __init setup_arch(char **cmdline_p)
278    
279     #ifdef CONFIG_X86_64
280     if (max_pfn > max_low_pfn) {
281     - max_pfn_mapped = init_memory_mapping(1UL<<32,
282     - max_pfn<<PAGE_SHIFT);
283     + int i;
284     + for (i = 0; i < e820.nr_map; i++) {
285     + struct e820entry *ei = &e820.map[i];
286     +
287     + if (ei->addr + ei->size <= 1UL << 32)
288     + continue;
289     +
290     + if (ei->type == E820_RESERVED)
291     + continue;
292     +
293     + max_pfn_mapped = init_memory_mapping(
294     + ei->addr < 1UL << 32 ? 1UL << 32 : ei->addr,
295     + ei->addr + ei->size);
296     + }
297     +
298     /* can we preseve max_low_pfn ?*/
299     max_low_pfn = max_pfn;
300     }
301     diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
302     index 26b8a85..48768df 100644
303     --- a/arch/x86/oprofile/nmi_int.c
304     +++ b/arch/x86/oprofile/nmi_int.c
305     @@ -55,7 +55,7 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
306     val |= counter_config->extra;
307     event &= model->event_mask ? model->event_mask : 0xFF;
308     val |= event & 0xFF;
309     - val |= (event & 0x0F00) << 24;
310     + val |= (u64)(event & 0x0F00) << 24;
311    
312     return val;
313     }
314     diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
315     index 7ef73c9..a8bfe1c 100644
316     --- a/drivers/edac/amd64_edac.c
317     +++ b/drivers/edac/amd64_edac.c
318     @@ -170,8 +170,11 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
319     * memory controller and apply to register. Search for the first
320     * bandwidth entry that is greater or equal than the setting requested
321     * and program that. If at last entry, turn off DRAM scrubbing.
322     + *
323     + * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
324     + * by falling back to the last element in scrubrates[].
325     */
326     - for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
327     + for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
328     /*
329     * skip scrub rates which aren't recommended
330     * (see F10 BKDG, F3x58)
331     @@ -181,12 +184,6 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
332    
333     if (scrubrates[i].bandwidth <= new_bw)
334     break;
335     -
336     - /*
337     - * if no suitable bandwidth found, turn off DRAM scrubbing
338     - * entirely by falling back to the last element in the
339     - * scrubrates array.
340     - */
341     }
342    
343     scrubval = scrubrates[i].scrubval;
344     diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
345     index e3e470f..67432e2 100644
346     --- a/drivers/infiniband/core/cma.c
347     +++ b/drivers/infiniband/core/cma.c
348     @@ -3451,7 +3451,8 @@ out:
349     }
350    
351     static const struct ibnl_client_cbs cma_cb_table[] = {
352     - [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats },
353     + [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
354     + .module = THIS_MODULE },
355     };
356    
357     static int __init cma_init(void)
358     diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
359     index 396e293..8c3b08a 100644
360     --- a/drivers/infiniband/core/netlink.c
361     +++ b/drivers/infiniband/core/netlink.c
362     @@ -151,6 +151,7 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
363     {
364     struct netlink_dump_control c = {
365     .dump = client->cb_table[op].dump,
366     + .module = client->cb_table[op].module,
367     };
368     return netlink_dump_start(nls, skb, nlh, &c);
369     }
370     diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
371     index 17ef6c4..035f28e 100644
372     --- a/drivers/iommu/tegra-smmu.c
373     +++ b/drivers/iommu/tegra-smmu.c
374     @@ -148,7 +148,7 @@
375    
376     #define SMMU_ADDR_TO_PFN(addr) ((addr) >> 12)
377     #define SMMU_ADDR_TO_PDN(addr) ((addr) >> 22)
378     -#define SMMU_PDN_TO_ADDR(addr) ((pdn) << 22)
379     +#define SMMU_PDN_TO_ADDR(pdn) ((pdn) << 22)
380    
381     #define _READABLE (1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT)
382     #define _WRITABLE (1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT)
383     diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
384     index 0b3e481..eab0641 100644
385     --- a/drivers/media/video/au0828/au0828-video.c
386     +++ b/drivers/media/video/au0828/au0828-video.c
387     @@ -1692,14 +1692,18 @@ static int vidioc_streamoff(struct file *file, void *priv,
388     (AUVI_INPUT(i).audio_setup)(dev, 0);
389     }
390    
391     - videobuf_streamoff(&fh->vb_vidq);
392     - res_free(fh, AU0828_RESOURCE_VIDEO);
393     + if (res_check(fh, AU0828_RESOURCE_VIDEO)) {
394     + videobuf_streamoff(&fh->vb_vidq);
395     + res_free(fh, AU0828_RESOURCE_VIDEO);
396     + }
397     } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
398     dev->vbi_timeout_running = 0;
399     del_timer_sync(&dev->vbi_timeout);
400    
401     - videobuf_streamoff(&fh->vb_vbiq);
402     - res_free(fh, AU0828_RESOURCE_VBI);
403     + if (res_check(fh, AU0828_RESOURCE_VBI)) {
404     + videobuf_streamoff(&fh->vb_vbiq);
405     + res_free(fh, AU0828_RESOURCE_VBI);
406     + }
407     }
408    
409     return 0;
410     diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
411     index 47b19c0..eb9f5fb 100644
412     --- a/drivers/mtd/nand/nand_base.c
413     +++ b/drivers/mtd/nand/nand_base.c
414     @@ -2897,9 +2897,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
415     if (le16_to_cpu(p->features) & 1)
416     *busw = NAND_BUSWIDTH_16;
417    
418     - chip->options &= ~NAND_CHIPOPTIONS_MSK;
419     - chip->options |= (NAND_NO_READRDY |
420     - NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
421     + chip->options |= NAND_NO_READRDY | NAND_NO_AUTOINCR;
422    
423     pr_info("ONFI flash detected\n");
424     return 1;
425     @@ -3064,9 +3062,8 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
426     mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
427     }
428     }
429     - /* Get chip options, preserve non chip based options */
430     - chip->options &= ~NAND_CHIPOPTIONS_MSK;
431     - chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
432     + /* Get chip options */
433     + chip->options |= type->options;
434    
435     /*
436     * Check if chip is not a Samsung device. Do not clear the
437     diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
438     index 5a30bf8..f4be8f7 100644
439     --- a/drivers/net/ethernet/marvell/skge.c
440     +++ b/drivers/net/ethernet/marvell/skge.c
441     @@ -4153,6 +4153,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = {
442     DMI_MATCH(DMI_BOARD_NAME, "nForce"),
443     },
444     },
445     + {
446     + .ident = "ASUS P5NSLI",
447     + .matches = {
448     + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
449     + DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
450     + },
451     + },
452     {}
453     };
454    
455     diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
456     index b066273..7dd879c 100644
457     --- a/drivers/pcmcia/pxa2xx_sharpsl.c
458     +++ b/drivers/pcmcia/pxa2xx_sharpsl.c
459     @@ -194,7 +194,7 @@ static void sharpsl_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
460     sharpsl_pcmcia_init_reset(skt);
461     }
462    
463     -static struct pcmcia_low_level sharpsl_pcmcia_ops __initdata = {
464     +static struct pcmcia_low_level sharpsl_pcmcia_ops = {
465     .owner = THIS_MODULE,
466     .hw_init = sharpsl_pcmcia_hw_init,
467     .socket_state = sharpsl_pcmcia_socket_state,
468     diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
469     index 9b32968..2a32a2f 100644
470     --- a/drivers/pinctrl/pinctrl-tegra.c
471     +++ b/drivers/pinctrl/pinctrl-tegra.c
472     @@ -259,7 +259,7 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx,
473     *bank = g->drv_bank;
474     *reg = g->drv_reg;
475     *bit = g->lpmd_bit;
476     - *width = 1;
477     + *width = 2;
478     break;
479     case TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH:
480     *bank = g->drv_bank;
481     diff --git a/drivers/pinctrl/pinctrl-tegra30.c b/drivers/pinctrl/pinctrl-tegra30.c
482     index 4d7571d..636d96c 100644
483     --- a/drivers/pinctrl/pinctrl-tegra30.c
484     +++ b/drivers/pinctrl/pinctrl-tegra30.c
485     @@ -3343,10 +3343,10 @@ static const struct tegra_function tegra30_functions[] = {
486     FUNCTION(vi_alt3),
487     };
488    
489     -#define MUXCTL_REG_A 0x3000
490     -#define PINGROUP_REG_A 0x868
491     +#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
492     +#define PINGROUP_REG_A 0x3000 /* bank 1 */
493    
494     -#define PINGROUP_REG_Y(r) ((r) - MUXCTL_REG_A)
495     +#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
496     #define PINGROUP_REG_N(r) -1
497    
498     #define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior) \
499     @@ -3362,25 +3362,25 @@ static const struct tegra_function tegra30_functions[] = {
500     }, \
501     .func_safe = TEGRA_MUX_ ## f_safe, \
502     .mux_reg = PINGROUP_REG_Y(r), \
503     - .mux_bank = 0, \
504     + .mux_bank = 1, \
505     .mux_bit = 0, \
506     .pupd_reg = PINGROUP_REG_Y(r), \
507     - .pupd_bank = 0, \
508     + .pupd_bank = 1, \
509     .pupd_bit = 2, \
510     .tri_reg = PINGROUP_REG_Y(r), \
511     - .tri_bank = 0, \
512     + .tri_bank = 1, \
513     .tri_bit = 4, \
514     .einput_reg = PINGROUP_REG_Y(r), \
515     - .einput_bank = 0, \
516     + .einput_bank = 1, \
517     .einput_bit = 5, \
518     .odrain_reg = PINGROUP_REG_##od(r), \
519     - .odrain_bank = 0, \
520     + .odrain_bank = 1, \
521     .odrain_bit = 6, \
522     .lock_reg = PINGROUP_REG_Y(r), \
523     - .lock_bank = 0, \
524     + .lock_bank = 1, \
525     .lock_bit = 7, \
526     .ioreset_reg = PINGROUP_REG_##ior(r), \
527     - .ioreset_bank = 0, \
528     + .ioreset_bank = 1, \
529     .ioreset_bit = 8, \
530     .drv_reg = -1, \
531     }
532     @@ -3399,8 +3399,8 @@ static const struct tegra_function tegra30_functions[] = {
533     .odrain_reg = -1, \
534     .lock_reg = -1, \
535     .ioreset_reg = -1, \
536     - .drv_reg = ((r) - PINGROUP_REG_A), \
537     - .drv_bank = 1, \
538     + .drv_reg = ((r) - DRV_PINGROUP_REG_A), \
539     + .drv_bank = 0, \
540     .hsm_bit = hsm_b, \
541     .schmitt_bit = schmitt_b, \
542     .lpmd_bit = lpmd_b, \
543     diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
544     index 7f2fac1..c5f7eae 100644
545     --- a/drivers/usb/class/cdc-acm.c
546     +++ b/drivers/usb/class/cdc-acm.c
547     @@ -818,10 +818,6 @@ static const __u32 acm_tty_speed[] = {
548     2500000, 3000000, 3500000, 4000000
549     };
550    
551     -static const __u8 acm_tty_size[] = {
552     - 5, 6, 7, 8
553     -};
554     -
555     static void acm_tty_set_termios(struct tty_struct *tty,
556     struct ktermios *termios_old)
557     {
558     @@ -835,7 +831,21 @@ static void acm_tty_set_termios(struct tty_struct *tty,
559     newline.bParityType = termios->c_cflag & PARENB ?
560     (termios->c_cflag & PARODD ? 1 : 2) +
561     (termios->c_cflag & CMSPAR ? 2 : 0) : 0;
562     - newline.bDataBits = acm_tty_size[(termios->c_cflag & CSIZE) >> 4];
563     + switch (termios->c_cflag & CSIZE) {
564     + case CS5:
565     + newline.bDataBits = 5;
566     + break;
567     + case CS6:
568     + newline.bDataBits = 6;
569     + break;
570     + case CS7:
571     + newline.bDataBits = 7;
572     + break;
573     + case CS8:
574     + default:
575     + newline.bDataBits = 8;
576     + break;
577     + }
578     /* FIXME: Needs to clear unsupported bits in the termios */
579     acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
580    
581     @@ -1234,7 +1244,7 @@ made_compressed_probe:
582    
583     if (usb_endpoint_xfer_int(epwrite))
584     usb_fill_int_urb(snd->urb, usb_dev,
585     - usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
586     + usb_sndintpipe(usb_dev, epwrite->bEndpointAddress),
587     NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval);
588     else
589     usb_fill_bulk_urb(snd->urb, usb_dev,
590     diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
591     index b8d4697..cee0c3e 100644
592     --- a/drivers/usb/dwc3/gadget.c
593     +++ b/drivers/usb/dwc3/gadget.c
594     @@ -1777,6 +1777,7 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
595     ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
596     WARN_ON_ONCE(ret);
597     dep->res_trans_idx = 0;
598     + dep->flags &= ~DWC3_EP_BUSY;
599     }
600     }
601    
602     diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
603     index f756231..e52ddfe 100644
604     --- a/drivers/usb/host/xhci.c
605     +++ b/drivers/usb/host/xhci.c
606     @@ -479,7 +479,8 @@ static bool compliance_mode_recovery_timer_quirk_check(void)
607    
608     if (strstr(dmi_product_name, "Z420") ||
609     strstr(dmi_product_name, "Z620") ||
610     - strstr(dmi_product_name, "Z820"))
611     + strstr(dmi_product_name, "Z820") ||
612     + strstr(dmi_product_name, "Z1"))
613     return true;
614    
615     return false;
616     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
617     index 57de734..17ec21e 100644
618     --- a/drivers/usb/serial/option.c
619     +++ b/drivers/usb/serial/option.c
620     @@ -503,11 +503,19 @@ static const struct option_blacklist_info net_intf5_blacklist = {
621     .reserved = BIT(5),
622     };
623    
624     +static const struct option_blacklist_info net_intf6_blacklist = {
625     + .reserved = BIT(6),
626     +};
627     +
628     static const struct option_blacklist_info zte_mf626_blacklist = {
629     .sendsetup = BIT(0) | BIT(1),
630     .reserved = BIT(4),
631     };
632    
633     +static const struct option_blacklist_info zte_1255_blacklist = {
634     + .reserved = BIT(3) | BIT(4),
635     +};
636     +
637     static const struct usb_device_id option_ids[] = {
638     { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
639     { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
640     @@ -853,13 +861,19 @@ static const struct usb_device_id option_ids[] = {
641     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
642     .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
643     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
644     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) },
645     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) },
646     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff),
647     + .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
648     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff),
649     + .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
650     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
651     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) },
652     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) },
653     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) },
654     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) },
655     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff),
656     + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
657     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff),
658     + .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
659     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff),
660     + .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
661     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
662     + .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
663     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
664     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) },
665     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
666     @@ -872,7 +886,8 @@ static const struct usb_device_id option_ids[] = {
667     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
668     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
669     .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
670     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
671     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff),
672     + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
673     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
674     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
675     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
676     @@ -880,13 +895,22 @@ static const struct usb_device_id option_ids[] = {
677     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
678     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
679     .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
680     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
681     + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
682     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
683     + .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
684     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
685     + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
686     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
687     .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
688     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
689     .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
690     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
691     .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
692     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
693     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
694     + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
695     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
696     + .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
697     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
698     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
699     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
700     @@ -1002,18 +1026,24 @@ static const struct usb_device_id option_ids[] = {
701     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) },
702     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
703     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
704     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) },
705     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff),
706     + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
707     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
708     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) },
709     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff),
710     + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
711     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
712     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
713     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
714     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
715     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) },
716     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff),
717     + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
718     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
719     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) },
720     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) },
721     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) },
722     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff),
723     + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
724     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff),
725     + .driver_info = (kernel_ulong_t)&zte_1255_blacklist },
726     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff),
727     + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
728     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
729     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
730     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
731     @@ -1058,8 +1088,16 @@ static const struct usb_device_id option_ids[] = {
732     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
733     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
734     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
735     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
736     + .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
737     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
738     .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
739     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff),
740     + .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
741     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff),
742     + .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
743     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
744     + .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
745     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
746     0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
747     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
748     @@ -1071,15 +1109,21 @@ static const struct usb_device_id option_ids[] = {
749     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
750     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
751     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
752     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
753     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff) },
754     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
755     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
756     + .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
757     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
758     + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
759     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff),
760     + .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
761     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
762     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
763     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) },
764     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff),
765     + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
766     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
767     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) },
768     - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) },
769     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff),
770     + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
771     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
772     + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
773    
774     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
775     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
776     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
777     index 8b384cc..c215985 100644
778     --- a/fs/ext4/extents.c
779     +++ b/fs/ext4/extents.c
780     @@ -52,6 +52,9 @@
781     #define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */
782     #define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */
783    
784     +#define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */
785     +#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
786     +
787     static int ext4_split_extent(handle_t *handle,
788     struct inode *inode,
789     struct ext4_ext_path *path,
790     @@ -2829,6 +2832,9 @@ static int ext4_split_extent_at(handle_t *handle,
791     unsigned int ee_len, depth;
792     int err = 0;
793    
794     + BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
795     + (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
796     +
797     ext_debug("ext4_split_extents_at: inode %lu, logical"
798     "block %llu\n", inode->i_ino, (unsigned long long)split);
799    
800     @@ -2887,7 +2893,14 @@ static int ext4_split_extent_at(handle_t *handle,
801    
802     err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
803     if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
804     - err = ext4_ext_zeroout(inode, &orig_ex);
805     + if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
806     + if (split_flag & EXT4_EXT_DATA_VALID1)
807     + err = ext4_ext_zeroout(inode, ex2);
808     + else
809     + err = ext4_ext_zeroout(inode, ex);
810     + } else
811     + err = ext4_ext_zeroout(inode, &orig_ex);
812     +
813     if (err)
814     goto fix_extent_len;
815     /* update the extent length and mark as initialized */
816     @@ -2940,12 +2953,13 @@ static int ext4_split_extent(handle_t *handle,
817     uninitialized = ext4_ext_is_uninitialized(ex);
818    
819     if (map->m_lblk + map->m_len < ee_block + ee_len) {
820     - split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
821     - EXT4_EXT_MAY_ZEROOUT : 0;
822     + split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
823     flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
824     if (uninitialized)
825     split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
826     EXT4_EXT_MARK_UNINIT2;
827     + if (split_flag & EXT4_EXT_DATA_VALID2)
828     + split_flag1 |= EXT4_EXT_DATA_VALID1;
829     err = ext4_split_extent_at(handle, inode, path,
830     map->m_lblk + map->m_len, split_flag1, flags1);
831     if (err)
832     @@ -2958,8 +2972,8 @@ static int ext4_split_extent(handle_t *handle,
833     return PTR_ERR(path);
834    
835     if (map->m_lblk >= ee_block) {
836     - split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
837     - EXT4_EXT_MAY_ZEROOUT : 0;
838     + split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT |
839     + EXT4_EXT_DATA_VALID2);
840     if (uninitialized)
841     split_flag1 |= EXT4_EXT_MARK_UNINIT1;
842     if (split_flag & EXT4_EXT_MARK_UNINIT2)
843     @@ -3237,26 +3251,47 @@ static int ext4_split_unwritten_extents(handle_t *handle,
844    
845     split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
846     split_flag |= EXT4_EXT_MARK_UNINIT2;
847     -
848     + if (flags & EXT4_GET_BLOCKS_CONVERT)
849     + split_flag |= EXT4_EXT_DATA_VALID2;
850     flags |= EXT4_GET_BLOCKS_PRE_IO;
851     return ext4_split_extent(handle, inode, path, map, split_flag, flags);
852     }
853    
854     static int ext4_convert_unwritten_extents_endio(handle_t *handle,
855     - struct inode *inode,
856     - struct ext4_ext_path *path)
857     + struct inode *inode,
858     + struct ext4_map_blocks *map,
859     + struct ext4_ext_path *path)
860     {
861     struct ext4_extent *ex;
862     + ext4_lblk_t ee_block;
863     + unsigned int ee_len;
864     int depth;
865     int err = 0;
866    
867     depth = ext_depth(inode);
868     ex = path[depth].p_ext;
869     + ee_block = le32_to_cpu(ex->ee_block);
870     + ee_len = ext4_ext_get_actual_len(ex);
871    
872     ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
873     "block %llu, max_blocks %u\n", inode->i_ino,
874     - (unsigned long long)le32_to_cpu(ex->ee_block),
875     - ext4_ext_get_actual_len(ex));
876     + (unsigned long long)ee_block, ee_len);
877     +
878     + /* If extent is larger than requested then split is required */
879     + if (ee_block != map->m_lblk || ee_len > map->m_len) {
880     + err = ext4_split_unwritten_extents(handle, inode, map, path,
881     + EXT4_GET_BLOCKS_CONVERT);
882     + if (err < 0)
883     + goto out;
884     + ext4_ext_drop_refs(path);
885     + path = ext4_ext_find_extent(inode, map->m_lblk, path);
886     + if (IS_ERR(path)) {
887     + err = PTR_ERR(path);
888     + goto out;
889     + }
890     + depth = ext_depth(inode);
891     + ex = path[depth].p_ext;
892     + }
893    
894     err = ext4_ext_get_access(handle, inode, path + depth);
895     if (err)
896     @@ -3564,7 +3599,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
897     }
898     /* IO end_io complete, convert the filled extent to written */
899     if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
900     - ret = ext4_convert_unwritten_extents_endio(handle, inode,
901     + ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
902     path);
903     if (ret >= 0) {
904     ext4_update_inode_fsync_trans(handle, inode, 1);
905     diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
906     index 6b0a57e..e77c4fe 100644
907     --- a/fs/ext4/mballoc.c
908     +++ b/fs/ext4/mballoc.c
909     @@ -4984,8 +4984,9 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
910     end = start + (range->len >> sb->s_blocksize_bits) - 1;
911     minlen = range->minlen >> sb->s_blocksize_bits;
912    
913     - if (unlikely(minlen > EXT4_CLUSTERS_PER_GROUP(sb)) ||
914     - unlikely(start >= max_blks))
915     + if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
916     + start >= max_blks ||
917     + range->len < sb->s_blocksize)
918     return -EINVAL;
919     if (end >= max_blks)
920     end = max_blks - 1;
921     diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
922     index d269ada..982d267 100644
923     --- a/fs/lockd/clntxdr.c
924     +++ b/fs/lockd/clntxdr.c
925     @@ -223,7 +223,7 @@ static void encode_nlm_stat(struct xdr_stream *xdr,
926     {
927     __be32 *p;
928    
929     - BUG_ON(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
930     + WARN_ON_ONCE(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
931     p = xdr_reserve_space(xdr, 4);
932     *p = stat;
933     }
934     diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
935     index d27aab1..d413af3 100644
936     --- a/fs/lockd/svcproc.c
937     +++ b/fs/lockd/svcproc.c
938     @@ -67,7 +67,8 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
939    
940     /* Obtain file pointer. Not used by FREE_ALL call. */
941     if (filp != NULL) {
942     - if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0)
943     + error = cast_status(nlm_lookup_file(rqstp, &file, &lock->fh));
944     + if (error != 0)
945     goto no_locks;
946     *filp = file;
947    
948     diff --git a/fs/proc/stat.c b/fs/proc/stat.c
949     index 64c3b31..e296572 100644
950     --- a/fs/proc/stat.c
951     +++ b/fs/proc/stat.c
952     @@ -45,10 +45,13 @@ static cputime64_t get_iowait_time(int cpu)
953    
954     static u64 get_idle_time(int cpu)
955     {
956     - u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);
957     + u64 idle, idle_time = -1ULL;
958     +
959     + if (cpu_online(cpu))
960     + idle_time = get_cpu_idle_time_us(cpu, NULL);
961    
962     if (idle_time == -1ULL)
963     - /* !NO_HZ so we can rely on cpustat.idle */
964     + /* !NO_HZ or cpu offline so we can rely on cpustat.idle */
965     idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
966     else
967     idle = usecs_to_cputime64(idle_time);
968     @@ -58,10 +61,13 @@ static u64 get_idle_time(int cpu)
969    
970     static u64 get_iowait_time(int cpu)
971     {
972     - u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL);
973     + u64 iowait, iowait_time = -1ULL;
974     +
975     + if (cpu_online(cpu))
976     + iowait_time = get_cpu_iowait_time_us(cpu, NULL);
977    
978     if (iowait_time == -1ULL)
979     - /* !NO_HZ so we can rely on cpustat.iowait */
980     + /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
981     iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
982     else
983     iowait = usecs_to_cputime64(iowait_time);
984     diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
985     index a810987..561e130 100644
986     --- a/include/linux/if_vlan.h
987     +++ b/include/linux/if_vlan.h
988     @@ -82,6 +82,8 @@ static inline int is_vlan_dev(struct net_device *dev)
989     }
990    
991     #define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
992     +#define vlan_tx_nonzero_tag_present(__skb) \
993     + (vlan_tx_tag_present(__skb) && ((__skb)->vlan_tci & VLAN_VID_MASK))
994     #define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
995    
996     #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
997     @@ -91,7 +93,7 @@ extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
998     extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
999     extern u16 vlan_dev_vlan_id(const struct net_device *dev);
1000    
1001     -extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler);
1002     +extern bool vlan_do_receive(struct sk_buff **skb);
1003     extern struct sk_buff *vlan_untag(struct sk_buff *skb);
1004    
1005     extern int vlan_vid_add(struct net_device *dev, unsigned short vid);
1006     @@ -120,10 +122,8 @@ static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
1007     return 0;
1008     }
1009    
1010     -static inline bool vlan_do_receive(struct sk_buff **skb, bool last_handler)
1011     +static inline bool vlan_do_receive(struct sk_buff **skb)
1012     {
1013     - if (((*skb)->vlan_tci & VLAN_VID_MASK) && last_handler)
1014     - (*skb)->pkt_type = PACKET_OTHERHOST;
1015     return false;
1016     }
1017    
1018     diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
1019     index 1482340..2483513 100644
1020     --- a/include/linux/mtd/nand.h
1021     +++ b/include/linux/mtd/nand.h
1022     @@ -215,9 +215,6 @@ typedef enum {
1023     #define NAND_SUBPAGE_READ(chip) ((chip->ecc.mode == NAND_ECC_SOFT) \
1024     && (chip->page_shift > 9))
1025    
1026     -/* Mask to zero out the chip options, which come from the id table */
1027     -#define NAND_CHIPOPTIONS_MSK (0x0000ffff & ~NAND_NO_AUTOINCR)
1028     -
1029     /* Non chip related options */
1030     /* This option skips the bbt scan during initialization. */
1031     #define NAND_SKIP_BBTSCAN 0x00010000
1032     diff --git a/include/linux/netlink.h b/include/linux/netlink.h
1033     index a2092f5..b23e9cd 100644
1034     --- a/include/linux/netlink.h
1035     +++ b/include/linux/netlink.h
1036     @@ -153,6 +153,7 @@ struct nlattr {
1037    
1038     #include <linux/capability.h>
1039     #include <linux/skbuff.h>
1040     +#include <linux/export.h>
1041    
1042     struct net;
1043    
1044     @@ -226,6 +227,8 @@ struct netlink_callback {
1045     struct netlink_callback *cb);
1046     int (*done)(struct netlink_callback *cb);
1047     void *data;
1048     + /* the module that dump function belong to */
1049     + struct module *module;
1050     u16 family;
1051     u16 min_dump_alloc;
1052     unsigned int prev_seq, seq;
1053     @@ -251,14 +254,24 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
1054    
1055     struct netlink_dump_control {
1056     int (*dump)(struct sk_buff *skb, struct netlink_callback *);
1057     - int (*done)(struct netlink_callback*);
1058     + int (*done)(struct netlink_callback *);
1059     void *data;
1060     + struct module *module;
1061     u16 min_dump_alloc;
1062     };
1063    
1064     -extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1065     - const struct nlmsghdr *nlh,
1066     - struct netlink_dump_control *control);
1067     +extern int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1068     + const struct nlmsghdr *nlh,
1069     + struct netlink_dump_control *control);
1070     +static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1071     + const struct nlmsghdr *nlh,
1072     + struct netlink_dump_control *control)
1073     +{
1074     + if (!control->module)
1075     + control->module = THIS_MODULE;
1076     +
1077     + return __netlink_dump_start(ssk, skb, nlh, control);
1078     +}
1079    
1080    
1081     #define NL_NONROOT_RECV 0x1
1082     diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
1083     index 77d278d..005b507 100644
1084     --- a/include/linux/sunrpc/xprt.h
1085     +++ b/include/linux/sunrpc/xprt.h
1086     @@ -114,6 +114,7 @@ struct rpc_xprt_ops {
1087     void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
1088     int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
1089     void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
1090     + void (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
1091     void (*rpcbind)(struct rpc_task *task);
1092     void (*set_port)(struct rpc_xprt *xprt, unsigned short port);
1093     void (*connect)(struct rpc_task *task);
1094     @@ -279,6 +280,8 @@ void xprt_connect(struct rpc_task *task);
1095     void xprt_reserve(struct rpc_task *task);
1096     int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
1097     int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
1098     +void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
1099     +void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
1100     int xprt_prepare_transmit(struct rpc_task *task);
1101     void xprt_transmit(struct rpc_task *task);
1102     void xprt_end_transmit(struct rpc_task *task);
1103     diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
1104     index 3c5363a..bd3d8b2 100644
1105     --- a/include/rdma/rdma_netlink.h
1106     +++ b/include/rdma/rdma_netlink.h
1107     @@ -39,6 +39,7 @@ struct rdma_cm_id_stats {
1108    
1109     struct ibnl_client_cbs {
1110     int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb);
1111     + struct module *module;
1112     };
1113    
1114     int ibnl_init(void);
1115     diff --git a/kernel/cgroup.c b/kernel/cgroup.c
1116     index ed64cca..762f7cc7 100644
1117     --- a/kernel/cgroup.c
1118     +++ b/kernel/cgroup.c
1119     @@ -1868,9 +1868,8 @@ static void cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
1120     * trading it for newcg is protected by cgroup_mutex, we're safe to drop
1121     * it here; it will be freed under RCU.
1122     */
1123     - put_css_set(oldcg);
1124     -
1125     set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
1126     + put_css_set(oldcg);
1127     }
1128    
1129     /**
1130     @@ -4463,31 +4462,20 @@ static const struct file_operations proc_cgroupstats_operations = {
1131     *
1132     * A pointer to the shared css_set was automatically copied in
1133     * fork.c by dup_task_struct(). However, we ignore that copy, since
1134     - * it was not made under the protection of RCU, cgroup_mutex or
1135     - * threadgroup_change_begin(), so it might no longer be a valid
1136     - * cgroup pointer. cgroup_attach_task() might have already changed
1137     - * current->cgroups, allowing the previously referenced cgroup
1138     - * group to be removed and freed.
1139     - *
1140     - * Outside the pointer validity we also need to process the css_set
1141     - * inheritance between threadgoup_change_begin() and
1142     - * threadgoup_change_end(), this way there is no leak in any process
1143     - * wide migration performed by cgroup_attach_proc() that could otherwise
1144     - * miss a thread because it is too early or too late in the fork stage.
1145     + * it was not made under the protection of RCU or cgroup_mutex, so
1146     + * might no longer be a valid cgroup pointer. cgroup_attach_task() might
1147     + * have already changed current->cgroups, allowing the previously
1148     + * referenced cgroup group to be removed and freed.
1149     *
1150     * At the point that cgroup_fork() is called, 'current' is the parent
1151     * task, and the passed argument 'child' points to the child task.
1152     */
1153     void cgroup_fork(struct task_struct *child)
1154     {
1155     - /*
1156     - * We don't need to task_lock() current because current->cgroups
1157     - * can't be changed concurrently here. The parent obviously hasn't
1158     - * exited and called cgroup_exit(), and we are synchronized against
1159     - * cgroup migration through threadgroup_change_begin().
1160     - */
1161     + task_lock(current);
1162     child->cgroups = current->cgroups;
1163     get_css_set(child->cgroups);
1164     + task_unlock(current);
1165     INIT_LIST_HEAD(&child->cg_list);
1166     }
1167    
1168     @@ -4540,19 +4528,10 @@ void cgroup_post_fork(struct task_struct *child)
1169     */
1170     if (use_task_css_set_links) {
1171     write_lock(&css_set_lock);
1172     - if (list_empty(&child->cg_list)) {
1173     - /*
1174     - * It's safe to use child->cgroups without task_lock()
1175     - * here because we are protected through
1176     - * threadgroup_change_begin() against concurrent
1177     - * css_set change in cgroup_task_migrate(). Also
1178     - * the task can't exit at that point until
1179     - * wake_up_new_task() is called, so we are protected
1180     - * against cgroup_exit() setting child->cgroup to
1181     - * init_css_set.
1182     - */
1183     + task_lock(child);
1184     + if (list_empty(&child->cg_list))
1185     list_add(&child->cg_list, &child->cgroups->tasks);
1186     - }
1187     + task_unlock(child);
1188     write_unlock(&css_set_lock);
1189     }
1190     }
1191     diff --git a/kernel/sys.c b/kernel/sys.c
1192     index 898a84c..b0003db 100644
1193     --- a/kernel/sys.c
1194     +++ b/kernel/sys.c
1195     @@ -1180,15 +1180,16 @@ DECLARE_RWSEM(uts_sem);
1196     * Work around broken programs that cannot handle "Linux 3.0".
1197     * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
1198     */
1199     -static int override_release(char __user *release, int len)
1200     +static int override_release(char __user *release, size_t len)
1201     {
1202     int ret = 0;
1203     - char buf[65];
1204    
1205     if (current->personality & UNAME26) {
1206     - char *rest = UTS_RELEASE;
1207     + const char *rest = UTS_RELEASE;
1208     + char buf[65] = { 0 };
1209     int ndots = 0;
1210     unsigned v;
1211     + size_t copy;
1212    
1213     while (*rest) {
1214     if (*rest == '.' && ++ndots >= 3)
1215     @@ -1198,8 +1199,9 @@ static int override_release(char __user *release, int len)
1216     rest++;
1217     }
1218     v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
1219     - snprintf(buf, len, "2.6.%u%s", v, rest);
1220     - ret = copy_to_user(release, buf, len);
1221     + copy = clamp_t(size_t, len, 1, sizeof(buf));
1222     + copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
1223     + ret = copy_to_user(release, buf, copy + 1);
1224     }
1225     return ret;
1226     }
1227     diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
1228     index f364630..912613c 100644
1229     --- a/net/8021q/vlan_core.c
1230     +++ b/net/8021q/vlan_core.c
1231     @@ -5,7 +5,7 @@
1232     #include <linux/export.h>
1233     #include "vlan.h"
1234    
1235     -bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
1236     +bool vlan_do_receive(struct sk_buff **skbp)
1237     {
1238     struct sk_buff *skb = *skbp;
1239     u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
1240     @@ -13,14 +13,8 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
1241     struct vlan_pcpu_stats *rx_stats;
1242    
1243     vlan_dev = vlan_find_dev(skb->dev, vlan_id);
1244     - if (!vlan_dev) {
1245     - /* Only the last call to vlan_do_receive() should change
1246     - * pkt_type to PACKET_OTHERHOST
1247     - */
1248     - if (vlan_id && last_handler)
1249     - skb->pkt_type = PACKET_OTHERHOST;
1250     + if (!vlan_dev)
1251     return false;
1252     - }
1253    
1254     skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
1255     if (unlikely(!skb))
1256     diff --git a/net/core/dev.c b/net/core/dev.c
1257     index 086bc2e..82ca51b 100644
1258     --- a/net/core/dev.c
1259     +++ b/net/core/dev.c
1260     @@ -3209,18 +3209,18 @@ another_round:
1261     ncls:
1262     #endif
1263    
1264     - rx_handler = rcu_dereference(skb->dev->rx_handler);
1265     if (vlan_tx_tag_present(skb)) {
1266     if (pt_prev) {
1267     ret = deliver_skb(skb, pt_prev, orig_dev);
1268     pt_prev = NULL;
1269     }
1270     - if (vlan_do_receive(&skb, !rx_handler))
1271     + if (vlan_do_receive(&skb))
1272     goto another_round;
1273     else if (unlikely(!skb))
1274     goto out;
1275     }
1276    
1277     + rx_handler = rcu_dereference(skb->dev->rx_handler);
1278     if (rx_handler) {
1279     if (pt_prev) {
1280     ret = deliver_skb(skb, pt_prev, orig_dev);
1281     @@ -3240,6 +3240,9 @@ ncls:
1282     }
1283     }
1284    
1285     + if (vlan_tx_nonzero_tag_present(skb))
1286     + skb->pkt_type = PACKET_OTHERHOST;
1287     +
1288     /* deliver only exact match when indicated */
1289     null_or_dev = deliver_exact ? skb->dev : NULL;
1290    
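
[Editor note on the hunks above] Together with the vlan_core.c hunk, this moves the rcu_dereference() of the rx_handler to after VLAN processing and marks packets carrying an unknown, non-zero VLAN id as PACKET_OTHERHOST in one central place, instead of having vlan_do_receive() guess from a last_handler flag. A simplified user-space model of the new receive ordering; the types and helpers here are illustrative only:

#include <stdbool.h>
#include <stdio.h>

enum pkt_type { PACKET_HOST, PACKET_OTHERHOST };

struct pkt {
        unsigned int vlan_tci;  /* non-zero means still VLAN tagged */
        enum pkt_type type;
};

/* Pretend only VLAN 10 is configured on this device. */
static bool vlan_do_receive(struct pkt *p)
{
        if ((p->vlan_tci & 0xfff) != 10)
                return false;           /* unknown VLAN: leave the tag alone */
        p->vlan_tci = 0;                /* accepted: strip the tag */
        return true;
}

static void netif_receive(struct pkt *p)
{
another_round:
        if (p->vlan_tci) {
                if (vlan_do_receive(p))
                        goto another_round;
        }

        /* rx handlers (bridge, bonding, ...) would run here */

        /* Central decision, as in the patched receive path: a packet that
         * still carries a non-zero VLAN id is not for this host. */
        if (p->vlan_tci & 0xfff)
                p->type = PACKET_OTHERHOST;
}

int main(void)
{
        struct pkt known = { .vlan_tci = 10 }, unknown = { .vlan_tci = 42 };

        netif_receive(&known);
        netif_receive(&unknown);
        printf("known=%d unknown=%d\n", known.type, unknown.type);
        return 0;
}
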
1291     diff --git a/net/core/neighbour.c b/net/core/neighbour.c
1292     index 73b9035..ac88107 100644
1293     --- a/net/core/neighbour.c
1294     +++ b/net/core/neighbour.c
1295     @@ -1285,8 +1285,6 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1296     if (!dst)
1297     goto discard;
1298    
1299     - __skb_pull(skb, skb_network_offset(skb));
1300     -
1301     if (!neigh_event_send(neigh, skb)) {
1302     int err;
1303     struct net_device *dev = neigh->dev;
1304     @@ -1296,6 +1294,7 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1305     neigh_hh_init(neigh, dst);
1306    
1307     do {
1308     + __skb_pull(skb, skb_network_offset(skb));
1309     seq = read_seqbegin(&neigh->ha_lock);
1310     err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1311     neigh->ha, NULL, skb->len);
1312     @@ -1326,9 +1325,8 @@ int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1313     unsigned int seq;
1314     int err;
1315    
1316     - __skb_pull(skb, skb_network_offset(skb));
1317     -
1318     do {
1319     + __skb_pull(skb, skb_network_offset(skb));
1320     seq = read_seqbegin(&neigh->ha_lock);
1321     err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1322     neigh->ha, NULL, skb->len);
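
[Editor note on the hunks above] Both neigh_resolve_output() and neigh_connected_output() retry dev_hard_header() under a seqlock when the hardware address changes; the fix moves __skb_pull() inside the retry loop so every attempt starts from the network header again, instead of pulling once before the loop and leaving later attempts misaligned. A small user-space analogue of "re-derive per-iteration state inside the retry loop"; the retry trigger here is faked with an attempt counter rather than a seqlock:

#include <stdio.h>
#include <string.h>

struct buf {
        char data[64];
        size_t off;             /* current data offset within the buffer */
};

/* Pretend the first attempt fails (e.g. the address changed mid-build). */
static int build_header(struct buf *b, int attempt)
{
        b->off -= 14;                   /* prepend a 14-byte link header */
        memset(b->data + b->off, 'h', 14);
        return attempt == 0 ? -1 : 0;   /* force one retry */
}

int main(void)
{
        struct buf b = { .off = 32 };
        size_t net_off = 32;            /* offset of the network header */
        int attempt = 0, err;

        do {
                /* Reset to the network header on every pass, mirroring the
                 * __skb_pull() the patch moves inside the loop; doing it only
                 * once would leave the second attempt 14 bytes short. */
                b.off = net_off;
                err = build_header(&b, attempt++);
        } while (err);

        printf("final offset %zu after %d attempts\n", b.off, attempt);
        return 0;
}
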
1323     diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
1324     index 0cb86ce..8f2458d 100644
1325     --- a/net/ipv4/tcp_ipv4.c
1326     +++ b/net/ipv4/tcp_ipv4.c
1327     @@ -678,10 +678,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
1328     arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1329     arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
1330     /* When socket is gone, all binding information is lost.
1331     - * routing might fail in this case. using iif for oif to
1332     - * make sure we can deliver it
1333     + * routing might fail in this case. No choice here, if we choose to force
1334     + * input interface, we will misroute in case of asymmetric route.
1335     */
1336     - arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
1337     + if (sk)
1338     + arg.bound_dev_if = sk->sk_bound_dev_if;
1339    
1340     net = dev_net(skb_dst(skb)->dev);
1341     arg.tos = ip_hdr(skb)->tos;
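
[Editor note on the hunk above] For resets sent without a socket, the patch stops forcing the output interface to the incoming interface, since with asymmetric routing that binding can push the RST out the wrong device; only a real socket's sk_bound_dev_if is honoured now. A tiny sketch of the resulting decision, with a placeholder socket type:

#include <stdio.h>

struct fake_sock { int sk_bound_dev_if; };

/* Interface the RST reply should be bound to: the socket's binding when a
 * socket exists, otherwise 0 (= let routing decide), as in the patched
 * tcp_v4_send_reset(). */
static int reply_bound_dev_if(const struct fake_sock *sk)
{
        return sk ? sk->sk_bound_dev_if : 0;
}

int main(void)
{
        struct fake_sock sk = { .sk_bound_dev_if = 3 };

        printf("with socket: %d, without: %d\n",
               reply_bound_dev_if(&sk), reply_bound_dev_if(NULL));
        return 0;
}
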
1342     diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
1343     index 5ec6069..468a5ce 100644
1344     --- a/net/ipv6/addrconf.c
1345     +++ b/net/ipv6/addrconf.c
1346     @@ -3095,14 +3095,15 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
1347     struct hlist_node *n;
1348     hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
1349     addr_lst) {
1350     + if (!net_eq(dev_net(ifa->idev->dev), net))
1351     + continue;
1352     /* sync with offset */
1353     if (p < state->offset) {
1354     p++;
1355     continue;
1356     }
1357     state->offset++;
1358     - if (net_eq(dev_net(ifa->idev->dev), net))
1359     - return ifa;
1360     + return ifa;
1361     }
1362    
1363     /* prepare for next bucket */
1364     @@ -3120,18 +3121,20 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
1365     struct hlist_node *n = &ifa->addr_lst;
1366    
1367     hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) {
1368     + if (!net_eq(dev_net(ifa->idev->dev), net))
1369     + continue;
1370     state->offset++;
1371     - if (net_eq(dev_net(ifa->idev->dev), net))
1372     - return ifa;
1373     + return ifa;
1374     }
1375    
1376     while (++state->bucket < IN6_ADDR_HSIZE) {
1377     state->offset = 0;
1378     hlist_for_each_entry_rcu_bh(ifa, n,
1379     &inet6_addr_lst[state->bucket], addr_lst) {
1380     + if (!net_eq(dev_net(ifa->idev->dev), net))
1381     + continue;
1382     state->offset++;
1383     - if (net_eq(dev_net(ifa->idev->dev), net))
1384     - return ifa;
1385     + return ifa;
1386     }
1387     }
1388    
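
[Editor note on the hunks above] The /proc/net/if_inet6 iterators previously advanced the offset for every hash-chain entry and filtered by network namespace only at the end, so addresses belonging to other namespaces still consumed positions and the listing could skip or repeat entries; the fix skips foreign-namespace entries before touching the offset. A user-space sketch of filter-before-count pagination over a plain array; the data set is made up:

#include <stdio.h>

struct entry { int netns; const char *name; };

static const struct entry table[] = {
        { 1, "lo" }, { 2, "veth0" }, { 1, "eth0" }, { 1, "eth1" },
};

/* Return the pos-th entry belonging to netns, counting only matching
 * entries - the order the addrconf fix enforces (filter, then count). */
static const struct entry *get_nth(int netns, int pos)
{
        int seen = 0;
        size_t i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                if (table[i].netns != netns)
                        continue;       /* foreign entries never consume a slot */
                if (seen++ == pos)
                        return &table[i];
        }
        return NULL;
}

int main(void)
{
        int pos;

        for (pos = 0; get_nth(1, pos); pos++)
                printf("%d: %s\n", pos, get_nth(1, pos)->name);
        return 0;
}
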
1389     diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
1390     index 98256cf..8a8fa2d 100644
1391     --- a/net/ipv6/tcp_ipv6.c
1392     +++ b/net/ipv6/tcp_ipv6.c
1393     @@ -896,7 +896,8 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
1394     __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
1395    
1396     fl6.flowi6_proto = IPPROTO_TCP;
1397     - fl6.flowi6_oif = inet6_iif(skb);
1398     + if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
1399     + fl6.flowi6_oif = inet6_iif(skb);
1400     fl6.fl6_dport = t1->dest;
1401     fl6.fl6_sport = t1->source;
1402     security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
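
[Editor note on the hunk above] tcp_v6_send_response() now pins the reply to the incoming interface only when the destination is link-local; for global addresses the routing table chooses the interface, avoiding misrouted replies on asymmetric paths (the IPv6 counterpart of the tcp_ipv4.c change above). A minimal sketch of the condition, using a hand-rolled link-local check instead of ipv6_addr_type():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the link-local test: link-local unicast is fe80::/10. */
static bool is_link_local(const uint8_t addr[16])
{
        return addr[0] == 0xfe && (addr[1] & 0xc0) == 0x80;
}

/* Output interface for a reply: only link-local destinations need the
 * incoming interface; everything else lets routing decide (0). */
static int reply_oif(const uint8_t daddr[16], int incoming_ifindex)
{
        return is_link_local(daddr) ? incoming_ifindex : 0;
}

int main(void)
{
        uint8_t ll[16]     = { 0xfe, 0x80 };    /* fe80::... */
        uint8_t global[16] = { 0x20, 0x01 };    /* 2001::... */

        printf("link-local oif=%d, global oif=%d\n",
               reply_oif(ll, 2), reply_oif(global, 2));
        return 0;
}
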
1403     diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
1404     index bba6ba1..edce424 100644
1405     --- a/net/netlink/af_netlink.c
1406     +++ b/net/netlink/af_netlink.c
1407     @@ -156,6 +156,8 @@ static void netlink_sock_destruct(struct sock *sk)
1408     if (nlk->cb) {
1409     if (nlk->cb->done)
1410     nlk->cb->done(nlk->cb);
1411     +
1412     + module_put(nlk->cb->module);
1413     netlink_destroy_callback(nlk->cb);
1414     }
1415    
1416     @@ -1728,6 +1730,7 @@ static int netlink_dump(struct sock *sk)
1417     nlk->cb = NULL;
1418     mutex_unlock(nlk->cb_mutex);
1419    
1420     + module_put(cb->module);
1421     netlink_destroy_callback(cb);
1422     return 0;
1423    
1424     @@ -1737,9 +1740,9 @@ errout_skb:
1425     return err;
1426     }
1427    
1428     -int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1429     - const struct nlmsghdr *nlh,
1430     - struct netlink_dump_control *control)
1431     +int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1432     + const struct nlmsghdr *nlh,
1433     + struct netlink_dump_control *control)
1434     {
1435     struct netlink_callback *cb;
1436     struct sock *sk;
1437     @@ -1754,6 +1757,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1438     cb->done = control->done;
1439     cb->nlh = nlh;
1440     cb->data = control->data;
1441     + cb->module = control->module;
1442     cb->min_dump_alloc = control->min_dump_alloc;
1443     atomic_inc(&skb->users);
1444     cb->skb = skb;
1445     @@ -1764,19 +1768,28 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1446     return -ECONNREFUSED;
1447     }
1448     nlk = nlk_sk(sk);
1449     - /* A dump is in progress... */
1450     +
1451     mutex_lock(nlk->cb_mutex);
1452     + /* A dump is in progress... */
1453     if (nlk->cb) {
1454     mutex_unlock(nlk->cb_mutex);
1455     netlink_destroy_callback(cb);
1456     - sock_put(sk);
1457     - return -EBUSY;
1458     + ret = -EBUSY;
1459     + goto out;
1460     }
1461     + /* add reference of module which cb->dump belongs to */
1462     + if (!try_module_get(cb->module)) {
1463     + mutex_unlock(nlk->cb_mutex);
1464     + netlink_destroy_callback(cb);
1465     + ret = -EPROTONOSUPPORT;
1466     + goto out;
1467     + }
1468     +
1469     nlk->cb = cb;
1470     mutex_unlock(nlk->cb_mutex);
1471    
1472     ret = netlink_dump(sk);
1473     -
1474     +out:
1475     sock_put(sk);
1476    
1477     if (ret)
1478     @@ -1787,7 +1800,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1479     */
1480     return -EINTR;
1481     }
1482     -EXPORT_SYMBOL(netlink_dump_start);
1483     +EXPORT_SYMBOL(__netlink_dump_start);
1484    
1485     void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
1486     {
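
[Editor note on the hunks above] __netlink_dump_start() now records the owning module in the callback and takes a reference with try_module_get() before the dump is installed; the reference is dropped when the dump finishes in netlink_dump() or when the socket is destroyed, so a module cannot be unloaded while one of its dump callbacks is still pending. A user-space analogue of the acquire-on-start / release-on-finish pattern, with a plain counter standing in for the module refcount:

#include <stdbool.h>
#include <stdio.h>

struct owner { int refs; bool unloading; };

static bool owner_get(struct owner *o)
{
        if (o->unloading)
                return false;   /* mirrors try_module_get() failing */
        o->refs++;
        return true;
}

static void owner_put(struct owner *o)
{
        o->refs--;
}

struct dump { struct owner *module; };

static int dump_start(struct dump *d, struct owner *mod)
{
        if (!owner_get(mod))
                return -1;      /* -EPROTONOSUPPORT in the real code */
        d->module = mod;
        return 0;
}

static void dump_done(struct dump *d)
{
        owner_put(d->module);   /* matches the module_put() added to netlink_dump() */
}

int main(void)
{
        struct owner mod = { 0 };
        struct dump d;

        if (!dump_start(&d, &mod)) {
                printf("refs while dumping: %d\n", mod.refs);
                dump_done(&d);
        }
        printf("refs after dump: %d\n", mod.refs);
        return 0;
}
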
1487     diff --git a/net/rds/send.c b/net/rds/send.c
1488     index 96531d4..88eace5 100644
1489     --- a/net/rds/send.c
1490     +++ b/net/rds/send.c
1491     @@ -1122,7 +1122,7 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
1492     rds_stats_inc(s_send_pong);
1493    
1494     if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1495     - rds_send_xmit(conn);
1496     + queue_delayed_work(rds_wq, &conn->c_send_w, 0);
1497    
1498     rds_message_put(rm);
1499     return 0;
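
[Editor note on the hunk above] Instead of transmitting the pong synchronously, rds_send_pong() now queues the connection's send work with zero delay, deferring the actual transmit to the workqueue so it does not run in the current context. A small sketch of "queue instead of call" using a trivial single-slot work item; the work structure and queueing helper are stand-ins, not the kernel workqueue API:

#include <stdio.h>

struct work { void (*fn)(void); };

static struct work *pending;    /* single-slot stand-in for a workqueue */

static void queue_work_now(struct work *w)
{
        pending = w;            /* just record it; it runs later, not here */
}

static void send_pong_xmit(void)
{
        printf("transmitting pong\n");
}

static struct work send_work = { send_pong_xmit };

static void rds_send_pong_like(void)
{
        /* Patched behaviour: defer to the work item instead of calling the
         * transmit path directly from this context. */
        queue_work_now(&send_work);
}

int main(void)
{
        rds_send_pong_like();
        printf("back in caller, nothing sent yet\n");
        if (pending)
                pending->fn();  /* the "workqueue thread" runs it later */
        return 0;
}
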
1500     diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
1501     index de0b0f3..76cb304 100644
1502     --- a/net/sunrpc/cache.c
1503     +++ b/net/sunrpc/cache.c
1504     @@ -1406,11 +1406,11 @@ static ssize_t read_flush(struct file *file, char __user *buf,
1505     size_t count, loff_t *ppos,
1506     struct cache_detail *cd)
1507     {
1508     - char tbuf[20];
1509     + char tbuf[22];
1510     unsigned long p = *ppos;
1511     size_t len;
1512    
1513     - sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time));
1514     + snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time));
1515     len = strlen(tbuf);
1516     if (p >= len)
1517     return 0;
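
[Editor note on the hunk above] read_flush() formatted an unsigned long plus a newline into a 20-byte buffer with sprintf(); on 64-bit a value near ULONG_MAX needs 20 digits plus the newline and the NUL, i.e. 22 bytes, so the buffer grows and the format switches to snprintf() to stay bounded. A quick user-space check of the worst-case length:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        char tbuf[22];
        int n = snprintf(tbuf, sizeof(tbuf), "%lu\n", ULONG_MAX);

        /* On 64-bit this reports 21 characters (20 digits plus the newline),
         * so the old 20-byte buffer could not hold them and the NUL. */
        printf("needs %d bytes plus NUL: %s", n, tbuf);
        return 0;
}
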
1518     diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
1519     index da72492..176a24f 100644
1520     --- a/net/sunrpc/xprt.c
1521     +++ b/net/sunrpc/xprt.c
1522     @@ -969,11 +969,11 @@ static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1523     return false;
1524     }
1525    
1526     -static void xprt_alloc_slot(struct rpc_task *task)
1527     +void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
1528     {
1529     - struct rpc_xprt *xprt = task->tk_xprt;
1530     struct rpc_rqst *req;
1531    
1532     + spin_lock(&xprt->reserve_lock);
1533     if (!list_empty(&xprt->free)) {
1534     req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
1535     list_del(&req->rq_list);
1536     @@ -994,12 +994,29 @@ static void xprt_alloc_slot(struct rpc_task *task)
1537     default:
1538     task->tk_status = -EAGAIN;
1539     }
1540     + spin_unlock(&xprt->reserve_lock);
1541     return;
1542     out_init_req:
1543     task->tk_status = 0;
1544     task->tk_rqstp = req;
1545     xprt_request_init(task, xprt);
1546     + spin_unlock(&xprt->reserve_lock);
1547     +}
1548     +EXPORT_SYMBOL_GPL(xprt_alloc_slot);
1549     +
1550     +void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
1551     +{
1552     + /* Note: grabbing the xprt_lock_write() ensures that we throttle
1553     + * new slot allocation if the transport is congested (i.e. when
1554     + * reconnecting a stream transport or when out of socket write
1555     + * buffer space).
1556     + */
1557     + if (xprt_lock_write(xprt, task)) {
1558     + xprt_alloc_slot(xprt, task);
1559     + xprt_release_write(xprt, task);
1560     + }
1561     }
1562     +EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);
1563    
1564     static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1565     {
1566     @@ -1083,20 +1100,9 @@ void xprt_reserve(struct rpc_task *task)
1567     if (task->tk_rqstp != NULL)
1568     return;
1569    
1570     - /* Note: grabbing the xprt_lock_write() here is not strictly needed,
1571     - * but ensures that we throttle new slot allocation if the transport
1572     - * is congested (e.g. if reconnecting or if we're out of socket
1573     - * write buffer space).
1574     - */
1575     task->tk_timeout = 0;
1576     task->tk_status = -EAGAIN;
1577     - if (!xprt_lock_write(xprt, task))
1578     - return;
1579     -
1580     - spin_lock(&xprt->reserve_lock);
1581     - xprt_alloc_slot(task);
1582     - spin_unlock(&xprt->reserve_lock);
1583     - xprt_release_write(xprt, task);
1584     + xprt->ops->alloc_slot(xprt, task);
1585     }
1586    
1587     static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
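
[Editor note on the hunks above] Slot allocation becomes a per-transport operation: xprt_reserve() now just calls xprt->ops->alloc_slot(), the reserve_lock moves inside xprt_alloc_slot(), and stream transports that need congestion throttling use the xprt_lock_and_alloc_slot() wrapper, which takes the transport write lock around the plain allocator (the following xprtrdma and xprtsock hunks wire TCP to the wrapper and UDP, RDMA and the backchannel to the plain version). A condensed user-space sketch of the ops-table split; the struct and function names here are illustrative:

#include <stdio.h>

struct xprt;
struct xprt_ops { void (*alloc_slot)(struct xprt *); };

struct xprt {
        const struct xprt_ops *ops;
        int free_slots;
};

static void plain_alloc_slot(struct xprt *x)
{
        /* the reserve_lock would be taken here in the kernel version */
        if (x->free_slots > 0)
                x->free_slots--;
}

static void lock_and_alloc_slot(struct xprt *x)
{
        /* a congested stream transport would fail to get the write lock
         * here and back off; this sketch always succeeds */
        plain_alloc_slot(x);
}

static const struct xprt_ops udp_like = { plain_alloc_slot };
static const struct xprt_ops tcp_like = { lock_and_alloc_slot };

static void reserve(struct xprt *x)
{
        x->ops->alloc_slot(x);          /* what xprt_reserve() now does */
}

int main(void)
{
        struct xprt u = { &udp_like, 2 }, t = { &tcp_like, 2 };

        reserve(&u);
        reserve(&t);
        printf("udp-like slots left %d, tcp-like slots left %d\n",
               u.free_slots, t.free_slots);
        return 0;
}
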
1588     diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
1589     index 06cdbff..5d9202d 100644
1590     --- a/net/sunrpc/xprtrdma/transport.c
1591     +++ b/net/sunrpc/xprtrdma/transport.c
1592     @@ -713,6 +713,7 @@ static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
1593     static struct rpc_xprt_ops xprt_rdma_procs = {
1594     .reserve_xprt = xprt_rdma_reserve_xprt,
1595     .release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */
1596     + .alloc_slot = xprt_alloc_slot,
1597     .release_request = xprt_release_rqst_cong, /* ditto */
1598     .set_retrans_timeout = xprt_set_retrans_timeout_def, /* ditto */
1599     .rpcbind = rpcb_getport_async, /* sunrpc/rpcb_clnt.c */
1600     diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
1601     index 00ff343..a4a6586 100644
1602     --- a/net/sunrpc/xprtsock.c
1603     +++ b/net/sunrpc/xprtsock.c
1604     @@ -2444,6 +2444,7 @@ static void bc_destroy(struct rpc_xprt *xprt)
1605     static struct rpc_xprt_ops xs_local_ops = {
1606     .reserve_xprt = xprt_reserve_xprt,
1607     .release_xprt = xs_tcp_release_xprt,
1608     + .alloc_slot = xprt_alloc_slot,
1609     .rpcbind = xs_local_rpcbind,
1610     .set_port = xs_local_set_port,
1611     .connect = xs_connect,
1612     @@ -2460,6 +2461,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
1613     .set_buffer_size = xs_udp_set_buffer_size,
1614     .reserve_xprt = xprt_reserve_xprt_cong,
1615     .release_xprt = xprt_release_xprt_cong,
1616     + .alloc_slot = xprt_alloc_slot,
1617     .rpcbind = rpcb_getport_async,
1618     .set_port = xs_set_port,
1619     .connect = xs_connect,
1620     @@ -2477,6 +2479,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
1621     static struct rpc_xprt_ops xs_tcp_ops = {
1622     .reserve_xprt = xprt_reserve_xprt,
1623     .release_xprt = xs_tcp_release_xprt,
1624     + .alloc_slot = xprt_lock_and_alloc_slot,
1625     .rpcbind = rpcb_getport_async,
1626     .set_port = xs_set_port,
1627     .connect = xs_connect,
1628     @@ -2496,6 +2499,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
1629     static struct rpc_xprt_ops bc_tcp_ops = {
1630     .reserve_xprt = xprt_reserve_xprt,
1631     .release_xprt = xprt_release_xprt,
1632     + .alloc_slot = xprt_alloc_slot,
1633     .rpcbind = xs_local_rpcbind,
1634     .buf_alloc = bc_malloc,
1635     .buf_free = bc_free,