Contents of /trunk/kernel-lts/patches-3.4/0147-3.4.48-all-fixes.patch
Parent Directory | Revision Log
Revision 2207 -
(show annotations)
(download)
Thu Jun 13 10:38:35 2013 UTC (11 years, 3 months ago) by niro
File size: 45476 byte(s)
-linux-3.4.48
1 | diff --git a/arch/arm/mach-kirkwood/ts219-setup.c b/arch/arm/mach-kirkwood/ts219-setup.c |
2 | index 73e2b6c..3a73fc7 100644 |
3 | --- a/arch/arm/mach-kirkwood/ts219-setup.c |
4 | +++ b/arch/arm/mach-kirkwood/ts219-setup.c |
5 | @@ -124,7 +124,7 @@ static void __init qnap_ts219_init(void) |
6 | static int __init ts219_pci_init(void) |
7 | { |
8 | if (machine_is_ts219()) |
9 | - kirkwood_pcie_init(KW_PCIE0); |
10 | + kirkwood_pcie_init(KW_PCIE1 | KW_PCIE0); |
11 | |
12 | return 0; |
13 | } |
14 | diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c |
15 | index 331f8bb..4dac6e0 100644 |
16 | --- a/arch/arm/plat-orion/common.c |
17 | +++ b/arch/arm/plat-orion/common.c |
18 | @@ -340,7 +340,7 @@ static struct resource orion_ge10_shared_resources[] = { |
19 | |
20 | static struct platform_device orion_ge10_shared = { |
21 | .name = MV643XX_ETH_SHARED_NAME, |
22 | - .id = 1, |
23 | + .id = 2, |
24 | .dev = { |
25 | .platform_data = &orion_ge10_shared_data, |
26 | }, |
27 | @@ -355,8 +355,8 @@ static struct resource orion_ge10_resources[] = { |
28 | |
29 | static struct platform_device orion_ge10 = { |
30 | .name = MV643XX_ETH_NAME, |
31 | - .id = 1, |
32 | - .num_resources = 2, |
33 | + .id = 2, |
34 | + .num_resources = 1, |
35 | .resource = orion_ge10_resources, |
36 | .dev = { |
37 | .coherent_dma_mask = DMA_BIT_MASK(32), |
38 | @@ -393,7 +393,7 @@ static struct resource orion_ge11_shared_resources[] = { |
39 | |
40 | static struct platform_device orion_ge11_shared = { |
41 | .name = MV643XX_ETH_SHARED_NAME, |
42 | - .id = 1, |
43 | + .id = 3, |
44 | .dev = { |
45 | .platform_data = &orion_ge11_shared_data, |
46 | }, |
47 | @@ -408,8 +408,8 @@ static struct resource orion_ge11_resources[] = { |
48 | |
49 | static struct platform_device orion_ge11 = { |
50 | .name = MV643XX_ETH_NAME, |
51 | - .id = 1, |
52 | - .num_resources = 2, |
53 | + .id = 3, |
54 | + .num_resources = 1, |
55 | .resource = orion_ge11_resources, |
56 | .dev = { |
57 | .coherent_dma_mask = DMA_BIT_MASK(32), |
58 | diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c |
59 | index 596f730..2c94129 100644 |
60 | --- a/arch/avr32/kernel/module.c |
61 | +++ b/arch/avr32/kernel/module.c |
62 | @@ -264,7 +264,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, |
63 | break; |
64 | case R_AVR32_GOT18SW: |
65 | if ((relocation & 0xfffe0003) != 0 |
66 | - && (relocation & 0xfffc0003) != 0xffff0000) |
67 | + && (relocation & 0xfffc0000) != 0xfffc0000) |
68 | return reloc_overflow(module, "R_AVR32_GOT18SW", |
69 | relocation); |
70 | relocation >>= 2; |
71 | diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S |
72 | index d197e7f..ac85f16 100644 |
73 | --- a/arch/m68k/kernel/head.S |
74 | +++ b/arch/m68k/kernel/head.S |
75 | @@ -2752,11 +2752,9 @@ func_return get_new_page |
76 | #ifdef CONFIG_MAC |
77 | |
78 | L(scc_initable_mac): |
79 | - .byte 9,12 /* Reset */ |
80 | .byte 4,0x44 /* x16, 1 stopbit, no parity */ |
81 | .byte 3,0xc0 /* receiver: 8 bpc */ |
82 | .byte 5,0xe2 /* transmitter: 8 bpc, assert dtr/rts */ |
83 | - .byte 9,0 /* no interrupts */ |
84 | .byte 10,0 /* NRZ */ |
85 | .byte 11,0x50 /* use baud rate generator */ |
86 | .byte 12,1,13,0 /* 38400 baud */ |
87 | @@ -2899,6 +2897,7 @@ func_start serial_init,%d0/%d1/%a0/%a1 |
88 | is_not_mac(L(serial_init_not_mac)) |
89 | |
90 | #ifdef SERIAL_DEBUG |
91 | + |
92 | /* You may define either or both of these. */ |
93 | #define MAC_USE_SCC_A /* Modem port */ |
94 | #define MAC_USE_SCC_B /* Printer port */ |
95 | @@ -2908,9 +2907,21 @@ func_start serial_init,%d0/%d1/%a0/%a1 |
96 | #define mac_scc_cha_b_data_offset 0x4 |
97 | #define mac_scc_cha_a_data_offset 0x6 |
98 | |
99 | +#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B) |
100 | + movel %pc@(L(mac_sccbase)),%a0 |
101 | + /* Reset SCC device */ |
102 | + moveb #9,%a0@(mac_scc_cha_a_ctrl_offset) |
103 | + moveb #0xc0,%a0@(mac_scc_cha_a_ctrl_offset) |
104 | + /* Wait for 5 PCLK cycles, which is about 68 CPU cycles */ |
105 | + /* 5 / 3.6864 MHz = approx. 1.36 us = 68 / 50 MHz */ |
106 | + movel #35,%d0 |
107 | +5: |
108 | + subq #1,%d0 |
109 | + jne 5b |
110 | +#endif |
111 | + |
112 | #ifdef MAC_USE_SCC_A |
113 | /* Initialize channel A */ |
114 | - movel %pc@(L(mac_sccbase)),%a0 |
115 | lea %pc@(L(scc_initable_mac)),%a1 |
116 | 5: moveb %a1@+,%d0 |
117 | jmi 6f |
118 | @@ -2922,9 +2933,6 @@ func_start serial_init,%d0/%d1/%a0/%a1 |
119 | |
120 | #ifdef MAC_USE_SCC_B |
121 | /* Initialize channel B */ |
122 | -#ifndef MAC_USE_SCC_A /* Load mac_sccbase only if needed */ |
123 | - movel %pc@(L(mac_sccbase)),%a0 |
124 | -#endif /* MAC_USE_SCC_A */ |
125 | lea %pc@(L(scc_initable_mac)),%a1 |
126 | 7: moveb %a1@+,%d0 |
127 | jmi 8f |
128 | @@ -2933,6 +2941,7 @@ func_start serial_init,%d0/%d1/%a0/%a1 |
129 | jra 7b |
130 | 8: |
131 | #endif /* MAC_USE_SCC_B */ |
132 | + |
133 | #endif /* SERIAL_DEBUG */ |
134 | |
135 | jra L(serial_init_done) |
136 | @@ -3006,17 +3015,17 @@ func_start serial_putc,%d0/%d1/%a0/%a1 |
137 | |
138 | #ifdef SERIAL_DEBUG |
139 | |
140 | -#ifdef MAC_USE_SCC_A |
141 | +#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B) |
142 | movel %pc@(L(mac_sccbase)),%a1 |
143 | +#endif |
144 | + |
145 | +#ifdef MAC_USE_SCC_A |
146 | 3: btst #2,%a1@(mac_scc_cha_a_ctrl_offset) |
147 | jeq 3b |
148 | moveb %d0,%a1@(mac_scc_cha_a_data_offset) |
149 | #endif /* MAC_USE_SCC_A */ |
150 | |
151 | #ifdef MAC_USE_SCC_B |
152 | -#ifndef MAC_USE_SCC_A /* Load mac_sccbase only if needed */ |
153 | - movel %pc@(L(mac_sccbase)),%a1 |
154 | -#endif /* MAC_USE_SCC_A */ |
155 | 4: btst #2,%a1@(mac_scc_cha_b_ctrl_offset) |
156 | jeq 4b |
157 | moveb %d0,%a1@(mac_scc_cha_b_data_offset) |
158 | diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c |
159 | index 416bd40..68d1dc9 100644 |
160 | --- a/arch/x86/um/sys_call_table_32.c |
161 | +++ b/arch/x86/um/sys_call_table_32.c |
162 | @@ -39,9 +39,9 @@ |
163 | #undef __SYSCALL_I386 |
164 | #define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym, |
165 | |
166 | -typedef void (*sys_call_ptr_t)(void); |
167 | +typedef asmlinkage void (*sys_call_ptr_t)(void); |
168 | |
169 | -extern void sys_ni_syscall(void); |
170 | +extern asmlinkage void sys_ni_syscall(void); |
171 | |
172 | const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { |
173 | /* |
174 | diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c |
175 | index f67fc41..af58f9b 100644 |
176 | --- a/drivers/ata/ata_piix.c |
177 | +++ b/drivers/ata/ata_piix.c |
178 | @@ -151,6 +151,7 @@ enum piix_controller_ids { |
179 | piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ |
180 | ich8_sata_snb, |
181 | ich8_2port_sata_snb, |
182 | + ich8_2port_sata_byt, |
183 | }; |
184 | |
185 | struct piix_map_db { |
186 | @@ -348,6 +349,9 @@ static const struct pci_device_id piix_pci_tbl[] = { |
187 | { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, |
188 | /* SATA Controller IDE (Wellsburg) */ |
189 | { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, |
190 | + /* SATA Controller IDE (BayTrail) */ |
191 | + { 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt }, |
192 | + { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt }, |
193 | |
194 | { } /* terminate list */ |
195 | }; |
196 | @@ -513,6 +517,7 @@ static const struct piix_map_db *piix_map_db_table[] = { |
197 | [tolapai_sata] = &tolapai_map_db, |
198 | [ich8_sata_snb] = &ich8_map_db, |
199 | [ich8_2port_sata_snb] = &ich8_2port_map_db, |
200 | + [ich8_2port_sata_byt] = &ich8_2port_map_db, |
201 | }; |
202 | |
203 | static struct ata_port_info piix_port_info[] = { |
204 | @@ -663,6 +668,16 @@ static struct ata_port_info piix_port_info[] = { |
205 | .udma_mask = ATA_UDMA6, |
206 | .port_ops = &piix_sata_ops, |
207 | }, |
208 | + |
209 | + [ich8_2port_sata_byt] = |
210 | + { |
211 | + .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16, |
212 | + .pio_mask = ATA_PIO4, |
213 | + .mwdma_mask = ATA_MWDMA2, |
214 | + .udma_mask = ATA_UDMA6, |
215 | + .port_ops = &piix_sata_ops, |
216 | + }, |
217 | + |
218 | }; |
219 | |
220 | static struct pci_bits piix_enable_bits[] = { |
221 | diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c |
222 | index 02dd34c..9cf09ae 100644 |
223 | --- a/drivers/ata/libata-core.c |
224 | +++ b/drivers/ata/libata-core.c |
225 | @@ -1599,6 +1599,12 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, |
226 | qc->tf = *tf; |
227 | if (cdb) |
228 | memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); |
229 | + |
230 | + /* some SATA bridges need us to indicate data xfer direction */ |
231 | + if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) && |
232 | + dma_dir == DMA_FROM_DEVICE) |
233 | + qc->tf.feature |= ATAPI_DMADIR; |
234 | + |
235 | qc->flags |= ATA_QCFLAG_RESULT_TF; |
236 | qc->dma_dir = dma_dir; |
237 | if (dma_dir != DMA_NONE) { |
238 | diff --git a/drivers/block/brd.c b/drivers/block/brd.c |
239 | index 531ceb3..4e8213a 100644 |
240 | --- a/drivers/block/brd.c |
241 | +++ b/drivers/block/brd.c |
242 | @@ -117,13 +117,13 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) |
243 | |
244 | spin_lock(&brd->brd_lock); |
245 | idx = sector >> PAGE_SECTORS_SHIFT; |
246 | + page->index = idx; |
247 | if (radix_tree_insert(&brd->brd_pages, idx, page)) { |
248 | __free_page(page); |
249 | page = radix_tree_lookup(&brd->brd_pages, idx); |
250 | BUG_ON(!page); |
251 | BUG_ON(page->index != idx); |
252 | - } else |
253 | - page->index = idx; |
254 | + } |
255 | spin_unlock(&brd->brd_lock); |
256 | |
257 | radix_tree_preload_end(); |
258 | diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c |
259 | index 68c89db..1209f15 100644 |
260 | --- a/drivers/gpu/drm/radeon/radeon_device.c |
261 | +++ b/drivers/gpu/drm/radeon/radeon_device.c |
262 | @@ -363,18 +363,17 @@ bool radeon_card_posted(struct radeon_device *rdev) |
263 | return false; |
264 | |
265 | /* first check CRTCs */ |
266 | - if (ASIC_IS_DCE41(rdev)) { |
267 | + if (ASIC_IS_DCE4(rdev)) { |
268 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | |
269 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); |
270 | - if (reg & EVERGREEN_CRTC_MASTER_EN) |
271 | - return true; |
272 | - } else if (ASIC_IS_DCE4(rdev)) { |
273 | - reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | |
274 | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | |
275 | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | |
276 | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | |
277 | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | |
278 | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); |
279 | + if (rdev->num_crtc >= 4) { |
280 | + reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | |
281 | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); |
282 | + } |
283 | + if (rdev->num_crtc >= 6) { |
284 | + reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | |
285 | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); |
286 | + } |
287 | if (reg & EVERGREEN_CRTC_MASTER_EN) |
288 | return true; |
289 | } else if (ASIC_IS_AVIVO(rdev)) { |
290 | diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c |
291 | index 0569843..99a8444 100644 |
292 | --- a/drivers/iommu/amd_iommu.c |
293 | +++ b/drivers/iommu/amd_iommu.c |
294 | @@ -531,11 +531,23 @@ retry: |
295 | |
296 | static void iommu_poll_events(struct amd_iommu *iommu) |
297 | { |
298 | - u32 head, tail; |
299 | + u32 head, tail, status; |
300 | unsigned long flags; |
301 | |
302 | spin_lock_irqsave(&iommu->lock, flags); |
303 | |
304 | + /* enable event interrupts again */ |
305 | + do { |
306 | + /* |
307 | + * Workaround for Erratum ERBT1312 |
308 | + * Clearing the EVT_INT bit may race in the hardware, so read |
309 | + * it again and make sure it was really cleared |
310 | + */ |
311 | + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); |
312 | + writel(MMIO_STATUS_EVT_INT_MASK, |
313 | + iommu->mmio_base + MMIO_STATUS_OFFSET); |
314 | + } while (status & MMIO_STATUS_EVT_INT_MASK); |
315 | + |
316 | head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); |
317 | tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); |
318 | |
319 | @@ -572,16 +584,25 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) |
320 | static void iommu_poll_ppr_log(struct amd_iommu *iommu) |
321 | { |
322 | unsigned long flags; |
323 | - u32 head, tail; |
324 | + u32 head, tail, status; |
325 | |
326 | if (iommu->ppr_log == NULL) |
327 | return; |
328 | |
329 | - /* enable ppr interrupts again */ |
330 | - writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); |
331 | - |
332 | spin_lock_irqsave(&iommu->lock, flags); |
333 | |
334 | + /* enable ppr interrupts again */ |
335 | + do { |
336 | + /* |
337 | + * Workaround for Erratum ERBT1312 |
338 | + * Clearing the PPR_INT bit may race in the hardware, so read |
339 | + * it again and make sure it was really cleared |
340 | + */ |
341 | + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); |
342 | + writel(MMIO_STATUS_PPR_INT_MASK, |
343 | + iommu->mmio_base + MMIO_STATUS_OFFSET); |
344 | + } while (status & MMIO_STATUS_PPR_INT_MASK); |
345 | + |
346 | head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); |
347 | tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); |
348 | |
349 | diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h |
350 | index 2435555..c4ffacb 100644 |
351 | --- a/drivers/iommu/amd_iommu_types.h |
352 | +++ b/drivers/iommu/amd_iommu_types.h |
353 | @@ -99,6 +99,7 @@ |
354 | #define PASID_MASK 0x000fffff |
355 | |
356 | /* MMIO status bits */ |
357 | +#define MMIO_STATUS_EVT_INT_MASK (1 << 1) |
358 | #define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2) |
359 | #define MMIO_STATUS_PPR_INT_MASK (1 << 6) |
360 | |
361 | diff --git a/drivers/leds/leds-ot200.c b/drivers/leds/leds-ot200.c |
362 | index c464682..676e729 100644 |
363 | --- a/drivers/leds/leds-ot200.c |
364 | +++ b/drivers/leds/leds-ot200.c |
365 | @@ -47,37 +47,37 @@ static struct ot200_led leds[] = { |
366 | { |
367 | .name = "led_1", |
368 | .port = 0x49, |
369 | - .mask = BIT(7), |
370 | + .mask = BIT(6), |
371 | }, |
372 | { |
373 | .name = "led_2", |
374 | .port = 0x49, |
375 | - .mask = BIT(6), |
376 | + .mask = BIT(5), |
377 | }, |
378 | { |
379 | .name = "led_3", |
380 | .port = 0x49, |
381 | - .mask = BIT(5), |
382 | + .mask = BIT(4), |
383 | }, |
384 | { |
385 | .name = "led_4", |
386 | .port = 0x49, |
387 | - .mask = BIT(4), |
388 | + .mask = BIT(3), |
389 | }, |
390 | { |
391 | .name = "led_5", |
392 | .port = 0x49, |
393 | - .mask = BIT(3), |
394 | + .mask = BIT(2), |
395 | }, |
396 | { |
397 | .name = "led_6", |
398 | .port = 0x49, |
399 | - .mask = BIT(2), |
400 | + .mask = BIT(1), |
401 | }, |
402 | { |
403 | .name = "led_7", |
404 | .port = 0x49, |
405 | - .mask = BIT(1), |
406 | + .mask = BIT(0), |
407 | } |
408 | }; |
409 | |
410 | diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c |
411 | index 2bdf798..0d22cff 100644 |
412 | --- a/drivers/net/xen-netback/netback.c |
413 | +++ b/drivers/net/xen-netback/netback.c |
414 | @@ -914,7 +914,6 @@ static int netbk_count_requests(struct xenvif *vif, |
415 | } |
416 | |
417 | static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk, |
418 | - struct sk_buff *skb, |
419 | u16 pending_idx) |
420 | { |
421 | struct page *page; |
422 | @@ -948,7 +947,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, |
423 | |
424 | index = pending_index(netbk->pending_cons++); |
425 | pending_idx = netbk->pending_ring[index]; |
426 | - page = xen_netbk_alloc_page(netbk, skb, pending_idx); |
427 | + page = xen_netbk_alloc_page(netbk, pending_idx); |
428 | if (!page) |
429 | goto err; |
430 | |
431 | @@ -1353,7 +1352,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) |
432 | } |
433 | |
434 | /* XXX could copy straight to head */ |
435 | - page = xen_netbk_alloc_page(netbk, skb, pending_idx); |
436 | + page = xen_netbk_alloc_page(netbk, pending_idx); |
437 | if (!page) { |
438 | kfree_skb(skb); |
439 | netbk_tx_err(vif, &txreq, idx); |
440 | diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c |
441 | index d68c000..f08aee6 100644 |
442 | --- a/drivers/platform/x86/thinkpad_acpi.c |
443 | +++ b/drivers/platform/x86/thinkpad_acpi.c |
444 | @@ -8662,6 +8662,13 @@ static int __must_check __init get_thinkpad_model_data( |
445 | tp->model_str = kstrdup(s, GFP_KERNEL); |
446 | if (!tp->model_str) |
447 | return -ENOMEM; |
448 | + } else { |
449 | + s = dmi_get_system_info(DMI_BIOS_VENDOR); |
450 | + if (s && !(strnicmp(s, "Lenovo", 6))) { |
451 | + tp->model_str = kstrdup(s, GFP_KERNEL); |
452 | + if (!tp->model_str) |
453 | + return -ENOMEM; |
454 | + } |
455 | } |
456 | |
457 | s = dmi_get_system_info(DMI_PRODUCT_NAME); |
458 | diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c |
459 | index 51b5adf..df8ea25 100644 |
460 | --- a/drivers/staging/vt6656/hostap.c |
461 | +++ b/drivers/staging/vt6656/hostap.c |
462 | @@ -153,7 +153,7 @@ static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked) |
463 | DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n", |
464 | pDevice->dev->name, pDevice->apdev->name); |
465 | } |
466 | - kfree(pDevice->apdev); |
467 | + free_netdev(pDevice->apdev); |
468 | pDevice->apdev = NULL; |
469 | pDevice->bEnable8021x = FALSE; |
470 | pDevice->bEnableHostWEP = FALSE; |
471 | diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c |
472 | index eb05c9d..8a8ff23 100644 |
473 | --- a/drivers/target/iscsi/iscsi_target_parameters.c |
474 | +++ b/drivers/target/iscsi/iscsi_target_parameters.c |
475 | @@ -713,9 +713,9 @@ static int iscsi_add_notunderstood_response( |
476 | } |
477 | INIT_LIST_HEAD(&extra_response->er_list); |
478 | |
479 | - strncpy(extra_response->key, key, strlen(key) + 1); |
480 | - strncpy(extra_response->value, NOTUNDERSTOOD, |
481 | - strlen(NOTUNDERSTOOD) + 1); |
482 | + strlcpy(extra_response->key, key, sizeof(extra_response->key)); |
483 | + strlcpy(extra_response->value, NOTUNDERSTOOD, |
484 | + sizeof(extra_response->value)); |
485 | |
486 | list_add_tail(&extra_response->er_list, |
487 | ¶m_list->extra_response_list); |
488 | @@ -1571,8 +1571,6 @@ int iscsi_decode_text_input( |
489 | |
490 | if (phase & PHASE_SECURITY) { |
491 | if (iscsi_check_for_auth_key(key) > 0) { |
492 | - char *tmpptr = key + strlen(key); |
493 | - *tmpptr = '='; |
494 | kfree(tmpbuf); |
495 | return 1; |
496 | } |
497 | diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h |
498 | index 6a37fd6..83eed65 100644 |
499 | --- a/drivers/target/iscsi/iscsi_target_parameters.h |
500 | +++ b/drivers/target/iscsi/iscsi_target_parameters.h |
501 | @@ -1,8 +1,10 @@ |
502 | #ifndef ISCSI_PARAMETERS_H |
503 | #define ISCSI_PARAMETERS_H |
504 | |
505 | +#include <scsi/iscsi_proto.h> |
506 | + |
507 | struct iscsi_extra_response { |
508 | - char key[64]; |
509 | + char key[KEY_MAXLEN]; |
510 | char value[32]; |
511 | struct list_head er_list; |
512 | } ____cacheline_aligned; |
513 | diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c |
514 | index 2303a02..37818fb 100644 |
515 | --- a/drivers/tty/n_tty.c |
516 | +++ b/drivers/tty/n_tty.c |
517 | @@ -1529,6 +1529,14 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) |
518 | tty->real_raw = 0; |
519 | } |
520 | n_tty_set_room(tty); |
521 | + /* |
522 | + * Fix tty hang when I_IXON(tty) is cleared, but the tty |
523 | + * been stopped by STOP_CHAR(tty) before it. |
524 | + */ |
525 | + if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) { |
526 | + start_tty(tty); |
527 | + } |
528 | + |
529 | /* The termios change make the tty ready for I/O */ |
530 | wake_up_interruptible(&tty->write_wait); |
531 | wake_up_interruptible(&tty->read_wait); |
532 | diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c |
533 | index 98b89fe..c8dbb97 100644 |
534 | --- a/drivers/usb/atm/cxacru.c |
535 | +++ b/drivers/usb/atm/cxacru.c |
536 | @@ -686,7 +686,8 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ |
537 | { |
538 | int ret, len; |
539 | __le32 *buf; |
540 | - int offb, offd; |
541 | + int offb; |
542 | + unsigned int offd; |
543 | const int stride = CMD_PACKET_SIZE / (4 * 2) - 1; |
544 | int buflen = ((size - 1) / stride + 1 + size * 2) * 4; |
545 | |
546 | diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c |
547 | index 8b2a9d8..f88ad63 100644 |
548 | --- a/drivers/usb/core/quirks.c |
549 | +++ b/drivers/usb/core/quirks.c |
550 | @@ -110,6 +110,9 @@ static const struct usb_device_id usb_quirk_list[] = { |
551 | /* Edirol SD-20 */ |
552 | { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME }, |
553 | |
554 | + /* Alcor Micro Corp. Hub */ |
555 | + { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME }, |
556 | + |
557 | /* appletouch */ |
558 | { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, |
559 | |
560 | diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c |
561 | index 768d542..c994655 100644 |
562 | --- a/drivers/usb/host/uhci-hub.c |
563 | +++ b/drivers/usb/host/uhci-hub.c |
564 | @@ -222,7 +222,8 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf) |
565 | /* auto-stop if nothing connected for 1 second */ |
566 | if (any_ports_active(uhci)) |
567 | uhci->rh_state = UHCI_RH_RUNNING; |
568 | - else if (time_after_eq(jiffies, uhci->auto_stop_time)) |
569 | + else if (time_after_eq(jiffies, uhci->auto_stop_time) && |
570 | + !uhci->wait_for_hp) |
571 | suspend_rh(uhci, UHCI_RH_AUTO_STOPPED); |
572 | break; |
573 | |
574 | diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c |
575 | index b42a6fb..f059222 100644 |
576 | --- a/drivers/usb/host/xhci-mem.c |
577 | +++ b/drivers/usb/host/xhci-mem.c |
578 | @@ -1443,15 +1443,17 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, |
579 | ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep)); |
580 | |
581 | /* Set the max packet size and max burst */ |
582 | + max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); |
583 | + max_burst = 0; |
584 | switch (udev->speed) { |
585 | case USB_SPEED_SUPER: |
586 | - max_packet = usb_endpoint_maxp(&ep->desc); |
587 | - ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); |
588 | /* dig out max burst from ep companion desc */ |
589 | - max_packet = ep->ss_ep_comp.bMaxBurst; |
590 | - ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet)); |
591 | + max_burst = ep->ss_ep_comp.bMaxBurst; |
592 | break; |
593 | case USB_SPEED_HIGH: |
594 | + /* Some devices get this wrong */ |
595 | + if (usb_endpoint_xfer_bulk(&ep->desc)) |
596 | + max_packet = 512; |
597 | /* bits 11:12 specify the number of additional transaction |
598 | * opportunities per microframe (USB 2.0, section 9.6.6) |
599 | */ |
600 | @@ -1459,17 +1461,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, |
601 | usb_endpoint_xfer_int(&ep->desc)) { |
602 | max_burst = (usb_endpoint_maxp(&ep->desc) |
603 | & 0x1800) >> 11; |
604 | - ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst)); |
605 | } |
606 | - /* Fall through */ |
607 | + break; |
608 | case USB_SPEED_FULL: |
609 | case USB_SPEED_LOW: |
610 | - max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); |
611 | - ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); |
612 | break; |
613 | default: |
614 | BUG(); |
615 | } |
616 | + ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) | |
617 | + MAX_BURST(max_burst)); |
618 | max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep); |
619 | ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload)); |
620 | |
621 | diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c |
622 | index e5ccafc..c6f8e62 100644 |
623 | --- a/drivers/usb/serial/ftdi_sio.c |
624 | +++ b/drivers/usb/serial/ftdi_sio.c |
625 | @@ -199,6 +199,8 @@ static struct usb_device_id id_table_combined [] = { |
626 | { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) }, |
627 | { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) }, |
628 | { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) }, |
629 | + { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_CC_PID) }, |
630 | + { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_AGP_PID) }, |
631 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, |
632 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, |
633 | { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, |
634 | diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h |
635 | index 9852827..6dd7925 100644 |
636 | --- a/drivers/usb/serial/ftdi_sio_ids.h |
637 | +++ b/drivers/usb/serial/ftdi_sio_ids.h |
638 | @@ -772,6 +772,8 @@ |
639 | */ |
640 | #define NEWPORT_VID 0x104D |
641 | #define NEWPORT_AGILIS_PID 0x3000 |
642 | +#define NEWPORT_CONEX_CC_PID 0x3002 |
643 | +#define NEWPORT_CONEX_AGP_PID 0x3006 |
644 | |
645 | /* Interbiometrics USB I/O Board */ |
646 | /* Developed for Interbiometrics by Rudolf Gugler */ |
647 | diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c |
648 | index 7e8bb8f..b68efdc 100644 |
649 | --- a/drivers/usb/serial/io_ti.c |
650 | +++ b/drivers/usb/serial/io_ti.c |
651 | @@ -550,6 +550,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout, |
652 | wait_queue_t wait; |
653 | unsigned long flags; |
654 | |
655 | + if (!tty) |
656 | + return; |
657 | + |
658 | if (!timeout) |
659 | timeout = (HZ * EDGE_CLOSING_WAIT)/100; |
660 | |
661 | diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
662 | index 16efe0a..386b3ab 100644 |
663 | --- a/drivers/usb/serial/option.c |
664 | +++ b/drivers/usb/serial/option.c |
665 | @@ -196,6 +196,7 @@ static void option_instat_callback(struct urb *urb); |
666 | |
667 | #define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */ |
668 | #define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */ |
669 | +#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */ |
670 | |
671 | #define KYOCERA_VENDOR_ID 0x0c88 |
672 | #define KYOCERA_PRODUCT_KPC650 0x17da |
673 | @@ -341,8 +342,8 @@ static void option_instat_callback(struct urb *urb); |
674 | #define CINTERION_PRODUCT_EU3_E 0x0051 |
675 | #define CINTERION_PRODUCT_EU3_P 0x0052 |
676 | #define CINTERION_PRODUCT_PH8 0x0053 |
677 | -#define CINTERION_PRODUCT_AH6 0x0055 |
678 | -#define CINTERION_PRODUCT_PLS8 0x0060 |
679 | +#define CINTERION_PRODUCT_AHXX 0x0055 |
680 | +#define CINTERION_PRODUCT_PLXX 0x0060 |
681 | |
682 | /* Olivetti products */ |
683 | #define OLIVETTI_VENDOR_ID 0x0b3c |
684 | @@ -771,6 +772,7 @@ static const struct usb_device_id option_ids[] = { |
685 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ |
686 | { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) }, |
687 | { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) }, |
688 | + { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) }, |
689 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ |
690 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, |
691 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, |
692 | @@ -966,6 +968,8 @@ static const struct usb_device_id option_ids[] = { |
693 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
694 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) }, |
695 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) }, |
696 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */ |
697 | + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
698 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) }, |
699 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) }, |
700 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff), |
701 | @@ -1264,8 +1268,9 @@ static const struct usb_device_id option_ids[] = { |
702 | { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, |
703 | { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, |
704 | { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) }, |
705 | - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AH6) }, |
706 | - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLS8) }, |
707 | + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) }, |
708 | + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX), |
709 | + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
710 | { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, |
711 | { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, |
712 | { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, |
713 | diff --git a/drivers/xen/events.c b/drivers/xen/events.c |
714 | index 26c47a4..417c133 100644 |
715 | --- a/drivers/xen/events.c |
716 | +++ b/drivers/xen/events.c |
717 | @@ -1258,7 +1258,7 @@ static void __xen_evtchn_do_upcall(void) |
718 | { |
719 | int start_word_idx, start_bit_idx; |
720 | int word_idx, bit_idx; |
721 | - int i; |
722 | + int i, irq; |
723 | int cpu = get_cpu(); |
724 | struct shared_info *s = HYPERVISOR_shared_info; |
725 | struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); |
726 | @@ -1266,6 +1266,8 @@ static void __xen_evtchn_do_upcall(void) |
727 | |
728 | do { |
729 | unsigned long pending_words; |
730 | + unsigned long pending_bits; |
731 | + struct irq_desc *desc; |
732 | |
733 | vcpu_info->evtchn_upcall_pending = 0; |
734 | |
735 | @@ -1276,6 +1278,17 @@ static void __xen_evtchn_do_upcall(void) |
736 | /* Clear master flag /before/ clearing selector flag. */ |
737 | wmb(); |
738 | #endif |
739 | + if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) { |
740 | + int evtchn = evtchn_from_irq(irq); |
741 | + word_idx = evtchn / BITS_PER_LONG; |
742 | + pending_bits = evtchn % BITS_PER_LONG; |
743 | + if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) { |
744 | + desc = irq_to_desc(irq); |
745 | + if (desc) |
746 | + generic_handle_irq_desc(irq, desc); |
747 | + } |
748 | + } |
749 | + |
750 | pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0); |
751 | |
752 | start_word_idx = __this_cpu_read(current_word_idx); |
753 | @@ -1284,7 +1297,6 @@ static void __xen_evtchn_do_upcall(void) |
754 | word_idx = start_word_idx; |
755 | |
756 | for (i = 0; pending_words != 0; i++) { |
757 | - unsigned long pending_bits; |
758 | unsigned long words; |
759 | |
760 | words = MASK_LSBS(pending_words, word_idx); |
761 | @@ -1313,8 +1325,7 @@ static void __xen_evtchn_do_upcall(void) |
762 | |
763 | do { |
764 | unsigned long bits; |
765 | - int port, irq; |
766 | - struct irq_desc *desc; |
767 | + int port; |
768 | |
769 | bits = MASK_LSBS(pending_bits, bit_idx); |
770 | |
771 | diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c |
772 | index 2263144..d0e5fc5 100644 |
773 | --- a/fs/cifs/cifs_dfs_ref.c |
774 | +++ b/fs/cifs/cifs_dfs_ref.c |
775 | @@ -18,6 +18,7 @@ |
776 | #include <linux/slab.h> |
777 | #include <linux/vfs.h> |
778 | #include <linux/fs.h> |
779 | +#include <linux/inet.h> |
780 | #include "cifsglob.h" |
781 | #include "cifsproto.h" |
782 | #include "cifsfs.h" |
783 | @@ -150,7 +151,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata, |
784 | * assuming that we have 'unc=' and 'ip=' in |
785 | * the original sb_mountdata |
786 | */ |
787 | - md_len = strlen(sb_mountdata) + rc + strlen(ref->node_name) + 12; |
788 | + md_len = strlen(sb_mountdata) + rc + strlen(ref->node_name) + 12 + |
789 | + INET6_ADDRSTRLEN; |
790 | mountdata = kzalloc(md_len+1, GFP_KERNEL); |
791 | if (mountdata == NULL) { |
792 | rc = -ENOMEM; |
793 | diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c |
794 | index 745da3d..6fbfbdb 100644 |
795 | --- a/fs/cifs/inode.c |
796 | +++ b/fs/cifs/inode.c |
797 | @@ -173,7 +173,8 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) |
798 | |
799 | if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL) |
800 | inode->i_flags |= S_AUTOMOUNT; |
801 | - cifs_set_ops(inode); |
802 | + if (inode->i_state & I_NEW) |
803 | + cifs_set_ops(inode); |
804 | } |
805 | |
806 | void |
807 | diff --git a/fs/fat/inode.c b/fs/fat/inode.c |
808 | index 21687e3..44ae375 100644 |
809 | --- a/fs/fat/inode.c |
810 | +++ b/fs/fat/inode.c |
811 | @@ -1237,6 +1237,19 @@ static int fat_read_root(struct inode *inode) |
812 | return 0; |
813 | } |
814 | |
815 | +static unsigned long calc_fat_clusters(struct super_block *sb) |
816 | +{ |
817 | + struct msdos_sb_info *sbi = MSDOS_SB(sb); |
818 | + |
819 | + /* Divide first to avoid overflow */ |
820 | + if (sbi->fat_bits != 12) { |
821 | + unsigned long ent_per_sec = sb->s_blocksize * 8 / sbi->fat_bits; |
822 | + return ent_per_sec * sbi->fat_length; |
823 | + } |
824 | + |
825 | + return sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits; |
826 | +} |
827 | + |
828 | /* |
829 | * Read the super block of an MS-DOS FS. |
830 | */ |
831 | @@ -1433,7 +1446,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, |
832 | sbi->fat_bits = (total_clusters > MAX_FAT12) ? 16 : 12; |
833 | |
834 | /* check that FAT table does not overflow */ |
835 | - fat_clusters = sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits; |
836 | + fat_clusters = calc_fat_clusters(sb); |
837 | total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT); |
838 | if (total_clusters > MAX_FAT(sb)) { |
839 | if (!silent) |
840 | diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c |
841 | index 77b69b2..13fc885 100644 |
842 | --- a/fs/jfs/inode.c |
843 | +++ b/fs/jfs/inode.c |
844 | @@ -125,7 +125,7 @@ int jfs_write_inode(struct inode *inode, struct writeback_control *wbc) |
845 | { |
846 | int wait = wbc->sync_mode == WB_SYNC_ALL; |
847 | |
848 | - if (test_cflag(COMMIT_Nolink, inode)) |
849 | + if (inode->i_nlink == 0) |
850 | return 0; |
851 | /* |
852 | * If COMMIT_DIRTY is not set, the inode isn't really dirty. |
853 | diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c |
854 | index 2eb952c..cbe48ea 100644 |
855 | --- a/fs/jfs/jfs_logmgr.c |
856 | +++ b/fs/jfs/jfs_logmgr.c |
857 | @@ -1058,7 +1058,8 @@ static int lmLogSync(struct jfs_log * log, int hard_sync) |
858 | */ |
859 | void jfs_syncpt(struct jfs_log *log, int hard_sync) |
860 | { LOG_LOCK(log); |
861 | - lmLogSync(log, hard_sync); |
862 | + if (!test_bit(log_QUIESCE, &log->flag)) |
863 | + lmLogSync(log, hard_sync); |
864 | LOG_UNLOCK(log); |
865 | } |
866 | |
867 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
868 | index 04f449c..d121c67 100644 |
869 | --- a/fs/nfs/nfs4proc.c |
870 | +++ b/fs/nfs/nfs4proc.c |
871 | @@ -1053,7 +1053,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) |
872 | struct nfs4_state *state = opendata->state; |
873 | struct nfs_inode *nfsi = NFS_I(state->inode); |
874 | struct nfs_delegation *delegation; |
875 | - int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC); |
876 | + int open_mode = opendata->o_arg.open_flags; |
877 | fmode_t fmode = opendata->o_arg.fmode; |
878 | nfs4_stateid stateid; |
879 | int ret = -EAGAIN; |
880 | diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c |
881 | index 8f7b95a..aa526be 100644 |
882 | --- a/fs/nilfs2/inode.c |
883 | +++ b/fs/nilfs2/inode.c |
884 | @@ -195,13 +195,32 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc) |
885 | |
886 | static int nilfs_set_page_dirty(struct page *page) |
887 | { |
888 | - int ret = __set_page_dirty_buffers(page); |
889 | + int ret = __set_page_dirty_nobuffers(page); |
890 | |
891 | - if (ret) { |
892 | + if (page_has_buffers(page)) { |
893 | struct inode *inode = page->mapping->host; |
894 | - unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits); |
895 | + unsigned nr_dirty = 0; |
896 | + struct buffer_head *bh, *head; |
897 | |
898 | - nilfs_set_file_dirty(inode, nr_dirty); |
899 | + /* |
900 | + * This page is locked by callers, and no other thread |
901 | + * concurrently marks its buffers dirty since they are |
902 | + * only dirtied through routines in fs/buffer.c in |
903 | + * which call sites of mark_buffer_dirty are protected |
904 | + * by page lock. |
905 | + */ |
906 | + bh = head = page_buffers(page); |
907 | + do { |
908 | + /* Do not mark hole blocks dirty */ |
909 | + if (buffer_dirty(bh) || !buffer_mapped(bh)) |
910 | + continue; |
911 | + |
912 | + set_buffer_dirty(bh); |
913 | + nr_dirty++; |
914 | + } while (bh = bh->b_this_page, bh != head); |
915 | + |
916 | + if (nr_dirty) |
917 | + nilfs_set_file_dirty(inode, nr_dirty); |
918 | } |
919 | return ret; |
920 | } |
921 | diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c |
922 | index 2f5b92e..7eb1c0c 100644 |
923 | --- a/fs/ocfs2/extent_map.c |
924 | +++ b/fs/ocfs2/extent_map.c |
925 | @@ -791,7 +791,7 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
926 | &hole_size, &rec, &is_last); |
927 | if (ret) { |
928 | mlog_errno(ret); |
929 | - goto out; |
930 | + goto out_unlock; |
931 | } |
932 | |
933 | if (rec.e_blkno == 0ULL) { |
934 | diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c |
935 | index 3011b87..23c79ca 100644 |
936 | --- a/fs/xfs/xfs_iops.c |
937 | +++ b/fs/xfs/xfs_iops.c |
938 | @@ -457,6 +457,28 @@ xfs_vn_getattr( |
939 | return 0; |
940 | } |
941 | |
942 | +static void |
943 | +xfs_setattr_mode( |
944 | + struct xfs_trans *tp, |
945 | + struct xfs_inode *ip, |
946 | + struct iattr *iattr) |
947 | +{ |
948 | + struct inode *inode = VFS_I(ip); |
949 | + umode_t mode = iattr->ia_mode; |
950 | + |
951 | + ASSERT(tp); |
952 | + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
953 | + |
954 | + if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) |
955 | + mode &= ~S_ISGID; |
956 | + |
957 | + ip->i_d.di_mode &= S_IFMT; |
958 | + ip->i_d.di_mode |= mode & ~S_IFMT; |
959 | + |
960 | + inode->i_mode &= S_IFMT; |
961 | + inode->i_mode |= mode & ~S_IFMT; |
962 | +} |
963 | + |
964 | int |
965 | xfs_setattr_nonsize( |
966 | struct xfs_inode *ip, |
967 | @@ -608,18 +630,8 @@ xfs_setattr_nonsize( |
968 | /* |
969 | * Change file access modes. |
970 | */ |
971 | - if (mask & ATTR_MODE) { |
972 | - umode_t mode = iattr->ia_mode; |
973 | - |
974 | - if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) |
975 | - mode &= ~S_ISGID; |
976 | - |
977 | - ip->i_d.di_mode &= S_IFMT; |
978 | - ip->i_d.di_mode |= mode & ~S_IFMT; |
979 | - |
980 | - inode->i_mode &= S_IFMT; |
981 | - inode->i_mode |= mode & ~S_IFMT; |
982 | - } |
983 | + if (mask & ATTR_MODE) |
984 | + xfs_setattr_mode(tp, ip, iattr); |
985 | |
986 | /* |
987 | * Change file access or modified times. |
988 | @@ -716,9 +728,8 @@ xfs_setattr_size( |
989 | return XFS_ERROR(error); |
990 | |
991 | ASSERT(S_ISREG(ip->i_d.di_mode)); |
992 | - ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| |
993 | - ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID| |
994 | - ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); |
995 | + ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| |
996 | + ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); |
997 | |
998 | lock_flags = XFS_ILOCK_EXCL; |
999 | if (!(flags & XFS_ATTR_NOLOCK)) |
1000 | @@ -861,6 +872,12 @@ xfs_setattr_size( |
1001 | xfs_iflags_set(ip, XFS_ITRUNCATED); |
1002 | } |
1003 | |
1004 | + /* |
1005 | + * Change file access modes. |
1006 | + */ |
1007 | + if (mask & ATTR_MODE) |
1008 | + xfs_setattr_mode(tp, ip, iattr); |
1009 | + |
1010 | if (mask & ATTR_CTIME) { |
1011 | inode->i_ctime = iattr->ia_ctime; |
1012 | ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; |
1013 | diff --git a/include/linux/wait.h b/include/linux/wait.h |
1014 | index 1dee81c..6c6c20e 100644 |
1015 | --- a/include/linux/wait.h |
1016 | +++ b/include/linux/wait.h |
1017 | @@ -233,6 +233,8 @@ do { \ |
1018 | if (!ret) \ |
1019 | break; \ |
1020 | } \ |
1021 | + if (!ret && (condition)) \ |
1022 | + ret = 1; \ |
1023 | finish_wait(&wq, &__wait); \ |
1024 | } while (0) |
1025 | |
1026 | @@ -249,8 +251,9 @@ do { \ |
1027 | * wake_up() has to be called after changing any variable that could |
1028 | * change the result of the wait condition. |
1029 | * |
1030 | - * The function returns 0 if the @timeout elapsed, and the remaining |
1031 | - * jiffies if the condition evaluated to true before the timeout elapsed. |
1032 | + * The function returns 0 if the @timeout elapsed, or the remaining |
1033 | + * jiffies (at least 1) if the @condition evaluated to %true before |
1034 | + * the @timeout elapsed. |
1035 | */ |
1036 | #define wait_event_timeout(wq, condition, timeout) \ |
1037 | ({ \ |
1038 | @@ -318,6 +321,8 @@ do { \ |
1039 | ret = -ERESTARTSYS; \ |
1040 | break; \ |
1041 | } \ |
1042 | + if (!ret && (condition)) \ |
1043 | + ret = 1; \ |
1044 | finish_wait(&wq, &__wait); \ |
1045 | } while (0) |
1046 | |
1047 | @@ -334,9 +339,10 @@ do { \ |
1048 | * wake_up() has to be called after changing any variable that could |
1049 | * change the result of the wait condition. |
1050 | * |
1051 | - * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it |
1052 | - * was interrupted by a signal, and the remaining jiffies otherwise |
1053 | - * if the condition evaluated to true before the timeout elapsed. |
1054 | + * Returns: |
1055 | + * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by |
1056 | + * a signal, or the remaining jiffies (at least 1) if the @condition |
1057 | + * evaluated to %true before the @timeout elapsed. |
1058 | */ |
1059 | #define wait_event_interruptible_timeout(wq, condition, timeout) \ |
1060 | ({ \ |
1061 | diff --git a/lib/klist.c b/lib/klist.c |
1062 | index 0874e41..358a368 100644 |
1063 | --- a/lib/klist.c |
1064 | +++ b/lib/klist.c |
1065 | @@ -193,10 +193,10 @@ static void klist_release(struct kref *kref) |
1066 | if (waiter->node != n) |
1067 | continue; |
1068 | |
1069 | + list_del(&waiter->list); |
1070 | waiter->woken = 1; |
1071 | mb(); |
1072 | wake_up_process(waiter->process); |
1073 | - list_del(&waiter->list); |
1074 | } |
1075 | spin_unlock(&klist_remove_lock); |
1076 | knode_set_klist(n, NULL); |
1077 | diff --git a/mm/huge_memory.c b/mm/huge_memory.c |
1078 | index caf15b6..ef99c15 100644 |
1079 | --- a/mm/huge_memory.c |
1080 | +++ b/mm/huge_memory.c |
1081 | @@ -1949,7 +1949,12 @@ static void collapse_huge_page(struct mm_struct *mm, |
1082 | pte_unmap(pte); |
1083 | spin_lock(&mm->page_table_lock); |
1084 | BUG_ON(!pmd_none(*pmd)); |
1085 | - set_pmd_at(mm, address, pmd, _pmd); |
1086 | + /* |
1087 | + * We can only use set_pmd_at when establishing |
1088 | + * hugepmds and never for establishing regular pmds that |
1089 | + * points to regular pagetables. Use pmd_populate for that |
1090 | + */ |
1091 | + pmd_populate(mm, pmd, pmd_pgtable(_pmd)); |
1092 | spin_unlock(&mm->page_table_lock); |
1093 | anon_vma_unlock(vma->anon_vma); |
1094 | goto out; |
1095 | diff --git a/mm/migrate.c b/mm/migrate.c |
1096 | index 1107238..37cd07b 100644 |
1097 | --- a/mm/migrate.c |
1098 | +++ b/mm/migrate.c |
1099 | @@ -145,7 +145,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, |
1100 | if (PageHuge(new)) |
1101 | pte = pte_mkhuge(pte); |
1102 | #endif |
1103 | - flush_cache_page(vma, addr, pte_pfn(pte)); |
1104 | + flush_dcache_page(new); |
1105 | set_pte_at(mm, addr, ptep, pte); |
1106 | |
1107 | if (PageHuge(new)) { |
1108 | diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c |
1109 | index 8d1ca2d..c1f947b 100644 |
1110 | --- a/mm/mmu_notifier.c |
1111 | +++ b/mm/mmu_notifier.c |
1112 | @@ -37,51 +37,48 @@ static struct srcu_struct srcu; |
1113 | void __mmu_notifier_release(struct mm_struct *mm) |
1114 | { |
1115 | struct mmu_notifier *mn; |
1116 | + struct hlist_node *node; |
1117 | int id; |
1118 | |
1119 | /* |
1120 | - * srcu_read_lock() here will block synchronize_srcu() in |
1121 | - * mmu_notifier_unregister() until all registered |
1122 | - * ->release() callouts this function makes have |
1123 | - * returned. |
1124 | + * SRCU here will block mmu_notifier_unregister until |
1125 | + * ->release returns. |
1126 | */ |
1127 | id = srcu_read_lock(&srcu); |
1128 | + hlist_for_each_entry_rcu(mn, node, &mm->mmu_notifier_mm->list, hlist) |
1129 | + /* |
1130 | + * If ->release runs before mmu_notifier_unregister it must be |
1131 | + * handled, as it's the only way for the driver to flush all |
1132 | + * existing sptes and stop the driver from establishing any more |
1133 | + * sptes before all the pages in the mm are freed. |
1134 | + */ |
1135 | + if (mn->ops->release) |
1136 | + mn->ops->release(mn, mm); |
1137 | + srcu_read_unlock(&srcu, id); |
1138 | + |
1139 | spin_lock(&mm->mmu_notifier_mm->lock); |
1140 | while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) { |
1141 | mn = hlist_entry(mm->mmu_notifier_mm->list.first, |
1142 | struct mmu_notifier, |
1143 | hlist); |
1144 | - |
1145 | /* |
1146 | - * Unlink. This will prevent mmu_notifier_unregister() |
1147 | - * from also making the ->release() callout. |
1148 | + * We arrived before mmu_notifier_unregister so |
1149 | + * mmu_notifier_unregister will do nothing other than to wait |
1150 | + * for ->release to finish and for mmu_notifier_unregister to |
1151 | + * return. |
1152 | */ |
1153 | hlist_del_init_rcu(&mn->hlist); |
1154 | - spin_unlock(&mm->mmu_notifier_mm->lock); |
1155 | - |
1156 | - /* |
1157 | - * Clear sptes. (see 'release' description in mmu_notifier.h) |
1158 | - */ |
1159 | - if (mn->ops->release) |
1160 | - mn->ops->release(mn, mm); |
1161 | - |
1162 | - spin_lock(&mm->mmu_notifier_mm->lock); |
1163 | } |
1164 | spin_unlock(&mm->mmu_notifier_mm->lock); |
1165 | |
1166 | /* |
1167 | - * All callouts to ->release() which we have done are complete. |
1168 | - * Allow synchronize_srcu() in mmu_notifier_unregister() to complete |
1169 | - */ |
1170 | - srcu_read_unlock(&srcu, id); |
1171 | - |
1172 | - /* |
1173 | - * mmu_notifier_unregister() may have unlinked a notifier and may |
1174 | - * still be calling out to it. Additionally, other notifiers |
1175 | - * may have been active via vmtruncate() et. al. Block here |
1176 | - * to ensure that all notifier callouts for this mm have been |
1177 | - * completed and the sptes are really cleaned up before returning |
1178 | - * to exit_mmap(). |
1179 | + * synchronize_srcu here prevents mmu_notifier_release from returning to |
1180 | + * exit_mmap (which would proceed with freeing all pages in the mm) |
1181 | + * until the ->release method returns, if it was invoked by |
1182 | + * mmu_notifier_unregister. |
1183 | + * |
1184 | + * The mmu_notifier_mm can't go away from under us because one mm_count |
1185 | + * is held by exit_mmap. |
1186 | */ |
1187 | synchronize_srcu(&srcu); |
1188 | } |
1189 | @@ -302,31 +299,34 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm) |
1190 | { |
1191 | BUG_ON(atomic_read(&mm->mm_count) <= 0); |
1192 | |
1193 | - spin_lock(&mm->mmu_notifier_mm->lock); |
1194 | if (!hlist_unhashed(&mn->hlist)) { |
1195 | + /* |
1196 | + * SRCU here will force exit_mmap to wait for ->release to |
1197 | + * finish before freeing the pages. |
1198 | + */ |
1199 | int id; |
1200 | |
1201 | + id = srcu_read_lock(&srcu); |
1202 | /* |
1203 | - * Ensure we synchronize up with __mmu_notifier_release(). |
1204 | + * exit_mmap will block in mmu_notifier_release to guarantee |
1205 | + * that ->release is called before freeing the pages. |
1206 | */ |
1207 | - id = srcu_read_lock(&srcu); |
1208 | - |
1209 | - hlist_del_rcu(&mn->hlist); |
1210 | - spin_unlock(&mm->mmu_notifier_mm->lock); |
1211 | - |
1212 | if (mn->ops->release) |
1213 | mn->ops->release(mn, mm); |
1214 | + srcu_read_unlock(&srcu, id); |
1215 | |
1216 | + spin_lock(&mm->mmu_notifier_mm->lock); |
1217 | /* |
1218 | - * Allow __mmu_notifier_release() to complete. |
1219 | + * Can not use list_del_rcu() since __mmu_notifier_release |
1220 | + * can delete it before we hold the lock. |
1221 | */ |
1222 | - srcu_read_unlock(&srcu, id); |
1223 | - } else |
1224 | + hlist_del_init_rcu(&mn->hlist); |
1225 | spin_unlock(&mm->mmu_notifier_mm->lock); |
1226 | + } |
1227 | |
1228 | /* |
1229 | - * Wait for any running method to finish, including ->release() if it |
1230 | - * was run by __mmu_notifier_release() instead of us. |
1231 | + * Wait for any running method to finish, of course including |
1232 | + * ->release if it was run by mmu_notifier_relase instead of us. |
1233 | */ |
1234 | synchronize_srcu(&srcu); |
1235 | |
1236 | diff --git a/mm/pagewalk.c b/mm/pagewalk.c |
1237 | index aa9701e..1090e77 100644 |
1238 | --- a/mm/pagewalk.c |
1239 | +++ b/mm/pagewalk.c |
1240 | @@ -127,28 +127,7 @@ static int walk_hugetlb_range(struct vm_area_struct *vma, |
1241 | return 0; |
1242 | } |
1243 | |
1244 | -static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk) |
1245 | -{ |
1246 | - struct vm_area_struct *vma; |
1247 | - |
1248 | - /* We don't need vma lookup at all. */ |
1249 | - if (!walk->hugetlb_entry) |
1250 | - return NULL; |
1251 | - |
1252 | - VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem)); |
1253 | - vma = find_vma(walk->mm, addr); |
1254 | - if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma)) |
1255 | - return vma; |
1256 | - |
1257 | - return NULL; |
1258 | -} |
1259 | - |
1260 | #else /* CONFIG_HUGETLB_PAGE */ |
1261 | -static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk) |
1262 | -{ |
1263 | - return NULL; |
1264 | -} |
1265 | - |
1266 | static int walk_hugetlb_range(struct vm_area_struct *vma, |
1267 | unsigned long addr, unsigned long end, |
1268 | struct mm_walk *walk) |
1269 | @@ -199,30 +178,53 @@ int walk_page_range(unsigned long addr, unsigned long end, |
1270 | if (!walk->mm) |
1271 | return -EINVAL; |
1272 | |
1273 | + VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem)); |
1274 | + |
1275 | pgd = pgd_offset(walk->mm, addr); |
1276 | do { |
1277 | - struct vm_area_struct *vma; |
1278 | + struct vm_area_struct *vma = NULL; |
1279 | |
1280 | next = pgd_addr_end(addr, end); |
1281 | |
1282 | /* |
1283 | - * handle hugetlb vma individually because pagetable walk for |
1284 | - * the hugetlb page is dependent on the architecture and |
1285 | - * we can't handled it in the same manner as non-huge pages. |
1286 | + * This function was not intended to be vma based. |
1287 | + * But there are vma special cases to be handled: |
1288 | + * - hugetlb vma's |
1289 | + * - VM_PFNMAP vma's |
1290 | */ |
1291 | - vma = hugetlb_vma(addr, walk); |
1292 | + vma = find_vma(walk->mm, addr); |
1293 | if (vma) { |
1294 | - if (vma->vm_end < next) |
1295 | + /* |
1296 | + * There are no page structures backing a VM_PFNMAP |
1297 | + * range, so do not allow split_huge_page_pmd(). |
1298 | + */ |
1299 | + if ((vma->vm_start <= addr) && |
1300 | + (vma->vm_flags & VM_PFNMAP)) { |
1301 | next = vma->vm_end; |
1302 | + pgd = pgd_offset(walk->mm, next); |
1303 | + continue; |
1304 | + } |
1305 | /* |
1306 | - * Hugepage is very tightly coupled with vma, so |
1307 | - * walk through hugetlb entries within a given vma. |
1308 | + * Handle hugetlb vma individually because pagetable |
1309 | + * walk for the hugetlb page is dependent on the |
1310 | + * architecture and we can't handled it in the same |
1311 | + * manner as non-huge pages. |
1312 | */ |
1313 | - err = walk_hugetlb_range(vma, addr, next, walk); |
1314 | - if (err) |
1315 | - break; |
1316 | - pgd = pgd_offset(walk->mm, next); |
1317 | - continue; |
1318 | + if (walk->hugetlb_entry && (vma->vm_start <= addr) && |
1319 | + is_vm_hugetlb_page(vma)) { |
1320 | + if (vma->vm_end < next) |
1321 | + next = vma->vm_end; |
1322 | + /* |
1323 | + * Hugepage is very tightly coupled with vma, |
1324 | + * so walk through hugetlb entries within a |
1325 | + * given vma. |
1326 | + */ |
1327 | + err = walk_hugetlb_range(vma, addr, next, walk); |
1328 | + if (err) |
1329 | + break; |
1330 | + pgd = pgd_offset(walk->mm, next); |
1331 | + continue; |
1332 | + } |
1333 | } |
1334 | |
1335 | if (pgd_none_or_clear_bad(pgd)) { |
1336 | diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c |
1337 | index 48f937e..95a04f0 100644 |
1338 | --- a/net/mac80211/iface.c |
1339 | +++ b/net/mac80211/iface.c |
1340 | @@ -1257,6 +1257,15 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local) |
1341 | |
1342 | ASSERT_RTNL(); |
1343 | |
1344 | + /* |
1345 | + * Close all AP_VLAN interfaces first, as otherwise they |
1346 | + * might be closed while the AP interface they belong to |
1347 | + * is closed, causing unregister_netdevice_many() to crash. |
1348 | + */ |
1349 | + list_for_each_entry(sdata, &local->interfaces, list) |
1350 | + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) |
1351 | + dev_close(sdata->dev); |
1352 | + |
1353 | mutex_lock(&local->iflist_mtx); |
1354 | list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { |
1355 | list_del(&sdata->list); |
1356 | diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py |
1357 | index a4ffc95..4c11605 100755 |
1358 | --- a/tools/perf/scripts/python/net_dropmonitor.py |
1359 | +++ b/tools/perf/scripts/python/net_dropmonitor.py |
1360 | @@ -40,9 +40,9 @@ def get_kallsyms_table(): |
1361 | |
1362 | def get_sym(sloc): |
1363 | loc = int(sloc) |
1364 | - for i in kallsyms: |
1365 | - if (i['loc'] >= loc): |
1366 | - return (i['name'], i['loc']-loc) |
1367 | + for i in kallsyms[::-1]: |
1368 | + if loc >= i['loc']: |
1369 | + return (i['name'], loc - i['loc']) |
1370 | return (None, 0) |
1371 | |
1372 | def print_drop_table(): |
1373 | @@ -64,7 +64,7 @@ def trace_end(): |
1374 | |
1375 | # called from perf, when it finds a correspoinding event |
1376 | def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, |
1377 | - skbaddr, protocol, location): |
1378 | + skbaddr, location, protocol): |
1379 | slocation = str(location) |
1380 | try: |
1381 | drop_log[slocation] = drop_log[slocation] + 1 |