Contents of /trunk/kernel26-magellan-server/patches-2.6.27-r2/0105-2.6.27.6-all-fixes.patch
Parent Directory | Revision Log
Revision 728 -
(show annotations)
(download)
Tue Dec 23 09:41:15 2008 UTC (15 years, 9 months ago) by niro
File size: 71654 byte(s)
-ver bump to 2.6.27-r2: - updated to linux-2.6.27.10 - using tuxonice current-20081025 - adjusted kernel-configs to use tuxonice and enabled ufs filesystem support
1 | diff --git a/Documentation/cciss.txt b/Documentation/cciss.txt |
2 | index 8244c64..48d80d9 100644 |
3 | --- a/Documentation/cciss.txt |
4 | +++ b/Documentation/cciss.txt |
5 | @@ -26,6 +26,8 @@ This driver is known to work with the following cards: |
6 | * SA P410i |
7 | * SA P411 |
8 | * SA P812 |
9 | + * SA P712m |
10 | + * SA P711m |
11 | |
12 | Detecting drive failures: |
13 | ------------------------- |
14 | diff --git a/arch/arm/mach-pxa/include/mach/reset.h b/arch/arm/mach-pxa/include/mach/reset.h |
15 | index 9489a48..7b8842c 100644 |
16 | --- a/arch/arm/mach-pxa/include/mach/reset.h |
17 | +++ b/arch/arm/mach-pxa/include/mach/reset.h |
18 | @@ -10,9 +10,12 @@ |
19 | extern unsigned int reset_status; |
20 | extern void clear_reset_status(unsigned int mask); |
21 | |
22 | -/* |
23 | - * register GPIO as reset generator |
24 | +/** |
25 | + * init_gpio_reset() - register GPIO as reset generator |
26 | + * |
27 | + * @gpio - gpio nr |
28 | + * @output - set gpio as out/low instead of input during normal work |
29 | */ |
30 | -extern int init_gpio_reset(int gpio); |
31 | +extern int init_gpio_reset(int gpio, int output); |
32 | |
33 | #endif /* __ASM_ARCH_RESET_H */ |
34 | diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c |
35 | index 9996c61..1b236a6 100644 |
36 | --- a/arch/arm/mach-pxa/reset.c |
37 | +++ b/arch/arm/mach-pxa/reset.c |
38 | @@ -20,7 +20,7 @@ static void do_hw_reset(void); |
39 | |
40 | static int reset_gpio = -1; |
41 | |
42 | -int init_gpio_reset(int gpio) |
43 | +int init_gpio_reset(int gpio, int output) |
44 | { |
45 | int rc; |
46 | |
47 | @@ -30,9 +30,12 @@ int init_gpio_reset(int gpio) |
48 | goto out; |
49 | } |
50 | |
51 | - rc = gpio_direction_input(gpio); |
52 | + if (output) |
53 | + rc = gpio_direction_output(gpio, 0); |
54 | + else |
55 | + rc = gpio_direction_input(gpio); |
56 | if (rc) { |
57 | - printk(KERN_ERR "Can't configure reset_gpio for input\n"); |
58 | + printk(KERN_ERR "Can't configure reset_gpio\n"); |
59 | gpio_free(gpio); |
60 | goto out; |
61 | } |
62 | diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c |
63 | index b569f3b..32cee4c 100644 |
64 | --- a/arch/arm/mach-pxa/spitz.c |
65 | +++ b/arch/arm/mach-pxa/spitz.c |
66 | @@ -548,7 +548,7 @@ static void spitz_restart(char mode) |
67 | |
68 | static void __init common_init(void) |
69 | { |
70 | - init_gpio_reset(SPITZ_GPIO_ON_RESET); |
71 | + init_gpio_reset(SPITZ_GPIO_ON_RESET, 1); |
72 | pm_power_off = spitz_poweroff; |
73 | arm_pm_restart = spitz_restart; |
74 | |
75 | diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c |
76 | index 9f3ef9e..130e37e 100644 |
77 | --- a/arch/arm/mach-pxa/tosa.c |
78 | +++ b/arch/arm/mach-pxa/tosa.c |
79 | @@ -781,7 +781,7 @@ static void __init tosa_init(void) |
80 | gpio_set_wake(MFP_PIN_GPIO1, 1); |
81 | /* We can't pass to gpio-keys since it will drop the Reset altfunc */ |
82 | |
83 | - init_gpio_reset(TOSA_GPIO_ON_RESET); |
84 | + init_gpio_reset(TOSA_GPIO_ON_RESET, 0); |
85 | |
86 | pm_power_off = tosa_poweroff; |
87 | arm_pm_restart = tosa_restart; |
88 | diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c |
89 | index 158bd96..99ec030 100644 |
90 | --- a/arch/arm/mm/cache-xsc3l2.c |
91 | +++ b/arch/arm/mm/cache-xsc3l2.c |
92 | @@ -97,7 +97,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end) |
93 | /* |
94 | * Clean and invalidate partial last cache line. |
95 | */ |
96 | - if (end & (CACHE_LINE_SIZE - 1)) { |
97 | + if (start < end && (end & (CACHE_LINE_SIZE - 1))) { |
98 | xsc3_l2_clean_pa(end & ~(CACHE_LINE_SIZE - 1)); |
99 | xsc3_l2_inv_pa(end & ~(CACHE_LINE_SIZE - 1)); |
100 | end &= ~(CACHE_LINE_SIZE - 1); |
101 | @@ -106,7 +106,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end) |
102 | /* |
103 | * Invalidate all full cache lines between 'start' and 'end'. |
104 | */ |
105 | - while (start != end) { |
106 | + while (start < end) { |
107 | xsc3_l2_inv_pa(start); |
108 | start += CACHE_LINE_SIZE; |
109 | } |
110 | diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig |
111 | index ed92864..552d2b7 100644 |
112 | --- a/arch/x86/Kconfig |
113 | +++ b/arch/x86/Kconfig |
114 | @@ -1059,6 +1059,26 @@ config HIGHPTE |
115 | low memory. Setting this option will put user-space page table |
116 | entries in high memory. |
117 | |
118 | +config X86_RESERVE_LOW_64K |
119 | + bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen" |
120 | + default y |
121 | + help |
122 | + Reserve the first 64K of physical RAM on BIOSes that are known |
123 | + to potentially corrupt that memory range. A numbers of BIOSes are |
124 | + known to utilize this area during suspend/resume, so it must not |
125 | + be used by the kernel. |
126 | + |
127 | + Set this to N if you are absolutely sure that you trust the BIOS |
128 | + to get all its memory reservations and usages right. |
129 | + |
130 | + If you have doubts about the BIOS (e.g. suspend/resume does not |
131 | + work or there's kernel crashes after certain hardware hotplug |
132 | + events) and it's not AMI or Phoenix, then you might want to enable |
133 | + X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check typical |
134 | + corruption patterns. |
135 | + |
136 | + Say Y if unsure. |
137 | + |
138 | config MATH_EMULATION |
139 | bool |
140 | prompt "Math emulation" if X86_32 |
141 | diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c |
142 | index 9838f25..64b5c42 100644 |
143 | --- a/arch/x86/kernel/setup.c |
144 | +++ b/arch/x86/kernel/setup.c |
145 | @@ -578,6 +578,39 @@ static struct x86_quirks default_x86_quirks __initdata; |
146 | |
147 | struct x86_quirks *x86_quirks __initdata = &default_x86_quirks; |
148 | |
149 | +static int __init dmi_low_memory_corruption(const struct dmi_system_id *d) |
150 | +{ |
151 | + printk(KERN_NOTICE |
152 | + "%s detected: BIOS may corrupt low RAM, working it around.\n", |
153 | + d->ident); |
154 | + |
155 | + e820_update_range(0, 0x10000, E820_RAM, E820_RESERVED); |
156 | + sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); |
157 | + |
158 | + return 0; |
159 | +} |
160 | + |
161 | +/* List of systems that have known low memory corruption BIOS problems */ |
162 | +static struct dmi_system_id __initdata bad_bios_dmi_table[] = { |
163 | +#ifdef CONFIG_X86_RESERVE_LOW_64K |
164 | + { |
165 | + .callback = dmi_low_memory_corruption, |
166 | + .ident = "AMI BIOS", |
167 | + .matches = { |
168 | + DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), |
169 | + }, |
170 | + }, |
171 | + { |
172 | + .callback = dmi_low_memory_corruption, |
173 | + .ident = "Phoenix BIOS", |
174 | + .matches = { |
175 | + DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"), |
176 | + }, |
177 | + }, |
178 | +#endif |
179 | + {} |
180 | +}; |
181 | + |
182 | /* |
183 | * Determine if we were loaded by an EFI loader. If so, then we have also been |
184 | * passed the efi memmap, systab, etc., so we should use these data structures |
185 | @@ -699,6 +732,10 @@ void __init setup_arch(char **cmdline_p) |
186 | |
187 | finish_e820_parsing(); |
188 | |
189 | + dmi_scan_machine(); |
190 | + |
191 | + dmi_check_system(bad_bios_dmi_table); |
192 | + |
193 | #ifdef CONFIG_X86_32 |
194 | probe_roms(); |
195 | #endif |
196 | @@ -781,8 +818,6 @@ void __init setup_arch(char **cmdline_p) |
197 | vsmp_init(); |
198 | #endif |
199 | |
200 | - dmi_scan_machine(); |
201 | - |
202 | io_delay_init(); |
203 | |
204 | /* |
205 | @@ -885,3 +920,5 @@ void __init setup_arch(char **cmdline_p) |
206 | #endif |
207 | #endif |
208 | } |
209 | + |
210 | + |
211 | diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c |
212 | index 8f98e9d..de850e9 100644 |
213 | --- a/arch/x86/kernel/tsc.c |
214 | +++ b/arch/x86/kernel/tsc.c |
215 | @@ -639,10 +639,6 @@ void __init tsc_init(void) |
216 | cpu_khz = calibrate_cpu(); |
217 | #endif |
218 | |
219 | - lpj = ((u64)tsc_khz * 1000); |
220 | - do_div(lpj, HZ); |
221 | - lpj_fine = lpj; |
222 | - |
223 | printk("Detected %lu.%03lu MHz processor.\n", |
224 | (unsigned long)cpu_khz / 1000, |
225 | (unsigned long)cpu_khz % 1000); |
226 | @@ -662,6 +658,10 @@ void __init tsc_init(void) |
227 | /* now allow native_sched_clock() to use rdtsc */ |
228 | tsc_disabled = 0; |
229 | |
230 | + lpj = ((u64)tsc_khz * 1000); |
231 | + do_div(lpj, HZ); |
232 | + lpj_fine = lpj; |
233 | + |
234 | use_tsc_delay(); |
235 | /* Check and install the TSC clocksource */ |
236 | dmi_check_system(bad_tsc_dmi_table); |
237 | diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c |
238 | index 7d2edf1..25d2161 100644 |
239 | --- a/drivers/acpi/dock.c |
240 | +++ b/drivers/acpi/dock.c |
241 | @@ -604,14 +604,17 @@ static int handle_eject_request(struct dock_station *ds, u32 event) |
242 | static void dock_notify(acpi_handle handle, u32 event, void *data) |
243 | { |
244 | struct dock_station *ds = data; |
245 | + struct acpi_device *tmp; |
246 | |
247 | switch (event) { |
248 | case ACPI_NOTIFY_BUS_CHECK: |
249 | - if (!dock_in_progress(ds) && dock_present(ds)) { |
250 | + if (!dock_in_progress(ds) && acpi_bus_get_device(ds->handle, |
251 | + &tmp)) { |
252 | begin_dock(ds); |
253 | dock(ds); |
254 | if (!dock_present(ds)) { |
255 | printk(KERN_ERR PREFIX "Unable to dock!\n"); |
256 | + complete_dock(ds); |
257 | break; |
258 | } |
259 | atomic_notifier_call_chain(&dock_notifier_list, |
260 | diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c |
261 | index c1db2f2..2c4ccec 100644 |
262 | --- a/drivers/ata/libata-eh.c |
263 | +++ b/drivers/ata/libata-eh.c |
264 | @@ -604,9 +604,6 @@ void ata_scsi_error(struct Scsi_Host *host) |
265 | if (ata_ncq_enabled(dev)) |
266 | ehc->saved_ncq_enabled |= 1 << devno; |
267 | } |
268 | - |
269 | - /* set last reset timestamp to some time in the past */ |
270 | - ehc->last_reset = jiffies - 60 * HZ; |
271 | } |
272 | |
273 | ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; |
274 | @@ -2209,17 +2206,21 @@ int ata_eh_reset(struct ata_link *link, int classify, |
275 | if (link->flags & ATA_LFLAG_NO_SRST) |
276 | softreset = NULL; |
277 | |
278 | - now = jiffies; |
279 | - deadline = ata_deadline(ehc->last_reset, ATA_EH_RESET_COOL_DOWN); |
280 | - if (time_before(now, deadline)) |
281 | - schedule_timeout_uninterruptible(deadline - now); |
282 | + /* make sure each reset attemp is at least COOL_DOWN apart */ |
283 | + if (ehc->i.flags & ATA_EHI_DID_RESET) { |
284 | + now = jiffies; |
285 | + WARN_ON(time_after(ehc->last_reset, now)); |
286 | + deadline = ata_deadline(ehc->last_reset, |
287 | + ATA_EH_RESET_COOL_DOWN); |
288 | + if (time_before(now, deadline)) |
289 | + schedule_timeout_uninterruptible(deadline - now); |
290 | + } |
291 | |
292 | spin_lock_irqsave(ap->lock, flags); |
293 | ap->pflags |= ATA_PFLAG_RESETTING; |
294 | spin_unlock_irqrestore(ap->lock, flags); |
295 | |
296 | ata_eh_about_to_do(link, NULL, ATA_EH_RESET); |
297 | - ehc->last_reset = jiffies; |
298 | |
299 | ata_link_for_each_dev(dev, link) { |
300 | /* If we issue an SRST then an ATA drive (not ATAPI) |
301 | @@ -2285,7 +2286,6 @@ int ata_eh_reset(struct ata_link *link, int classify, |
302 | /* |
303 | * Perform reset |
304 | */ |
305 | - ehc->last_reset = jiffies; |
306 | if (ata_is_host_link(link)) |
307 | ata_eh_freeze_port(ap); |
308 | |
309 | @@ -2297,6 +2297,7 @@ int ata_eh_reset(struct ata_link *link, int classify, |
310 | reset == softreset ? "soft" : "hard"); |
311 | |
312 | /* mark that this EH session started with reset */ |
313 | + ehc->last_reset = jiffies; |
314 | if (reset == hardreset) |
315 | ehc->i.flags |= ATA_EHI_DID_HARDRESET; |
316 | else |
317 | @@ -2404,7 +2405,7 @@ int ata_eh_reset(struct ata_link *link, int classify, |
318 | |
319 | /* reset successful, schedule revalidation */ |
320 | ata_eh_done(link, NULL, ATA_EH_RESET); |
321 | - ehc->last_reset = jiffies; |
322 | + ehc->last_reset = jiffies; /* update to completion time */ |
323 | ehc->i.action |= ATA_EH_REVALIDATE; |
324 | |
325 | rc = 0; |
326 | diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c |
327 | index b73116e..2ac91b8 100644 |
328 | --- a/drivers/block/cciss.c |
329 | +++ b/drivers/block/cciss.c |
330 | @@ -96,6 +96,8 @@ static const struct pci_device_id cciss_pci_device_id[] = { |
331 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, |
332 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, |
333 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, |
334 | + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, |
335 | + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, |
336 | {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
337 | PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, |
338 | {0,} |
339 | @@ -133,6 +135,8 @@ static struct board_type products[] = { |
340 | {0x3245103C, "Smart Array P410i", &SA5_access}, |
341 | {0x3247103C, "Smart Array P411", &SA5_access}, |
342 | {0x3249103C, "Smart Array P812", &SA5_access}, |
343 | + {0x324A103C, "Smart Array P712m", &SA5_access}, |
344 | + {0x324B103C, "Smart Array P711m", &SA5_access}, |
345 | {0xFFFF103C, "Unknown Smart Array", &SA5_access}, |
346 | }; |
347 | |
348 | @@ -1365,6 +1369,7 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, |
349 | disk->first_minor = drv_index << NWD_SHIFT; |
350 | disk->fops = &cciss_fops; |
351 | disk->private_data = &h->drv[drv_index]; |
352 | + disk->driverfs_dev = &h->pdev->dev; |
353 | |
354 | /* Set up queue information */ |
355 | blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); |
356 | @@ -3403,7 +3408,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, |
357 | int i; |
358 | int j = 0; |
359 | int rc; |
360 | - int dac; |
361 | + int dac, return_code; |
362 | + InquiryData_struct *inq_buff = NULL; |
363 | |
364 | i = alloc_cciss_hba(); |
365 | if (i < 0) |
366 | @@ -3509,6 +3515,25 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, |
367 | /* Turn the interrupts on so we can service requests */ |
368 | hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON); |
369 | |
370 | + /* Get the firmware version */ |
371 | + inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); |
372 | + if (inq_buff == NULL) { |
373 | + printk(KERN_ERR "cciss: out of memory\n"); |
374 | + goto clean4; |
375 | + } |
376 | + |
377 | + return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff, |
378 | + sizeof(InquiryData_struct), 0, 0 , 0, TYPE_CMD); |
379 | + if (return_code == IO_OK) { |
380 | + hba[i]->firm_ver[0] = inq_buff->data_byte[32]; |
381 | + hba[i]->firm_ver[1] = inq_buff->data_byte[33]; |
382 | + hba[i]->firm_ver[2] = inq_buff->data_byte[34]; |
383 | + hba[i]->firm_ver[3] = inq_buff->data_byte[35]; |
384 | + } else { /* send command failed */ |
385 | + printk(KERN_WARNING "cciss: unable to determine firmware" |
386 | + " version of controller\n"); |
387 | + } |
388 | + |
389 | cciss_procinit(i); |
390 | |
391 | hba[i]->cciss_max_sectors = 2048; |
392 | @@ -3519,6 +3544,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, |
393 | return 1; |
394 | |
395 | clean4: |
396 | + kfree(inq_buff); |
397 | #ifdef CONFIG_CISS_SCSI_TAPE |
398 | kfree(hba[i]->scsi_rejects.complete); |
399 | #endif |
400 | diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c |
401 | index 09c1434..f5d2e54 100644 |
402 | --- a/drivers/block/cpqarray.c |
403 | +++ b/drivers/block/cpqarray.c |
404 | @@ -567,7 +567,12 @@ static int __init cpqarray_init(void) |
405 | num_cntlrs_reg++; |
406 | } |
407 | |
408 | - return(num_cntlrs_reg); |
409 | + if (num_cntlrs_reg) |
410 | + return 0; |
411 | + else { |
412 | + pci_unregister_driver(&cpqarray_pci_driver); |
413 | + return -ENODEV; |
414 | + } |
415 | } |
416 | |
417 | /* Function to find the first free pointer into our hba[] array */ |
418 | diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c |
419 | index ec249d2..d883e1b 100644 |
420 | --- a/drivers/dca/dca-core.c |
421 | +++ b/drivers/dca/dca-core.c |
422 | @@ -270,6 +270,6 @@ static void __exit dca_exit(void) |
423 | dca_sysfs_exit(); |
424 | } |
425 | |
426 | -module_init(dca_init); |
427 | +subsys_initcall(dca_init); |
428 | module_exit(dca_exit); |
429 | |
430 | diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c |
431 | index bc8c6e3..3f4db54 100644 |
432 | --- a/drivers/dma/ioat_dma.c |
433 | +++ b/drivers/dma/ioat_dma.c |
434 | @@ -519,7 +519,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) |
435 | } |
436 | |
437 | hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; |
438 | - if (new->async_tx.callback) { |
439 | + if (first->async_tx.callback) { |
440 | hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; |
441 | if (first != new) { |
442 | /* move callback into to last desc */ |
443 | @@ -611,7 +611,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) |
444 | } |
445 | |
446 | hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS; |
447 | - if (new->async_tx.callback) { |
448 | + if (first->async_tx.callback) { |
449 | hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; |
450 | if (first != new) { |
451 | /* move callback into to last desc */ |
452 | @@ -801,6 +801,12 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan) |
453 | struct ioat_desc_sw *desc, *_desc; |
454 | int in_use_descs = 0; |
455 | |
456 | + /* Before freeing channel resources first check |
457 | + * if they have been previously allocated for this channel. |
458 | + */ |
459 | + if (ioat_chan->desccount == 0) |
460 | + return; |
461 | + |
462 | tasklet_disable(&ioat_chan->cleanup_task); |
463 | ioat_dma_memcpy_cleanup(ioat_chan); |
464 | |
465 | @@ -863,6 +869,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan) |
466 | ioat_chan->last_completion = ioat_chan->completion_addr = 0; |
467 | ioat_chan->pending = 0; |
468 | ioat_chan->dmacount = 0; |
469 | + ioat_chan->desccount = 0; |
470 | ioat_chan->watchdog_completion = 0; |
471 | ioat_chan->last_compl_desc_addr_hw = 0; |
472 | ioat_chan->watchdog_tcp_cookie = |
473 | diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c |
474 | index e763d72..9f6fe46 100644 |
475 | --- a/drivers/dma/iovlock.c |
476 | +++ b/drivers/dma/iovlock.c |
477 | @@ -55,7 +55,6 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len) |
478 | int nr_iovecs = 0; |
479 | int iovec_len_used = 0; |
480 | int iovec_pages_used = 0; |
481 | - long err; |
482 | |
483 | /* don't pin down non-user-based iovecs */ |
484 | if (segment_eq(get_fs(), KERNEL_DS)) |
485 | @@ -72,23 +71,21 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len) |
486 | local_list = kmalloc(sizeof(*local_list) |
487 | + (nr_iovecs * sizeof (struct dma_page_list)) |
488 | + (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL); |
489 | - if (!local_list) { |
490 | - err = -ENOMEM; |
491 | + if (!local_list) |
492 | goto out; |
493 | - } |
494 | |
495 | /* list of pages starts right after the page list array */ |
496 | pages = (struct page **) &local_list->page_list[nr_iovecs]; |
497 | |
498 | + local_list->nr_iovecs = 0; |
499 | + |
500 | for (i = 0; i < nr_iovecs; i++) { |
501 | struct dma_page_list *page_list = &local_list->page_list[i]; |
502 | |
503 | len -= iov[i].iov_len; |
504 | |
505 | - if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len)) { |
506 | - err = -EFAULT; |
507 | + if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len)) |
508 | goto unpin; |
509 | - } |
510 | |
511 | page_list->nr_pages = num_pages_spanned(&iov[i]); |
512 | page_list->base_address = iov[i].iov_base; |
513 | @@ -109,10 +106,8 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len) |
514 | NULL); |
515 | up_read(¤t->mm->mmap_sem); |
516 | |
517 | - if (ret != page_list->nr_pages) { |
518 | - err = -ENOMEM; |
519 | + if (ret != page_list->nr_pages) |
520 | goto unpin; |
521 | - } |
522 | |
523 | local_list->nr_iovecs = i + 1; |
524 | } |
525 | @@ -122,7 +117,7 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len) |
526 | unpin: |
527 | dma_unpin_iovec_pages(local_list); |
528 | out: |
529 | - return ERR_PTR(err); |
530 | + return NULL; |
531 | } |
532 | |
533 | void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list) |
534 | diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c |
535 | index c40f040..8c030d9 100644 |
536 | --- a/drivers/hid/hidraw.c |
537 | +++ b/drivers/hid/hidraw.c |
538 | @@ -113,7 +113,7 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t |
539 | if (!dev->hid_output_raw_report) |
540 | return -ENODEV; |
541 | |
542 | - if (count > HID_MIN_BUFFER_SIZE) { |
543 | + if (count > HID_MAX_BUFFER_SIZE) { |
544 | printk(KERN_WARNING "hidraw: pid %d passed too large report\n", |
545 | task_pid_nr(current)); |
546 | return -EINVAL; |
547 | diff --git a/drivers/md/linear.c b/drivers/md/linear.c |
548 | index b1eebf8..a58a19e 100644 |
549 | --- a/drivers/md/linear.c |
550 | +++ b/drivers/md/linear.c |
551 | @@ -157,6 +157,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) |
552 | |
553 | min_spacing = conf->array_sectors / 2; |
554 | sector_div(min_spacing, PAGE_SIZE/sizeof(struct dev_info *)); |
555 | + if (min_spacing == 0) |
556 | + min_spacing = 1; |
557 | |
558 | /* min_spacing is the minimum spacing that will fit the hash |
559 | * table in one PAGE. This may be much smaller than needed. |
560 | diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c |
561 | index e34cd0e..941576d 100644 |
562 | --- a/drivers/md/raid10.c |
563 | +++ b/drivers/md/raid10.c |
564 | @@ -1132,7 +1132,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) |
565 | if (!enough(conf)) |
566 | return -EINVAL; |
567 | |
568 | - if (rdev->raid_disk) |
569 | + if (rdev->raid_disk >= 0) |
570 | first = last = rdev->raid_disk; |
571 | |
572 | if (rdev->saved_raid_disk >= 0 && |
573 | diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c |
574 | index 044d84e..f7284b9 100644 |
575 | --- a/drivers/mmc/core/core.c |
576 | +++ b/drivers/mmc/core/core.c |
577 | @@ -280,7 +280,11 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) |
578 | (card->host->ios.clock / 1000); |
579 | |
580 | if (data->flags & MMC_DATA_WRITE) |
581 | - limit_us = 250000; |
582 | + /* |
583 | + * The limit is really 250 ms, but that is |
584 | + * insufficient for some crappy cards. |
585 | + */ |
586 | + limit_us = 300000; |
587 | else |
588 | limit_us = 100000; |
589 | |
590 | diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c |
591 | index a972cc6..9e7a236 100644 |
592 | --- a/drivers/mtd/chips/cfi_cmdset_0002.c |
593 | +++ b/drivers/mtd/chips/cfi_cmdset_0002.c |
594 | @@ -362,19 +362,6 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) |
595 | /* Set the default CFI lock/unlock addresses */ |
596 | cfi->addr_unlock1 = 0x555; |
597 | cfi->addr_unlock2 = 0x2aa; |
598 | - /* Modify the unlock address if we are in compatibility mode */ |
599 | - if ( /* x16 in x8 mode */ |
600 | - ((cfi->device_type == CFI_DEVICETYPE_X8) && |
601 | - (cfi->cfiq->InterfaceDesc == |
602 | - CFI_INTERFACE_X8_BY_X16_ASYNC)) || |
603 | - /* x32 in x16 mode */ |
604 | - ((cfi->device_type == CFI_DEVICETYPE_X16) && |
605 | - (cfi->cfiq->InterfaceDesc == |
606 | - CFI_INTERFACE_X16_BY_X32_ASYNC))) |
607 | - { |
608 | - cfi->addr_unlock1 = 0xaaa; |
609 | - cfi->addr_unlock2 = 0x555; |
610 | - } |
611 | |
612 | } /* CFI mode */ |
613 | else if (cfi->cfi_mode == CFI_MODE_JEDEC) { |
614 | diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c |
615 | index f84ab61..2f3f2f7 100644 |
616 | --- a/drivers/mtd/chips/jedec_probe.c |
617 | +++ b/drivers/mtd/chips/jedec_probe.c |
618 | @@ -1808,9 +1808,7 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base, |
619 | * several first banks can contain 0x7f instead of actual ID |
620 | */ |
621 | do { |
622 | - uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), |
623 | - cfi_interleave(cfi), |
624 | - cfi->device_type); |
625 | + uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi); |
626 | mask = (1 << (cfi->device_type * 8)) - 1; |
627 | result = map_read(map, base + ofs); |
628 | bank++; |
629 | @@ -1824,7 +1822,7 @@ static inline u32 jedec_read_id(struct map_info *map, uint32_t base, |
630 | { |
631 | map_word result; |
632 | unsigned long mask; |
633 | - u32 ofs = cfi_build_cmd_addr(1, cfi_interleave(cfi), cfi->device_type); |
634 | + u32 ofs = cfi_build_cmd_addr(1, map, cfi); |
635 | mask = (1 << (cfi->device_type * 8)) -1; |
636 | result = map_read(map, base + ofs); |
637 | return result.x[0] & mask; |
638 | @@ -2067,8 +2065,8 @@ static int jedec_probe_chip(struct map_info *map, __u32 base, |
639 | |
640 | } |
641 | /* Ensure the unlock addresses we try stay inside the map */ |
642 | - probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, cfi_interleave(cfi), cfi->device_type); |
643 | - probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, cfi_interleave(cfi), cfi->device_type); |
644 | + probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, map, cfi); |
645 | + probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, map, cfi); |
646 | if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) || |
647 | ((base + probe_offset2 + map_bankwidth(map)) >= map->size)) |
648 | goto retry; |
649 | diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c |
650 | index 0f6f974..39c17bb 100644 |
651 | --- a/drivers/net/r8169.c |
652 | +++ b/drivers/net/r8169.c |
653 | @@ -370,8 +370,9 @@ struct ring_info { |
654 | }; |
655 | |
656 | enum features { |
657 | - RTL_FEATURE_WOL = (1 << 0), |
658 | - RTL_FEATURE_MSI = (1 << 1), |
659 | + RTL_FEATURE_WOL = (1 << 0), |
660 | + RTL_FEATURE_MSI = (1 << 1), |
661 | + RTL_FEATURE_GMII = (1 << 2), |
662 | }; |
663 | |
664 | struct rtl8169_private { |
665 | @@ -406,13 +407,15 @@ struct rtl8169_private { |
666 | struct vlan_group *vlgrp; |
667 | #endif |
668 | int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex); |
669 | - void (*get_settings)(struct net_device *, struct ethtool_cmd *); |
670 | + int (*get_settings)(struct net_device *, struct ethtool_cmd *); |
671 | void (*phy_reset_enable)(void __iomem *); |
672 | void (*hw_start)(struct net_device *); |
673 | unsigned int (*phy_reset_pending)(void __iomem *); |
674 | unsigned int (*link_ok)(void __iomem *); |
675 | struct delayed_work task; |
676 | unsigned features; |
677 | + |
678 | + struct mii_if_info mii; |
679 | }; |
680 | |
681 | MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); |
682 | @@ -482,6 +485,23 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr) |
683 | return value; |
684 | } |
685 | |
686 | +static void rtl_mdio_write(struct net_device *dev, int phy_id, int location, |
687 | + int val) |
688 | +{ |
689 | + struct rtl8169_private *tp = netdev_priv(dev); |
690 | + void __iomem *ioaddr = tp->mmio_addr; |
691 | + |
692 | + mdio_write(ioaddr, location, val); |
693 | +} |
694 | + |
695 | +static int rtl_mdio_read(struct net_device *dev, int phy_id, int location) |
696 | +{ |
697 | + struct rtl8169_private *tp = netdev_priv(dev); |
698 | + void __iomem *ioaddr = tp->mmio_addr; |
699 | + |
700 | + return mdio_read(ioaddr, location); |
701 | +} |
702 | + |
703 | static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr) |
704 | { |
705 | RTL_W16(IntrMask, 0x0000); |
706 | @@ -720,9 +740,13 @@ static int rtl8169_set_speed_xmii(struct net_device *dev, |
707 | |
708 | auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; |
709 | |
710 | - if ((tp->mac_version == RTL_GIGA_MAC_VER_12) || |
711 | - (tp->mac_version == RTL_GIGA_MAC_VER_17)) { |
712 | - /* Vendor specific (0x1f) and reserved (0x0e) MII registers. */ |
713 | + if ((tp->mac_version == RTL_GIGA_MAC_VER_11) || |
714 | + (tp->mac_version == RTL_GIGA_MAC_VER_12) || |
715 | + (tp->mac_version >= RTL_GIGA_MAC_VER_17)) { |
716 | + /* |
717 | + * Wake up the PHY. |
718 | + * Vendor specific (0x1f) and reserved (0x0e) MII registers. |
719 | + */ |
720 | mdio_write(ioaddr, 0x1f, 0x0000); |
721 | mdio_write(ioaddr, 0x0e, 0x0000); |
722 | } |
723 | @@ -850,7 +874,7 @@ static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc, |
724 | |
725 | #endif |
726 | |
727 | -static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) |
728 | +static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) |
729 | { |
730 | struct rtl8169_private *tp = netdev_priv(dev); |
731 | void __iomem *ioaddr = tp->mmio_addr; |
732 | @@ -867,65 +891,29 @@ static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) |
733 | |
734 | cmd->speed = SPEED_1000; |
735 | cmd->duplex = DUPLEX_FULL; /* Always set */ |
736 | + |
737 | + return 0; |
738 | } |
739 | |
740 | -static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd) |
741 | +static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd) |
742 | { |
743 | struct rtl8169_private *tp = netdev_priv(dev); |
744 | - void __iomem *ioaddr = tp->mmio_addr; |
745 | - u8 status; |
746 | - |
747 | - cmd->supported = SUPPORTED_10baseT_Half | |
748 | - SUPPORTED_10baseT_Full | |
749 | - SUPPORTED_100baseT_Half | |
750 | - SUPPORTED_100baseT_Full | |
751 | - SUPPORTED_1000baseT_Full | |
752 | - SUPPORTED_Autoneg | |
753 | - SUPPORTED_TP; |
754 | - |
755 | - cmd->autoneg = 1; |
756 | - cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg; |
757 | - |
758 | - if (tp->phy_auto_nego_reg & ADVERTISE_10HALF) |
759 | - cmd->advertising |= ADVERTISED_10baseT_Half; |
760 | - if (tp->phy_auto_nego_reg & ADVERTISE_10FULL) |
761 | - cmd->advertising |= ADVERTISED_10baseT_Full; |
762 | - if (tp->phy_auto_nego_reg & ADVERTISE_100HALF) |
763 | - cmd->advertising |= ADVERTISED_100baseT_Half; |
764 | - if (tp->phy_auto_nego_reg & ADVERTISE_100FULL) |
765 | - cmd->advertising |= ADVERTISED_100baseT_Full; |
766 | - if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL) |
767 | - cmd->advertising |= ADVERTISED_1000baseT_Full; |
768 | - |
769 | - status = RTL_R8(PHYstatus); |
770 | - |
771 | - if (status & _1000bpsF) |
772 | - cmd->speed = SPEED_1000; |
773 | - else if (status & _100bps) |
774 | - cmd->speed = SPEED_100; |
775 | - else if (status & _10bps) |
776 | - cmd->speed = SPEED_10; |
777 | - |
778 | - if (status & TxFlowCtrl) |
779 | - cmd->advertising |= ADVERTISED_Asym_Pause; |
780 | - if (status & RxFlowCtrl) |
781 | - cmd->advertising |= ADVERTISED_Pause; |
782 | - |
783 | - cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ? |
784 | - DUPLEX_FULL : DUPLEX_HALF; |
785 | + |
786 | + return mii_ethtool_gset(&tp->mii, cmd); |
787 | } |
788 | |
789 | static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
790 | { |
791 | struct rtl8169_private *tp = netdev_priv(dev); |
792 | unsigned long flags; |
793 | + int rc; |
794 | |
795 | spin_lock_irqsave(&tp->lock, flags); |
796 | |
797 | - tp->get_settings(dev, cmd); |
798 | + rc = tp->get_settings(dev, cmd); |
799 | |
800 | spin_unlock_irqrestore(&tp->lock, flags); |
801 | - return 0; |
802 | + return rc; |
803 | } |
804 | |
805 | static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, |
806 | @@ -1513,7 +1501,7 @@ static const struct rtl_cfg_info { |
807 | unsigned int align; |
808 | u16 intr_event; |
809 | u16 napi_event; |
810 | - unsigned msi; |
811 | + unsigned features; |
812 | } rtl_cfg_infos [] = { |
813 | [RTL_CFG_0] = { |
814 | .hw_start = rtl_hw_start_8169, |
815 | @@ -1522,7 +1510,7 @@ static const struct rtl_cfg_info { |
816 | .intr_event = SYSErr | LinkChg | RxOverflow | |
817 | RxFIFOOver | TxErr | TxOK | RxOK | RxErr, |
818 | .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow, |
819 | - .msi = 0 |
820 | + .features = RTL_FEATURE_GMII |
821 | }, |
822 | [RTL_CFG_1] = { |
823 | .hw_start = rtl_hw_start_8168, |
824 | @@ -1531,7 +1519,7 @@ static const struct rtl_cfg_info { |
825 | .intr_event = SYSErr | LinkChg | RxOverflow | |
826 | TxErr | TxOK | RxOK | RxErr, |
827 | .napi_event = TxErr | TxOK | RxOK | RxOverflow, |
828 | - .msi = RTL_FEATURE_MSI |
829 | + .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI |
830 | }, |
831 | [RTL_CFG_2] = { |
832 | .hw_start = rtl_hw_start_8101, |
833 | @@ -1540,7 +1528,7 @@ static const struct rtl_cfg_info { |
834 | .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout | |
835 | RxFIFOOver | TxErr | TxOK | RxOK | RxErr, |
836 | .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow, |
837 | - .msi = RTL_FEATURE_MSI |
838 | + .features = RTL_FEATURE_MSI |
839 | } |
840 | }; |
841 | |
842 | @@ -1552,7 +1540,7 @@ static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr, |
843 | u8 cfg2; |
844 | |
845 | cfg2 = RTL_R8(Config2) & ~MSIEnable; |
846 | - if (cfg->msi) { |
847 | + if (cfg->features & RTL_FEATURE_MSI) { |
848 | if (pci_enable_msi(pdev)) { |
849 | dev_info(&pdev->dev, "no MSI. Back to INTx.\n"); |
850 | } else { |
851 | @@ -1578,6 +1566,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
852 | const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; |
853 | const unsigned int region = cfg->region; |
854 | struct rtl8169_private *tp; |
855 | + struct mii_if_info *mii; |
856 | struct net_device *dev; |
857 | void __iomem *ioaddr; |
858 | unsigned int i; |
859 | @@ -1602,6 +1591,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
860 | tp->pci_dev = pdev; |
861 | tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); |
862 | |
863 | + mii = &tp->mii; |
864 | + mii->dev = dev; |
865 | + mii->mdio_read = rtl_mdio_read; |
866 | + mii->mdio_write = rtl_mdio_write; |
867 | + mii->phy_id_mask = 0x1f; |
868 | + mii->reg_num_mask = 0x1f; |
869 | + mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); |
870 | + |
871 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ |
872 | rc = pci_enable_device(pdev); |
873 | if (rc < 0) { |
874 | @@ -2099,8 +2096,6 @@ static void rtl_hw_start_8168(struct net_device *dev) |
875 | |
876 | RTL_R8(IntrMask); |
877 | |
878 | - RTL_W32(RxMissed, 0); |
879 | - |
880 | rtl_set_rx_mode(dev); |
881 | |
882 | RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); |
883 | @@ -2143,8 +2138,6 @@ static void rtl_hw_start_8101(struct net_device *dev) |
884 | |
885 | RTL_R8(IntrMask); |
886 | |
887 | - RTL_W32(RxMissed, 0); |
888 | - |
889 | rtl_set_rx_mode(dev); |
890 | |
891 | RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); |
892 | @@ -2922,6 +2915,17 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) |
893 | return work_done; |
894 | } |
895 | |
896 | +static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr) |
897 | +{ |
898 | + struct rtl8169_private *tp = netdev_priv(dev); |
899 | + |
900 | + if (tp->mac_version > RTL_GIGA_MAC_VER_06) |
901 | + return; |
902 | + |
903 | + dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff); |
904 | + RTL_W32(RxMissed, 0); |
905 | +} |
906 | + |
907 | static void rtl8169_down(struct net_device *dev) |
908 | { |
909 | struct rtl8169_private *tp = netdev_priv(dev); |
910 | @@ -2939,9 +2943,7 @@ core_down: |
911 | |
912 | rtl8169_asic_down(ioaddr); |
913 | |
914 | - /* Update the error counts. */ |
915 | - dev->stats.rx_missed_errors += RTL_R32(RxMissed); |
916 | - RTL_W32(RxMissed, 0); |
917 | + rtl8169_rx_missed(dev, ioaddr); |
918 | |
919 | spin_unlock_irq(&tp->lock); |
920 | |
921 | @@ -3063,8 +3065,7 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev) |
922 | |
923 | if (netif_running(dev)) { |
924 | spin_lock_irqsave(&tp->lock, flags); |
925 | - dev->stats.rx_missed_errors += RTL_R32(RxMissed); |
926 | - RTL_W32(RxMissed, 0); |
927 | + rtl8169_rx_missed(dev, ioaddr); |
928 | spin_unlock_irqrestore(&tp->lock, flags); |
929 | } |
930 | |
931 | @@ -3089,8 +3090,7 @@ static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state) |
932 | |
933 | rtl8169_asic_down(ioaddr); |
934 | |
935 | - dev->stats.rx_missed_errors += RTL_R32(RxMissed); |
936 | - RTL_W32(RxMissed, 0); |
937 | + rtl8169_rx_missed(dev, ioaddr); |
938 | |
939 | spin_unlock_irq(&tp->lock); |
940 | |
941 | diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h |
942 | index 17d4f31..c479ee2 100644 |
943 | --- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h |
944 | +++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h |
945 | @@ -129,6 +129,13 @@ struct iwl5000_shared { |
946 | __le32 padding2; |
947 | } __attribute__ ((packed)); |
948 | |
949 | +/* calibrations defined for 5000 */ |
950 | +/* defines the order in which results should be sent to the runtime uCode */ |
951 | +enum iwl5000_calib { |
952 | + IWL5000_CALIB_LO, |
953 | + IWL5000_CALIB_TX_IQ, |
954 | + IWL5000_CALIB_TX_IQ_PERD, |
955 | +}; |
956 | |
957 | #endif /* __iwl_5000_hw_h__ */ |
958 | |
959 | diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c |
960 | index b08036a..79ff288 100644 |
961 | --- a/drivers/net/wireless/iwlwifi/iwl-5000.c |
962 | +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c |
963 | @@ -445,48 +445,6 @@ static int iwl5000_send_Xtal_calib(struct iwl_priv *priv) |
964 | sizeof(cal_cmd), &cal_cmd); |
965 | } |
966 | |
967 | -static int iwl5000_send_calib_results(struct iwl_priv *priv) |
968 | -{ |
969 | - int ret = 0; |
970 | - |
971 | - struct iwl_host_cmd hcmd = { |
972 | - .id = REPLY_PHY_CALIBRATION_CMD, |
973 | - .meta.flags = CMD_SIZE_HUGE, |
974 | - }; |
975 | - |
976 | - if (priv->calib_results.lo_res) { |
977 | - hcmd.len = priv->calib_results.lo_res_len; |
978 | - hcmd.data = priv->calib_results.lo_res; |
979 | - ret = iwl_send_cmd_sync(priv, &hcmd); |
980 | - |
981 | - if (ret) |
982 | - goto err; |
983 | - } |
984 | - |
985 | - if (priv->calib_results.tx_iq_res) { |
986 | - hcmd.len = priv->calib_results.tx_iq_res_len; |
987 | - hcmd.data = priv->calib_results.tx_iq_res; |
988 | - ret = iwl_send_cmd_sync(priv, &hcmd); |
989 | - |
990 | - if (ret) |
991 | - goto err; |
992 | - } |
993 | - |
994 | - if (priv->calib_results.tx_iq_perd_res) { |
995 | - hcmd.len = priv->calib_results.tx_iq_perd_res_len; |
996 | - hcmd.data = priv->calib_results.tx_iq_perd_res; |
997 | - ret = iwl_send_cmd_sync(priv, &hcmd); |
998 | - |
999 | - if (ret) |
1000 | - goto err; |
1001 | - } |
1002 | - |
1003 | - return 0; |
1004 | -err: |
1005 | - IWL_ERROR("Error %d\n", ret); |
1006 | - return ret; |
1007 | -} |
1008 | - |
1009 | static int iwl5000_send_calib_cfg(struct iwl_priv *priv) |
1010 | { |
1011 | struct iwl5000_calib_cfg_cmd calib_cfg_cmd; |
1012 | @@ -511,33 +469,30 @@ static void iwl5000_rx_calib_result(struct iwl_priv *priv, |
1013 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; |
1014 | struct iwl5000_calib_hdr *hdr = (struct iwl5000_calib_hdr *)pkt->u.raw; |
1015 | int len = le32_to_cpu(pkt->len) & FH_RSCSR_FRAME_SIZE_MSK; |
1016 | - |
1017 | - iwl_free_calib_results(priv); |
1018 | + int index; |
1019 | |
1020 | /* reduce the size of the length field itself */ |
1021 | len -= 4; |
1022 | |
1023 | + /* Define the order in which the results will be sent to the runtime |
1024 | + * uCode. iwl_send_calib_results sends them in a row according to their |
1025 | + * index. We sort them here */ |
1026 | switch (hdr->op_code) { |
1027 | case IWL5000_PHY_CALIBRATE_LO_CMD: |
1028 | - priv->calib_results.lo_res = kzalloc(len, GFP_ATOMIC); |
1029 | - priv->calib_results.lo_res_len = len; |
1030 | - memcpy(priv->calib_results.lo_res, pkt->u.raw, len); |
1031 | + index = IWL5000_CALIB_LO; |
1032 | break; |
1033 | case IWL5000_PHY_CALIBRATE_TX_IQ_CMD: |
1034 | - priv->calib_results.tx_iq_res = kzalloc(len, GFP_ATOMIC); |
1035 | - priv->calib_results.tx_iq_res_len = len; |
1036 | - memcpy(priv->calib_results.tx_iq_res, pkt->u.raw, len); |
1037 | + index = IWL5000_CALIB_TX_IQ; |
1038 | break; |
1039 | case IWL5000_PHY_CALIBRATE_TX_IQ_PERD_CMD: |
1040 | - priv->calib_results.tx_iq_perd_res = kzalloc(len, GFP_ATOMIC); |
1041 | - priv->calib_results.tx_iq_perd_res_len = len; |
1042 | - memcpy(priv->calib_results.tx_iq_perd_res, pkt->u.raw, len); |
1043 | + index = IWL5000_CALIB_TX_IQ_PERD; |
1044 | break; |
1045 | default: |
1046 | IWL_ERROR("Unknown calibration notification %d\n", |
1047 | hdr->op_code); |
1048 | return; |
1049 | } |
1050 | + iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len); |
1051 | } |
1052 | |
1053 | static void iwl5000_rx_calib_complete(struct iwl_priv *priv, |
1054 | @@ -832,7 +787,7 @@ static int iwl5000_alive_notify(struct iwl_priv *priv) |
1055 | iwl5000_send_Xtal_calib(priv); |
1056 | |
1057 | if (priv->ucode_type == UCODE_RT) |
1058 | - iwl5000_send_calib_results(priv); |
1059 | + iwl_send_calib_results(priv); |
1060 | |
1061 | return 0; |
1062 | } |
1063 | diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c |
1064 | index e01f048..72a6743 100644 |
1065 | --- a/drivers/net/wireless/iwlwifi/iwl-agn.c |
1066 | +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c |
1067 | @@ -2090,7 +2090,6 @@ static void iwl_alive_start(struct iwl_priv *priv) |
1068 | iwl4965_error_recovery(priv); |
1069 | |
1070 | iwl_power_update_mode(priv, 1); |
1071 | - ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC); |
1072 | |
1073 | if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status)) |
1074 | iwl4965_set_mode(priv, priv->iw_mode); |
1075 | @@ -2342,6 +2341,7 @@ static void iwl_bg_alive_start(struct work_struct *data) |
1076 | mutex_lock(&priv->mutex); |
1077 | iwl_alive_start(priv); |
1078 | mutex_unlock(&priv->mutex); |
1079 | + ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC); |
1080 | } |
1081 | |
1082 | static void iwl4965_bg_rf_kill(struct work_struct *work) |
1083 | @@ -2486,6 +2486,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv) |
1084 | if (!priv->vif || !priv->is_open) |
1085 | return; |
1086 | |
1087 | + iwl_power_cancel_timeout(priv); |
1088 | iwl_scan_cancel_timeout(priv, 200); |
1089 | |
1090 | conf = ieee80211_get_hw_conf(priv->hw); |
1091 | @@ -2503,8 +2504,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv) |
1092 | |
1093 | priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; |
1094 | |
1095 | - if (priv->current_ht_config.is_ht) |
1096 | - iwl_set_rxon_ht(priv, &priv->current_ht_config); |
1097 | + iwl_set_rxon_ht(priv, &priv->current_ht_config); |
1098 | |
1099 | iwl_set_rxon_chain(priv); |
1100 | priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); |
1101 | @@ -2550,10 +2550,6 @@ static void iwl4965_post_associate(struct iwl_priv *priv) |
1102 | break; |
1103 | } |
1104 | |
1105 | - /* Enable Rx differential gain and sensitivity calibrations */ |
1106 | - iwl_chain_noise_reset(priv); |
1107 | - priv->start_calib = 1; |
1108 | - |
1109 | if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) |
1110 | priv->assoc_station_added = 1; |
1111 | |
1112 | @@ -2561,7 +2557,12 @@ static void iwl4965_post_associate(struct iwl_priv *priv) |
1113 | iwl_activate_qos(priv, 0); |
1114 | spin_unlock_irqrestore(&priv->lock, flags); |
1115 | |
1116 | - iwl_power_update_mode(priv, 0); |
1117 | + iwl_power_enable_management(priv); |
1118 | + |
1119 | + /* Enable Rx differential gain and sensitivity calibrations */ |
1120 | + iwl_chain_noise_reset(priv); |
1121 | + priv->start_calib = 1; |
1122 | + |
1123 | /* we have just associated, don't start scan too early */ |
1124 | priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN; |
1125 | } |
1126 | @@ -3212,18 +3213,26 @@ static int iwl4965_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) |
1127 | goto out_unlock; |
1128 | } |
1129 | |
1130 | - /* we don't schedule scan within next_scan_jiffies period */ |
1131 | + /* We don't schedule scan within next_scan_jiffies period. |
1132 | + * Avoid scanning during possible EAPOL exchange, return |
1133 | + * success immediately. |
1134 | + */ |
1135 | if (priv->next_scan_jiffies && |
1136 | - time_after(priv->next_scan_jiffies, jiffies)) { |
1137 | - rc = -EAGAIN; |
1138 | + time_after(priv->next_scan_jiffies, jiffies)) { |
1139 | + IWL_DEBUG_SCAN("scan rejected: within next scan period\n"); |
1140 | + queue_work(priv->workqueue, &priv->scan_completed); |
1141 | + rc = 0; |
1142 | goto out_unlock; |
1143 | } |
1144 | /* if we just finished scan ask for delay */ |
1145 | - if (priv->last_scan_jiffies && time_after(priv->last_scan_jiffies + |
1146 | - IWL_DELAY_NEXT_SCAN, jiffies)) { |
1147 | - rc = -EAGAIN; |
1148 | + if (iwl_is_associated(priv) && priv->last_scan_jiffies && |
1149 | + time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN, jiffies)) { |
1150 | + IWL_DEBUG_SCAN("scan rejected: within previous scan period\n"); |
1151 | + queue_work(priv->workqueue, &priv->scan_completed); |
1152 | + rc = 0; |
1153 | goto out_unlock; |
1154 | } |
1155 | + |
1156 | if (len) { |
1157 | IWL_DEBUG_SCAN("direct scan for %s [%d]\n ", |
1158 | iwl_escape_essid(ssid, len), (int)len); |
1159 | @@ -3546,6 +3555,16 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw) |
1160 | /* Per mac80211.h: This is only used in IBSS mode... */ |
1161 | if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { |
1162 | |
1163 | + /* Switch to CAM during the association period. |
1164 | + * The ucode will block any association/authentication |
1165 | + * frames during the association period if it cannot hear |
1166 | + * the AP because of PM. The timer re-enables PM if |
1167 | + * association does not complete. |
1168 | + */ |
1169 | + if (priv->hw->conf.channel->flags & (IEEE80211_CHAN_PASSIVE_SCAN | |
1170 | + IEEE80211_CHAN_RADAR)) |
1171 | + iwl_power_disable_management(priv, 3000); |
1172 | + |
1173 | IWL_DEBUG_MAC80211("leave - not in IBSS\n"); |
1174 | mutex_unlock(&priv->mutex); |
1175 | return; |
1176 | @@ -4083,6 +4102,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv) |
1177 | /* FIXME : remove when resolved PENDING */ |
1178 | INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); |
1179 | iwl_setup_scan_deferred_work(priv); |
1180 | + iwl_setup_power_deferred_work(priv); |
1181 | |
1182 | if (priv->cfg->ops->lib->setup_deferred_work) |
1183 | priv->cfg->ops->lib->setup_deferred_work(priv); |
1184 | @@ -4102,6 +4122,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv) |
1185 | |
1186 | cancel_delayed_work_sync(&priv->init_alive_start); |
1187 | cancel_delayed_work(&priv->scan_check); |
1188 | + cancel_delayed_work_sync(&priv->set_power_save); |
1189 | cancel_delayed_work(&priv->alive_start); |
1190 | cancel_work_sync(&priv->beacon_update); |
1191 | del_timer_sync(&priv->statistics_periodic); |
1192 | @@ -4204,13 +4225,13 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e |
1193 | |
1194 | pci_set_master(pdev); |
1195 | |
1196 | - err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); |
1197 | + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); |
1198 | if (!err) |
1199 | - err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); |
1200 | + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); |
1201 | if (err) { |
1202 | - err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); |
1203 | + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
1204 | if (!err) |
1205 | - err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); |
1206 | + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); |
1207 | /* both attempts failed: */ |
1208 | if (err) { |
1209 | printk(KERN_WARNING "%s: No suitable DMA available.\n", |
1210 | diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c |
1211 | index ef49440..35fb4a4 100644 |
1212 | --- a/drivers/net/wireless/iwlwifi/iwl-calib.c |
1213 | +++ b/drivers/net/wireless/iwlwifi/iwl-calib.c |
1214 | @@ -66,6 +66,66 @@ |
1215 | #include "iwl-core.h" |
1216 | #include "iwl-calib.h" |
1217 | |
1218 | +/***************************************************************************** |
1219 | + * INIT calibrations framework |
1220 | + *****************************************************************************/ |
1221 | + |
1222 | + int iwl_send_calib_results(struct iwl_priv *priv) |
1223 | +{ |
1224 | + int ret = 0; |
1225 | + int i = 0; |
1226 | + |
1227 | + struct iwl_host_cmd hcmd = { |
1228 | + .id = REPLY_PHY_CALIBRATION_CMD, |
1229 | + .meta.flags = CMD_SIZE_HUGE, |
1230 | + }; |
1231 | + |
1232 | + for (i = 0; i < IWL_CALIB_MAX; i++) |
1233 | + if (priv->calib_results[i].buf) { |
1234 | + hcmd.len = priv->calib_results[i].buf_len; |
1235 | + hcmd.data = priv->calib_results[i].buf; |
1236 | + ret = iwl_send_cmd_sync(priv, &hcmd); |
1237 | + if (ret) |
1238 | + goto err; |
1239 | + } |
1240 | + |
1241 | + return 0; |
1242 | +err: |
1243 | + IWL_ERROR("Error %d iteration %d\n", ret, i); |
1244 | + return ret; |
1245 | +} |
1246 | +EXPORT_SYMBOL(iwl_send_calib_results); |
1247 | + |
1248 | +int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len) |
1249 | +{ |
1250 | + if (res->buf_len != len) { |
1251 | + kfree(res->buf); |
1252 | + res->buf = kzalloc(len, GFP_ATOMIC); |
1253 | + } |
1254 | + if (unlikely(res->buf == NULL)) |
1255 | + return -ENOMEM; |
1256 | + |
1257 | + res->buf_len = len; |
1258 | + memcpy(res->buf, buf, len); |
1259 | + return 0; |
1260 | +} |
1261 | +EXPORT_SYMBOL(iwl_calib_set); |
1262 | + |
1263 | +void iwl_calib_free_results(struct iwl_priv *priv) |
1264 | +{ |
1265 | + int i; |
1266 | + |
1267 | + for (i = 0; i < IWL_CALIB_MAX; i++) { |
1268 | + kfree(priv->calib_results[i].buf); |
1269 | + priv->calib_results[i].buf = NULL; |
1270 | + priv->calib_results[i].buf_len = 0; |
1271 | + } |
1272 | +} |
1273 | + |
1274 | +/***************************************************************************** |
1275 | + * RUNTIME calibrations framework |
1276 | + *****************************************************************************/ |
1277 | + |
1278 | /* "false alarms" are signals that our DSP tries to lock onto, |
1279 | * but then determines that they are either noise, or transmissions |
1280 | * from a distant wireless network (also "noise", really) that get |
1281 | diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c |
1282 | index 80f2f84..1383fd1 100644 |
1283 | --- a/drivers/net/wireless/iwlwifi/iwl-core.c |
1284 | +++ b/drivers/net/wireless/iwlwifi/iwl-core.c |
1285 | @@ -646,8 +646,14 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info) |
1286 | struct iwl_rxon_cmd *rxon = &priv->staging_rxon; |
1287 | u32 val; |
1288 | |
1289 | - if (!ht_info->is_ht) |
1290 | + if (!ht_info->is_ht) { |
1291 | + rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK | |
1292 | + RXON_FLG_CHANNEL_MODE_PURE_40_MSK | |
1293 | + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | |
1294 | + RXON_FLG_FAT_PROT_MSK | |
1295 | + RXON_FLG_HT_PROT_MSK); |
1296 | return; |
1297 | + } |
1298 | |
1299 | /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */ |
1300 | if (iwl_is_fat_tx_allowed(priv, NULL)) |
1301 | @@ -950,22 +956,6 @@ err: |
1302 | } |
1303 | EXPORT_SYMBOL(iwl_init_drv); |
1304 | |
1305 | -void iwl_free_calib_results(struct iwl_priv *priv) |
1306 | -{ |
1307 | - kfree(priv->calib_results.lo_res); |
1308 | - priv->calib_results.lo_res = NULL; |
1309 | - priv->calib_results.lo_res_len = 0; |
1310 | - |
1311 | - kfree(priv->calib_results.tx_iq_res); |
1312 | - priv->calib_results.tx_iq_res = NULL; |
1313 | - priv->calib_results.tx_iq_res_len = 0; |
1314 | - |
1315 | - kfree(priv->calib_results.tx_iq_perd_res); |
1316 | - priv->calib_results.tx_iq_perd_res = NULL; |
1317 | - priv->calib_results.tx_iq_perd_res_len = 0; |
1318 | -} |
1319 | -EXPORT_SYMBOL(iwl_free_calib_results); |
1320 | - |
1321 | int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) |
1322 | { |
1323 | int ret = 0; |
1324 | @@ -993,10 +983,9 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) |
1325 | } |
1326 | EXPORT_SYMBOL(iwl_set_tx_power); |
1327 | |
1328 | - |
1329 | void iwl_uninit_drv(struct iwl_priv *priv) |
1330 | { |
1331 | - iwl_free_calib_results(priv); |
1332 | + iwl_calib_free_results(priv); |
1333 | iwlcore_free_geos(priv); |
1334 | iwl_free_channel_map(priv); |
1335 | kfree(priv->scan); |
1336 | diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h |
1337 | index 64f139e..51b36b1 100644 |
1338 | --- a/drivers/net/wireless/iwlwifi/iwl-core.h |
1339 | +++ b/drivers/net/wireless/iwlwifi/iwl-core.h |
1340 | @@ -186,7 +186,6 @@ struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, |
1341 | void iwl_hw_detect(struct iwl_priv *priv); |
1342 | |
1343 | void iwl_clear_stations_table(struct iwl_priv *priv); |
1344 | -void iwl_free_calib_results(struct iwl_priv *priv); |
1345 | void iwl_reset_qos(struct iwl_priv *priv); |
1346 | void iwl_set_rxon_chain(struct iwl_priv *priv); |
1347 | int iwl_set_rxon_channel(struct iwl_priv *priv, |
1348 | @@ -291,6 +290,13 @@ int iwl_scan_initiate(struct iwl_priv *priv); |
1349 | void iwl_setup_rx_scan_handlers(struct iwl_priv *priv); |
1350 | void iwl_setup_scan_deferred_work(struct iwl_priv *priv); |
1351 | |
1352 | +/******************************************************************************* |
1353 | + * Calibrations - implemented in iwl-calib.c |
1354 | + ******************************************************************************/ |
1355 | +int iwl_send_calib_results(struct iwl_priv *priv); |
1356 | +int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len); |
1357 | +void iwl_calib_free_results(struct iwl_priv *priv); |
1358 | + |
1359 | /***************************************************** |
1360 | * S e n d i n g H o s t C o m m a n d s * |
1361 | *****************************************************/ |
1362 | diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h |
1363 | index cdfb343..09bdf8e 100644 |
1364 | --- a/drivers/net/wireless/iwlwifi/iwl-dev.h |
1365 | +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h |
1366 | @@ -745,13 +745,10 @@ struct statistics_general_data { |
1367 | u32 beacon_energy_c; |
1368 | }; |
1369 | |
1370 | -struct iwl_calib_results { |
1371 | - void *tx_iq_res; |
1372 | - void *tx_iq_perd_res; |
1373 | - void *lo_res; |
1374 | - u32 tx_iq_res_len; |
1375 | - u32 tx_iq_perd_res_len; |
1376 | - u32 lo_res_len; |
1377 | +/* Opaque calibration results */ |
1378 | +struct iwl_calib_result { |
1379 | + void *buf; |
1380 | + size_t buf_len; |
1381 | }; |
1382 | |
1383 | enum ucode_type { |
1384 | @@ -813,6 +810,7 @@ enum { |
1385 | |
1386 | |
1387 | #define IWL_MAX_NUM_QUEUES 20 /* FIXME: do dynamic allocation */ |
1388 | +#define IWL_CALIB_MAX 3 |
1389 | |
1390 | struct iwl_priv { |
1391 | |
1392 | @@ -857,7 +855,7 @@ struct iwl_priv { |
1393 | s32 last_temperature; |
1394 | |
1395 | /* init calibration results */ |
1396 | - struct iwl_calib_results calib_results; |
1397 | + struct iwl_calib_result calib_results[IWL_CALIB_MAX]; |
1398 | |
1399 | /* Scan related variables */ |
1400 | unsigned long last_scan_jiffies; |
1401 | @@ -1047,6 +1045,7 @@ struct iwl_priv { |
1402 | |
1403 | struct tasklet_struct irq_tasklet; |
1404 | |
1405 | + struct delayed_work set_power_save; |
1406 | struct delayed_work init_alive_start; |
1407 | struct delayed_work alive_start; |
1408 | struct delayed_work scan_check; |
1409 | diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c |
1410 | index a099c9e..ae60bfd 100644 |
1411 | --- a/drivers/net/wireless/iwlwifi/iwl-power.c |
1412 | +++ b/drivers/net/wireless/iwlwifi/iwl-power.c |
1413 | @@ -324,7 +324,7 @@ EXPORT_SYMBOL(iwl_power_update_mode); |
1414 | * this will be usefull for rate scale to disable PM during heavy |
1415 | * Tx/Rx activities |
1416 | */ |
1417 | -int iwl_power_disable_management(struct iwl_priv *priv) |
1418 | +int iwl_power_disable_management(struct iwl_priv *priv, u32 ms) |
1419 | { |
1420 | u16 prev_mode; |
1421 | int ret = 0; |
1422 | @@ -337,6 +337,11 @@ int iwl_power_disable_management(struct iwl_priv *priv) |
1423 | ret = iwl_power_update_mode(priv, 0); |
1424 | priv->power_data.power_disabled = 1; |
1425 | priv->power_data.user_power_setting = prev_mode; |
1426 | + cancel_delayed_work(&priv->set_power_save); |
1427 | + if (ms) |
1428 | + queue_delayed_work(priv->workqueue, &priv->set_power_save, |
1429 | + msecs_to_jiffies(ms)); |
1430 | + |
1431 | |
1432 | return ret; |
1433 | } |
1434 | @@ -431,3 +436,35 @@ int iwl_power_temperature_change(struct iwl_priv *priv) |
1435 | return ret; |
1436 | } |
1437 | EXPORT_SYMBOL(iwl_power_temperature_change); |
1438 | + |
1439 | +static void iwl_bg_set_power_save(struct work_struct *work) |
1440 | +{ |
1441 | + struct iwl_priv *priv = container_of(work, |
1442 | + struct iwl_priv, set_power_save.work); |
1443 | + IWL_DEBUG(IWL_DL_STATE, "update power\n"); |
1444 | + |
1445 | + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) |
1446 | + return; |
1447 | + |
1448 | + mutex_lock(&priv->mutex); |
1449 | + |
1450 | + /* On starting association we disable power management |
1451 | + * until it completes; if association failed, this |
1452 | + * timer will expire and enable PM again. |
1453 | + */ |
1454 | + if (!iwl_is_associated(priv)) |
1455 | + iwl_power_enable_management(priv); |
1456 | + |
1457 | + mutex_unlock(&priv->mutex); |
1458 | +} |
1459 | +void iwl_setup_power_deferred_work(struct iwl_priv *priv) |
1460 | +{ |
1461 | + INIT_DELAYED_WORK(&priv->set_power_save, iwl_bg_set_power_save); |
1462 | +} |
1463 | +EXPORT_SYMBOL(iwl_setup_power_deferred_work); |
1464 | + |
1465 | +void iwl_power_cancel_timeout(struct iwl_priv *priv) |
1466 | +{ |
1467 | + cancel_delayed_work(&priv->set_power_save); |
1468 | +} |
1469 | +EXPORT_SYMBOL(iwl_power_cancel_timeout); |
1470 | diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h |
1471 | index abcbbf9..aa99f36 100644 |
1472 | --- a/drivers/net/wireless/iwlwifi/iwl-power.h |
1473 | +++ b/drivers/net/wireless/iwlwifi/iwl-power.h |
1474 | @@ -78,8 +78,10 @@ struct iwl_power_mgr { |
1475 | u8 power_disabled; /* flag to disable using power saving level */ |
1476 | }; |
1477 | |
1478 | +void iwl_setup_power_deferred_work(struct iwl_priv *priv); |
1479 | +void iwl_power_cancel_timeout(struct iwl_priv *priv); |
1480 | int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh); |
1481 | -int iwl_power_disable_management(struct iwl_priv *priv); |
1482 | +int iwl_power_disable_management(struct iwl_priv *priv, u32 ms); |
1483 | int iwl_power_enable_management(struct iwl_priv *priv); |
1484 | int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode); |
1485 | int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode); |
1486 | diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c |
1487 | index 6c8ac3a..3a90a67 100644 |
1488 | --- a/drivers/net/wireless/iwlwifi/iwl-scan.c |
1489 | +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c |
1490 | @@ -464,11 +464,6 @@ void iwl_init_scan_params(struct iwl_priv *priv) |
1491 | |
1492 | int iwl_scan_initiate(struct iwl_priv *priv) |
1493 | { |
1494 | - if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { |
1495 | - IWL_ERROR("APs don't scan.\n"); |
1496 | - return 0; |
1497 | - } |
1498 | - |
1499 | if (!iwl_is_ready_rf(priv)) { |
1500 | IWL_DEBUG_SCAN("Aborting scan due to not ready.\n"); |
1501 | return -EIO; |
1502 | @@ -480,8 +475,7 @@ int iwl_scan_initiate(struct iwl_priv *priv) |
1503 | } |
1504 | |
1505 | if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { |
1506 | - IWL_DEBUG_SCAN("Scan request while abort pending. " |
1507 | - "Queuing.\n"); |
1508 | + IWL_DEBUG_SCAN("Scan request while abort pending\n"); |
1509 | return -EAGAIN; |
1510 | } |
1511 | |
1512 | diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c |
1513 | index b775d5b..752e7f8 100644 |
1514 | --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c |
1515 | +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c |
1516 | @@ -5761,7 +5761,6 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv) |
1517 | if (priv->error_recovering) |
1518 | iwl3945_error_recovery(priv); |
1519 | |
1520 | - ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC); |
1521 | return; |
1522 | |
1523 | restart: |
1524 | @@ -6006,6 +6005,7 @@ static void iwl3945_bg_alive_start(struct work_struct *data) |
1525 | mutex_lock(&priv->mutex); |
1526 | iwl3945_alive_start(priv); |
1527 | mutex_unlock(&priv->mutex); |
1528 | + ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC); |
1529 | } |
1530 | |
1531 | static void iwl3945_bg_rf_kill(struct work_struct *work) |
1532 | @@ -6259,6 +6259,11 @@ static void iwl3945_bg_request_scan(struct work_struct *data) |
1533 | direct_mask, |
1534 | (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); |
1535 | |
1536 | + if (scan->channel_count == 0) { |
1537 | + IWL_DEBUG_SCAN("channel count %d\n", scan->channel_count); |
1538 | + goto done; |
1539 | + } |
1540 | + |
1541 | cmd.len += le16_to_cpu(scan->tx_cmd.len) + |
1542 | scan->channel_count * sizeof(struct iwl3945_scan_channel); |
1543 | cmd.data = scan; |
1544 | diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c |
1545 | index a60ae86..a3ccd8c 100644 |
1546 | --- a/drivers/net/wireless/zd1211rw/zd_usb.c |
1547 | +++ b/drivers/net/wireless/zd1211rw/zd_usb.c |
1548 | @@ -61,6 +61,7 @@ static struct usb_device_id usb_ids[] = { |
1549 | { USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 }, |
1550 | /* ZD1211B */ |
1551 | { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, |
1552 | + { USB_DEVICE(0x0ace, 0xb215), .driver_info = DEVICE_ZD1211B }, |
1553 | { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, |
1554 | { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B }, |
1555 | { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B }, |
1556 | @@ -82,6 +83,7 @@ static struct usb_device_id usb_ids[] = { |
1557 | { USB_DEVICE(0x0cde, 0x001a), .driver_info = DEVICE_ZD1211B }, |
1558 | { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B }, |
1559 | { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, |
1560 | + { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B }, |
1561 | /* "Driverless" devices that need ejecting */ |
1562 | { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, |
1563 | { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER }, |
1564 | diff --git a/fs/ext3/super.c b/fs/ext3/super.c |
1565 | index f38a5af..810bf7c 100644 |
1566 | --- a/fs/ext3/super.c |
1567 | +++ b/fs/ext3/super.c |
1568 | @@ -2365,13 +2365,12 @@ static void ext3_write_super (struct super_block * sb) |
1569 | |
1570 | static int ext3_sync_fs(struct super_block *sb, int wait) |
1571 | { |
1572 | - tid_t target; |
1573 | - |
1574 | sb->s_dirt = 0; |
1575 | - if (journal_start_commit(EXT3_SB(sb)->s_journal, &target)) { |
1576 | - if (wait) |
1577 | - log_wait_commit(EXT3_SB(sb)->s_journal, target); |
1578 | - } |
1579 | + if (wait) |
1580 | + ext3_force_commit(sb); |
1581 | + else |
1582 | + journal_start_commit(EXT3_SB(sb)->s_journal, NULL); |
1583 | + |
1584 | return 0; |
1585 | } |
1586 | |
1587 | diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c |
1588 | index ba85157..6d98f11 100644 |
1589 | --- a/fs/hfs/catalog.c |
1590 | +++ b/fs/hfs/catalog.c |
1591 | @@ -190,6 +190,10 @@ int hfs_cat_find_brec(struct super_block *sb, u32 cnid, |
1592 | |
1593 | fd->search_key->cat.ParID = rec.thread.ParID; |
1594 | len = fd->search_key->cat.CName.len = rec.thread.CName.len; |
1595 | + if (len > HFS_NAMELEN) { |
1596 | + printk(KERN_ERR "hfs: bad catalog namelength\n"); |
1597 | + return -EIO; |
1598 | + } |
1599 | memcpy(fd->search_key->cat.CName.name, rec.thread.CName.name, len); |
1600 | return hfs_brec_find(fd); |
1601 | } |
1602 | diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c |
1603 | index 8adebd3..0fd792b 100644 |
1604 | --- a/fs/jffs2/background.c |
1605 | +++ b/fs/jffs2/background.c |
1606 | @@ -85,15 +85,15 @@ static int jffs2_garbage_collect_thread(void *_c) |
1607 | for (;;) { |
1608 | allow_signal(SIGHUP); |
1609 | again: |
1610 | + spin_lock(&c->erase_completion_lock); |
1611 | if (!jffs2_thread_should_wake(c)) { |
1612 | set_current_state (TASK_INTERRUPTIBLE); |
1613 | + spin_unlock(&c->erase_completion_lock); |
1614 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n")); |
1615 | - /* Yes, there's a race here; we checked jffs2_thread_should_wake() |
1616 | - before setting current->state to TASK_INTERRUPTIBLE. But it doesn't |
1617 | - matter - We don't care if we miss a wakeup, because the GC thread |
1618 | - is only an optimisation anyway. */ |
1619 | schedule(); |
1620 | - } |
1621 | + } else |
1622 | + spin_unlock(&c->erase_completion_lock); |
1623 | + |
1624 | |
1625 | /* This thread is purely an optimisation. But if it runs when |
1626 | other things could be running, it actually makes things a |
1627 | diff --git a/fs/jffs2/compr_lzo.c b/fs/jffs2/compr_lzo.c |
1628 | index 47b0457..90cb60d 100644 |
1629 | --- a/fs/jffs2/compr_lzo.c |
1630 | +++ b/fs/jffs2/compr_lzo.c |
1631 | @@ -19,7 +19,7 @@ |
1632 | |
1633 | static void *lzo_mem; |
1634 | static void *lzo_compress_buf; |
1635 | -static DEFINE_MUTEX(deflate_mutex); |
1636 | +static DEFINE_MUTEX(deflate_mutex); /* for lzo_mem and lzo_compress_buf */ |
1637 | |
1638 | static void free_workspace(void) |
1639 | { |
1640 | @@ -49,18 +49,21 @@ static int jffs2_lzo_compress(unsigned char *data_in, unsigned char *cpage_out, |
1641 | |
1642 | mutex_lock(&deflate_mutex); |
1643 | ret = lzo1x_1_compress(data_in, *sourcelen, lzo_compress_buf, &compress_size, lzo_mem); |
1644 | - mutex_unlock(&deflate_mutex); |
1645 | - |
1646 | if (ret != LZO_E_OK) |
1647 | - return -1; |
1648 | + goto fail; |
1649 | |
1650 | if (compress_size > *dstlen) |
1651 | - return -1; |
1652 | + goto fail; |
1653 | |
1654 | memcpy(cpage_out, lzo_compress_buf, compress_size); |
1655 | - *dstlen = compress_size; |
1656 | + mutex_unlock(&deflate_mutex); |
1657 | |
1658 | + *dstlen = compress_size; |
1659 | return 0; |
1660 | + |
1661 | + fail: |
1662 | + mutex_unlock(&deflate_mutex); |
1663 | + return -1; |
1664 | } |
1665 | |
1666 | static int jffs2_lzo_decompress(unsigned char *data_in, unsigned char *cpage_out, |
1667 | diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h |
1668 | index ae060c6..18546d8 100644 |
1669 | --- a/include/asm-generic/memory_model.h |
1670 | +++ b/include/asm-generic/memory_model.h |
1671 | @@ -34,7 +34,7 @@ |
1672 | |
1673 | #define __pfn_to_page(pfn) \ |
1674 | ({ unsigned long __pfn = (pfn); \ |
1675 | - unsigned long __nid = arch_pfn_to_nid(pfn); \ |
1676 | + unsigned long __nid = arch_pfn_to_nid(__pfn); \ |
1677 | NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\ |
1678 | }) |
1679 | |
1680 | diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h |
1681 | index d6fb115..3a16bea 100644 |
1682 | --- a/include/linux/mtd/cfi.h |
1683 | +++ b/include/linux/mtd/cfi.h |
1684 | @@ -281,9 +281,25 @@ struct cfi_private { |
1685 | /* |
1686 | * Returns the command address according to the given geometry. |
1687 | */ |
1688 | -static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type) |
1689 | +static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, |
1690 | + struct map_info *map, struct cfi_private *cfi) |
1691 | { |
1692 | - return (cmd_ofs * type) * interleave; |
1693 | + unsigned bankwidth = map_bankwidth(map); |
1694 | + unsigned interleave = cfi_interleave(cfi); |
1695 | + unsigned type = cfi->device_type; |
1696 | + uint32_t addr; |
1697 | + |
1698 | + addr = (cmd_ofs * type) * interleave; |
1699 | + |
1700 | +	/* Modify the unlock address if we are in compatibility mode. |
1701 | + * For 16bit devices on 8 bit busses |
1702 | + * and 32bit devices on 16 bit busses |
1703 | + * set the low bit of the alternating bit sequence of the address. |
1704 | + */ |
1705 | + if (((type * interleave) > bankwidth) && ((uint8_t)cmd_ofs == 0xaa)) |
1706 | + addr |= (type >> 1)*interleave; |
1707 | + |
1708 | + return addr; |
1709 | } |
1710 | |
1711 | /* |
1712 | @@ -429,7 +445,7 @@ static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t |
1713 | int type, map_word *prev_val) |
1714 | { |
1715 | map_word val; |
1716 | - uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type); |
1717 | + uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi); |
1718 | |
1719 | val = cfi_build_cmd(cmd, map, cfi); |
1720 | |
1721 | diff --git a/include/net/af_unix.h b/include/net/af_unix.h |
1722 | index 7dd29b7..c29ff1d 100644 |
1723 | --- a/include/net/af_unix.h |
1724 | +++ b/include/net/af_unix.h |
1725 | @@ -54,6 +54,7 @@ struct unix_sock { |
1726 | atomic_long_t inflight; |
1727 | spinlock_t lock; |
1728 | unsigned int gc_candidate : 1; |
1729 | + unsigned int gc_maybe_cycle : 1; |
1730 | wait_queue_head_t peer_wait; |
1731 | }; |
1732 | #define unix_sk(__sk) ((struct unix_sock *)__sk) |
1733 | diff --git a/kernel/cgroup.c b/kernel/cgroup.c |
1734 | index a0123d7..d68bf2b 100644 |
1735 | --- a/kernel/cgroup.c |
1736 | +++ b/kernel/cgroup.c |
1737 | @@ -2443,7 +2443,6 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) |
1738 | list_del(&cgrp->sibling); |
1739 | spin_lock(&cgrp->dentry->d_lock); |
1740 | d = dget(cgrp->dentry); |
1741 | - cgrp->dentry = NULL; |
1742 | spin_unlock(&d->d_lock); |
1743 | |
1744 | cgroup_d_remove_dir(d); |
1745 | diff --git a/mm/hugetlb.c b/mm/hugetlb.c |
1746 | index 67a7119..77427c8 100644 |
1747 | --- a/mm/hugetlb.c |
1748 | +++ b/mm/hugetlb.c |
1749 | @@ -353,11 +353,26 @@ static int vma_has_reserves(struct vm_area_struct *vma) |
1750 | return 0; |
1751 | } |
1752 | |
1753 | +static void clear_gigantic_page(struct page *page, |
1754 | + unsigned long addr, unsigned long sz) |
1755 | +{ |
1756 | + int i; |
1757 | + struct page *p = page; |
1758 | + |
1759 | + might_sleep(); |
1760 | + for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) { |
1761 | + cond_resched(); |
1762 | + clear_user_highpage(p, addr + i * PAGE_SIZE); |
1763 | + } |
1764 | +} |
1765 | static void clear_huge_page(struct page *page, |
1766 | unsigned long addr, unsigned long sz) |
1767 | { |
1768 | int i; |
1769 | |
1770 | + if (unlikely(sz > MAX_ORDER_NR_PAGES)) |
1771 | + return clear_gigantic_page(page, addr, sz); |
1772 | + |
1773 | might_sleep(); |
1774 | for (i = 0; i < sz/PAGE_SIZE; i++) { |
1775 | cond_resched(); |
1776 | @@ -365,12 +380,32 @@ static void clear_huge_page(struct page *page, |
1777 | } |
1778 | } |
1779 | |
1780 | +static void copy_gigantic_page(struct page *dst, struct page *src, |
1781 | + unsigned long addr, struct vm_area_struct *vma) |
1782 | +{ |
1783 | + int i; |
1784 | + struct hstate *h = hstate_vma(vma); |
1785 | + struct page *dst_base = dst; |
1786 | + struct page *src_base = src; |
1787 | + might_sleep(); |
1788 | + for (i = 0; i < pages_per_huge_page(h); ) { |
1789 | + cond_resched(); |
1790 | + copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); |
1791 | + |
1792 | + i++; |
1793 | + dst = mem_map_next(dst, dst_base, i); |
1794 | + src = mem_map_next(src, src_base, i); |
1795 | + } |
1796 | +} |
1797 | static void copy_huge_page(struct page *dst, struct page *src, |
1798 | unsigned long addr, struct vm_area_struct *vma) |
1799 | { |
1800 | int i; |
1801 | struct hstate *h = hstate_vma(vma); |
1802 | |
1803 | + if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) |
1804 | + return copy_gigantic_page(dst, src, addr, vma); |
1805 | + |
1806 | might_sleep(); |
1807 | for (i = 0; i < pages_per_huge_page(h); i++) { |
1808 | cond_resched(); |
1809 | @@ -455,6 +490,8 @@ static void update_and_free_page(struct hstate *h, struct page *page) |
1810 | { |
1811 | int i; |
1812 | |
1813 | + VM_BUG_ON(h->order >= MAX_ORDER); |
1814 | + |
1815 | h->nr_huge_pages--; |
1816 | h->nr_huge_pages_node[page_to_nid(page)]--; |
1817 | for (i = 0; i < pages_per_huge_page(h); i++) { |
1818 | @@ -969,6 +1006,14 @@ found: |
1819 | return 1; |
1820 | } |
1821 | |
1822 | +static void prep_compound_huge_page(struct page *page, int order) |
1823 | +{ |
1824 | + if (unlikely(order > (MAX_ORDER - 1))) |
1825 | + prep_compound_gigantic_page(page, order); |
1826 | + else |
1827 | + prep_compound_page(page, order); |
1828 | +} |
1829 | + |
1830 | /* Put bootmem huge pages into the standard lists after mem_map is up */ |
1831 | static void __init gather_bootmem_prealloc(void) |
1832 | { |
1833 | @@ -979,7 +1024,7 @@ static void __init gather_bootmem_prealloc(void) |
1834 | struct hstate *h = m->hstate; |
1835 | __ClearPageReserved(page); |
1836 | WARN_ON(page_count(page) != 1); |
1837 | - prep_compound_page(page, h->order); |
1838 | + prep_compound_huge_page(page, h->order); |
1839 | prep_new_huge_page(h, page, page_to_nid(page)); |
1840 | } |
1841 | } |
1842 | @@ -2103,7 +2148,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, |
1843 | same_page: |
1844 | if (pages) { |
1845 | get_page(page); |
1846 | - pages[i] = page + pfn_offset; |
1847 | + pages[i] = mem_map_offset(page, pfn_offset); |
1848 | } |
1849 | |
1850 | if (vmas) |
1851 | diff --git a/mm/internal.h b/mm/internal.h |
1852 | index 1f43f74..92729ea 100644 |
1853 | --- a/mm/internal.h |
1854 | +++ b/mm/internal.h |
1855 | @@ -17,6 +17,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, |
1856 | unsigned long floor, unsigned long ceiling); |
1857 | |
1858 | extern void prep_compound_page(struct page *page, unsigned long order); |
1859 | +extern void prep_compound_gigantic_page(struct page *page, unsigned long order); |
1860 | |
1861 | static inline void set_page_count(struct page *page, int v) |
1862 | { |
1863 | @@ -53,6 +54,34 @@ static inline unsigned long page_order(struct page *page) |
1864 | } |
1865 | |
1866 | /* |
1867 | + * Return the mem_map entry representing the 'offset' subpage within |
1868 | + * the maximally aligned gigantic page 'base'. Handle any discontiguity |
1869 | + * in the mem_map at MAX_ORDER_NR_PAGES boundaries. |
1870 | + */ |
1871 | +static inline struct page *mem_map_offset(struct page *base, int offset) |
1872 | +{ |
1873 | + if (unlikely(offset >= MAX_ORDER_NR_PAGES)) |
1874 | + return pfn_to_page(page_to_pfn(base) + offset); |
1875 | + return base + offset; |
1876 | +} |
1877 | + |
1878 | +/* |
1879 | + * Iterator over all subpages within the maximally aligned gigantic |
1880 | + * page 'base'. Handle any discontiguity in the mem_map. |
1881 | + */ |
1882 | +static inline struct page *mem_map_next(struct page *iter, |
1883 | + struct page *base, int offset) |
1884 | +{ |
1885 | + if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) { |
1886 | + unsigned long pfn = page_to_pfn(base) + offset; |
1887 | + if (!pfn_valid(pfn)) |
1888 | + return NULL; |
1889 | + return pfn_to_page(pfn); |
1890 | + } |
1891 | + return iter + 1; |
1892 | +} |
1893 | + |
1894 | +/* |
1895 | * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node, |
1896 | * so all functions starting at paging_init should be marked __init |
1897 | * in those cases. SPARSEMEM, however, allows for memory hotplug, |
1898 | diff --git a/mm/page_alloc.c b/mm/page_alloc.c |
1899 | index 27b8681..ed5cdae 100644 |
1900 | --- a/mm/page_alloc.c |
1901 | +++ b/mm/page_alloc.c |
1902 | @@ -268,24 +268,39 @@ void prep_compound_page(struct page *page, unsigned long order) |
1903 | { |
1904 | int i; |
1905 | int nr_pages = 1 << order; |
1906 | + |
1907 | + set_compound_page_dtor(page, free_compound_page); |
1908 | + set_compound_order(page, order); |
1909 | + __SetPageHead(page); |
1910 | + for (i = 1; i < nr_pages; i++) { |
1911 | + struct page *p = page + i; |
1912 | + |
1913 | + __SetPageTail(p); |
1914 | + p->first_page = page; |
1915 | + } |
1916 | +} |
1917 | + |
1918 | +#ifdef CONFIG_HUGETLBFS |
1919 | +void prep_compound_gigantic_page(struct page *page, unsigned long order) |
1920 | +{ |
1921 | + int i; |
1922 | + int nr_pages = 1 << order; |
1923 | struct page *p = page + 1; |
1924 | |
1925 | set_compound_page_dtor(page, free_compound_page); |
1926 | set_compound_order(page, order); |
1927 | __SetPageHead(page); |
1928 | - for (i = 1; i < nr_pages; i++, p++) { |
1929 | - if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0)) |
1930 | - p = pfn_to_page(page_to_pfn(page) + i); |
1931 | + for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { |
1932 | __SetPageTail(p); |
1933 | p->first_page = page; |
1934 | } |
1935 | } |
1936 | +#endif |
1937 | |
1938 | static void destroy_compound_page(struct page *page, unsigned long order) |
1939 | { |
1940 | int i; |
1941 | int nr_pages = 1 << order; |
1942 | - struct page *p = page + 1; |
1943 | |
1944 | if (unlikely(compound_order(page) != order)) |
1945 | bad_page(page); |
1946 | @@ -293,9 +308,8 @@ static void destroy_compound_page(struct page *page, unsigned long order) |
1947 | if (unlikely(!PageHead(page))) |
1948 | bad_page(page); |
1949 | __ClearPageHead(page); |
1950 | - for (i = 1; i < nr_pages; i++, p++) { |
1951 | - if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0)) |
1952 | - p = pfn_to_page(page_to_pfn(page) + i); |
1953 | + for (i = 1; i < nr_pages; i++) { |
1954 | + struct page *p = page + i; |
1955 | |
1956 | if (unlikely(!PageTail(p) | |
1957 | (p->first_page != page))) |
1958 | diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c |
1959 | index 015606b..8bde9bf 100644 |
1960 | --- a/net/unix/af_unix.c |
1961 | +++ b/net/unix/af_unix.c |
1962 | @@ -1300,14 +1300,23 @@ static void unix_destruct_fds(struct sk_buff *skb) |
1963 | sock_wfree(skb); |
1964 | } |
1965 | |
1966 | -static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) |
1967 | +static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) |
1968 | { |
1969 | int i; |
1970 | + |
1971 | + /* |
1972 | + * Need to duplicate file references for the sake of garbage |
1973 | + * collection. Otherwise a socket in the fps might become a |
1974 | + * candidate for GC while the skb is not yet queued. |
1975 | + */ |
1976 | + UNIXCB(skb).fp = scm_fp_dup(scm->fp); |
1977 | + if (!UNIXCB(skb).fp) |
1978 | + return -ENOMEM; |
1979 | + |
1980 | for (i=scm->fp->count-1; i>=0; i--) |
1981 | unix_inflight(scm->fp->fp[i]); |
1982 | - UNIXCB(skb).fp = scm->fp; |
1983 | skb->destructor = unix_destruct_fds; |
1984 | - scm->fp = NULL; |
1985 | + return 0; |
1986 | } |
1987 | |
1988 | /* |
1989 | @@ -1366,8 +1375,11 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, |
1990 | goto out; |
1991 | |
1992 | memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); |
1993 | - if (siocb->scm->fp) |
1994 | - unix_attach_fds(siocb->scm, skb); |
1995 | + if (siocb->scm->fp) { |
1996 | + err = unix_attach_fds(siocb->scm, skb); |
1997 | + if (err) |
1998 | + goto out_free; |
1999 | + } |
2000 | unix_get_secdata(siocb->scm, skb); |
2001 | |
2002 | skb_reset_transport_header(skb); |
2003 | @@ -1536,8 +1548,13 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, |
2004 | size = min_t(int, size, skb_tailroom(skb)); |
2005 | |
2006 | memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); |
2007 | - if (siocb->scm->fp) |
2008 | - unix_attach_fds(siocb->scm, skb); |
2009 | + if (siocb->scm->fp) { |
2010 | + err = unix_attach_fds(siocb->scm, skb); |
2011 | + if (err) { |
2012 | + kfree_skb(skb); |
2013 | + goto out_err; |
2014 | + } |
2015 | + } |
2016 | |
2017 | if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) { |
2018 | kfree_skb(skb); |
2019 | diff --git a/net/unix/garbage.c b/net/unix/garbage.c |
2020 | index 2a27b84..6d4a9a8 100644 |
2021 | --- a/net/unix/garbage.c |
2022 | +++ b/net/unix/garbage.c |
2023 | @@ -186,8 +186,17 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), |
2024 | */ |
2025 | struct sock *sk = unix_get_socket(*fp++); |
2026 | if (sk) { |
2027 | - hit = true; |
2028 | - func(unix_sk(sk)); |
2029 | + struct unix_sock *u = unix_sk(sk); |
2030 | + |
2031 | + /* |
2032 | + * Ignore non-candidates, they could |
2033 | + * have been added to the queues after |
2034 | + * starting the garbage collection |
2035 | + */ |
2036 | + if (u->gc_candidate) { |
2037 | + hit = true; |
2038 | + func(u); |
2039 | + } |
2040 | } |
2041 | } |
2042 | if (hit && hitlist != NULL) { |
2043 | @@ -249,11 +258,11 @@ static void inc_inflight_move_tail(struct unix_sock *u) |
2044 | { |
2045 | atomic_long_inc(&u->inflight); |
2046 | /* |
2047 | - * If this is still a candidate, move it to the end of the |
2048 | - * list, so that it's checked even if it was already passed |
2049 | - * over |
2050 | + * If this still might be part of a cycle, move it to the end |
2051 | + * of the list, so that it's checked even if it was already |
2052 | + * passed over |
2053 | */ |
2054 | - if (u->gc_candidate) |
2055 | + if (u->gc_maybe_cycle) |
2056 | list_move_tail(&u->link, &gc_candidates); |
2057 | } |
2058 | |
2059 | @@ -267,6 +276,7 @@ void unix_gc(void) |
2060 | struct unix_sock *next; |
2061 | struct sk_buff_head hitlist; |
2062 | struct list_head cursor; |
2063 | + LIST_HEAD(not_cycle_list); |
2064 | |
2065 | spin_lock(&unix_gc_lock); |
2066 | |
2067 | @@ -282,10 +292,14 @@ void unix_gc(void) |
2068 | * |
2069 | * Holding unix_gc_lock will protect these candidates from |
2070 | * being detached, and hence from gaining an external |
2071 | - * reference. This also means, that since there are no |
2072 | - * possible receivers, the receive queues of these sockets are |
2073 | - * static during the GC, even though the dequeue is done |
2074 | - * before the detach without atomicity guarantees. |
2075 | + * reference. Since there are no possible receivers, all |
2076 | + * buffers currently on the candidates' queues stay there |
2077 | + * during the garbage collection. |
2078 | + * |
2079 | + * We also know that no new candidate can be added onto the |
2080 | + * receive queues. Other, non candidate sockets _can_ be |
2081 | + * added to queue, so we must make sure only to touch |
2082 | + * candidates. |
2083 | */ |
2084 | list_for_each_entry_safe(u, next, &gc_inflight_list, link) { |
2085 | long total_refs; |
2086 | @@ -299,6 +313,7 @@ void unix_gc(void) |
2087 | if (total_refs == inflight_refs) { |
2088 | list_move_tail(&u->link, &gc_candidates); |
2089 | u->gc_candidate = 1; |
2090 | + u->gc_maybe_cycle = 1; |
2091 | } |
2092 | } |
2093 | |
2094 | @@ -325,14 +340,24 @@ void unix_gc(void) |
2095 | list_move(&cursor, &u->link); |
2096 | |
2097 | if (atomic_long_read(&u->inflight) > 0) { |
2098 | - list_move_tail(&u->link, &gc_inflight_list); |
2099 | - u->gc_candidate = 0; |
2100 | + list_move_tail(&u->link, ¬_cycle_list); |
2101 | + u->gc_maybe_cycle = 0; |
2102 | scan_children(&u->sk, inc_inflight_move_tail, NULL); |
2103 | } |
2104 | } |
2105 | list_del(&cursor); |
2106 | |
2107 | /* |
2108 | + * not_cycle_list contains those sockets which do not make up a |
2109 | + * cycle. Restore these to the inflight list. |
2110 | + */ |
2111 | + while (!list_empty(¬_cycle_list)) { |
2112 | + u = list_entry(not_cycle_list.next, struct unix_sock, link); |
2113 | + u->gc_candidate = 0; |
2114 | + list_move_tail(&u->link, &gc_inflight_list); |
2115 | + } |
2116 | + |
2117 | + /* |
2118 | * Now gc_candidates contains only garbage. Restore original |
2119 | * inflight counters for these as well, and remove the skbuffs |
2120 | * which are creating the cycle(s). |
2121 | diff --git a/security/keys/internal.h b/security/keys/internal.h |
2122 | index b39f5c2..239098f 100644 |
2123 | --- a/security/keys/internal.h |
2124 | +++ b/security/keys/internal.h |
2125 | @@ -107,6 +107,7 @@ extern key_ref_t search_process_keyrings(struct key_type *type, |
2126 | |
2127 | extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check); |
2128 | |
2129 | +extern int install_user_keyrings(struct task_struct *tsk); |
2130 | extern int install_thread_keyring(struct task_struct *tsk); |
2131 | extern int install_process_keyring(struct task_struct *tsk); |
2132 | |
2133 | diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c |
2134 | index 5be6d01..45b240a 100644 |
2135 | --- a/security/keys/process_keys.c |
2136 | +++ b/security/keys/process_keys.c |
2137 | @@ -40,7 +40,7 @@ struct key_user root_key_user = { |
2138 | /* |
2139 | * install user and user session keyrings for a particular UID |
2140 | */ |
2141 | -static int install_user_keyrings(struct task_struct *tsk) |
2142 | +int install_user_keyrings(struct task_struct *tsk) |
2143 | { |
2144 | struct user_struct *user = tsk->user; |
2145 | struct key *uid_keyring, *session_keyring; |
2146 | diff --git a/security/keys/request_key.c b/security/keys/request_key.c |
2147 | index ba32ca6..abea08f 100644 |
2148 | --- a/security/keys/request_key.c |
2149 | +++ b/security/keys/request_key.c |
2150 | @@ -74,6 +74,10 @@ static int call_sbin_request_key(struct key_construction *cons, |
2151 | |
2152 | kenter("{%d},{%d},%s", key->serial, authkey->serial, op); |
2153 | |
2154 | + ret = install_user_keyrings(tsk); |
2155 | + if (ret < 0) |
2156 | + goto error_alloc; |
2157 | + |
2158 | /* allocate a new session keyring */ |
2159 | sprintf(desc, "_req.%u", key->serial); |
2160 | |
2161 | diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c |
2162 | index f3da621..732ce13 100644 |
2163 | --- a/sound/pci/hda/patch_sigmatel.c |
2164 | +++ b/sound/pci/hda/patch_sigmatel.c |
2165 | @@ -67,6 +67,7 @@ enum { |
2166 | enum { |
2167 | STAC_92HD73XX_REF, |
2168 | STAC_DELL_M6, |
2169 | + STAC_DELL_EQ, |
2170 | STAC_92HD73XX_MODELS |
2171 | }; |
2172 | |
2173 | @@ -560,9 +561,7 @@ static struct hda_verb dell_eq_core_init[] = { |
2174 | }; |
2175 | |
2176 | static struct hda_verb dell_m6_core_init[] = { |
2177 | - /* set master volume to max value without distortion |
2178 | - * and direct control */ |
2179 | - { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xec}, |
2180 | + { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff}, |
2181 | /* setup audio connections */ |
2182 | { 0x0d, AC_VERB_SET_CONNECT_SEL, 0x00}, |
2183 | { 0x0a, AC_VERB_SET_CONNECT_SEL, 0x01}, |
2184 | @@ -1297,11 +1296,13 @@ static unsigned int dell_m6_pin_configs[13] = { |
2185 | static unsigned int *stac92hd73xx_brd_tbl[STAC_92HD73XX_MODELS] = { |
2186 | [STAC_92HD73XX_REF] = ref92hd73xx_pin_configs, |
2187 | [STAC_DELL_M6] = dell_m6_pin_configs, |
2188 | + [STAC_DELL_EQ] = dell_m6_pin_configs, |
2189 | }; |
2190 | |
2191 | static const char *stac92hd73xx_models[STAC_92HD73XX_MODELS] = { |
2192 | [STAC_92HD73XX_REF] = "ref", |
2193 | [STAC_DELL_M6] = "dell-m6", |
2194 | + [STAC_DELL_EQ] = "dell-eq", |
2195 | }; |
2196 | |
2197 | static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = { |
2198 | @@ -3560,8 +3561,12 @@ again: |
2199 | spec->gpio_data = 0x01; |
2200 | |
2201 | switch (spec->board_config) { |
2202 | - case STAC_DELL_M6: |
2203 | + case STAC_DELL_EQ: |
2204 | spec->init = dell_eq_core_init; |
2205 | + /* fallthru */ |
2206 | + case STAC_DELL_M6: |
2207 | + if (!spec->init) |
2208 | + spec->init = dell_m6_core_init; |
2209 | switch (codec->subsystem_id) { |
2210 | case 0x1028025e: /* Analog Mics */ |
2211 | case 0x1028025f: |
2212 | @@ -3570,8 +3575,6 @@ again: |
2213 | break; |
2214 | case 0x10280271: /* Digital Mics */ |
2215 | case 0x10280272: |
2216 | - spec->init = dell_m6_core_init; |
2217 | - /* fall-through */ |
2218 | case 0x10280254: |
2219 | case 0x10280255: |
2220 | stac92xx_set_config_reg(codec, 0x13, 0x90A60160); |