Contents of /trunk/kernel26-alx/patches-2.6.27-r3/0103-2.6.27.4-all-fixes.patch
Parent Directory | Revision Log
Revision 1176 -
(show annotations)
(download)
Thu Oct 14 15:11:06 2010 UTC (13 years, 11 months ago) by niro
File size: 27734 byte(s)
-2.6.27-alx-r3: new magellan 0.5.2 kernel
1 | diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c |
2 | index 426e5d9..c44cd6d 100644 |
3 | --- a/arch/x86/kernel/acpi/sleep.c |
4 | +++ b/arch/x86/kernel/acpi/sleep.c |
5 | @@ -10,6 +10,7 @@ |
6 | #include <linux/dmi.h> |
7 | #include <linux/cpumask.h> |
8 | #include <asm/segment.h> |
9 | +#include <asm/desc.h> |
10 | |
11 | #include "realmode/wakeup.h" |
12 | #include "sleep.h" |
13 | @@ -98,6 +99,8 @@ int acpi_save_state_mem(void) |
14 | header->trampoline_segment = setup_trampoline() >> 4; |
15 | #ifdef CONFIG_SMP |
16 | stack_start.sp = temp_stack + 4096; |
17 | + early_gdt_descr.address = |
18 | + (unsigned long)get_cpu_gdt_table(smp_processor_id()); |
19 | #endif |
20 | initial_code = (unsigned long)wakeup_long64; |
21 | saved_magic = 0x123456789abcdef0; |
22 | diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c |
23 | index a69cc0f..bccd0ef 100644 |
24 | --- a/arch/x86/kernel/amd_iommu_init.c |
25 | +++ b/arch/x86/kernel/amd_iommu_init.c |
26 | @@ -210,7 +210,7 @@ static void __init iommu_set_exclusion_range(struct amd_iommu *iommu) |
27 | /* Programs the physical address of the device table into the IOMMU hardware */ |
28 | static void __init iommu_set_device_table(struct amd_iommu *iommu) |
29 | { |
30 | - u32 entry; |
31 | + u64 entry; |
32 | |
33 | BUG_ON(iommu->mmio_base == NULL); |
34 | |
35 | diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c |
36 | index dba3cfb..ecb6ace 100644 |
37 | --- a/drivers/acpi/hardware/hwsleep.c |
38 | +++ b/drivers/acpi/hardware/hwsleep.c |
39 | @@ -78,19 +78,17 @@ acpi_set_firmware_waking_vector(acpi_physical_address physical_address) |
40 | return_ACPI_STATUS(status); |
41 | } |
42 | |
43 | - /* Set the vector */ |
44 | + /* |
45 | + * According to the ACPI specification 2.0c and later, the 64-bit |
46 | + * waking vector should be cleared and the 32-bit waking vector should |
47 | + * be used, unless we want the wake-up code to be called by the BIOS in |
48 | + * Protected Mode. Some systems (for example HP dv5-1004nr) are known |
49 | + * to fail to resume if the 64-bit vector is used. |
50 | + */ |
51 | + if (facs->version >= 1) |
52 | + facs->xfirmware_waking_vector = 0; |
53 | |
54 | - if ((facs->length < 32) || (!(facs->xfirmware_waking_vector))) { |
55 | - /* |
56 | - * ACPI 1.0 FACS or short table or optional X_ field is zero |
57 | - */ |
58 | - facs->firmware_waking_vector = (u32) physical_address; |
59 | - } else { |
60 | - /* |
61 | - * ACPI 2.0 FACS with valid X_ field |
62 | - */ |
63 | - facs->xfirmware_waking_vector = physical_address; |
64 | - } |
65 | + facs->firmware_waking_vector = (u32)physical_address; |
66 | |
67 | return_ACPI_STATUS(AE_OK); |
68 | } |
69 | @@ -134,20 +132,7 @@ acpi_get_firmware_waking_vector(acpi_physical_address * physical_address) |
70 | } |
71 | |
72 | /* Get the vector */ |
73 | - |
74 | - if ((facs->length < 32) || (!(facs->xfirmware_waking_vector))) { |
75 | - /* |
76 | - * ACPI 1.0 FACS or short table or optional X_ field is zero |
77 | - */ |
78 | - *physical_address = |
79 | - (acpi_physical_address) facs->firmware_waking_vector; |
80 | - } else { |
81 | - /* |
82 | - * ACPI 2.0 FACS with valid X_ field |
83 | - */ |
84 | - *physical_address = |
85 | - (acpi_physical_address) facs->xfirmware_waking_vector; |
86 | - } |
87 | + *physical_address = (acpi_physical_address)facs->firmware_waking_vector; |
88 | |
89 | return_ACPI_STATUS(AE_OK); |
90 | } |
91 | diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c |
92 | index d13194a..4751909 100644 |
93 | --- a/drivers/acpi/sleep/main.c |
94 | +++ b/drivers/acpi/sleep/main.c |
95 | @@ -200,6 +200,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state) |
96 | break; |
97 | } |
98 | |
99 | + /* If ACPI is not enabled by the BIOS, we need to enable it here. */ |
100 | + acpi_enable(); |
101 | /* Reprogram control registers and execute _BFS */ |
102 | acpi_leave_sleep_state_prep(acpi_state); |
103 | |
104 | @@ -296,6 +298,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { |
105 | DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"), |
106 | }, |
107 | }, |
108 | + { |
109 | + .callback = init_old_suspend_ordering, |
110 | + .ident = "HP xw4600 Workstation", |
111 | + .matches = { |
112 | + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), |
113 | + DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"), |
114 | + }, |
115 | + }, |
116 | {}, |
117 | }; |
118 | #endif /* CONFIG_SUSPEND */ |
119 | diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c |
120 | index fd64137..f2e4caf 100644 |
121 | --- a/drivers/char/hvc_console.c |
122 | +++ b/drivers/char/hvc_console.c |
123 | @@ -367,13 +367,13 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) |
124 | spin_lock_irqsave(&hp->lock, flags); |
125 | |
126 | if (--hp->count == 0) { |
127 | - if (hp->ops->notifier_del) |
128 | - hp->ops->notifier_del(hp, hp->data); |
129 | - |
130 | /* We are done with the tty pointer now. */ |
131 | hp->tty = NULL; |
132 | spin_unlock_irqrestore(&hp->lock, flags); |
133 | |
134 | + if (hp->ops->notifier_del) |
135 | + hp->ops->notifier_del(hp, hp->data); |
136 | + |
137 | /* |
138 | * Chain calls chars_in_buffer() and returns immediately if |
139 | * there is no buffered data otherwise sleeps on a wait queue |
140 | @@ -416,11 +416,11 @@ static void hvc_hangup(struct tty_struct *tty) |
141 | hp->n_outbuf = 0; |
142 | hp->tty = NULL; |
143 | |
144 | + spin_unlock_irqrestore(&hp->lock, flags); |
145 | + |
146 | if (hp->ops->notifier_del) |
147 | hp->ops->notifier_del(hp, hp->data); |
148 | |
149 | - spin_unlock_irqrestore(&hp->lock, flags); |
150 | - |
151 | while(temp_open_count) { |
152 | --temp_open_count; |
153 | kref_put(&hp->kref, destroy_hvc_struct); |
154 | diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c |
155 | index 0e024fe..887072f 100644 |
156 | --- a/drivers/edac/cell_edac.c |
157 | +++ b/drivers/edac/cell_edac.c |
158 | @@ -142,7 +142,7 @@ static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci) |
159 | csrow->nr_pages = (r.end - r.start + 1) >> PAGE_SHIFT; |
160 | csrow->last_page = csrow->first_page + csrow->nr_pages - 1; |
161 | csrow->mtype = MEM_XDR; |
162 | - csrow->edac_mode = EDAC_FLAG_EC | EDAC_FLAG_SECDED; |
163 | + csrow->edac_mode = EDAC_SECDED; |
164 | dev_dbg(mci->dev, |
165 | "Initialized on node %d, chanmask=0x%x," |
166 | " first_page=0x%lx, nr_pages=0x%x\n", |
167 | diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c |
168 | index 8d29405..59f6ad8 100644 |
169 | --- a/drivers/gpio/gpiolib.c |
170 | +++ b/drivers/gpio/gpiolib.c |
171 | @@ -1020,7 +1020,7 @@ int gpio_get_value_cansleep(unsigned gpio) |
172 | |
173 | might_sleep_if(extra_checks); |
174 | chip = gpio_to_chip(gpio); |
175 | - return chip->get(chip, gpio - chip->base); |
176 | + return chip->get ? chip->get(chip, gpio - chip->base) : 0; |
177 | } |
178 | EXPORT_SYMBOL_GPL(gpio_get_value_cansleep); |
179 | |
180 | diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c |
181 | index 996802b..8f15353 100644 |
182 | --- a/drivers/md/dm-kcopyd.c |
183 | +++ b/drivers/md/dm-kcopyd.c |
184 | @@ -268,6 +268,17 @@ static void push(struct list_head *jobs, struct kcopyd_job *job) |
185 | spin_unlock_irqrestore(&kc->job_lock, flags); |
186 | } |
187 | |
188 | + |
189 | +static void push_head(struct list_head *jobs, struct kcopyd_job *job) |
190 | +{ |
191 | + unsigned long flags; |
192 | + struct dm_kcopyd_client *kc = job->kc; |
193 | + |
194 | + spin_lock_irqsave(&kc->job_lock, flags); |
195 | + list_add(&job->list, jobs); |
196 | + spin_unlock_irqrestore(&kc->job_lock, flags); |
197 | +} |
198 | + |
199 | /* |
200 | * These three functions process 1 item from the corresponding |
201 | * job list. |
202 | @@ -398,7 +409,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc, |
203 | * We couldn't service this job ATM, so |
204 | * push this job back onto the list. |
205 | */ |
206 | - push(jobs, job); |
207 | + push_head(jobs, job); |
208 | break; |
209 | } |
210 | |
211 | diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c |
212 | index 6e5528a..4ed9b7a 100644 |
213 | --- a/drivers/md/dm-snap.c |
214 | +++ b/drivers/md/dm-snap.c |
215 | @@ -824,8 +824,10 @@ static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe) |
216 | * the bios for the original write to the origin. |
217 | */ |
218 | if (primary_pe && |
219 | - atomic_dec_and_test(&primary_pe->ref_count)) |
220 | + atomic_dec_and_test(&primary_pe->ref_count)) { |
221 | origin_bios = bio_list_get(&primary_pe->origin_bios); |
222 | + free_pending_exception(primary_pe); |
223 | + } |
224 | |
225 | /* |
226 | * Free the pe if it's not linked to an origin write or if |
227 | @@ -834,12 +836,6 @@ static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe) |
228 | if (!primary_pe || primary_pe != pe) |
229 | free_pending_exception(pe); |
230 | |
231 | - /* |
232 | - * Free the primary pe if nothing references it. |
233 | - */ |
234 | - if (primary_pe && !atomic_read(&primary_pe->ref_count)) |
235 | - free_pending_exception(primary_pe); |
236 | - |
237 | return origin_bios; |
238 | } |
239 | |
240 | diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c |
241 | index f051c6a..7412258 100644 |
242 | --- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c |
243 | +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c |
244 | @@ -60,7 +60,6 @@ static struct pvr2_hdw *unit_pointers[PVR_NUM] = {[ 0 ... PVR_NUM-1 ] = NULL}; |
245 | static DEFINE_MUTEX(pvr2_unit_mtx); |
246 | |
247 | static int ctlchg; |
248 | -static int initusbreset = 1; |
249 | static int procreload; |
250 | static int tuner[PVR_NUM] = { [0 ... PVR_NUM-1] = -1 }; |
251 | static int tolerance[PVR_NUM] = { [0 ... PVR_NUM-1] = 0 }; |
252 | @@ -71,8 +70,6 @@ module_param(ctlchg, int, S_IRUGO|S_IWUSR); |
253 | MODULE_PARM_DESC(ctlchg, "0=optimize ctl change 1=always accept new ctl value"); |
254 | module_param(init_pause_msec, int, S_IRUGO|S_IWUSR); |
255 | MODULE_PARM_DESC(init_pause_msec, "hardware initialization settling delay"); |
256 | -module_param(initusbreset, int, S_IRUGO|S_IWUSR); |
257 | -MODULE_PARM_DESC(initusbreset, "Do USB reset device on probe"); |
258 | module_param(procreload, int, S_IRUGO|S_IWUSR); |
259 | MODULE_PARM_DESC(procreload, |
260 | "Attempt init failure recovery with firmware reload"); |
261 | @@ -1698,9 +1695,6 @@ static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw) |
262 | } |
263 | hdw->fw1_state = FW1_STATE_OK; |
264 | |
265 | - if (initusbreset) { |
266 | - pvr2_hdw_device_reset(hdw); |
267 | - } |
268 | if (!pvr2_hdw_dev_ok(hdw)) return; |
269 | |
270 | for (idx = 0; idx < hdw->hdw_desc->client_modules.cnt; idx++) { |
271 | diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c |
272 | index 54defec..87a68d2 100644 |
273 | --- a/drivers/pci/hotplug/cpqphp_core.c |
274 | +++ b/drivers/pci/hotplug/cpqphp_core.c |
275 | @@ -435,7 +435,7 @@ static int ctrl_slot_setup(struct controller *ctrl, |
276 | slot->number, ctrl->slot_device_offset, |
277 | slot_number); |
278 | result = pci_hp_register(hotplug_slot, |
279 | - ctrl->pci_dev->subordinate, |
280 | + ctrl->pci_dev->bus, |
281 | slot->device); |
282 | if (result) { |
283 | err("pci_hp_register failed with error %d\n", result); |
284 | diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c |
285 | index 6e2f130..d576d4c 100644 |
286 | --- a/drivers/scsi/device_handler/scsi_dh_rdac.c |
287 | +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c |
288 | @@ -590,6 +590,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = { |
289 | {"STK", "OPENstorage D280"}, |
290 | {"SUN", "CSM200_R"}, |
291 | {"SUN", "LCSM100_F"}, |
292 | + {"DELL", "MD3000"}, |
293 | + {"DELL", "MD3000i"}, |
294 | {NULL, NULL}, |
295 | }; |
296 | |
297 | diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c |
298 | index 76fce44..3e86240 100644 |
299 | --- a/drivers/usb/atm/speedtch.c |
300 | +++ b/drivers/usb/atm/speedtch.c |
301 | @@ -722,6 +722,16 @@ static void speedtch_atm_stop(struct usbatm_data *usbatm, struct atm_dev *atm_de |
302 | flush_scheduled_work(); |
303 | } |
304 | |
305 | +static int speedtch_pre_reset(struct usb_interface *intf) |
306 | +{ |
307 | + return 0; |
308 | +} |
309 | + |
310 | +static int speedtch_post_reset(struct usb_interface *intf) |
311 | +{ |
312 | + return 0; |
313 | +} |
314 | + |
315 | |
316 | /********** |
317 | ** USB ** |
318 | @@ -740,6 +750,8 @@ static struct usb_driver speedtch_usb_driver = { |
319 | .name = speedtch_driver_name, |
320 | .probe = speedtch_usb_probe, |
321 | .disconnect = usbatm_usb_disconnect, |
322 | + .pre_reset = speedtch_pre_reset, |
323 | + .post_reset = speedtch_post_reset, |
324 | .id_table = speedtch_usb_ids |
325 | }; |
326 | |
327 | diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c |
328 | index c257453..d996a61 100644 |
329 | --- a/drivers/usb/class/cdc-acm.c |
330 | +++ b/drivers/usb/class/cdc-acm.c |
331 | @@ -849,9 +849,10 @@ static void acm_write_buffers_free(struct acm *acm) |
332 | { |
333 | int i; |
334 | struct acm_wb *wb; |
335 | + struct usb_device *usb_dev = interface_to_usbdev(acm->control); |
336 | |
337 | for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++) { |
338 | - usb_buffer_free(acm->dev, acm->writesize, wb->buf, wb->dmah); |
339 | + usb_buffer_free(usb_dev, acm->writesize, wb->buf, wb->dmah); |
340 | } |
341 | } |
342 | |
343 | diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c |
344 | index 7e8e123..023a4e9 100644 |
345 | --- a/drivers/usb/class/cdc-wdm.c |
346 | +++ b/drivers/usb/class/cdc-wdm.c |
347 | @@ -42,6 +42,8 @@ static struct usb_device_id wdm_ids[] = { |
348 | { } |
349 | }; |
350 | |
351 | +MODULE_DEVICE_TABLE (usb, wdm_ids); |
352 | + |
353 | #define WDM_MINOR_BASE 176 |
354 | |
355 | |
356 | diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c |
357 | index 5a7fa6f..9f42cb8 100644 |
358 | --- a/drivers/usb/core/driver.c |
359 | +++ b/drivers/usb/core/driver.c |
360 | @@ -1609,7 +1609,8 @@ int usb_external_resume_device(struct usb_device *udev) |
361 | status = usb_resume_both(udev); |
362 | udev->last_busy = jiffies; |
363 | usb_pm_unlock(udev); |
364 | - do_unbind_rebind(udev, DO_REBIND); |
365 | + if (status == 0) |
366 | + do_unbind_rebind(udev, DO_REBIND); |
367 | |
368 | /* Now that the device is awake, we can start trying to autosuspend |
369 | * it again. */ |
370 | diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c |
371 | index d999638..875de9a 100644 |
372 | --- a/drivers/usb/core/hub.c |
373 | +++ b/drivers/usb/core/hub.c |
374 | @@ -3424,7 +3424,7 @@ int usb_reset_device(struct usb_device *udev) |
375 | USB_INTERFACE_BOUND) |
376 | rebind = 1; |
377 | } |
378 | - if (rebind) |
379 | + if (ret == 0 && rebind) |
380 | usb_rebind_intf(cintf); |
381 | } |
382 | } |
383 | diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c |
384 | index 765adf1..58d5729 100644 |
385 | --- a/fs/cifs/readdir.c |
386 | +++ b/fs/cifs/readdir.c |
387 | @@ -762,14 +762,15 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon, |
388 | rc)); |
389 | return rc; |
390 | } |
391 | + cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile); |
392 | } |
393 | |
394 | while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) && |
395 | (rc == 0) && !cifsFile->srch_inf.endOfSearch) { |
396 | cFYI(1, ("calling findnext2")); |
397 | - cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile); |
398 | rc = CIFSFindNext(xid, pTcon, cifsFile->netfid, |
399 | &cifsFile->srch_inf); |
400 | + cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile); |
401 | if (rc) |
402 | return -ENOENT; |
403 | } |
404 | diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c |
405 | index a78c6b4..11a49ce 100644 |
406 | --- a/fs/ext2/dir.c |
407 | +++ b/fs/ext2/dir.c |
408 | @@ -103,7 +103,7 @@ static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len) |
409 | return err; |
410 | } |
411 | |
412 | -static void ext2_check_page(struct page *page) |
413 | +static void ext2_check_page(struct page *page, int quiet) |
414 | { |
415 | struct inode *dir = page->mapping->host; |
416 | struct super_block *sb = dir->i_sb; |
417 | @@ -146,10 +146,10 @@ out: |
418 | /* Too bad, we had an error */ |
419 | |
420 | Ebadsize: |
421 | - ext2_error(sb, "ext2_check_page", |
422 | - "size of directory #%lu is not a multiple of chunk size", |
423 | - dir->i_ino |
424 | - ); |
425 | + if (!quiet) |
426 | + ext2_error(sb, __func__, |
427 | + "size of directory #%lu is not a multiple " |
428 | + "of chunk size", dir->i_ino); |
429 | goto fail; |
430 | Eshort: |
431 | error = "rec_len is smaller than minimal"; |
432 | @@ -166,32 +166,36 @@ Espan: |
433 | Einumber: |
434 | error = "inode out of bounds"; |
435 | bad_entry: |
436 | - ext2_error (sb, "ext2_check_page", "bad entry in directory #%lu: %s - " |
437 | - "offset=%lu, inode=%lu, rec_len=%d, name_len=%d", |
438 | - dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs, |
439 | - (unsigned long) le32_to_cpu(p->inode), |
440 | - rec_len, p->name_len); |
441 | + if (!quiet) |
442 | + ext2_error(sb, __func__, "bad entry in directory #%lu: : %s - " |
443 | + "offset=%lu, inode=%lu, rec_len=%d, name_len=%d", |
444 | + dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs, |
445 | + (unsigned long) le32_to_cpu(p->inode), |
446 | + rec_len, p->name_len); |
447 | goto fail; |
448 | Eend: |
449 | - p = (ext2_dirent *)(kaddr + offs); |
450 | - ext2_error (sb, "ext2_check_page", |
451 | - "entry in directory #%lu spans the page boundary" |
452 | - "offset=%lu, inode=%lu", |
453 | - dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs, |
454 | - (unsigned long) le32_to_cpu(p->inode)); |
455 | + if (!quiet) { |
456 | + p = (ext2_dirent *)(kaddr + offs); |
457 | + ext2_error(sb, "ext2_check_page", |
458 | + "entry in directory #%lu spans the page boundary" |
459 | + "offset=%lu, inode=%lu", |
460 | + dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs, |
461 | + (unsigned long) le32_to_cpu(p->inode)); |
462 | + } |
463 | fail: |
464 | SetPageChecked(page); |
465 | SetPageError(page); |
466 | } |
467 | |
468 | -static struct page * ext2_get_page(struct inode *dir, unsigned long n) |
469 | +static struct page * ext2_get_page(struct inode *dir, unsigned long n, |
470 | + int quiet) |
471 | { |
472 | struct address_space *mapping = dir->i_mapping; |
473 | struct page *page = read_mapping_page(mapping, n, NULL); |
474 | if (!IS_ERR(page)) { |
475 | kmap(page); |
476 | if (!PageChecked(page)) |
477 | - ext2_check_page(page); |
478 | + ext2_check_page(page, quiet); |
479 | if (PageError(page)) |
480 | goto fail; |
481 | } |
482 | @@ -292,7 +296,7 @@ ext2_readdir (struct file * filp, void * dirent, filldir_t filldir) |
483 | for ( ; n < npages; n++, offset = 0) { |
484 | char *kaddr, *limit; |
485 | ext2_dirent *de; |
486 | - struct page *page = ext2_get_page(inode, n); |
487 | + struct page *page = ext2_get_page(inode, n, 0); |
488 | |
489 | if (IS_ERR(page)) { |
490 | ext2_error(sb, __func__, |
491 | @@ -361,6 +365,7 @@ struct ext2_dir_entry_2 * ext2_find_entry (struct inode * dir, |
492 | struct page *page = NULL; |
493 | struct ext2_inode_info *ei = EXT2_I(dir); |
494 | ext2_dirent * de; |
495 | + int dir_has_error = 0; |
496 | |
497 | if (npages == 0) |
498 | goto out; |
499 | @@ -374,7 +379,7 @@ struct ext2_dir_entry_2 * ext2_find_entry (struct inode * dir, |
500 | n = start; |
501 | do { |
502 | char *kaddr; |
503 | - page = ext2_get_page(dir, n); |
504 | + page = ext2_get_page(dir, n, dir_has_error); |
505 | if (!IS_ERR(page)) { |
506 | kaddr = page_address(page); |
507 | de = (ext2_dirent *) kaddr; |
508 | @@ -391,7 +396,9 @@ struct ext2_dir_entry_2 * ext2_find_entry (struct inode * dir, |
509 | de = ext2_next_entry(de); |
510 | } |
511 | ext2_put_page(page); |
512 | - } |
513 | + } else |
514 | + dir_has_error = 1; |
515 | + |
516 | if (++n >= npages) |
517 | n = 0; |
518 | /* next page is past the blocks we've got */ |
519 | @@ -414,7 +421,7 @@ found: |
520 | |
521 | struct ext2_dir_entry_2 * ext2_dotdot (struct inode *dir, struct page **p) |
522 | { |
523 | - struct page *page = ext2_get_page(dir, 0); |
524 | + struct page *page = ext2_get_page(dir, 0, 0); |
525 | ext2_dirent *de = NULL; |
526 | |
527 | if (!IS_ERR(page)) { |
528 | @@ -487,7 +494,7 @@ int ext2_add_link (struct dentry *dentry, struct inode *inode) |
529 | for (n = 0; n <= npages; n++) { |
530 | char *dir_end; |
531 | |
532 | - page = ext2_get_page(dir, n); |
533 | + page = ext2_get_page(dir, n, 0); |
534 | err = PTR_ERR(page); |
535 | if (IS_ERR(page)) |
536 | goto out; |
537 | @@ -655,14 +662,17 @@ int ext2_empty_dir (struct inode * inode) |
538 | { |
539 | struct page *page = NULL; |
540 | unsigned long i, npages = dir_pages(inode); |
541 | + int dir_has_error = 0; |
542 | |
543 | for (i = 0; i < npages; i++) { |
544 | char *kaddr; |
545 | ext2_dirent * de; |
546 | - page = ext2_get_page(inode, i); |
547 | + page = ext2_get_page(inode, i, dir_has_error); |
548 | |
549 | - if (IS_ERR(page)) |
550 | + if (IS_ERR(page)) { |
551 | + dir_has_error = 1; |
552 | continue; |
553 | + } |
554 | |
555 | kaddr = page_address(page); |
556 | de = (ext2_dirent *)kaddr; |
557 | diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c |
558 | index 2eea96e..1b80f1c 100644 |
559 | --- a/fs/ext3/dir.c |
560 | +++ b/fs/ext3/dir.c |
561 | @@ -102,6 +102,7 @@ static int ext3_readdir(struct file * filp, |
562 | int err; |
563 | struct inode *inode = filp->f_path.dentry->d_inode; |
564 | int ret = 0; |
565 | + int dir_has_error = 0; |
566 | |
567 | sb = inode->i_sb; |
568 | |
569 | @@ -148,9 +149,12 @@ static int ext3_readdir(struct file * filp, |
570 | * of recovering data when there's a bad sector |
571 | */ |
572 | if (!bh) { |
573 | - ext3_error (sb, "ext3_readdir", |
574 | - "directory #%lu contains a hole at offset %lu", |
575 | - inode->i_ino, (unsigned long)filp->f_pos); |
576 | + if (!dir_has_error) { |
577 | + ext3_error(sb, __func__, "directory #%lu " |
578 | + "contains a hole at offset %lld", |
579 | + inode->i_ino, filp->f_pos); |
580 | + dir_has_error = 1; |
581 | + } |
582 | /* corrupt size? Maybe no more blocks to read */ |
583 | if (filp->f_pos > inode->i_blocks << 9) |
584 | break; |
585 | diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c |
586 | index ec8e33b..d1d6487 100644 |
587 | --- a/fs/ext4/dir.c |
588 | +++ b/fs/ext4/dir.c |
589 | @@ -102,6 +102,7 @@ static int ext4_readdir(struct file * filp, |
590 | int err; |
591 | struct inode *inode = filp->f_path.dentry->d_inode; |
592 | int ret = 0; |
593 | + int dir_has_error = 0; |
594 | |
595 | sb = inode->i_sb; |
596 | |
597 | @@ -148,9 +149,13 @@ static int ext4_readdir(struct file * filp, |
598 | * of recovering data when there's a bad sector |
599 | */ |
600 | if (!bh) { |
601 | - ext4_error (sb, "ext4_readdir", |
602 | - "directory #%lu contains a hole at offset %lu", |
603 | - inode->i_ino, (unsigned long)filp->f_pos); |
604 | + if (!dir_has_error) { |
605 | + ext4_error(sb, __func__, "directory #%lu " |
606 | + "contains a hole at offset %Lu", |
607 | + inode->i_ino, |
608 | + (unsigned long long) filp->f_pos); |
609 | + dir_has_error = 1; |
610 | + } |
611 | /* corrupt size? Maybe no more blocks to read */ |
612 | if (filp->f_pos > inode->i_blocks << 9) |
613 | break; |
614 | diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c |
615 | index 73d1891..f3ada04 100644 |
616 | --- a/fs/proc/task_mmu.c |
617 | +++ b/fs/proc/task_mmu.c |
618 | @@ -198,11 +198,8 @@ static int do_maps_open(struct inode *inode, struct file *file, |
619 | return ret; |
620 | } |
621 | |
622 | -static int show_map(struct seq_file *m, void *v) |
623 | +static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) |
624 | { |
625 | - struct proc_maps_private *priv = m->private; |
626 | - struct task_struct *task = priv->task; |
627 | - struct vm_area_struct *vma = v; |
628 | struct mm_struct *mm = vma->vm_mm; |
629 | struct file *file = vma->vm_file; |
630 | int flags = vma->vm_flags; |
631 | @@ -210,9 +207,6 @@ static int show_map(struct seq_file *m, void *v) |
632 | dev_t dev = 0; |
633 | int len; |
634 | |
635 | - if (maps_protect && !ptrace_may_access(task, PTRACE_MODE_READ)) |
636 | - return -EACCES; |
637 | - |
638 | if (file) { |
639 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; |
640 | dev = inode->i_sb->s_dev; |
641 | @@ -257,6 +251,18 @@ static int show_map(struct seq_file *m, void *v) |
642 | } |
643 | } |
644 | seq_putc(m, '\n'); |
645 | +} |
646 | + |
647 | +static int show_map(struct seq_file *m, void *v) |
648 | +{ |
649 | + struct vm_area_struct *vma = v; |
650 | + struct proc_maps_private *priv = m->private; |
651 | + struct task_struct *task = priv->task; |
652 | + |
653 | + if (maps_protect && !ptrace_may_access(task, PTRACE_MODE_READ)) |
654 | + return -EACCES; |
655 | + |
656 | + show_map_vma(m, vma); |
657 | |
658 | if (m->count < m->size) /* vma is copied successfully */ |
659 | m->version = (vma != get_gate_vma(task))? vma->vm_start: 0; |
660 | @@ -367,23 +373,25 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, |
661 | |
662 | static int show_smap(struct seq_file *m, void *v) |
663 | { |
664 | + struct proc_maps_private *priv = m->private; |
665 | + struct task_struct *task = priv->task; |
666 | struct vm_area_struct *vma = v; |
667 | struct mem_size_stats mss; |
668 | - int ret; |
669 | struct mm_walk smaps_walk = { |
670 | .pmd_entry = smaps_pte_range, |
671 | .mm = vma->vm_mm, |
672 | .private = &mss, |
673 | }; |
674 | |
675 | + if (maps_protect && !ptrace_may_access(task, PTRACE_MODE_READ)) |
676 | + return -EACCES; |
677 | + |
678 | memset(&mss, 0, sizeof mss); |
679 | mss.vma = vma; |
680 | if (vma->vm_mm && !is_vm_hugetlb_page(vma)) |
681 | walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk); |
682 | |
683 | - ret = show_map(m, v); |
684 | - if (ret) |
685 | - return ret; |
686 | + show_map_vma(m, vma); |
687 | |
688 | seq_printf(m, |
689 | "Size: %8lu kB\n" |
690 | @@ -405,7 +413,9 @@ static int show_smap(struct seq_file *m, void *v) |
691 | mss.referenced >> 10, |
692 | mss.swap >> 10); |
693 | |
694 | - return ret; |
695 | + if (m->count < m->size) /* vma is copied successfully */ |
696 | + m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0; |
697 | + return 0; |
698 | } |
699 | |
700 | static const struct seq_operations proc_pid_smaps_op = { |
701 | diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h |
702 | index 8385d43..81365b3 100644 |
703 | --- a/kernel/sched_stats.h |
704 | +++ b/kernel/sched_stats.h |
705 | @@ -9,7 +9,7 @@ |
706 | static int show_schedstat(struct seq_file *seq, void *v) |
707 | { |
708 | int cpu; |
709 | - int mask_len = NR_CPUS/32 * 9; |
710 | + int mask_len = (NR_CPUS/32 + 1) * 9; |
711 | char *mask_str = kmalloc(mask_len, GFP_KERNEL); |
712 | |
713 | if (mask_str == NULL) |
714 | diff --git a/mm/rmap.c b/mm/rmap.c |
715 | index 0383acf..e8d639b 100644 |
716 | --- a/mm/rmap.c |
717 | +++ b/mm/rmap.c |
718 | @@ -55,7 +55,33 @@ |
719 | |
720 | struct kmem_cache *anon_vma_cachep; |
721 | |
722 | -/* This must be called under the mmap_sem. */ |
723 | +/** |
724 | + * anon_vma_prepare - attach an anon_vma to a memory region |
725 | + * @vma: the memory region in question |
726 | + * |
727 | + * This makes sure the memory mapping described by 'vma' has |
728 | + * an 'anon_vma' attached to it, so that we can associate the |
729 | + * anonymous pages mapped into it with that anon_vma. |
730 | + * |
731 | + * The common case will be that we already have one, but if |
732 | + * if not we either need to find an adjacent mapping that we |
733 | + * can re-use the anon_vma from (very common when the only |
734 | + * reason for splitting a vma has been mprotect()), or we |
735 | + * allocate a new one. |
736 | + * |
737 | + * Anon-vma allocations are very subtle, because we may have |
738 | + * optimistically looked up an anon_vma in page_lock_anon_vma() |
739 | + * and that may actually touch the spinlock even in the newly |
740 | + * allocated vma (it depends on RCU to make sure that the |
741 | + * anon_vma isn't actually destroyed). |
742 | + * |
743 | + * As a result, we need to do proper anon_vma locking even |
744 | + * for the new allocation. At the same time, we do not want |
745 | + * to do any locking for the common case of already having |
746 | + * an anon_vma. |
747 | + * |
748 | + * This must be called with the mmap_sem held for reading. |
749 | + */ |
750 | int anon_vma_prepare(struct vm_area_struct *vma) |
751 | { |
752 | struct anon_vma *anon_vma = vma->anon_vma; |
753 | @@ -63,20 +89,17 @@ int anon_vma_prepare(struct vm_area_struct *vma) |
754 | might_sleep(); |
755 | if (unlikely(!anon_vma)) { |
756 | struct mm_struct *mm = vma->vm_mm; |
757 | - struct anon_vma *allocated, *locked; |
758 | + struct anon_vma *allocated; |
759 | |
760 | anon_vma = find_mergeable_anon_vma(vma); |
761 | - if (anon_vma) { |
762 | - allocated = NULL; |
763 | - locked = anon_vma; |
764 | - spin_lock(&locked->lock); |
765 | - } else { |
766 | + allocated = NULL; |
767 | + if (!anon_vma) { |
768 | anon_vma = anon_vma_alloc(); |
769 | if (unlikely(!anon_vma)) |
770 | return -ENOMEM; |
771 | allocated = anon_vma; |
772 | - locked = NULL; |
773 | } |
774 | + spin_lock(&anon_vma->lock); |
775 | |
776 | /* page_table_lock to protect against threads */ |
777 | spin_lock(&mm->page_table_lock); |
778 | @@ -87,8 +110,7 @@ int anon_vma_prepare(struct vm_area_struct *vma) |
779 | } |
780 | spin_unlock(&mm->page_table_lock); |
781 | |
782 | - if (locked) |
783 | - spin_unlock(&locked->lock); |
784 | + spin_unlock(&anon_vma->lock); |
785 | if (unlikely(allocated)) |
786 | anon_vma_free(allocated); |
787 | } |
788 | diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c |
789 | index 5a955c4..7eb0b61 100644 |
790 | --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c |
791 | +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c |
792 | @@ -150,10 +150,12 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum, |
793 | const struct net_device *out, |
794 | int (*okfn)(struct sk_buff *)) |
795 | { |
796 | +#if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE) |
797 | /* Previously seen (loopback)? Ignore. Do this before |
798 | fragment check. */ |
799 | if (skb->nfct) |
800 | return NF_ACCEPT; |
801 | +#endif |
802 | |
803 | /* Gather fragments. */ |
804 | if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { |
805 | diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c |
806 | index ffeaffc..8303e4b 100644 |
807 | --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c |
808 | +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c |
809 | @@ -742,6 +742,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, |
810 | *obj = kmalloc(sizeof(struct snmp_object) + len, |
811 | GFP_ATOMIC); |
812 | if (*obj == NULL) { |
813 | + kfree(p); |
814 | kfree(id); |
815 | if (net_ratelimit()) |
816 | printk("OOM in bsalg (%d)\n", __LINE__); |
817 | diff --git a/net/netfilter/xt_iprange.c b/net/netfilter/xt_iprange.c |
818 | index c63e933..4b5741b 100644 |
819 | --- a/net/netfilter/xt_iprange.c |
820 | +++ b/net/netfilter/xt_iprange.c |
821 | @@ -67,7 +67,7 @@ iprange_mt4(const struct sk_buff *skb, const struct net_device *in, |
822 | if (info->flags & IPRANGE_SRC) { |
823 | m = ntohl(iph->saddr) < ntohl(info->src_min.ip); |
824 | m |= ntohl(iph->saddr) > ntohl(info->src_max.ip); |
825 | - m ^= info->flags & IPRANGE_SRC_INV; |
826 | + m ^= !!(info->flags & IPRANGE_SRC_INV); |
827 | if (m) { |
828 | pr_debug("src IP " NIPQUAD_FMT " NOT in range %s" |
829 | NIPQUAD_FMT "-" NIPQUAD_FMT "\n", |
830 | @@ -81,7 +81,7 @@ iprange_mt4(const struct sk_buff *skb, const struct net_device *in, |
831 | if (info->flags & IPRANGE_DST) { |
832 | m = ntohl(iph->daddr) < ntohl(info->dst_min.ip); |
833 | m |= ntohl(iph->daddr) > ntohl(info->dst_max.ip); |
834 | - m ^= info->flags & IPRANGE_DST_INV; |
835 | + m ^= !!(info->flags & IPRANGE_DST_INV); |
836 | if (m) { |
837 | pr_debug("dst IP " NIPQUAD_FMT " NOT in range %s" |
838 | NIPQUAD_FMT "-" NIPQUAD_FMT "\n", |
839 | @@ -123,14 +123,14 @@ iprange_mt6(const struct sk_buff *skb, const struct net_device *in, |
840 | if (info->flags & IPRANGE_SRC) { |
841 | m = iprange_ipv6_sub(&iph->saddr, &info->src_min.in6) < 0; |
842 | m |= iprange_ipv6_sub(&iph->saddr, &info->src_max.in6) > 0; |
843 | - m ^= info->flags & IPRANGE_SRC_INV; |
844 | + m ^= !!(info->flags & IPRANGE_SRC_INV); |
845 | if (m) |
846 | return false; |
847 | } |
848 | if (info->flags & IPRANGE_DST) { |
849 | m = iprange_ipv6_sub(&iph->daddr, &info->dst_min.in6) < 0; |
850 | m |= iprange_ipv6_sub(&iph->daddr, &info->dst_max.in6) > 0; |
851 | - m ^= info->flags & IPRANGE_DST_INV; |
852 | + m ^= !!(info->flags & IPRANGE_DST_INV); |
853 | if (m) |
854 | return false; |
855 | } |