Magellan Linux

Contents of /trunk/kernel26-alx/patches-2.6.23-r1/0109-2.6.23.10-all-fixes.patch

Parent Directory | Revision Log


Revision 658 - (show annotations) (download)
Mon Jun 23 21:39:39 2008 UTC (15 years, 10 months ago) by niro
File size: 67427 byte(s)
2.6.23-alx-r1: new default as we fix the via epia clocksource=tsc quirks
-linux-2.6.23.17
-fbcondecor-0.9.4
-squashfs-3.3
-unionfs-2.3.3
-ipw3945-1.2.2
-mptbase-vmware fix

1 diff --git a/arch/i386/boot/pmjump.S b/arch/i386/boot/pmjump.S
2 index 2e55923..26baeab 100644
3 --- a/arch/i386/boot/pmjump.S
4 +++ b/arch/i386/boot/pmjump.S
5 @@ -31,14 +31,14 @@ protected_mode_jump:
6 xorl %ebx, %ebx # Flag to indicate this is a boot
7 movl %edx, %esi # Pointer to boot_params table
8 movl %eax, 2f # Patch ljmpl instruction
9 - jmp 1f # Short jump to flush instruction q.
10
11 -1:
12 movw $__BOOT_DS, %cx
13
14 movl %cr0, %edx
15 orb $1, %dl # Protected mode (PE) bit
16 movl %edx, %cr0
17 + jmp 1f # Short jump to serialize on 386/486
18 +1:
19
20 movw %cx, %ds
21 movw %cx, %es
22 diff --git a/crypto/algapi.c b/crypto/algapi.c
23 index 38aa9e9..3798ebd 100644
24 --- a/crypto/algapi.c
25 +++ b/crypto/algapi.c
26 @@ -98,6 +98,9 @@ static void crypto_remove_spawn(struct crypto_spawn *spawn,
27 return;
28
29 inst->alg.cra_flags |= CRYPTO_ALG_DEAD;
30 + if (hlist_unhashed(&inst->list))
31 + return;
32 +
33 if (!tmpl || !crypto_tmpl_get(tmpl))
34 return;
35
36 @@ -333,9 +336,6 @@ int crypto_register_instance(struct crypto_template *tmpl,
37 LIST_HEAD(list);
38 int err = -EINVAL;
39
40 - if (inst->alg.cra_destroy)
41 - goto err;
42 -
43 err = crypto_check_alg(&inst->alg);
44 if (err)
45 goto err;
46 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
47 index d684208..d0a40e7 100644
48 --- a/drivers/ata/ahci.c
49 +++ b/drivers/ata/ahci.c
50 @@ -1432,7 +1432,7 @@ static void ahci_port_intr(struct ata_port *ap)
51 struct ata_eh_info *ehi = &ap->eh_info;
52 struct ahci_port_priv *pp = ap->private_data;
53 u32 status, qc_active;
54 - int rc, known_irq = 0;
55 + int rc;
56
57 status = readl(port_mmio + PORT_IRQ_STAT);
58 writel(status, port_mmio + PORT_IRQ_STAT);
59 @@ -1448,74 +1448,11 @@ static void ahci_port_intr(struct ata_port *ap)
60 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
61
62 rc = ata_qc_complete_multiple(ap, qc_active, NULL);
63 - if (rc > 0)
64 - return;
65 if (rc < 0) {
66 ehi->err_mask |= AC_ERR_HSM;
67 ehi->action |= ATA_EH_SOFTRESET;
68 ata_port_freeze(ap);
69 - return;
70 - }
71 -
72 - /* hmmm... a spurious interupt */
73 -
74 - /* if !NCQ, ignore. No modern ATA device has broken HSM
75 - * implementation for non-NCQ commands.
76 - */
77 - if (!ap->sactive)
78 - return;
79 -
80 - if (status & PORT_IRQ_D2H_REG_FIS) {
81 - if (!pp->ncq_saw_d2h)
82 - ata_port_printk(ap, KERN_INFO,
83 - "D2H reg with I during NCQ, "
84 - "this message won't be printed again\n");
85 - pp->ncq_saw_d2h = 1;
86 - known_irq = 1;
87 - }
88 -
89 - if (status & PORT_IRQ_DMAS_FIS) {
90 - if (!pp->ncq_saw_dmas)
91 - ata_port_printk(ap, KERN_INFO,
92 - "DMAS FIS during NCQ, "
93 - "this message won't be printed again\n");
94 - pp->ncq_saw_dmas = 1;
95 - known_irq = 1;
96 - }
97 -
98 - if (status & PORT_IRQ_SDB_FIS) {
99 - const __le32 *f = pp->rx_fis + RX_FIS_SDB;
100 -
101 - if (le32_to_cpu(f[1])) {
102 - /* SDB FIS containing spurious completions
103 - * might be dangerous, whine and fail commands
104 - * with HSM violation. EH will turn off NCQ
105 - * after several such failures.
106 - */
107 - ata_ehi_push_desc(ehi,
108 - "spurious completions during NCQ "
109 - "issue=0x%x SAct=0x%x FIS=%08x:%08x",
110 - readl(port_mmio + PORT_CMD_ISSUE),
111 - readl(port_mmio + PORT_SCR_ACT),
112 - le32_to_cpu(f[0]), le32_to_cpu(f[1]));
113 - ehi->err_mask |= AC_ERR_HSM;
114 - ehi->action |= ATA_EH_SOFTRESET;
115 - ata_port_freeze(ap);
116 - } else {
117 - if (!pp->ncq_saw_sdb)
118 - ata_port_printk(ap, KERN_INFO,
119 - "spurious SDB FIS %08x:%08x during NCQ, "
120 - "this message won't be printed again\n",
121 - le32_to_cpu(f[0]), le32_to_cpu(f[1]));
122 - pp->ncq_saw_sdb = 1;
123 - }
124 - known_irq = 1;
125 }
126 -
127 - if (!known_irq)
128 - ata_port_printk(ap, KERN_INFO, "spurious interrupt "
129 - "(irq_stat 0x%x active_tag 0x%x sactive 0x%x)\n",
130 - status, ap->active_tag, ap->sactive);
131 }
132
133 static void ahci_irq_clear(struct ata_port *ap)
134 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
135 index 78b670d..98e33f9 100644
136 --- a/drivers/ata/libata-core.c
137 +++ b/drivers/ata/libata-core.c
138 @@ -3772,6 +3772,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
139 /* Devices where NCQ should be avoided */
140 /* NCQ is slow */
141 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
142 + { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
143 /* http://thread.gmane.org/gmane.linux.ide/14907 */
144 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
145 /* NCQ is broken */
146 @@ -3790,22 +3791,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
147 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
148 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
149 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
150 - /* Drives which do spurious command completion */
151 - { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
152 - { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
153 - { "HDT722516DLA380", "V43OA96A", ATA_HORKAGE_NONCQ, },
154 - { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
155 - { "Hitachi HTS542525K9SA00", "BBFOC31P", ATA_HORKAGE_NONCQ, },
156 - { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
157 - { "WDC WD3200AAJS-00RYA0", "12.01B01", ATA_HORKAGE_NONCQ, },
158 - { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
159 - { "ST9120822AS", "3.CLF", ATA_HORKAGE_NONCQ, },
160 - { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
161 - { "ST9160821AS", "3.ALD", ATA_HORKAGE_NONCQ, },
162 - { "ST9160821AS", "3.CCD", ATA_HORKAGE_NONCQ, },
163 - { "ST3160812AS", "3.ADJ", ATA_HORKAGE_NONCQ, },
164 - { "ST980813AS", "3.ADB", ATA_HORKAGE_NONCQ, },
165 - { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
166
167 /* devices which puke on READ_NATIVE_MAX */
168 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
169 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
170 index d33aba6..3b64a99 100644
171 --- a/drivers/atm/he.c
172 +++ b/drivers/atm/he.c
173 @@ -394,6 +394,11 @@ he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
174 he_dev->atm_dev->dev_data = he_dev;
175 atm_dev->dev_data = he_dev;
176 he_dev->number = atm_dev->number;
177 +#ifdef USE_TASKLET
178 + tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
179 +#endif
180 + spin_lock_init(&he_dev->global_lock);
181 +
182 if (he_start(atm_dev)) {
183 he_stop(he_dev);
184 err = -ENODEV;
185 @@ -1173,11 +1178,6 @@ he_start(struct atm_dev *dev)
186 if ((err = he_init_irq(he_dev)) != 0)
187 return err;
188
189 -#ifdef USE_TASKLET
190 - tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
191 -#endif
192 - spin_lock_init(&he_dev->global_lock);
193 -
194 /* 4.11 enable pci bus controller state machines */
195 host_cntl |= (OUTFF_ENB | CMDFF_ENB |
196 QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
197 diff --git a/drivers/base/core.c b/drivers/base/core.c
198 index ec86d6f..fa43bc4 100644
199 --- a/drivers/base/core.c
200 +++ b/drivers/base/core.c
201 @@ -814,9 +814,10 @@ int device_add(struct device *dev)
202 error = device_add_attrs(dev);
203 if (error)
204 goto AttrsError;
205 - error = device_pm_add(dev);
206 + error = dpm_sysfs_add(dev);
207 if (error)
208 goto PMError;
209 + device_pm_add(dev);
210 error = bus_add_device(dev);
211 if (error)
212 goto BusError;
213 @@ -841,6 +842,7 @@ int device_add(struct device *dev)
214 return error;
215 BusError:
216 device_pm_remove(dev);
217 + dpm_sysfs_remove(dev);
218 PMError:
219 if (dev->bus)
220 blocking_notifier_call_chain(&dev->bus->bus_notifier,
221 diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
222 index 9caeaea..e8fdd54 100644
223 --- a/drivers/base/power/Makefile
224 +++ b/drivers/base/power/Makefile
225 @@ -1,5 +1,6 @@
226 obj-y := shutdown.o
227 -obj-$(CONFIG_PM_SLEEP) += main.o suspend.o resume.o sysfs.o
228 +obj-$(CONFIG_PM) += sysfs.o
229 +obj-$(CONFIG_PM_SLEEP) += main.o suspend.o resume.o
230 obj-$(CONFIG_PM_TRACE) += trace.o
231
232 ifeq ($(CONFIG_DEBUG_DRIVER),y)
233 diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
234 index eb9f38d..8a70daf 100644
235 --- a/drivers/base/power/main.c
236 +++ b/drivers/base/power/main.c
237 @@ -33,20 +33,14 @@ DEFINE_MUTEX(dpm_list_mtx);
238
239 int (*platform_enable_wakeup)(struct device *dev, int is_on);
240
241 -int device_pm_add(struct device *dev)
242 +void device_pm_add(struct device *dev)
243 {
244 - int error;
245 -
246 pr_debug("PM: Adding info for %s:%s\n",
247 dev->bus ? dev->bus->name : "No Bus",
248 kobject_name(&dev->kobj));
249 mutex_lock(&dpm_list_mtx);
250 list_add_tail(&dev->power.entry, &dpm_active);
251 - error = dpm_sysfs_add(dev);
252 - if (error)
253 - list_del(&dev->power.entry);
254 mutex_unlock(&dpm_list_mtx);
255 - return error;
256 }
257
258 void device_pm_remove(struct device *dev)
259 diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
260 index 8ba0830..6c4a19b 100644
261 --- a/drivers/base/power/power.h
262 +++ b/drivers/base/power/power.h
263 @@ -34,14 +34,26 @@ static inline struct dev_pm_info * to_pm_info(struct list_head * entry)
264 return container_of(entry, struct dev_pm_info, entry);
265 }
266
267 -static inline struct device * to_device(struct list_head * entry)
268 +static inline struct device *to_device(struct list_head *entry)
269 {
270 return container_of(to_pm_info(entry), struct device, power);
271 }
272
273 -extern int device_pm_add(struct device *);
274 +extern void device_pm_add(struct device *);
275 extern void device_pm_remove(struct device *);
276
277 +#else /* CONFIG_PM_SLEEP */
278 +
279 +static inline void device_pm_add(struct device *dev)
280 +{
281 +}
282 +
283 +static inline void device_pm_remove(struct device *dev)
284 +{
285 +}
286 +#endif
287 +
288 +#ifdef CONFIG_PM
289 /*
290 * sysfs.c
291 */
292 @@ -62,16 +74,15 @@ extern int resume_device(struct device *);
293 */
294 extern int suspend_device(struct device *, pm_message_t);
295
296 -#else /* CONFIG_PM_SLEEP */
297 -
298 +#else /* CONFIG_PM */
299
300 -static inline int device_pm_add(struct device * dev)
301 +static inline int dpm_sysfs_add(struct device *dev)
302 {
303 return 0;
304 }
305 -static inline void device_pm_remove(struct device * dev)
306 -{
307
308 +static inline void dpm_sysfs_remove(struct device *dev)
309 +{
310 }
311
312 #endif
313 diff --git a/drivers/block/rd.c b/drivers/block/rd.c
314 index 65150b5..b022942 100644
315 --- a/drivers/block/rd.c
316 +++ b/drivers/block/rd.c
317 @@ -189,6 +189,18 @@ static int ramdisk_set_page_dirty(struct page *page)
318 return 0;
319 }
320
321 +/*
322 + * releasepage is called by pagevec_strip/try_to_release_page if
323 + * buffers_heads_over_limit is true. Without a releasepage function
324 + * try_to_free_buffers is called instead. That can unset the dirty
325 + * bit of our ram disk pages, which will be eventually freed, even
326 + * if the page is still in use.
327 + */
328 +static int ramdisk_releasepage(struct page *page, gfp_t dummy)
329 +{
330 + return 0;
331 +}
332 +
333 static const struct address_space_operations ramdisk_aops = {
334 .readpage = ramdisk_readpage,
335 .prepare_write = ramdisk_prepare_write,
336 @@ -196,6 +208,7 @@ static const struct address_space_operations ramdisk_aops = {
337 .writepage = ramdisk_writepage,
338 .set_page_dirty = ramdisk_set_page_dirty,
339 .writepages = ramdisk_writepages,
340 + .releasepage = ramdisk_releasepage,
341 };
342
343 static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec, sector_t sector,
344 diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
345 index ec116df..72183bd 100644
346 --- a/drivers/char/apm-emulation.c
347 +++ b/drivers/char/apm-emulation.c
348 @@ -295,7 +295,6 @@ static int
349 apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
350 {
351 struct apm_user *as = filp->private_data;
352 - unsigned long flags;
353 int err = -EINVAL;
354
355 if (!as->suser || !as->writer)
356 @@ -331,10 +330,16 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
357 * Wait for the suspend/resume to complete. If there
358 * are pending acknowledges, we wait here for them.
359 */
360 - flags = current->flags;
361 + freezer_do_not_count();
362
363 wait_event(apm_suspend_waitqueue,
364 as->suspend_state == SUSPEND_DONE);
365 +
366 + /*
367 + * Since we are waiting until the suspend is done, the
368 + * try_to_freeze() in freezer_count() will not trigger
369 + */
370 + freezer_count();
371 } else {
372 as->suspend_state = SUSPEND_WAIT;
373 mutex_unlock(&state_lock);
374 @@ -362,14 +367,10 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
375 * Wait for the suspend/resume to complete. If there
376 * are pending acknowledges, we wait here for them.
377 */
378 - flags = current->flags;
379 -
380 - wait_event_interruptible(apm_suspend_waitqueue,
381 + wait_event_freezable(apm_suspend_waitqueue,
382 as->suspend_state == SUSPEND_DONE);
383 }
384
385 - current->flags = flags;
386 -
387 mutex_lock(&state_lock);
388 err = as->suspend_result;
389 as->suspend_state = SUSPEND_NONE;
390 diff --git a/drivers/char/random.c b/drivers/char/random.c
391 index af274e5..a9178e3 100644
392 --- a/drivers/char/random.c
393 +++ b/drivers/char/random.c
394 @@ -1494,7 +1494,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
395 seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
396 seq += keyptr->count;
397
398 - seq += ktime_get_real().tv64;
399 + seq += ktime_to_ns(ktime_get_real());
400
401 return seq;
402 }
403 @@ -1556,7 +1556,7 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
404 * overlaps less than one time per MSL (2 minutes).
405 * Choosing a clock of 64 ns period is OK. (period of 274 s)
406 */
407 - seq += ktime_get_real().tv64 >> 6;
408 + seq += ktime_to_ns(ktime_get_real()) >> 6;
409 #if 0
410 printk("init_seq(%lx, %lx, %d, %d) = %d\n",
411 saddr, daddr, sport, dport, seq);
412 @@ -1616,7 +1616,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
413 seq = half_md4_transform(hash, keyptr->secret);
414 seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
415
416 - seq += ktime_get_real().tv64;
417 + seq += ktime_to_ns(ktime_get_real());
418 seq &= (1ull << 48) - 1;
419 #if 0
420 printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n",
421 diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
422 index 2c5f11a..a810ff8 100644
423 --- a/drivers/input/mouse/alps.c
424 +++ b/drivers/input/mouse/alps.c
425 @@ -53,6 +53,7 @@ static const struct alps_model_info alps_model_data[] = {
426 { { 0x20, 0x02, 0x0e }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */
427 { { 0x22, 0x02, 0x0a }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
428 { { 0x22, 0x02, 0x14 }, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */
429 + { { 0x73, 0x02, 0x50 }, 0xcf, 0xff, ALPS_FW_BK_1 } /* Dell Vostro 1400 */
430 };
431
432 /*
433 diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
434 index 4910bca..f67239a 100644
435 --- a/drivers/isdn/i4l/isdn_common.c
436 +++ b/drivers/isdn/i4l/isdn_common.c
437 @@ -1515,6 +1515,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
438 if (copy_from_user(&iocts, argp,
439 sizeof(isdn_ioctl_struct)))
440 return -EFAULT;
441 + iocts.drvid[sizeof(iocts.drvid)-1] = 0;
442 if (strlen(iocts.drvid)) {
443 if ((p = strchr(iocts.drvid, ',')))
444 *p = 0;
445 @@ -1599,6 +1600,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
446 if (copy_from_user(&iocts, argp,
447 sizeof(isdn_ioctl_struct)))
448 return -EFAULT;
449 + iocts.drvid[sizeof(iocts.drvid)-1] = 0;
450 if (strlen(iocts.drvid)) {
451 drvidx = -1;
452 for (i = 0; i < ISDN_MAX_DRIVERS; i++)
453 @@ -1643,7 +1645,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
454 } else {
455 p = (char __user *) iocts.arg;
456 for (i = 0; i < 10; i++) {
457 - sprintf(bname, "%s%s",
458 + snprintf(bname, sizeof(bname), "%s%s",
459 strlen(dev->drv[drvidx]->msn2eaz[i]) ?
460 dev->drv[drvidx]->msn2eaz[i] : "_",
461 (i < 9) ? "," : "\0");
462 @@ -1673,6 +1675,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
463 char *p;
464 if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct)))
465 return -EFAULT;
466 + iocts.drvid[sizeof(iocts.drvid)-1] = 0;
467 if (strlen(iocts.drvid)) {
468 if ((p = strchr(iocts.drvid, ',')))
469 *p = 0;
470 diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
471 index aa83277..75e1423 100644
472 --- a/drivers/isdn/i4l/isdn_net.c
473 +++ b/drivers/isdn/i4l/isdn_net.c
474 @@ -2126,7 +2126,7 @@ isdn_net_find_icall(int di, int ch, int idx, setup_parm *setup)
475 u_long flags;
476 isdn_net_dev *p;
477 isdn_net_phone *n;
478 - char nr[32];
479 + char nr[ISDN_MSNLEN];
480 char *my_eaz;
481
482 /* Search name in netdev-chain */
483 @@ -2135,7 +2135,7 @@ isdn_net_find_icall(int di, int ch, int idx, setup_parm *setup)
484 nr[1] = '\0';
485 printk(KERN_INFO "isdn_net: Incoming call without OAD, assuming '0'\n");
486 } else
487 - strcpy(nr, setup->phone);
488 + strlcpy(nr, setup->phone, ISDN_MSNLEN);
489 si1 = (int) setup->si1;
490 si2 = (int) setup->si2;
491 if (!setup->eazmsn[0]) {
492 @@ -2802,7 +2802,7 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg)
493 chidx = -1;
494 }
495 }
496 - strcpy(lp->msn, cfg->eaz);
497 + strlcpy(lp->msn, cfg->eaz, sizeof(lp->msn));
498 lp->pre_device = drvidx;
499 lp->pre_channel = chidx;
500 lp->onhtime = cfg->onhtime;
501 @@ -2951,7 +2951,7 @@ isdn_net_addphone(isdn_net_ioctl_phone * phone)
502 if (p) {
503 if (!(n = kmalloc(sizeof(isdn_net_phone), GFP_KERNEL)))
504 return -ENOMEM;
505 - strcpy(n->num, phone->phone);
506 + strlcpy(n->num, phone->phone, sizeof(n->num));
507 n->next = p->local->phone[phone->outgoing & 1];
508 p->local->phone[phone->outgoing & 1] = n;
509 return 0;
510 diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
511 index cd05579..b58fdf3 100644
512 --- a/drivers/kvm/kvm_main.c
513 +++ b/drivers/kvm/kvm_main.c
514 @@ -273,6 +273,11 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
515 }
516 }
517
518 + /* Uniprocessor kernel does not respect cpus in first_cpu. So
519 + * do not go there if we have nothing to do. */
520 + if (cpus_empty(cpus))
521 + return;
522 +
523 /*
524 * We really want smp_call_function_mask() here. But that's not
525 * available, so ipi all cpus in parallel and wait for them
526 @@ -1158,10 +1163,7 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
527
528 int emulate_clts(struct kvm_vcpu *vcpu)
529 {
530 - unsigned long cr0;
531 -
532 - cr0 = vcpu->cr0 & ~CR0_TS_MASK;
533 - kvm_arch_ops->set_cr0(vcpu, cr0);
534 + kvm_arch_ops->set_cr0(vcpu, vcpu->cr0 & ~X86_CR0_TS);
535 return X86EMUL_CONTINUE;
536 }
537
538 @@ -1755,8 +1757,6 @@ static int complete_pio(struct kvm_vcpu *vcpu)
539 io->count -= io->cur_count;
540 io->cur_count = 0;
541
542 - if (!io->count)
543 - kvm_arch_ops->skip_emulated_instruction(vcpu);
544 return 0;
545 }
546
547 @@ -1802,6 +1802,7 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
548
549 pio_dev = vcpu_find_pio_dev(vcpu, port);
550 if (!string) {
551 + kvm_arch_ops->skip_emulated_instruction(vcpu);
552 kvm_arch_ops->cache_regs(vcpu);
553 memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
554 kvm_arch_ops->decache_regs(vcpu);
555 @@ -1848,6 +1849,9 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
556 vcpu->run->io.count = now;
557 vcpu->pio.cur_count = now;
558
559 + if (now == count)
560 + kvm_arch_ops->skip_emulated_instruction(vcpu);
561 +
562 for (i = 0; i < nr_pages; ++i) {
563 spin_lock(&vcpu->kvm->lock);
564 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
565 diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
566 index 23965aa..56ab369 100644
567 --- a/drivers/kvm/mmu.c
568 +++ b/drivers/kvm/mmu.c
569 @@ -1066,6 +1066,7 @@ int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
570 destroy_kvm_mmu(vcpu);
571 return init_kvm_mmu(vcpu);
572 }
573 +EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
574
575 int kvm_mmu_load(struct kvm_vcpu *vcpu)
576 {
577 diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
578 index bc818cc..fae8cc5 100644
579 --- a/drivers/kvm/svm.c
580 +++ b/drivers/kvm/svm.c
581 @@ -506,6 +506,7 @@ static void init_vmcb(struct vmcb *vmcb)
582 */
583 /* (1ULL << INTERCEPT_SELECTIVE_CR0) | */
584 (1ULL << INTERCEPT_CPUID) |
585 + (1ULL << INTERCEPT_INVD) |
586 (1ULL << INTERCEPT_HLT) |
587 (1ULL << INTERCEPT_INVLPGA) |
588 (1ULL << INTERCEPT_IOIO_PROT) |
589 @@ -519,6 +520,7 @@ static void init_vmcb(struct vmcb *vmcb)
590 (1ULL << INTERCEPT_STGI) |
591 (1ULL << INTERCEPT_CLGI) |
592 (1ULL << INTERCEPT_SKINIT) |
593 + (1ULL << INTERCEPT_WBINVD) |
594 (1ULL << INTERCEPT_MONITOR) |
595 (1ULL << INTERCEPT_MWAIT);
596
597 @@ -1319,6 +1321,7 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
598 [SVM_EXIT_VINTR] = interrupt_window_interception,
599 /* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */
600 [SVM_EXIT_CPUID] = cpuid_interception,
601 + [SVM_EXIT_INVD] = emulate_on_interception,
602 [SVM_EXIT_HLT] = halt_interception,
603 [SVM_EXIT_INVLPG] = emulate_on_interception,
604 [SVM_EXIT_INVLPGA] = invalid_op_interception,
605 @@ -1333,6 +1336,7 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
606 [SVM_EXIT_STGI] = invalid_op_interception,
607 [SVM_EXIT_CLGI] = invalid_op_interception,
608 [SVM_EXIT_SKINIT] = invalid_op_interception,
609 + [SVM_EXIT_WBINVD] = emulate_on_interception,
610 [SVM_EXIT_MONITOR] = invalid_op_interception,
611 [SVM_EXIT_MWAIT] = invalid_op_interception,
612 };
613 diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
614 index 80628f6..916da29 100644
615 --- a/drivers/kvm/vmx.c
616 +++ b/drivers/kvm/vmx.c
617 @@ -463,6 +463,8 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
618
619 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
620 {
621 + if (vcpu->rmode.active)
622 + rflags |= IOPL_MASK | X86_EFLAGS_VM;
623 vmcs_writel(GUEST_RFLAGS, rflags);
624 }
625
626 @@ -955,6 +957,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
627 fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
628 fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
629
630 + kvm_mmu_reset_context(vcpu);
631 init_rmode_tss(vcpu->kvm);
632 }
633
634 diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
635 index 4b8a0cc..9fce95b 100644
636 --- a/drivers/kvm/x86_emulate.c
637 +++ b/drivers/kvm/x86_emulate.c
638 @@ -156,7 +156,7 @@ static u8 opcode_table[256] = {
639 static u16 twobyte_table[256] = {
640 /* 0x00 - 0x0F */
641 0, SrcMem | ModRM | DstReg, 0, 0, 0, 0, ImplicitOps, 0,
642 - 0, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
643 + ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
644 /* 0x10 - 0x1F */
645 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
646 /* 0x20 - 0x2F */
647 @@ -198,7 +198,8 @@ static u16 twobyte_table[256] = {
648 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
649 DstReg | SrcMem16 | ModRM | Mov,
650 /* 0xC0 - 0xCF */
651 - 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0, 0,
652 + 0, 0, 0, DstMem | SrcReg | ModRM | Mov, 0, 0, 0, ImplicitOps | ModRM,
653 + 0, 0, 0, 0, 0, 0, 0, 0,
654 /* 0xD0 - 0xDF */
655 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
656 /* 0xE0 - 0xEF */
657 @@ -772,6 +773,14 @@ done_prefixes:
658 case SrcMem:
659 src.bytes = (d & ByteOp) ? 1 : op_bytes;
660 srcmem_common:
661 + /*
662 + * For instructions with a ModR/M byte, switch to register
663 + * access if Mod = 3.
664 + */
665 + if ((d & ModRM) && modrm_mod == 3) {
666 + src.type = OP_REG;
667 + break;
668 + }
669 src.type = OP_MEM;
670 src.ptr = (unsigned long *)cr2;
671 if ((rc = ops->read_emulated((unsigned long)src.ptr,
672 @@ -838,6 +847,15 @@ done_prefixes:
673 dst.type = OP_MEM;
674 dst.ptr = (unsigned long *)cr2;
675 dst.bytes = (d & ByteOp) ? 1 : op_bytes;
676 + dst.val = 0;
677 + /*
678 + * For instructions with a ModR/M byte, switch to register
679 + * access if Mod = 3.
680 + */
681 + if ((d & ModRM) && modrm_mod == 3) {
682 + dst.type = OP_REG;
683 + break;
684 + }
685 if (d & BitOp) {
686 unsigned long mask = ~(dst.bytes * 8 - 1);
687
688 @@ -1048,7 +1066,7 @@ done_prefixes:
689 }
690 register_address_increment(_regs[VCPU_REGS_RSP],
691 -dst.bytes);
692 - if ((rc = ops->write_std(
693 + if ((rc = ops->write_emulated(
694 register_address(ctxt->ss_base,
695 _regs[VCPU_REGS_RSP]),
696 &dst.val, dst.bytes, ctxt)) != 0)
697 @@ -1324,6 +1342,10 @@ twobyte_insn:
698 dst.bytes = op_bytes;
699 dst.val = (d & ByteOp) ? (s8) src.val : (s16) src.val;
700 break;
701 + case 0xc3: /* movnti */
702 + dst.bytes = op_bytes;
703 + dst.val = (op_bytes == 4) ? (u32) src.val : (u64) src.val;
704 + break;
705 }
706 goto writeback;
707
708 @@ -1331,6 +1353,8 @@ twobyte_special_insn:
709 /* Disable writeback. */
710 no_wb = 1;
711 switch (b) {
712 + case 0x08: /* invd */
713 + break;
714 case 0x09: /* wbinvd */
715 break;
716 case 0x0d: /* GrpP (prefetch) */
717 diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
718 index d68796e..fcbe508 100644
719 --- a/drivers/net/forcedeth.c
720 +++ b/drivers/net/forcedeth.c
721 @@ -5280,19 +5280,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
722 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
723 np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
724 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
725 - for (i = 0; i < 5000; i++) {
726 - msleep(1);
727 - if (nv_mgmt_acquire_sema(dev)) {
728 - /* management unit setup the phy already? */
729 - if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
730 - NVREG_XMITCTL_SYNC_PHY_INIT) {
731 - /* phy is inited by mgmt unit */
732 - phyinitialized = 1;
733 - dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
734 - } else {
735 - /* we need to init the phy */
736 - }
737 - break;
738 + if (nv_mgmt_acquire_sema(dev)) {
739 + /* management unit setup the phy already? */
740 + if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
741 + NVREG_XMITCTL_SYNC_PHY_INIT) {
742 + /* phy is inited by mgmt unit */
743 + phyinitialized = 1;
744 + dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
745 + } else {
746 + /* we need to init the phy */
747 }
748 }
749 }
750 @@ -5582,6 +5578,22 @@ static struct pci_device_id pci_tbl[] = {
751 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
752 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
753 },
754 + { /* MCP79 Ethernet Controller */
755 + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
756 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
757 + },
758 + { /* MCP79 Ethernet Controller */
759 + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
760 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
761 + },
762 + { /* MCP79 Ethernet Controller */
763 + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
764 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
765 + },
766 + { /* MCP79 Ethernet Controller */
767 + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
768 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
769 + },
770 {0,},
771 };
772
773 diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
774 index 86fff8d..847d34f 100644
775 --- a/drivers/net/wireless/libertas/cmd.c
776 +++ b/drivers/net/wireless/libertas/cmd.c
777 @@ -881,6 +881,10 @@ static int wlan_cmd_mesh_access(wlan_private * priv,
778 return 0;
779 }
780
781 +/*
782 + * Note: NEVER use libertas_queue_cmd() with addtail==0 other than for
783 + * the command timer, because it does not account for queued commands.
784 + */
785 void libertas_queue_cmd(wlan_adapter * adapter, struct cmd_ctrl_node *cmdnode, u8 addtail)
786 {
787 unsigned long flags;
788 @@ -910,10 +914,11 @@ void libertas_queue_cmd(wlan_adapter * adapter, struct cmd_ctrl_node *cmdnode, u
789
790 spin_lock_irqsave(&adapter->driver_lock, flags);
791
792 - if (addtail)
793 + if (addtail) {
794 list_add_tail((struct list_head *)cmdnode,
795 &adapter->cmdpendingq);
796 - else
797 + adapter->nr_cmd_pending++;
798 + } else
799 list_add((struct list_head *)cmdnode, &adapter->cmdpendingq);
800
801 spin_unlock_irqrestore(&adapter->driver_lock, flags);
802 @@ -1400,7 +1405,6 @@ int libertas_prepare_and_send_command(wlan_private * priv,
803 cmdnode->cmdwaitqwoken = 0;
804
805 libertas_queue_cmd(adapter, cmdnode, 1);
806 - adapter->nr_cmd_pending++;
807 wake_up_interruptible(&priv->mainthread.waitq);
808
809 if (wait_option & cmd_option_waitforrsp) {
810 diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
811 index 0e3b8d0..5abf5ea 100644
812 --- a/drivers/pnp/pnpacpi/rsparser.c
813 +++ b/drivers/pnp/pnpacpi/rsparser.c
814 @@ -82,9 +82,11 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res,
815 while (!(res->irq_resource[i].flags & IORESOURCE_UNSET) &&
816 i < PNP_MAX_IRQ)
817 i++;
818 - if (i >= PNP_MAX_IRQ)
819 + if (i >= PNP_MAX_IRQ) {
820 + printk(KERN_ERR "pnpacpi: exceeded the max number of IRQ "
821 + "resources: %d \n", PNP_MAX_IRQ);
822 return;
823 -
824 + }
825 res->irq_resource[i].flags = IORESOURCE_IRQ; // Also clears _UNSET flag
826 res->irq_resource[i].flags |= irq_flags(triggering, polarity);
827 irq = acpi_register_gsi(gsi, triggering, polarity);
828 @@ -163,6 +165,9 @@ static void pnpacpi_parse_allocated_dmaresource(struct pnp_resource_table *res,
829 }
830 res->dma_resource[i].start = dma;
831 res->dma_resource[i].end = dma;
832 + } else {
833 + printk(KERN_ERR "pnpacpi: exceeded the max number of DMA "
834 + "resources: %d \n", PNP_MAX_DMA);
835 }
836 }
837
838 @@ -184,6 +189,9 @@ static void pnpacpi_parse_allocated_ioresource(struct pnp_resource_table *res,
839 }
840 res->port_resource[i].start = io;
841 res->port_resource[i].end = io + len - 1;
842 + } else {
843 + printk(KERN_ERR "pnpacpi: exceeded the max number of IO "
844 + "resources: %d \n", PNP_MAX_PORT);
845 }
846 }
847
848 @@ -207,6 +215,9 @@ static void pnpacpi_parse_allocated_memresource(struct pnp_resource_table *res,
849
850 res->mem_resource[i].start = mem;
851 res->mem_resource[i].end = mem + len - 1;
852 + } else {
853 + printk(KERN_ERR "pnpacpi: exceeded the max number of mem "
854 + "resources: %d\n", PNP_MAX_MEM);
855 }
856 }
857
858 diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
859 index 502732a..6b2197b 100644
860 --- a/drivers/scsi/dpt_i2o.c
861 +++ b/drivers/scsi/dpt_i2o.c
862 @@ -173,20 +173,20 @@ static struct pci_device_id dptids[] = {
863 };
864 MODULE_DEVICE_TABLE(pci,dptids);
865
866 -static void adpt_exit(void);
867 -
868 -static int adpt_detect(void)
869 +static int adpt_detect(struct scsi_host_template* sht)
870 {
871 struct pci_dev *pDev = NULL;
872 adpt_hba* pHba;
873
874 + adpt_init();
875 +
876 PINFO("Detecting Adaptec I2O RAID controllers...\n");
877
878 /* search for all Adatpec I2O RAID cards */
879 while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
880 if(pDev->device == PCI_DPT_DEVICE_ID ||
881 pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
882 - if(adpt_install_hba(pDev) ){
883 + if(adpt_install_hba(sht, pDev) ){
884 PERROR("Could not Init an I2O RAID device\n");
885 PERROR("Will not try to detect others.\n");
886 return hba_count-1;
887 @@ -248,33 +248,34 @@ rebuild_sys_tab:
888 }
889
890 for (pHba = hba_chain; pHba; pHba = pHba->next) {
891 - if (adpt_scsi_register(pHba) < 0) {
892 + if( adpt_scsi_register(pHba,sht) < 0){
893 adpt_i2o_delete_hba(pHba);
894 continue;
895 }
896 pHba->initialized = TRUE;
897 pHba->state &= ~DPTI_STATE_RESET;
898 - scsi_scan_host(pHba->host);
899 }
900
901 // Register our control device node
902 // nodes will need to be created in /dev to access this
903 // the nodes can not be created from within the driver
904 if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
905 - adpt_exit();
906 + adpt_i2o_sys_shutdown();
907 return 0;
908 }
909 return hba_count;
910 }
911
912
913 -static int adpt_release(adpt_hba *pHba)
914 +/*
915 + * scsi_unregister will be called AFTER we return.
916 + */
917 +static int adpt_release(struct Scsi_Host *host)
918 {
919 - struct Scsi_Host *shost = pHba->host;
920 - scsi_remove_host(shost);
921 + adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
922 // adpt_i2o_quiesce_hba(pHba);
923 adpt_i2o_delete_hba(pHba);
924 - scsi_host_put(shost);
925 + scsi_unregister(host);
926 return 0;
927 }
928
929 @@ -881,7 +882,7 @@ static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p)
930 #endif
931
932
933 -static int adpt_install_hba(struct pci_dev* pDev)
934 +static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
935 {
936
937 adpt_hba* pHba = NULL;
938 @@ -1030,6 +1031,8 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
939
940
941 mutex_lock(&adpt_configuration_lock);
942 + // scsi_unregister calls our adpt_release which
943 + // does a quiese
944 if(pHba->host){
945 free_irq(pHba->host->irq, pHba);
946 }
947 @@ -1081,6 +1084,17 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
948 }
949
950
951 +static int adpt_init(void)
952 +{
953 + printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
954 +#ifdef REBOOT_NOTIFIER
955 + register_reboot_notifier(&adpt_reboot_notifier);
956 +#endif
957 +
958 + return 0;
959 +}
960 +
961 +
962 static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
963 {
964 struct adpt_device* d;
965 @@ -2166,6 +2180,37 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
966 }
967
968
969 +static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht)
970 +{
971 + struct Scsi_Host *host = NULL;
972 +
973 + host = scsi_register(sht, sizeof(adpt_hba*));
974 + if (host == NULL) {
975 + printk ("%s: scsi_register returned NULL\n",pHba->name);
976 + return -1;
977 + }
978 + host->hostdata[0] = (unsigned long)pHba;
979 + pHba->host = host;
980 +
981 + host->irq = pHba->pDev->irq;
982 + /* no IO ports, so don't have to set host->io_port and
983 + * host->n_io_port
984 + */
985 + host->io_port = 0;
986 + host->n_io_port = 0;
987 + /* see comments in scsi_host.h */
988 + host->max_id = 16;
989 + host->max_lun = 256;
990 + host->max_channel = pHba->top_scsi_channel + 1;
991 + host->cmd_per_lun = 1;
992 + host->unique_id = (uint) pHba;
993 + host->sg_tablesize = pHba->sg_tablesize;
994 + host->can_queue = pHba->post_fifo_size;
995 +
996 + return 0;
997 +}
998 +
999 +
1000 static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
1001 {
1002 adpt_hba* pHba;
1003 @@ -3284,10 +3329,12 @@ static static void adpt_delay(int millisec)
1004
1005 #endif
1006
1007 -static struct scsi_host_template adpt_template = {
1008 +static struct scsi_host_template driver_template = {
1009 .name = "dpt_i2o",
1010 .proc_name = "dpt_i2o",
1011 .proc_info = adpt_proc_info,
1012 + .detect = adpt_detect,
1013 + .release = adpt_release,
1014 .info = adpt_info,
1015 .queuecommand = adpt_queue,
1016 .eh_abort_handler = adpt_abort,
1017 @@ -3301,62 +3348,5 @@ static struct scsi_host_template adpt_template = {
1018 .cmd_per_lun = 1,
1019 .use_clustering = ENABLE_CLUSTERING,
1020 };
1021 -
1022 -static s32 adpt_scsi_register(adpt_hba* pHba)
1023 -{
1024 - struct Scsi_Host *host;
1025 -
1026 - host = scsi_host_alloc(&adpt_template, sizeof(adpt_hba*));
1027 - if (host == NULL) {
1028 - printk ("%s: scsi_host_alloc returned NULL\n",pHba->name);
1029 - return -1;
1030 - }
1031 - host->hostdata[0] = (unsigned long)pHba;
1032 - pHba->host = host;
1033 -
1034 - host->irq = pHba->pDev->irq;
1035 - /* no IO ports, so don't have to set host->io_port and
1036 - * host->n_io_port
1037 - */
1038 - host->io_port = 0;
1039 - host->n_io_port = 0;
1040 - /* see comments in scsi_host.h */
1041 - host->max_id = 16;
1042 - host->max_lun = 256;
1043 - host->max_channel = pHba->top_scsi_channel + 1;
1044 - host->cmd_per_lun = 1;
1045 - host->unique_id = (uint) pHba;
1046 - host->sg_tablesize = pHba->sg_tablesize;
1047 - host->can_queue = pHba->post_fifo_size;
1048 -
1049 - if (scsi_add_host(host, &pHba->pDev->dev)) {
1050 - scsi_host_put(host);
1051 - return -1;
1052 - }
1053 -
1054 - return 0;
1055 -}
1056 -
1057 -static int __init adpt_init(void)
1058 -{
1059 - int count;
1060 -
1061 - printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
1062 -#ifdef REBOOT_NOTIFIER
1063 - register_reboot_notifier(&adpt_reboot_notifier);
1064 -#endif
1065 -
1066 - count = adpt_detect();
1067 -
1068 - return count > 0 ? 0 : -ENODEV;
1069 -}
1070 -
1071 -static void __exit adpt_exit(void)
1072 -{
1073 - while (hba_chain)
1074 - adpt_release(hba_chain);
1075 -}
1076 -
1077 -module_init(adpt_init);
1078 -module_exit(adpt_exit);
1079 +#include "scsi_module.c"
1080 MODULE_LICENSE("GPL");
1081 diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
1082 index 0892f6c..fd79068 100644
1083 --- a/drivers/scsi/dpti.h
1084 +++ b/drivers/scsi/dpti.h
1085 @@ -28,9 +28,11 @@
1086 * SCSI interface function Prototypes
1087 */
1088
1089 +static int adpt_detect(struct scsi_host_template * sht);
1090 static int adpt_queue(struct scsi_cmnd * cmd, void (*cmdcomplete) (struct scsi_cmnd *));
1091 static int adpt_abort(struct scsi_cmnd * cmd);
1092 static int adpt_reset(struct scsi_cmnd* cmd);
1093 +static int adpt_release(struct Scsi_Host *host);
1094 static int adpt_slave_configure(struct scsi_device *);
1095
1096 static const char *adpt_info(struct Scsi_Host *pSHost);
1097 @@ -47,6 +49,8 @@ static int adpt_device_reset(struct scsi_cmnd* cmd);
1098
1099 #define DPT_DRIVER_NAME "Adaptec I2O RAID"
1100
1101 +#ifndef HOSTS_C
1102 +
1103 #include "dpt/sys_info.h"
1104 #include <linux/wait.h>
1105 #include "dpt/dpti_i2o.h"
1106 @@ -285,7 +289,7 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba);
1107 static s32 adpt_i2o_hrt_get(adpt_hba* pHba);
1108 static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* dptdevice);
1109 static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd);
1110 -static s32 adpt_scsi_register(adpt_hba* pHba);
1111 +static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht);
1112 static s32 adpt_hba_reset(adpt_hba* pHba);
1113 static s32 adpt_i2o_reset_hba(adpt_hba* pHba);
1114 static s32 adpt_rescan(adpt_hba* pHba);
1115 @@ -295,7 +299,7 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba);
1116 static void adpt_inquiry(adpt_hba* pHba);
1117 static void adpt_fail_posted_scbs(adpt_hba* pHba);
1118 static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun);
1119 -static int adpt_install_hba(struct pci_dev* pDev) ;
1120 +static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) ;
1121 static int adpt_i2o_online_hba(adpt_hba* pHba);
1122 static void adpt_i2o_post_wait_complete(u32, int);
1123 static int adpt_i2o_systab_send(adpt_hba* pHba);
1124 @@ -339,4 +343,5 @@ static void adpt_i386_info(sysInfo_S* si);
1125 #define FW_DEBUG_BLED_OFFSET 8
1126
1127 #define FW_DEBUG_FLAGS_NO_HEADERS_B 0x01
1128 +#endif /* !HOSTS_C */
1129 #endif /* _DPT_H */
1130 diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
1131 index 95cf7b6..f2c91bc 100644
1132 --- a/drivers/scsi/esp_scsi.c
1133 +++ b/drivers/scsi/esp_scsi.c
1134 @@ -2026,8 +2026,8 @@ static void esp_reset_cleanup(struct esp *esp)
1135 tp->flags |= ESP_TGT_CHECK_NEGO;
1136
1137 if (tp->starget)
1138 - starget_for_each_device(tp->starget, NULL,
1139 - esp_clear_hold);
1140 + __starget_for_each_device(tp->starget, NULL,
1141 + esp_clear_hold);
1142 }
1143 esp->flags &= ~ESP_FLAG_RESETTING;
1144 }
1145 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
1146 index a5de1a8..537c4e4 100644
1147 --- a/drivers/scsi/scsi.c
1148 +++ b/drivers/scsi/scsi.c
1149 @@ -886,11 +886,11 @@ EXPORT_SYMBOL(__scsi_iterate_devices);
1150 * starget_for_each_device - helper to walk all devices of a target
1151 * @starget: target whose devices we want to iterate over.
1152 *
1153 - * This traverses over each devices of @shost. The devices have
1154 + * This traverses over each device of @starget. The devices have
1155 * a reference that must be released by scsi_host_put when breaking
1156 * out of the loop.
1157 */
1158 -void starget_for_each_device(struct scsi_target *starget, void * data,
1159 +void starget_for_each_device(struct scsi_target *starget, void *data,
1160 void (*fn)(struct scsi_device *, void *))
1161 {
1162 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1163 @@ -905,6 +905,33 @@ void starget_for_each_device(struct scsi_target *starget, void * data,
1164 EXPORT_SYMBOL(starget_for_each_device);
1165
1166 /**
1167 + * __starget_for_each_device - helper to walk all devices of a target
1168 + * (UNLOCKED)
1169 + * @starget: target whose devices we want to iterate over.
1170 + *
1171 + * This traverses over each device of @starget. It does _not_
1172 + * take a reference on the scsi_device, so the whole loop must be
1173 + * protected by shost->host_lock.
1174 + *
1175 + * Note: The only reason why drivers would want to use this is because
1176 + * they need to access the device list in irq context. Otherwise you
1177 + * really want to use starget_for_each_device instead.
1178 + **/
1179 +void __starget_for_each_device(struct scsi_target *starget, void *data,
1180 + void (*fn)(struct scsi_device *, void *))
1181 +{
1182 + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1183 + struct scsi_device *sdev;
1184 +
1185 + __shost_for_each_device(sdev, shost) {
1186 + if ((sdev->channel == starget->channel) &&
1187 + (sdev->id == starget->id))
1188 + fn(sdev, data);
1189 + }
1190 +}
1191 +EXPORT_SYMBOL(__starget_for_each_device);
1192 +
1193 +/**
1194 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
1195 * @starget: SCSI target pointer
1196 * @lun: SCSI Logical Unit Number
1197 diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
1198 index 31310ca..ed49cec 100644
1199 --- a/drivers/usb/host/ehci-hcd.c
1200 +++ b/drivers/usb/host/ehci-hcd.c
1201 @@ -575,12 +575,15 @@ static int ehci_run (struct usb_hcd *hcd)
1202 * from the companions to the EHCI controller. If any of the
1203 * companions are in the middle of a port reset at the time, it
1204 * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
1205 - * guarantees that no resets are in progress.
1206 + * guarantees that no resets are in progress. After we set CF,
1207 + * a short delay lets the hardware catch up; new resets shouldn't
1208 + * be started before the port switching actions could complete.
1209 */
1210 down_write(&ehci_cf_port_reset_rwsem);
1211 hcd->state = HC_STATE_RUNNING;
1212 ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
1213 ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
1214 + msleep(5);
1215 up_write(&ehci_cf_port_reset_rwsem);
1216
1217 temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
1218 diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
1219 index 768b2c1..af0461e 100644
1220 --- a/drivers/usb/image/microtek.c
1221 +++ b/drivers/usb/image/microtek.c
1222 @@ -824,7 +824,7 @@ static int mts_usb_probe(struct usb_interface *intf,
1223 goto out_kfree2;
1224
1225 new_desc->host->hostdata[0] = (unsigned long)new_desc;
1226 - if (scsi_add_host(new_desc->host, NULL)) {
1227 + if (scsi_add_host(new_desc->host, &dev->dev)) {
1228 err_retval = -EIO;
1229 goto out_host_put;
1230 }
1231 diff --git a/drivers/video/fb_ddc.c b/drivers/video/fb_ddc.c
1232 index f836137..a0df632 100644
1233 --- a/drivers/video/fb_ddc.c
1234 +++ b/drivers/video/fb_ddc.c
1235 @@ -56,13 +56,12 @@ unsigned char *fb_ddc_read(struct i2c_adapter *adapter)
1236 int i, j;
1237
1238 algo_data->setscl(algo_data->data, 1);
1239 - algo_data->setscl(algo_data->data, 0);
1240
1241 for (i = 0; i < 3; i++) {
1242 /* For some old monitors we need the
1243 * following process to initialize/stop DDC
1244 */
1245 - algo_data->setsda(algo_data->data, 0);
1246 + algo_data->setsda(algo_data->data, 1);
1247 msleep(13);
1248
1249 algo_data->setscl(algo_data->data, 1);
1250 @@ -97,14 +96,15 @@ unsigned char *fb_ddc_read(struct i2c_adapter *adapter)
1251 algo_data->setsda(algo_data->data, 1);
1252 msleep(15);
1253 algo_data->setscl(algo_data->data, 0);
1254 + algo_data->setsda(algo_data->data, 0);
1255 if (edid)
1256 break;
1257 }
1258 /* Release the DDC lines when done or the Apple Cinema HD display
1259 * will switch off
1260 */
1261 - algo_data->setsda(algo_data->data, 0);
1262 - algo_data->setscl(algo_data->data, 0);
1263 + algo_data->setsda(algo_data->data, 1);
1264 + algo_data->setscl(algo_data->data, 1);
1265
1266 return edid;
1267 }
1268 diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
1269 index be9e65b..386fbff 100644
1270 --- a/fs/xfs/linux-2.6/xfs_buf.c
1271 +++ b/fs/xfs/linux-2.6/xfs_buf.c
1272 @@ -1744,6 +1744,8 @@ xfsbufd(
1273
1274 current->flags |= PF_MEMALLOC;
1275
1276 + set_freezable();
1277 +
1278 do {
1279 if (unlikely(freezing(current))) {
1280 set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1281 diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
1282 index 85d434b..97de8aa 100644
1283 --- a/include/linux/pci_ids.h
1284 +++ b/include/linux/pci_ids.h
1285 @@ -1237,6 +1237,10 @@
1286 #define PCI_DEVICE_ID_NVIDIA_NVENET_33 0x0761
1287 #define PCI_DEVICE_ID_NVIDIA_NVENET_34 0x0762
1288 #define PCI_DEVICE_ID_NVIDIA_NVENET_35 0x0763
1289 +#define PCI_DEVICE_ID_NVIDIA_NVENET_36 0x0AB0
1290 +#define PCI_DEVICE_ID_NVIDIA_NVENET_37 0x0AB1
1291 +#define PCI_DEVICE_ID_NVIDIA_NVENET_38 0x0AB2
1292 +#define PCI_DEVICE_ID_NVIDIA_NVENET_39 0x0AB3
1293
1294 #define PCI_VENDOR_ID_IMS 0x10e0
1295 #define PCI_DEVICE_ID_IMS_TT128 0x9128
1296 diff --git a/include/linux/pnp.h b/include/linux/pnp.h
1297 index 16b46aa..62decab 100644
1298 --- a/include/linux/pnp.h
1299 +++ b/include/linux/pnp.h
1300 @@ -13,8 +13,8 @@
1301 #include <linux/errno.h>
1302 #include <linux/mod_devicetable.h>
1303
1304 -#define PNP_MAX_PORT 8
1305 -#define PNP_MAX_MEM 4
1306 +#define PNP_MAX_PORT 24
1307 +#define PNP_MAX_MEM 12
1308 #define PNP_MAX_IRQ 2
1309 #define PNP_MAX_DMA 2
1310 #define PNP_NAME_LEN 50
1311 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
1312 index 1c4eb41..9c4ad75 100644
1313 --- a/include/linux/thread_info.h
1314 +++ b/include/linux/thread_info.h
1315 @@ -7,12 +7,25 @@
1316 #ifndef _LINUX_THREAD_INFO_H
1317 #define _LINUX_THREAD_INFO_H
1318
1319 +#include <linux/types.h>
1320 +
1321 /*
1322 - * System call restart block.
1323 + * System call restart block.
1324 */
1325 struct restart_block {
1326 long (*fn)(struct restart_block *);
1327 - unsigned long arg0, arg1, arg2, arg3;
1328 + union {
1329 + struct {
1330 + unsigned long arg0, arg1, arg2, arg3;
1331 + };
1332 + /* For futex_wait */
1333 + struct {
1334 + u32 *uaddr;
1335 + u32 val;
1336 + u32 flags;
1337 + u64 time;
1338 + } futex;
1339 + };
1340 };
1341
1342 extern long do_no_restart_syscall(struct restart_block *parm);
1343 diff --git a/include/net/sock.h b/include/net/sock.h
1344 index bdd9ebe..8a71ab3 100644
1345 --- a/include/net/sock.h
1346 +++ b/include/net/sock.h
1347 @@ -1200,6 +1200,9 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
1348 {
1349 struct sk_buff *skb;
1350
1351 + /* The TCP header must be at least 32-bit aligned. */
1352 + size = ALIGN(size, 4);
1353 +
1354 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
1355 if (skb) {
1356 skb->truesize += mem;
1357 diff --git a/include/net/tcp.h b/include/net/tcp.h
1358 index 54053de..704d51e 100644
1359 --- a/include/net/tcp.h
1360 +++ b/include/net/tcp.h
1361 @@ -1256,6 +1256,9 @@ static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1362 struct sock *sk)
1363 {
1364 __skb_insert(new, skb->prev, skb, &sk->sk_write_queue);
1365 +
1366 + if (sk->sk_send_head == skb)
1367 + sk->sk_send_head = new;
1368 }
1369
1370 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1371 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
1372 index d5057bc..c1d659d 100644
1373 --- a/include/scsi/scsi_device.h
1374 +++ b/include/scsi/scsi_device.h
1375 @@ -222,6 +222,9 @@ extern struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *,
1376 uint);
1377 extern void starget_for_each_device(struct scsi_target *, void *,
1378 void (*fn)(struct scsi_device *, void *));
1379 +extern void __starget_for_each_device(struct scsi_target *, void *,
1380 + void (*fn)(struct scsi_device *,
1381 + void *));
1382
1383 /* only exposed to implement shost_for_each_device */
1384 extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *,
1385 diff --git a/kernel/exit.c b/kernel/exit.c
1386 index 096c27d..b5fee81 100644
1387 --- a/kernel/exit.c
1388 +++ b/kernel/exit.c
1389 @@ -1365,7 +1365,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
1390 if (unlikely(!exit_code) || unlikely(p->exit_state))
1391 goto bail_ref;
1392 return wait_noreap_copyout(p, pid, uid,
1393 - why, (exit_code << 8) | 0x7f,
1394 + why, exit_code,
1395 infop, ru);
1396 }
1397
1398 diff --git a/kernel/futex.c b/kernel/futex.c
1399 index fcc94e7..b658a9a 100644
1400 --- a/kernel/futex.c
1401 +++ b/kernel/futex.c
1402 @@ -1149,9 +1149,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1403
1404 /*
1405 * In case we must use restart_block to restart a futex_wait,
1406 - * we encode in the 'arg3' shared capability
1407 + * we encode in the 'flags' shared capability
1408 */
1409 -#define ARG3_SHARED 1
1410 +#define FLAGS_SHARED 1
1411
1412 static long futex_wait_restart(struct restart_block *restart);
1413
1414 @@ -1290,12 +1290,13 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
1415 struct restart_block *restart;
1416 restart = &current_thread_info()->restart_block;
1417 restart->fn = futex_wait_restart;
1418 - restart->arg0 = (unsigned long)uaddr;
1419 - restart->arg1 = (unsigned long)val;
1420 - restart->arg2 = (unsigned long)abs_time;
1421 - restart->arg3 = 0;
1422 + restart->futex.uaddr = (u32 *)uaddr;
1423 + restart->futex.val = val;
1424 + restart->futex.time = abs_time->tv64;
1425 + restart->futex.flags = 0;
1426 +
1427 if (fshared)
1428 - restart->arg3 |= ARG3_SHARED;
1429 + restart->futex.flags |= FLAGS_SHARED;
1430 return -ERESTART_RESTARTBLOCK;
1431 }
1432
1433 @@ -1310,15 +1311,15 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
1434
1435 static long futex_wait_restart(struct restart_block *restart)
1436 {
1437 - u32 __user *uaddr = (u32 __user *)restart->arg0;
1438 - u32 val = (u32)restart->arg1;
1439 - ktime_t *abs_time = (ktime_t *)restart->arg2;
1440 + u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
1441 struct rw_semaphore *fshared = NULL;
1442 + ktime_t t;
1443
1444 + t.tv64 = restart->futex.time;
1445 restart->fn = do_no_restart_syscall;
1446 - if (restart->arg3 & ARG3_SHARED)
1447 + if (restart->futex.flags & FLAGS_SHARED)
1448 fshared = &current->mm->mmap_sem;
1449 - return (long)futex_wait(uaddr, fshared, val, abs_time);
1450 + return (long)futex_wait(uaddr, fshared, restart->futex.val, &t);
1451 }
1452
1453
1454 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
1455 index c21ca6b..ee8d0ac 100644
1456 --- a/kernel/hrtimer.c
1457 +++ b/kernel/hrtimer.c
1458 @@ -826,6 +826,14 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
1459 #ifdef CONFIG_TIME_LOW_RES
1460 tim = ktime_add(tim, base->resolution);
1461 #endif
1462 + /*
1463 + * Careful here: User space might have asked for a
1464 + * very long sleep, so the add above might result in a
1465 + * negative number, which enqueues the timer in front
1466 + * of the queue.
1467 + */
1468 + if (tim.tv64 < 0)
1469 + tim.tv64 = KTIME_MAX;
1470 }
1471 timer->expires = tim;
1472
1473 diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
1474 index 7230d91..fdccfd5 100644
1475 --- a/kernel/irq/manage.c
1476 +++ b/kernel/irq/manage.c
1477 @@ -29,12 +29,28 @@
1478 void synchronize_irq(unsigned int irq)
1479 {
1480 struct irq_desc *desc = irq_desc + irq;
1481 + unsigned int status;
1482
1483 if (irq >= NR_IRQS)
1484 return;
1485
1486 - while (desc->status & IRQ_INPROGRESS)
1487 - cpu_relax();
1488 + do {
1489 + unsigned long flags;
1490 +
1491 + /*
1492 + * Wait until we're out of the critical section. This might
1493 + * give the wrong answer due to the lack of memory barriers.
1494 + */
1495 + while (desc->status & IRQ_INPROGRESS)
1496 + cpu_relax();
1497 +
1498 + /* Ok, that indicated we're done: double-check carefully. */
1499 + spin_lock_irqsave(&desc->lock, flags);
1500 + status = desc->status;
1501 + spin_unlock_irqrestore(&desc->lock, flags);
1502 +
1503 + /* Oops, that failed? */
1504 + } while (status & IRQ_INPROGRESS);
1505 }
1506 EXPORT_SYMBOL(synchronize_irq);
1507
1508 diff --git a/kernel/sched.c b/kernel/sched.c
1509 index 6107a0c..7582d01 100644
1510 --- a/kernel/sched.c
1511 +++ b/kernel/sched.c
1512 @@ -5306,7 +5306,7 @@ set_table_entry(struct ctl_table *entry,
1513 static struct ctl_table *
1514 sd_alloc_ctl_domain_table(struct sched_domain *sd)
1515 {
1516 - struct ctl_table *table = sd_alloc_ctl_entry(14);
1517 + struct ctl_table *table = sd_alloc_ctl_entry(12);
1518
1519 set_table_entry(&table[0], "min_interval", &sd->min_interval,
1520 sizeof(long), 0644, proc_doulongvec_minmax);
1521 @@ -5326,10 +5326,10 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
1522 sizeof(int), 0644, proc_dointvec_minmax);
1523 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
1524 sizeof(int), 0644, proc_dointvec_minmax);
1525 - set_table_entry(&table[10], "cache_nice_tries",
1526 + set_table_entry(&table[9], "cache_nice_tries",
1527 &sd->cache_nice_tries,
1528 sizeof(int), 0644, proc_dointvec_minmax);
1529 - set_table_entry(&table[12], "flags", &sd->flags,
1530 + set_table_entry(&table[10], "flags", &sd->flags,
1531 sizeof(int), 0644, proc_dointvec_minmax);
1532
1533 return table;
1534 diff --git a/lib/textsearch.c b/lib/textsearch.c
1535 index 88c98a2..be8bda3 100644
1536 --- a/lib/textsearch.c
1537 +++ b/lib/textsearch.c
1538 @@ -7,7 +7,7 @@
1539 * 2 of the License, or (at your option) any later version.
1540 *
1541 * Authors: Thomas Graf <tgraf@suug.ch>
1542 - * Pablo Neira Ayuso <pablo@eurodev.net>
1543 + * Pablo Neira Ayuso <pablo@netfilter.org>
1544 *
1545 * ==========================================================================
1546 *
1547 @@ -250,7 +250,8 @@ unsigned int textsearch_find_continuous(struct ts_config *conf,
1548 * the various search algorithms.
1549 *
1550 * Returns a new textsearch configuration according to the specified
1551 - * parameters or a ERR_PTR().
1552 + * parameters or a ERR_PTR(). If a zero length pattern is passed, this
1553 + * function returns EINVAL.
1554 */
1555 struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
1556 unsigned int len, gfp_t gfp_mask, int flags)
1557 @@ -259,6 +260,9 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
1558 struct ts_config *conf;
1559 struct ts_ops *ops;
1560
1561 + if (len == 0)
1562 + return ERR_PTR(-EINVAL);
1563 +
1564 ops = lookup_ts_algo(algo);
1565 #ifdef CONFIG_KMOD
1566 /*
1567 diff --git a/mm/shmem.c b/mm/shmem.c
1568 index 95558e4..4622ffa 100644
1569 --- a/mm/shmem.c
1570 +++ b/mm/shmem.c
1571 @@ -1071,7 +1071,7 @@ shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
1572 pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
1573 pvma.vm_pgoff = idx;
1574 pvma.vm_end = PAGE_SIZE;
1575 - page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
1576 + page = alloc_page_vma(gfp, &pvma, 0);
1577 mpol_free(pvma.vm_policy);
1578 return page;
1579 }
1580 @@ -1091,7 +1091,7 @@ shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
1581 static inline struct page *
1582 shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
1583 {
1584 - return alloc_page(gfp | __GFP_ZERO);
1585 + return alloc_page(gfp);
1586 }
1587 #endif
1588
1589 @@ -1304,6 +1304,7 @@ repeat:
1590
1591 info->alloced++;
1592 spin_unlock(&info->lock);
1593 + clear_highpage(filepage);
1594 flush_dcache_page(filepage);
1595 SetPageUptodate(filepage);
1596 }
1597 diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
1598 index 328759c..6f5e738 100644
1599 --- a/net/8021q/vlan_dev.c
1600 +++ b/net/8021q/vlan_dev.c
1601 @@ -459,7 +459,8 @@ int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1602 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
1603 */
1604
1605 - if (veth->h_vlan_proto != htons(ETH_P_8021Q)) {
1606 + if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
1607 + VLAN_DEV_INFO(dev)->flags & VLAN_FLAG_REORDER_HDR) {
1608 int orig_headroom = skb_headroom(skb);
1609 unsigned short veth_TCI;
1610
1611 diff --git a/net/bridge/br.c b/net/bridge/br.c
1612 index 848b8fa..94ae4d2 100644
1613 --- a/net/bridge/br.c
1614 +++ b/net/bridge/br.c
1615 @@ -39,7 +39,7 @@ static int __init br_init(void)
1616
1617 err = br_fdb_init();
1618 if (err)
1619 - goto err_out1;
1620 + goto err_out;
1621
1622 err = br_netfilter_init();
1623 if (err)
1624 @@ -65,6 +65,8 @@ err_out3:
1625 err_out2:
1626 br_netfilter_fini();
1627 err_out1:
1628 + br_fdb_fini();
1629 +err_out:
1630 llc_sap_put(br_stp_sap);
1631 return err;
1632 }
1633 diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
1634 index 3a8a015..5706cd1 100644
1635 --- a/net/bridge/br_input.c
1636 +++ b/net/bridge/br_input.c
1637 @@ -122,6 +122,7 @@ static inline int is_link_local(const unsigned char *dest)
1638 struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
1639 {
1640 const unsigned char *dest = eth_hdr(skb)->h_dest;
1641 + int (*rhook)(struct sk_buff **pskb);
1642
1643 if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
1644 goto drop;
1645 @@ -143,9 +144,9 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
1646
1647 switch (p->state) {
1648 case BR_STATE_FORWARDING:
1649 -
1650 - if (br_should_route_hook) {
1651 - if (br_should_route_hook(&skb))
1652 + rhook = rcu_dereference(br_should_route_hook);
1653 + if (rhook != NULL) {
1654 + if (rhook(&skb))
1655 return skb;
1656 dest = eth_hdr(skb)->h_dest;
1657 }
1658 diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
1659 index d37ce04..bc17cf5 100644
1660 --- a/net/bridge/netfilter/ebtable_broute.c
1661 +++ b/net/bridge/netfilter/ebtable_broute.c
1662 @@ -70,13 +70,13 @@ static int __init ebtable_broute_init(void)
1663 if (ret < 0)
1664 return ret;
1665 /* see br_input.c */
1666 - br_should_route_hook = ebt_broute;
1667 + rcu_assign_pointer(br_should_route_hook, ebt_broute);
1668 return ret;
1669 }
1670
1671 static void __exit ebtable_broute_fini(void)
1672 {
1673 - br_should_route_hook = NULL;
1674 + rcu_assign_pointer(br_should_route_hook, NULL);
1675 synchronize_net();
1676 ebt_unregister_table(&broute_table);
1677 }
1678 diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
1679 index 8def682..fbafa97 100644
1680 --- a/net/decnet/dn_dev.c
1681 +++ b/net/decnet/dn_dev.c
1682 @@ -650,16 +650,18 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1683 struct dn_dev *dn_db;
1684 struct ifaddrmsg *ifm;
1685 struct dn_ifaddr *ifa, **ifap;
1686 - int err = -EADDRNOTAVAIL;
1687 + int err;
1688
1689 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy);
1690 if (err < 0)
1691 goto errout;
1692
1693 + err = -ENODEV;
1694 ifm = nlmsg_data(nlh);
1695 if ((dn_db = dn_dev_by_index(ifm->ifa_index)) == NULL)
1696 goto errout;
1697
1698 + err = -EADDRNOTAVAIL;
1699 for (ifap = &dn_db->ifa_list; (ifa = *ifap); ifap = &ifa->ifa_next) {
1700 if (tb[IFA_LOCAL] &&
1701 nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2))
1702 diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
1703 index 9ab9d53..652da8e 100644
1704 --- a/net/ipv4/arp.c
1705 +++ b/net/ipv4/arp.c
1706 @@ -110,12 +110,8 @@
1707 #include <net/tcp.h>
1708 #include <net/sock.h>
1709 #include <net/arp.h>
1710 -#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
1711 #include <net/ax25.h>
1712 -#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
1713 #include <net/netrom.h>
1714 -#endif
1715 -#endif
1716 #if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
1717 #include <net/atmclip.h>
1718 struct neigh_table *clip_tbl_hook;
1719 @@ -729,20 +725,10 @@ static int arp_process(struct sk_buff *skb)
1720 htons(dev_type) != arp->ar_hrd)
1721 goto out;
1722 break;
1723 -#ifdef CONFIG_NET_ETHERNET
1724 case ARPHRD_ETHER:
1725 -#endif
1726 -#ifdef CONFIG_TR
1727 case ARPHRD_IEEE802_TR:
1728 -#endif
1729 -#ifdef CONFIG_FDDI
1730 case ARPHRD_FDDI:
1731 -#endif
1732 -#ifdef CONFIG_NET_FC
1733 case ARPHRD_IEEE802:
1734 -#endif
1735 -#if defined(CONFIG_NET_ETHERNET) || defined(CONFIG_TR) || \
1736 - defined(CONFIG_FDDI) || defined(CONFIG_NET_FC)
1737 /*
1738 * ETHERNET, Token Ring and Fibre Channel (which are IEEE 802
1739 * devices, according to RFC 2625) devices will accept ARP
1740 @@ -757,21 +743,16 @@ static int arp_process(struct sk_buff *skb)
1741 arp->ar_pro != htons(ETH_P_IP))
1742 goto out;
1743 break;
1744 -#endif
1745 -#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
1746 case ARPHRD_AX25:
1747 if (arp->ar_pro != htons(AX25_P_IP) ||
1748 arp->ar_hrd != htons(ARPHRD_AX25))
1749 goto out;
1750 break;
1751 -#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
1752 case ARPHRD_NETROM:
1753 if (arp->ar_pro != htons(AX25_P_IP) ||
1754 arp->ar_hrd != htons(ARPHRD_NETROM))
1755 goto out;
1756 break;
1757 -#endif
1758 -#endif
1759 }
1760
1761 /* Understand only these message types */
1762 diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
1763 index deab27f..9731d2c 100644
1764 --- a/net/ipv4/netfilter/nf_nat_core.c
1765 +++ b/net/ipv4/netfilter/nf_nat_core.c
1766 @@ -607,13 +607,10 @@ static void nf_nat_move_storage(struct nf_conn *conntrack, void *old)
1767 struct nf_conn_nat *new_nat = nf_ct_ext_find(conntrack, NF_CT_EXT_NAT);
1768 struct nf_conn_nat *old_nat = (struct nf_conn_nat *)old;
1769 struct nf_conn *ct = old_nat->ct;
1770 - unsigned int srchash;
1771
1772 - if (!(ct->status & IPS_NAT_DONE_MASK))
1773 + if (!ct || !(ct->status & IPS_NAT_DONE_MASK))
1774 return;
1775
1776 - srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
1777 -
1778 write_lock_bh(&nf_nat_lock);
1779 hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
1780 new_nat->ct = ct;
1781 @@ -681,7 +678,7 @@ static int clean_nat(struct nf_conn *i, void *data)
1782
1783 if (!nat)
1784 return 0;
1785 - memset(nat, 0, sizeof(nat));
1786 + memset(nat, 0, sizeof(*nat));
1787 i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
1788 return 0;
1789 }
1790 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
1791 index c7ca94b..198b732 100644
1792 --- a/net/ipv4/route.c
1793 +++ b/net/ipv4/route.c
1794 @@ -2913,18 +2913,14 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
1795 offset /= sizeof(u32);
1796
1797 if (length > 0) {
1798 - u32 *src = ((u32 *) IP_RT_ACCT_CPU(0)) + offset;
1799 u32 *dst = (u32 *) buffer;
1800
1801 - /* Copy first cpu. */
1802 *start = buffer;
1803 - memcpy(dst, src, length);
1804 + memset(dst, 0, length);
1805
1806 - /* Add the other cpus in, one int at a time */
1807 for_each_possible_cpu(i) {
1808 unsigned int j;
1809 -
1810 - src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
1811 + u32 *src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
1812
1813 for (j = 0; j < length/4; j++)
1814 dst[j] += src[j];
1815 diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
1816 index 53ef0f4..6ea1306 100644
1817 --- a/net/ipv4/sysctl_net_ipv4.c
1818 +++ b/net/ipv4/sysctl_net_ipv4.c
1819 @@ -121,7 +121,7 @@ static int sysctl_tcp_congestion_control(ctl_table *table, int __user *name,
1820
1821 tcp_get_default_congestion_control(val);
1822 ret = sysctl_string(&tbl, name, nlen, oldval, oldlenp, newval, newlen);
1823 - if (ret == 0 && newval && newlen)
1824 + if (ret == 1 && newval && newlen)
1825 ret = tcp_set_default_congestion_control(val);
1826 return ret;
1827 }
1828 diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
1829 index 64f1cba..5aa5f54 100644
1830 --- a/net/ipv4/tcp_illinois.c
1831 +++ b/net/ipv4/tcp_illinois.c
1832 @@ -298,7 +298,7 @@ static u32 tcp_illinois_ssthresh(struct sock *sk)
1833 struct illinois *ca = inet_csk_ca(sk);
1834
1835 /* Multiplicative decrease */
1836 - return max((tp->snd_cwnd * ca->beta) >> BETA_SHIFT, 2U);
1837 + return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
1838 }
1839
1840
1841 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
1842 index 666d8a5..097165f 100644
1843 --- a/net/ipv4/tcp_output.c
1844 +++ b/net/ipv4/tcp_output.c
1845 @@ -1287,7 +1287,6 @@ static int tcp_mtu_probe(struct sock *sk)
1846
1847 skb = tcp_send_head(sk);
1848 tcp_insert_write_queue_before(nskb, skb, sk);
1849 - tcp_advance_send_head(sk, skb);
1850
1851 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
1852 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
1853 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
1854 index 45b4c82..0917944 100644
1855 --- a/net/ipv6/addrconf.c
1856 +++ b/net/ipv6/addrconf.c
1857 @@ -2281,6 +2281,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
1858 break;
1859 }
1860
1861 + if (!idev && dev->mtu >= IPV6_MIN_MTU)
1862 + idev = ipv6_add_dev(dev);
1863 +
1864 if (idev)
1865 idev->if_flags |= IF_READY;
1866 } else {
1867 @@ -2345,12 +2348,18 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
1868 break;
1869
1870 case NETDEV_CHANGEMTU:
1871 - if ( idev && dev->mtu >= IPV6_MIN_MTU) {
1872 + if (idev && dev->mtu >= IPV6_MIN_MTU) {
1873 rt6_mtu_change(dev, dev->mtu);
1874 idev->cnf.mtu6 = dev->mtu;
1875 break;
1876 }
1877
1878 + if (!idev && dev->mtu >= IPV6_MIN_MTU) {
1879 + idev = ipv6_add_dev(dev);
1880 + if (idev)
1881 + break;
1882 + }
1883 +
1884 /* MTU falled under IPV6_MIN_MTU. Stop IPv6 on this interface. */
1885
1886 case NETDEV_DOWN:
1887 diff --git a/net/key/af_key.c b/net/key/af_key.c
1888 index 5502df1..7a5e993 100644
1889 --- a/net/key/af_key.c
1890 +++ b/net/key/af_key.c
1891 @@ -1546,7 +1546,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr,
1892
1893 out_hdr = (struct sadb_msg *) out_skb->data;
1894 out_hdr->sadb_msg_version = hdr->sadb_msg_version;
1895 - out_hdr->sadb_msg_type = SADB_DUMP;
1896 + out_hdr->sadb_msg_type = SADB_GET;
1897 out_hdr->sadb_msg_satype = pfkey_proto2satype(proto);
1898 out_hdr->sadb_msg_errno = 0;
1899 out_hdr->sadb_msg_reserved = 0;
1900 diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
1901 index a1a65a1..cf6ba66 100644
1902 --- a/net/netfilter/nf_conntrack_extend.c
1903 +++ b/net/netfilter/nf_conntrack_extend.c
1904 @@ -109,7 +109,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
1905 rcu_read_lock();
1906 t = rcu_dereference(nf_ct_ext_types[i]);
1907 if (t && t->move)
1908 - t->move(ct, ct->ext + ct->ext->offset[id]);
1909 + t->move(ct, ct->ext + ct->ext->offset[i]);
1910 rcu_read_unlock();
1911 }
1912 kfree(ct->ext);
1913 diff --git a/net/netfilter/xt_CONNMARK.c b/net/netfilter/xt_CONNMARK.c
1914 index 5a00c54..5bc5227 100644
1915 --- a/net/netfilter/xt_CONNMARK.c
1916 +++ b/net/netfilter/xt_CONNMARK.c
1917 @@ -85,11 +85,6 @@ checkentry(const char *tablename,
1918 {
1919 const struct xt_connmark_target_info *matchinfo = targinfo;
1920
1921 - if (nf_ct_l3proto_try_module_get(target->family) < 0) {
1922 - printk(KERN_WARNING "can't load conntrack support for "
1923 - "proto=%d\n", target->family);
1924 - return false;
1925 - }
1926 if (matchinfo->mode == XT_CONNMARK_RESTORE) {
1927 if (strcmp(tablename, "mangle") != 0) {
1928 printk(KERN_WARNING "CONNMARK: restore can only be "
1929 @@ -102,6 +97,11 @@ checkentry(const char *tablename,
1930 printk(KERN_WARNING "CONNMARK: Only supports 32bit mark\n");
1931 return false;
1932 }
1933 + if (nf_ct_l3proto_try_module_get(target->family) < 0) {
1934 + printk(KERN_WARNING "can't load conntrack support for "
1935 + "proto=%d\n", target->family);
1936 + return false;
1937 + }
1938 return true;
1939 }
1940
1941 diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c
1942 index 63d7313..23f780d 100644
1943 --- a/net/netfilter/xt_CONNSECMARK.c
1944 +++ b/net/netfilter/xt_CONNSECMARK.c
1945 @@ -91,11 +91,6 @@ static bool checkentry(const char *tablename, const void *entry,
1946 {
1947 const struct xt_connsecmark_target_info *info = targinfo;
1948
1949 - if (nf_ct_l3proto_try_module_get(target->family) < 0) {
1950 - printk(KERN_WARNING "can't load conntrack support for "
1951 - "proto=%d\n", target->family);
1952 - return false;
1953 - }
1954 switch (info->mode) {
1955 case CONNSECMARK_SAVE:
1956 case CONNSECMARK_RESTORE:
1957 @@ -106,6 +101,11 @@ static bool checkentry(const char *tablename, const void *entry,
1958 return false;
1959 }
1960
1961 + if (nf_ct_l3proto_try_module_get(target->family) < 0) {
1962 + printk(KERN_WARNING "can't load conntrack support for "
1963 + "proto=%d\n", target->family);
1964 + return false;
1965 + }
1966 return true;
1967 }
1968
1969 diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
1970 index d40f7e4..b41c524 100644
1971 --- a/net/netfilter/xt_TCPMSS.c
1972 +++ b/net/netfilter/xt_TCPMSS.c
1973 @@ -178,10 +178,8 @@ xt_tcpmss_target6(struct sk_buff **pskb,
1974
1975 nexthdr = ipv6h->nexthdr;
1976 tcphoff = ipv6_skip_exthdr(*pskb, sizeof(*ipv6h), &nexthdr);
1977 - if (tcphoff < 0) {
1978 - WARN_ON(1);
1979 + if (tcphoff < 0)
1980 return NF_DROP;
1981 - }
1982 ret = tcpmss_mangle_packet(pskb, targinfo, tcphoff,
1983 sizeof(*ipv6h) + sizeof(struct tcphdr));
1984 if (ret < 0)
1985 diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig
1986 index e662f1d..0d3103c 100644
1987 --- a/net/rxrpc/Kconfig
1988 +++ b/net/rxrpc/Kconfig
1989 @@ -5,6 +5,7 @@
1990 config AF_RXRPC
1991 tristate "RxRPC session sockets"
1992 depends on INET && EXPERIMENTAL
1993 + select CRYPTO
1994 select KEYS
1995 help
1996 Say Y or M here to include support for RxRPC session sockets (just
1997 diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
1998 index c81649c..a35d7ce 100644
1999 --- a/net/sched/sch_generic.c
2000 +++ b/net/sched/sch_generic.c
2001 @@ -135,7 +135,7 @@ static inline int qdisc_restart(struct net_device *dev)
2002 struct Qdisc *q = dev->qdisc;
2003 struct sk_buff *skb;
2004 unsigned lockless;
2005 - int ret;
2006 + int ret = NETDEV_TX_BUSY;
2007
2008 /* Dequeue packet */
2009 if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))
2010 @@ -158,7 +158,8 @@ static inline int qdisc_restart(struct net_device *dev)
2011 /* And release queue */
2012 spin_unlock(&dev->queue_lock);
2013
2014 - ret = dev_hard_start_xmit(skb, dev);
2015 + if (!netif_subqueue_stopped(dev, skb->queue_mapping))
2016 + ret = dev_hard_start_xmit(skb, dev);
2017
2018 if (!lockless)
2019 netif_tx_unlock(dev);
2020 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
2021 index a05c342..fa85358 100644
2022 --- a/net/unix/af_unix.c
2023 +++ b/net/unix/af_unix.c
2024 @@ -1632,8 +1632,15 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
2025 mutex_lock(&u->readlock);
2026
2027 skb = skb_recv_datagram(sk, flags, noblock, &err);
2028 - if (!skb)
2029 + if (!skb) {
2030 + unix_state_lock(sk);
2031 + /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2032 + if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2033 + (sk->sk_shutdown & RCV_SHUTDOWN))
2034 + err = 0;
2035 + unix_state_unlock(sk);
2036 goto out_unlock;
2037 + }
2038
2039 wake_up_interruptible(&u->peer_wait);
2040
2041 diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
2042 index d4356e6..4e28f8d 100644
2043 --- a/net/xfrm/xfrm_state.c
2044 +++ b/net/xfrm/xfrm_state.c
2045 @@ -370,7 +370,7 @@ int __xfrm_state_delete(struct xfrm_state *x)
2046 * The xfrm_state_alloc call gives a reference, and that
2047 * is what we are dropping here.
2048 */
2049 - __xfrm_state_put(x);
2050 + xfrm_state_put(x);
2051 err = 0;
2052 }
2053