Magellan Linux

Contents of /trunk/kernel26-alx/patches-2.6.24-r1/0102-2.6.24.3-all-fixes.patch



Revision 627
Sat May 31 15:56:56 2008 UTC by niro
File size: 47775 byte(s)
-2.6.24-alx-r1: renamed to r1 instead of r7, enabled userspace-kernel linker to get uvesafb support
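
The "userspace-kernel linker" mentioned above is presumably the kernel connector, which uvesafb uses to talk to its v86d userspace helper. A minimal config sketch under that assumption (option names as in mainline 2.6.24; the actual alx kernel configuration is not shown on this page):

    # Assumed fragment, not taken from the alx config: uvesafb needs the
    # kernel/userspace connector, and the v86d helper must be installed
    # in userspace for mode switching to work.
    CONFIG_CONNECTOR=y
    CONFIG_FB_UVESA=y
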

1 diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
2 index 0340a34..759c2ac 100644
3 --- a/arch/powerpc/platforms/chrp/pci.c
4 +++ b/arch/powerpc/platforms/chrp/pci.c
5 @@ -354,7 +354,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105,
6 * mode as well. The same fixup must be done to the class-code property in
7 * the IDE node /pci@80000000/ide@C,1
8 */
9 -static void __devinit chrp_pci_fixup_vt8231_ata(struct pci_dev *viaide)
10 +static void chrp_pci_fixup_vt8231_ata(struct pci_dev *viaide)
11 {
12 u8 progif;
13 struct pci_dev *viaisa;
14 @@ -375,4 +375,4 @@ static void __devinit chrp_pci_fixup_vt8231_ata(struct pci_dev *viaide)
15
16 pci_dev_put(viaisa);
17 }
18 -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, chrp_pci_fixup_vt8231_ata);
19 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, chrp_pci_fixup_vt8231_ata);
20 diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
21 index ba931be..5169ecc 100644
22 --- a/arch/powerpc/platforms/powermac/feature.c
23 +++ b/arch/powerpc/platforms/powermac/feature.c
24 @@ -2565,6 +2565,8 @@ static void __init probe_uninorth(void)
25
26 /* Locate core99 Uni-N */
27 uninorth_node = of_find_node_by_name(NULL, "uni-n");
28 + uninorth_maj = 1;
29 +
30 /* Locate G5 u3 */
31 if (uninorth_node == NULL) {
32 uninorth_node = of_find_node_by_name(NULL, "u3");
33 @@ -2575,8 +2577,10 @@ static void __init probe_uninorth(void)
34 uninorth_node = of_find_node_by_name(NULL, "u4");
35 uninorth_maj = 4;
36 }
37 - if (uninorth_node == NULL)
38 + if (uninorth_node == NULL) {
39 + uninorth_maj = 0;
40 return;
41 + }
42
43 addrp = of_get_property(uninorth_node, "reg", NULL);
44 if (addrp == NULL)
45 @@ -3029,3 +3033,8 @@ void pmac_resume_agp_for_card(struct pci_dev *dev)
46 pmac_agp_resume(pmac_agp_bridge);
47 }
48 EXPORT_SYMBOL(pmac_resume_agp_for_card);
49 +
50 +int pmac_get_uninorth_variant(void)
51 +{
52 + return uninorth_maj;
53 +}
54 diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
55 index 28c4500..d2ffbad 100644
56 --- a/arch/s390/lib/uaccess_std.c
57 +++ b/arch/s390/lib/uaccess_std.c
58 @@ -293,10 +293,10 @@ int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval)
59
60 asm volatile(
61 " sacf 256\n"
62 - " cs %1,%4,0(%5)\n"
63 - "0: lr %0,%1\n"
64 - "1: sacf 0\n"
65 - EX_TABLE(0b,1b)
66 + "0: cs %1,%4,0(%5)\n"
67 + "1: lr %0,%1\n"
68 + "2: sacf 0\n"
69 + EX_TABLE(0b,2b) EX_TABLE(1b,2b)
70 : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
71 : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
72 : "cc", "memory" );
73 diff --git a/arch/sparc/lib/rwsem.S b/arch/sparc/lib/rwsem.S
74 index 2065774..f406b1f 100644
75 --- a/arch/sparc/lib/rwsem.S
76 +++ b/arch/sparc/lib/rwsem.S
77 @@ -7,7 +7,7 @@
78 #include <asm/ptrace.h>
79 #include <asm/psr.h>
80
81 - .section .sched.text
82 + .section .sched.text, "ax"
83 .align 4
84
85 .globl ___down_read
86 diff --git a/arch/sparc64/lib/rwsem.S b/arch/sparc64/lib/rwsem.S
87 index 75f0e6b..1a4cc56 100644
88 --- a/arch/sparc64/lib/rwsem.S
89 +++ b/arch/sparc64/lib/rwsem.S
90 @@ -6,7 +6,7 @@
91
92 #include <asm/rwsem-const.h>
93
94 - .section .sched.text
95 + .section .sched.text, "ax"
96
97 .globl __down_read
98 __down_read:
99 diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
100 index c40afba..f636c1e 100644
101 --- a/arch/x86/mm/pageattr_64.c
102 +++ b/arch/x86/mm/pageattr_64.c
103 @@ -207,7 +207,7 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
104 if (__pa(address) < KERNEL_TEXT_SIZE) {
105 unsigned long addr2;
106 pgprot_t prot2;
107 - addr2 = __START_KERNEL_map + __pa(address);
108 + addr2 = __START_KERNEL_map + __pa(address) - phys_base;
109 /* Make sure the kernel mappings stay executable */
110 prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
111 err = __change_page_attr(addr2, pfn, prot2,
112 diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
113 index d409f67..1ebe7a3 100644
114 --- a/drivers/macintosh/smu.c
115 +++ b/drivers/macintosh/smu.c
116 @@ -85,6 +85,7 @@ struct smu_device {
117 u32 cmd_buf_abs; /* command buffer absolute */
118 struct list_head cmd_list;
119 struct smu_cmd *cmd_cur; /* pending command */
120 + int broken_nap;
121 struct list_head cmd_i2c_list;
122 struct smu_i2c_cmd *cmd_i2c_cur; /* pending i2c command */
123 struct timer_list i2c_timer;
124 @@ -135,6 +136,19 @@ static void smu_start_cmd(void)
125 fend = faddr + smu->cmd_buf->length + 2;
126 flush_inval_dcache_range(faddr, fend);
127
128 +
129 + /* We also disable NAP mode for the duration of the command
130 + * on U3 based machines.
131 + * This is slightly racy as it can be written back to 1 by a sysctl
132 + * but that never happens in practice. There seem to be an issue with
133 + * U3 based machines such as the iMac G5 where napping for the
134 + * whole duration of the command prevents the SMU from fetching it
135 + * from memory. This might be related to the strange i2c based
136 + * mechanism the SMU uses to access memory.
137 + */
138 + if (smu->broken_nap)
139 + powersave_nap = 0;
140 +
141 /* This isn't exactly a DMA mapping here, I suspect
142 * the SMU is actually communicating with us via i2c to the
143 * northbridge or the CPU to access RAM.
144 @@ -211,6 +225,10 @@ static irqreturn_t smu_db_intr(int irq, void *arg)
145 misc = cmd->misc;
146 mb();
147 cmd->status = rc;
148 +
149 + /* Re-enable NAP mode */
150 + if (smu->broken_nap)
151 + powersave_nap = 1;
152 bail:
153 /* Start next command if any */
154 smu_start_cmd();
155 @@ -461,7 +479,7 @@ int __init smu_init (void)
156 if (np == NULL)
157 return -ENODEV;
158
159 - printk(KERN_INFO "SMU driver %s %s\n", VERSION, AUTHOR);
160 + printk(KERN_INFO "SMU: Driver %s %s\n", VERSION, AUTHOR);
161
162 if (smu_cmdbuf_abs == 0) {
163 printk(KERN_ERR "SMU: Command buffer not allocated !\n");
164 @@ -533,6 +551,11 @@ int __init smu_init (void)
165 goto fail;
166 }
167
168 + /* U3 has an issue with NAP mode when issuing SMU commands */
169 + smu->broken_nap = pmac_get_uninorth_variant() < 4;
170 + if (smu->broken_nap)
171 + printk(KERN_INFO "SMU: using NAP mode workaround\n");
172 +
173 sys_ctrler = SYS_CTRLER_SMU;
174 return 0;
175
176 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
177 index 49a1982..75efa8b 100644
178 --- a/drivers/net/bonding/bond_main.c
179 +++ b/drivers/net/bonding/bond_main.c
180 @@ -4883,14 +4883,16 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
181 down_write(&bonding_rwsem);
182
183 /* Check to see if the bond already exists. */
184 - list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list)
185 - if (strnicmp(bond->dev->name, name, IFNAMSIZ) == 0) {
186 - printk(KERN_ERR DRV_NAME
187 + if (name) {
188 + list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list)
189 + if (strnicmp(bond->dev->name, name, IFNAMSIZ) == 0) {
190 + printk(KERN_ERR DRV_NAME
191 ": cannot add bond %s; it already exists\n",
192 - name);
193 - res = -EPERM;
194 - goto out_rtnl;
195 - }
196 + name);
197 + res = -EPERM;
198 + goto out_rtnl;
199 + }
200 + }
201
202 bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
203 ether_setup);
204 diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h
205 index d66c605..266ec87 100644
206 --- a/drivers/net/dl2k.h
207 +++ b/drivers/net/dl2k.h
208 @@ -388,8 +388,8 @@ enum _mii_mssr {
209 MII_MSSR_CFG_RES = 0x4000,
210 MII_MSSR_LOCAL_RCV_STATUS = 0x2000,
211 MII_MSSR_REMOTE_RCVR = 0x1000,
212 - MII_MSSR_LP_1000BT_HD = 0x0800,
213 - MII_MSSR_LP_1000BT_FD = 0x0400,
214 + MII_MSSR_LP_1000BT_FD = 0x0800,
215 + MII_MSSR_LP_1000BT_HD = 0x0400,
216 MII_MSSR_IDLE_ERR_COUNT = 0x00ff,
217 };
218
219 diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
220 index c9868e9..e94434f 100644
221 --- a/drivers/net/pcmcia/smc91c92_cs.c
222 +++ b/drivers/net/pcmcia/smc91c92_cs.c
223 @@ -559,8 +559,16 @@ static int mhz_setup(struct pcmcia_device *link)
224
225 /* Read the station address from the CIS. It is stored as the last
226 (fourth) string in the Version 1 Version/ID tuple. */
227 - if (link->prod_id[3]) {
228 - station_addr = link->prod_id[3];
229 + tuple->DesiredTuple = CISTPL_VERS_1;
230 + if (first_tuple(link, tuple, parse) != CS_SUCCESS) {
231 + rc = -1;
232 + goto free_cfg_mem;
233 + }
234 + /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */
235 + if (next_tuple(link, tuple, parse) != CS_SUCCESS)
236 + first_tuple(link, tuple, parse);
237 + if (parse->version_1.ns > 3) {
238 + station_addr = parse->version_1.str + parse->version_1.ofs[3];
239 if (cvt_ascii_address(dev, station_addr) == 0) {
240 rc = 0;
241 goto free_cfg_mem;
242 diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
243 index b253b8c..8eb78be 100644
244 --- a/drivers/scsi/gdth.c
245 +++ b/drivers/scsi/gdth.c
246 @@ -4838,6 +4838,9 @@ static int __init gdth_isa_probe_one(ulong32 isa_bios)
247 if (error)
248 goto out_free_coal_stat;
249 list_add_tail(&ha->list, &gdth_instances);
250 +
251 + scsi_scan_host(shp);
252 +
253 return 0;
254
255 out_free_coal_stat:
256 @@ -4965,6 +4968,9 @@ static int __init gdth_eisa_probe_one(ushort eisa_slot)
257 if (error)
258 goto out_free_coal_stat;
259 list_add_tail(&ha->list, &gdth_instances);
260 +
261 + scsi_scan_host(shp);
262 +
263 return 0;
264
265 out_free_ccb_phys:
266 @@ -5102,6 +5108,9 @@ static int __init gdth_pci_probe_one(gdth_pci_str *pcistr, int ctr)
267 if (error)
268 goto out_free_coal_stat;
269 list_add_tail(&ha->list, &gdth_instances);
270 +
271 + scsi_scan_host(shp);
272 +
273 return 0;
274
275 out_free_coal_stat:
276 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
277 index a69b155..cfd859a 100644
278 --- a/drivers/scsi/sd.c
279 +++ b/drivers/scsi/sd.c
280 @@ -907,6 +907,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
281 unsigned int xfer_size = SCpnt->request_bufflen;
282 unsigned int good_bytes = result ? 0 : xfer_size;
283 u64 start_lba = SCpnt->request->sector;
284 + u64 end_lba = SCpnt->request->sector + (xfer_size / 512);
285 u64 bad_lba;
286 struct scsi_sense_hdr sshdr;
287 int sense_valid = 0;
288 @@ -945,26 +946,23 @@ static int sd_done(struct scsi_cmnd *SCpnt)
289 goto out;
290 if (xfer_size <= SCpnt->device->sector_size)
291 goto out;
292 - switch (SCpnt->device->sector_size) {
293 - case 256:
294 + if (SCpnt->device->sector_size < 512) {
295 + /* only legitimate sector_size here is 256 */
296 start_lba <<= 1;
297 - break;
298 - case 512:
299 - break;
300 - case 1024:
301 - start_lba >>= 1;
302 - break;
303 - case 2048:
304 - start_lba >>= 2;
305 - break;
306 - case 4096:
307 - start_lba >>= 3;
308 - break;
309 - default:
310 - /* Print something here with limiting frequency. */
311 - goto out;
312 - break;
313 + end_lba <<= 1;
314 + } else {
315 + /* be careful ... don't want any overflows */
316 + u64 factor = SCpnt->device->sector_size / 512;
317 + do_div(start_lba, factor);
318 + do_div(end_lba, factor);
319 }
320 +
321 + if (bad_lba < start_lba || bad_lba >= end_lba)
322 + /* the bad lba was reported incorrectly, we have
323 + * no idea where the error is
324 + */
325 + goto out;
326 +
327 /* This computation should always be done in terms of
328 * the resolution of the device's medium.
329 */
330 diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
331 index ad632f2..0647164 100644
332 --- a/drivers/usb/class/usblp.c
333 +++ b/drivers/usb/class/usblp.c
334 @@ -428,6 +428,7 @@ static int usblp_open(struct inode *inode, struct file *file)
335 usblp->rcomplete = 0;
336
337 if (handle_bidir(usblp) < 0) {
338 + usb_autopm_put_interface(intf);
339 usblp->used = 0;
340 file->private_data = NULL;
341 retval = -EIO;
342 diff --git a/fs/inotify_user.c b/fs/inotify_user.c
343 index 5e00933..7253ffd 100644
344 --- a/fs/inotify_user.c
345 +++ b/fs/inotify_user.c
346 @@ -269,7 +269,7 @@ static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask,
347 /* we can safely put the watch as we don't reference it while
348 * generating the event
349 */
350 - if (mask & IN_IGNORED || mask & IN_ONESHOT)
351 + if (mask & IN_IGNORED || w->mask & IN_ONESHOT)
352 put_inotify_watch(w); /* final put */
353
354 /* coalescing: drop this event if it is a dupe of the previous */
355 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
356 index 51cc1bd..855b6d5 100644
357 --- a/fs/nfs/write.c
358 +++ b/fs/nfs/write.c
359 @@ -701,6 +701,17 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
360 }
361
362 /*
363 + * If the page cache is marked as unsafe or invalid, then we can't rely on
364 + * the PageUptodate() flag. In this case, we will need to turn off
365 + * write optimisations that depend on the page contents being correct.
366 + */
367 +static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
368 +{
369 + return PageUptodate(page) &&
370 + !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
371 +}
372 +
373 +/*
374 * Update and possibly write a cached page of an NFS file.
375 *
376 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
377 @@ -721,10 +732,13 @@ int nfs_updatepage(struct file *file, struct page *page,
378 (long long)(page_offset(page) +offset));
379
380 /* If we're not using byte range locks, and we know the page
381 - * is entirely in cache, it may be more efficient to avoid
382 - * fragmenting write requests.
383 + * is up to date, it may be more efficient to extend the write
384 + * to cover the entire page in order to avoid fragmentation
385 + * inefficiencies.
386 */
387 - if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
388 + if (nfs_write_pageuptodate(page, inode) &&
389 + inode->i_flock == NULL &&
390 + !(file->f_mode & O_SYNC)) {
391 count = max(count + offset, nfs_page_length(page));
392 offset = 0;
393 }
394 diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
395 index 21a1c2b..edab1ff 100644
396 --- a/fs/xfs/linux-2.6/xfs_file.c
397 +++ b/fs/xfs/linux-2.6/xfs_file.c
398 @@ -350,8 +350,8 @@ xfs_file_readdir(
399
400 size = buf.used;
401 de = (struct hack_dirent *)buf.dirent;
402 - curr_offset = de->offset /* & 0x7fffffff */;
403 while (size > 0) {
404 + curr_offset = de->offset /* & 0x7fffffff */;
405 if (filldir(dirent, de->name, de->namlen,
406 curr_offset & 0x7fffffff,
407 de->ino, de->d_type)) {
408 @@ -362,7 +362,6 @@ xfs_file_readdir(
409 sizeof(u64));
410 size -= reclen;
411 de = (struct hack_dirent *)((char *)de + reclen);
412 - curr_offset = de->offset /* & 0x7fffffff */;
413 }
414 }
415
416 diff --git a/include/asm-powerpc/pmac_feature.h b/include/asm-powerpc/pmac_feature.h
417 index 26bcb0a..877c35a 100644
418 --- a/include/asm-powerpc/pmac_feature.h
419 +++ b/include/asm-powerpc/pmac_feature.h
420 @@ -392,6 +392,14 @@ extern u32 __iomem *uninorth_base;
421 #define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v)))
422 #define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v)))
423
424 +/* Uninorth variant:
425 + *
426 + * 0 = not uninorth
427 + * 1 = U1.x or U2.x
428 + * 3 = U3
429 + * 4 = U4
430 + */
431 +extern int pmac_get_uninorth_variant(void);
432
433 #endif /* __ASM_POWERPC_PMAC_FEATURE_H */
434 #endif /* __KERNEL__ */
435 diff --git a/include/linux/Kbuild b/include/linux/Kbuild
436 index f30fa92..4b32bb1 100644
437 --- a/include/linux/Kbuild
438 +++ b/include/linux/Kbuild
439 @@ -217,6 +217,7 @@ unifdef-y += i2o-dev.h
440 unifdef-y += icmp.h
441 unifdef-y += icmpv6.h
442 unifdef-y += if_addr.h
443 +unifdef-y += if_addrlabel.h
444 unifdef-y += if_arp.h
445 unifdef-y += if_bridge.h
446 unifdef-y += if_ec.h
447 diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
448 index 7a9398e..540799b 100644
449 --- a/include/linux/hrtimer.h
450 +++ b/include/linux/hrtimer.h
451 @@ -300,7 +300,7 @@ hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
452
453 /* Precise sleep: */
454 extern long hrtimer_nanosleep(struct timespec *rqtp,
455 - struct timespec *rmtp,
456 + struct timespec __user *rmtp,
457 const enum hrtimer_mode mode,
458 const clockid_t clockid);
459 extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
460 diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
461 index 30d606a..7ca198b 100644
462 --- a/include/linux/hugetlb.h
463 +++ b/include/linux/hugetlb.h
464 @@ -17,6 +17,7 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
465 }
466
467 int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
468 +int hugetlb_overcommit_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
469 int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
470 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
471 int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int, int);
472 diff --git a/include/linux/ktime.h b/include/linux/ktime.h
473 index a6ddec1..816cf4e 100644
474 --- a/include/linux/ktime.h
475 +++ b/include/linux/ktime.h
476 @@ -310,6 +310,8 @@ static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
477 return ktime_sub_ns(kt, usec * 1000);
478 }
479
480 +extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
481 +
482 /*
483 * The resolution of the clocks. The resolution value is returned in
484 * the clock_getres() system call to give application programmers an
485 diff --git a/kernel/audit.c b/kernel/audit.c
486 index f93c271..801c946 100644
487 --- a/kernel/audit.c
488 +++ b/kernel/audit.c
489 @@ -1200,13 +1200,17 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
490 static inline int audit_expand(struct audit_buffer *ab, int extra)
491 {
492 struct sk_buff *skb = ab->skb;
493 - int ret = pskb_expand_head(skb, skb_headroom(skb), extra,
494 - ab->gfp_mask);
495 + int oldtail = skb_tailroom(skb);
496 + int ret = pskb_expand_head(skb, 0, extra, ab->gfp_mask);
497 + int newtail = skb_tailroom(skb);
498 +
499 if (ret < 0) {
500 audit_log_lost("out of memory in audit_expand");
501 return 0;
502 }
503 - return skb_tailroom(skb);
504 +
505 + skb->truesize += newtail - oldtail;
506 + return newtail;
507 }
508
509 /*
510 diff --git a/kernel/compat.c b/kernel/compat.c
511 index 42a1ed4..f2a2975 100644
512 --- a/kernel/compat.c
513 +++ b/kernel/compat.c
514 @@ -40,10 +40,36 @@ int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user
515 __put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
516 }
517
518 +static long compat_nanosleep_restart(struct restart_block *restart)
519 +{
520 + struct compat_timespec __user *rmtp;
521 + struct timespec rmt;
522 + mm_segment_t oldfs;
523 + long ret;
524 +
525 + rmtp = (struct compat_timespec __user *)(restart->arg1);
526 + restart->arg1 = (unsigned long)&rmt;
527 + oldfs = get_fs();
528 + set_fs(KERNEL_DS);
529 + ret = hrtimer_nanosleep_restart(restart);
530 + set_fs(oldfs);
531 +
532 + if (ret) {
533 + restart->fn = compat_nanosleep_restart;
534 + restart->arg1 = (unsigned long)rmtp;
535 +
536 + if (rmtp && put_compat_timespec(&rmt, rmtp))
537 + return -EFAULT;
538 + }
539 +
540 + return ret;
541 +}
542 +
543 asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
544 struct compat_timespec __user *rmtp)
545 {
546 struct timespec tu, rmt;
547 + mm_segment_t oldfs;
548 long ret;
549
550 if (get_compat_timespec(&tu, rqtp))
551 @@ -52,11 +78,21 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
552 if (!timespec_valid(&tu))
553 return -EINVAL;
554
555 - ret = hrtimer_nanosleep(&tu, rmtp ? &rmt : NULL, HRTIMER_MODE_REL,
556 - CLOCK_MONOTONIC);
557 + oldfs = get_fs();
558 + set_fs(KERNEL_DS);
559 + ret = hrtimer_nanosleep(&tu,
560 + rmtp ? (struct timespec __user *)&rmt : NULL,
561 + HRTIMER_MODE_REL, CLOCK_MONOTONIC);
562 + set_fs(oldfs);
563 +
564 + if (ret) {
565 + struct restart_block *restart
566 + = &current_thread_info()->restart_block;
567 +
568 + restart->fn = compat_nanosleep_restart;
569 + restart->arg1 = (unsigned long)rmtp;
570
571 - if (ret && rmtp) {
572 - if (put_compat_timespec(&rmt, rmtp))
573 + if (rmtp && put_compat_timespec(&rmt, rmtp))
574 return -EFAULT;
575 }
576
577 diff --git a/kernel/futex.c b/kernel/futex.c
578 index db9824d..55d78b5 100644
579 --- a/kernel/futex.c
580 +++ b/kernel/futex.c
581 @@ -2094,7 +2094,7 @@ asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
582
583 t = timespec_to_ktime(ts);
584 if (cmd == FUTEX_WAIT)
585 - t = ktime_add(ktime_get(), t);
586 + t = ktime_add_safe(ktime_get(), t);
587 tp = &t;
588 }
589 /*
590 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
591 index 0a43def..8682c79 100644
592 --- a/kernel/futex_compat.c
593 +++ b/kernel/futex_compat.c
594 @@ -175,7 +175,7 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
595
596 t = timespec_to_ktime(ts);
597 if (cmd == FUTEX_WAIT)
598 - t = ktime_add(ktime_get(), t);
599 + t = ktime_add_safe(ktime_get(), t);
600 tp = &t;
601 }
602 if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE)
603 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
604 index f994bb8..2429893 100644
605 --- a/kernel/hrtimer.c
606 +++ b/kernel/hrtimer.c
607 @@ -325,6 +325,24 @@ unsigned long ktime_divns(const ktime_t kt, s64 div)
608 }
609 #endif /* BITS_PER_LONG >= 64 */
610
611 +/*
612 + * Add two ktime values and do a safety check for overflow:
613 + */
614 +
615 +ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
616 +{
617 + ktime_t res = ktime_add(lhs, rhs);
618 +
619 + /*
620 + * We use KTIME_SEC_MAX here, the maximum timeout which we can
621 + * return to user space in a timespec:
622 + */
623 + if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
624 + res = ktime_set(KTIME_SEC_MAX, 0);
625 +
626 + return res;
627 +}
628 +
629 /* High resolution timer related functions */
630 #ifdef CONFIG_HIGH_RES_TIMERS
631
632 @@ -409,6 +427,8 @@ static int hrtimer_reprogram(struct hrtimer *timer,
633 ktime_t expires = ktime_sub(timer->expires, base->offset);
634 int res;
635
636 + WARN_ON_ONCE(timer->expires.tv64 < 0);
637 +
638 /*
639 * When the callback is running, we do not reprogram the clock event
640 * device. The timer callback is either running on a different CPU or
641 @@ -419,6 +439,15 @@ static int hrtimer_reprogram(struct hrtimer *timer,
642 if (hrtimer_callback_running(timer))
643 return 0;
644
645 + /*
646 + * CLOCK_REALTIME timer might be requested with an absolute
647 + * expiry time which is less than base->offset. Nothing wrong
648 + * about that, just avoid to call into the tick code, which
649 + * has now objections against negative expiry values.
650 + */
651 + if (expires.tv64 < 0)
652 + return -ETIME;
653 +
654 if (expires.tv64 >= expires_next->tv64)
655 return 0;
656
657 @@ -682,13 +711,7 @@ hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
658 */
659 orun++;
660 }
661 - timer->expires = ktime_add(timer->expires, interval);
662 - /*
663 - * Make sure, that the result did not wrap with a very large
664 - * interval.
665 - */
666 - if (timer->expires.tv64 < 0)
667 - timer->expires = ktime_set(KTIME_SEC_MAX, 0);
668 + timer->expires = ktime_add_safe(timer->expires, interval);
669
670 return orun;
671 }
672 @@ -839,7 +862,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
673 new_base = switch_hrtimer_base(timer, base);
674
675 if (mode == HRTIMER_MODE_REL) {
676 - tim = ktime_add(tim, new_base->get_time());
677 + tim = ktime_add_safe(tim, new_base->get_time());
678 /*
679 * CONFIG_TIME_LOW_RES is a temporary way for architectures
680 * to signal that they simply return xtime in
681 @@ -848,16 +871,8 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
682 * timeouts. This will go away with the GTOD framework.
683 */
684 #ifdef CONFIG_TIME_LOW_RES
685 - tim = ktime_add(tim, base->resolution);
686 + tim = ktime_add_safe(tim, base->resolution);
687 #endif
688 - /*
689 - * Careful here: User space might have asked for a
690 - * very long sleep, so the add above might result in a
691 - * negative number, which enqueues the timer in front
692 - * of the queue.
693 - */
694 - if (tim.tv64 < 0)
695 - tim.tv64 = KTIME_MAX;
696 }
697 timer->expires = tim;
698
699 @@ -1291,11 +1306,26 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
700 return t->task == NULL;
701 }
702
703 +static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
704 +{
705 + struct timespec rmt;
706 + ktime_t rem;
707 +
708 + rem = ktime_sub(timer->expires, timer->base->get_time());
709 + if (rem.tv64 <= 0)
710 + return 0;
711 + rmt = ktime_to_timespec(rem);
712 +
713 + if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
714 + return -EFAULT;
715 +
716 + return 1;
717 +}
718 +
719 long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
720 {
721 struct hrtimer_sleeper t;
722 - struct timespec *rmtp;
723 - ktime_t time;
724 + struct timespec __user *rmtp;
725
726 restart->fn = do_no_restart_syscall;
727
728 @@ -1305,12 +1335,11 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
729 if (do_nanosleep(&t, HRTIMER_MODE_ABS))
730 return 0;
731
732 - rmtp = (struct timespec *)restart->arg1;
733 + rmtp = (struct timespec __user *)restart->arg1;
734 if (rmtp) {
735 - time = ktime_sub(t.timer.expires, t.timer.base->get_time());
736 - if (time.tv64 <= 0)
737 - return 0;
738 - *rmtp = ktime_to_timespec(time);
739 + int ret = update_rmtp(&t.timer, rmtp);
740 + if (ret <= 0)
741 + return ret;
742 }
743
744 restart->fn = hrtimer_nanosleep_restart;
745 @@ -1319,12 +1348,11 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
746 return -ERESTART_RESTARTBLOCK;
747 }
748
749 -long hrtimer_nanosleep(struct timespec *rqtp, struct timespec *rmtp,
750 +long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
751 const enum hrtimer_mode mode, const clockid_t clockid)
752 {
753 struct restart_block *restart;
754 struct hrtimer_sleeper t;
755 - ktime_t rem;
756
757 hrtimer_init(&t.timer, clockid, mode);
758 t.timer.expires = timespec_to_ktime(*rqtp);
759 @@ -1336,10 +1364,9 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec *rmtp,
760 return -ERESTARTNOHAND;
761
762 if (rmtp) {
763 - rem = ktime_sub(t.timer.expires, t.timer.base->get_time());
764 - if (rem.tv64 <= 0)
765 - return 0;
766 - *rmtp = ktime_to_timespec(rem);
767 + int ret = update_rmtp(&t.timer, rmtp);
768 + if (ret <= 0)
769 + return ret;
770 }
771
772 restart = &current_thread_info()->restart_block;
773 @@ -1355,8 +1382,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec *rmtp,
774 asmlinkage long
775 sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
776 {
777 - struct timespec tu, rmt;
778 - int ret;
779 + struct timespec tu;
780
781 if (copy_from_user(&tu, rqtp, sizeof(tu)))
782 return -EFAULT;
783 @@ -1364,15 +1390,7 @@ sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
784 if (!timespec_valid(&tu))
785 return -EINVAL;
786
787 - ret = hrtimer_nanosleep(&tu, rmtp ? &rmt : NULL, HRTIMER_MODE_REL,
788 - CLOCK_MONOTONIC);
789 -
790 - if (ret && rmtp) {
791 - if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
792 - return -EFAULT;
793 - }
794 -
795 - return ret;
796 + return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
797 }
798
799 /*
800 diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
801 index 44019ce..465c69c 100644
802 --- a/kernel/irq/chip.c
803 +++ b/kernel/irq/chip.c
804 @@ -246,6 +246,17 @@ static unsigned int default_startup(unsigned int irq)
805 }
806
807 /*
808 + * default shutdown function
809 + */
810 +static void default_shutdown(unsigned int irq)
811 +{
812 + struct irq_desc *desc = irq_desc + irq;
813 +
814 + desc->chip->mask(irq);
815 + desc->status |= IRQ_MASKED;
816 +}
817 +
818 +/*
819 * Fixup enable/disable function pointers
820 */
821 void irq_chip_set_defaults(struct irq_chip *chip)
822 @@ -256,8 +267,15 @@ void irq_chip_set_defaults(struct irq_chip *chip)
823 chip->disable = default_disable;
824 if (!chip->startup)
825 chip->startup = default_startup;
826 + /*
827 + * We use chip->disable, when the user provided its own. When
828 + * we have default_disable set for chip->disable, then we need
829 + * to use default_shutdown, otherwise the irq line is not
830 + * disabled on free_irq():
831 + */
832 if (!chip->shutdown)
833 - chip->shutdown = chip->disable;
834 + chip->shutdown = chip->disable != default_disable ?
835 + chip->disable : default_shutdown;
836 if (!chip->name)
837 chip->name = chip->typename;
838 if (!chip->end)
839 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
840 index 35b4bbf..9076432 100644
841 --- a/kernel/posix-timers.c
842 +++ b/kernel/posix-timers.c
843 @@ -766,9 +766,11 @@ common_timer_set(struct k_itimer *timr, int flags,
844 /* SIGEV_NONE timers are not queued ! See common_timer_get */
845 if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
846 /* Setup correct expiry time for relative timers */
847 - if (mode == HRTIMER_MODE_REL)
848 - timer->expires = ktime_add(timer->expires,
849 - timer->base->get_time());
850 + if (mode == HRTIMER_MODE_REL) {
851 + timer->expires =
852 + ktime_add_safe(timer->expires,
853 + timer->base->get_time());
854 + }
855 return 0;
856 }
857
858 @@ -981,20 +983,9 @@ sys_clock_getres(const clockid_t which_clock, struct timespec __user *tp)
859 static int common_nsleep(const clockid_t which_clock, int flags,
860 struct timespec *tsave, struct timespec __user *rmtp)
861 {
862 - struct timespec rmt;
863 - int ret;
864 -
865 - ret = hrtimer_nanosleep(tsave, rmtp ? &rmt : NULL,
866 - flags & TIMER_ABSTIME ?
867 - HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
868 - which_clock);
869 -
870 - if (ret && rmtp) {
871 - if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
872 - return -EFAULT;
873 - }
874 -
875 - return ret;
876 + return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
877 + HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
878 + which_clock);
879 }
880
881 asmlinkage long
882 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
883 index c68f68d..e3e0ee3 100644
884 --- a/kernel/sysctl.c
885 +++ b/kernel/sysctl.c
886 @@ -910,7 +910,7 @@ static struct ctl_table vm_table[] = {
887 .data = &nr_overcommit_huge_pages,
888 .maxlen = sizeof(nr_overcommit_huge_pages),
889 .mode = 0644,
890 - .proc_handler = &proc_doulongvec_minmax,
891 + .proc_handler = &hugetlb_overcommit_handler,
892 },
893 #endif
894 {
895 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
896 index db861d8..9c746cb 100644
897 --- a/mm/hugetlb.c
898 +++ b/mm/hugetlb.c
899 @@ -605,6 +605,16 @@ int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
900 return 0;
901 }
902
903 +int hugetlb_overcommit_handler(struct ctl_table *table, int write,
904 + struct file *file, void __user *buffer,
905 + size_t *length, loff_t *ppos)
906 +{
907 + spin_lock(&hugetlb_lock);
908 + proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
909 + spin_unlock(&hugetlb_lock);
910 + return 0;
911 +}
912 +
913 #endif /* CONFIG_SYSCTL */
914
915 int hugetlb_report_meminfo(char *buf)
916 diff --git a/mm/memory.c b/mm/memory.c
917 index 4b0144b..da8b74b 100644
918 --- a/mm/memory.c
919 +++ b/mm/memory.c
920 @@ -980,6 +980,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
921 int i;
922 unsigned int vm_flags;
923
924 + if (len <= 0)
925 + return 0;
926 /*
927 * Require read or write permissions.
928 * If 'force' is set, we only require the "MAY" flags.
929 diff --git a/mm/slub.c b/mm/slub.c
930 index 474945e..c432f68 100644
931 --- a/mm/slub.c
932 +++ b/mm/slub.c
933 @@ -2592,6 +2592,7 @@ EXPORT_SYMBOL(ksize);
934 void kfree(const void *x)
935 {
936 struct page *page;
937 + void *object = (void *)x;
938
939 if (unlikely(ZERO_OR_NULL_PTR(x)))
940 return;
941 @@ -2601,7 +2602,7 @@ void kfree(const void *x)
942 put_page(page);
943 return;
944 }
945 - slab_free(page->slab, page, (void *)x, __builtin_return_address(0));
946 + slab_free(page->slab, page, object, __builtin_return_address(0));
947 }
948 EXPORT_SYMBOL(kfree);
949
950 diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
951 index 17f7fb7..2726adc 100644
952 --- a/net/bluetooth/hci_sysfs.c
953 +++ b/net/bluetooth/hci_sysfs.c
954 @@ -12,6 +12,8 @@
955 #undef BT_DBG
956 #define BT_DBG(D...)
957 #endif
958 +static struct workqueue_struct *btaddconn;
959 +static struct workqueue_struct *btdelconn;
960
961 static inline char *typetostr(int type)
962 {
963 @@ -279,6 +281,7 @@ static void add_conn(struct work_struct *work)
964 struct hci_conn *conn = container_of(work, struct hci_conn, work);
965 int i;
966
967 + flush_workqueue(btdelconn);
968 if (device_add(&conn->dev) < 0) {
969 BT_ERR("Failed to register connection device");
970 return;
971 @@ -313,6 +316,7 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
972
973 INIT_WORK(&conn->work, add_conn);
974
975 + queue_work(btaddconn, &conn->work);
976 schedule_work(&conn->work);
977 }
978
979 @@ -349,6 +353,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
980
981 INIT_WORK(&conn->work, del_conn);
982
983 + queue_work(btdelconn, &conn->work);
984 schedule_work(&conn->work);
985 }
986
987 @@ -398,31 +403,52 @@ int __init bt_sysfs_init(void)
988 {
989 int err;
990
991 + btaddconn = create_singlethread_workqueue("btaddconn");
992 + if (!btaddconn) {
993 + err = -ENOMEM;
994 + goto out;
995 + }
996 + btdelconn = create_singlethread_workqueue("btdelconn");
997 + if (!btdelconn) {
998 + err = -ENOMEM;
999 + goto out_del;
1000 + }
1001 +
1002 bt_platform = platform_device_register_simple("bluetooth", -1, NULL, 0);
1003 - if (IS_ERR(bt_platform))
1004 - return PTR_ERR(bt_platform);
1005 + if (IS_ERR(bt_platform)) {
1006 + err = PTR_ERR(bt_platform);
1007 + goto out_platform;
1008 + }
1009
1010 err = bus_register(&bt_bus);
1011 - if (err < 0) {
1012 - platform_device_unregister(bt_platform);
1013 - return err;
1014 - }
1015 + if (err < 0)
1016 + goto out_bus;
1017
1018 bt_class = class_create(THIS_MODULE, "bluetooth");
1019 if (IS_ERR(bt_class)) {
1020 - bus_unregister(&bt_bus);
1021 - platform_device_unregister(bt_platform);
1022 - return PTR_ERR(bt_class);
1023 + err = PTR_ERR(bt_class);
1024 + goto out_class;
1025 }
1026
1027 return 0;
1028 +
1029 +out_class:
1030 + bus_unregister(&bt_bus);
1031 +out_bus:
1032 + platform_device_unregister(bt_platform);
1033 +out_platform:
1034 + destroy_workqueue(btdelconn);
1035 +out_del:
1036 + destroy_workqueue(btaddconn);
1037 +out:
1038 + return err;
1039 }
1040
1041 void bt_sysfs_cleanup(void)
1042 {
1043 + destroy_workqueue(btaddconn);
1044 + destroy_workqueue(btdelconn);
1045 class_destroy(bt_class);
1046 -
1047 bus_unregister(&bt_bus);
1048 -
1049 platform_device_unregister(bt_platform);
1050 }
1051 diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
1052 index 0dfee27..2f78e1e 100644
1053 --- a/net/ipv4/fib_hash.c
1054 +++ b/net/ipv4/fib_hash.c
1055 @@ -434,19 +434,43 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg)
1056
1057 if (fa && fa->fa_tos == tos &&
1058 fa->fa_info->fib_priority == fi->fib_priority) {
1059 - struct fib_alias *fa_orig;
1060 + struct fib_alias *fa_first, *fa_match;
1061
1062 err = -EEXIST;
1063 if (cfg->fc_nlflags & NLM_F_EXCL)
1064 goto out;
1065
1066 + /* We have 2 goals:
1067 + * 1. Find exact match for type, scope, fib_info to avoid
1068 + * duplicate routes
1069 + * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
1070 + */
1071 + fa_match = NULL;
1072 + fa_first = fa;
1073 + fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1074 + list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
1075 + if (fa->fa_tos != tos)
1076 + break;
1077 + if (fa->fa_info->fib_priority != fi->fib_priority)
1078 + break;
1079 + if (fa->fa_type == cfg->fc_type &&
1080 + fa->fa_scope == cfg->fc_scope &&
1081 + fa->fa_info == fi) {
1082 + fa_match = fa;
1083 + break;
1084 + }
1085 + }
1086 +
1087 if (cfg->fc_nlflags & NLM_F_REPLACE) {
1088 struct fib_info *fi_drop;
1089 u8 state;
1090
1091 - if (fi->fib_treeref > 1)
1092 + fa = fa_first;
1093 + if (fa_match) {
1094 + if (fa == fa_match)
1095 + err = 0;
1096 goto out;
1097 -
1098 + }
1099 write_lock_bh(&fib_hash_lock);
1100 fi_drop = fa->fa_info;
1101 fa->fa_info = fi;
1102 @@ -469,20 +493,11 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg)
1103 * uses the same scope, type, and nexthop
1104 * information.
1105 */
1106 - fa_orig = fa;
1107 - fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1108 - list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
1109 - if (fa->fa_tos != tos)
1110 - break;
1111 - if (fa->fa_info->fib_priority != fi->fib_priority)
1112 - break;
1113 - if (fa->fa_type == cfg->fc_type &&
1114 - fa->fa_scope == cfg->fc_scope &&
1115 - fa->fa_info == fi)
1116 - goto out;
1117 - }
1118 + if (fa_match)
1119 + goto out;
1120 +
1121 if (!(cfg->fc_nlflags & NLM_F_APPEND))
1122 - fa = fa_orig;
1123 + fa = fa_first;
1124 }
1125
1126 err = -ENOENT;
1127 diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
1128 index 1010b46..fd46509 100644
1129 --- a/net/ipv4/fib_trie.c
1130 +++ b/net/ipv4/fib_trie.c
1131 @@ -1203,20 +1203,45 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1132 * and we need to allocate a new one of those as well.
1133 */
1134
1135 - if (fa && fa->fa_info->fib_priority == fi->fib_priority) {
1136 - struct fib_alias *fa_orig;
1137 + if (fa && fa->fa_tos == tos &&
1138 + fa->fa_info->fib_priority == fi->fib_priority) {
1139 + struct fib_alias *fa_first, *fa_match;
1140
1141 err = -EEXIST;
1142 if (cfg->fc_nlflags & NLM_F_EXCL)
1143 goto out;
1144
1145 + /* We have 2 goals:
1146 + * 1. Find exact match for type, scope, fib_info to avoid
1147 + * duplicate routes
1148 + * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
1149 + */
1150 + fa_match = NULL;
1151 + fa_first = fa;
1152 + fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1153 + list_for_each_entry_continue(fa, fa_head, fa_list) {
1154 + if (fa->fa_tos != tos)
1155 + break;
1156 + if (fa->fa_info->fib_priority != fi->fib_priority)
1157 + break;
1158 + if (fa->fa_type == cfg->fc_type &&
1159 + fa->fa_scope == cfg->fc_scope &&
1160 + fa->fa_info == fi) {
1161 + fa_match = fa;
1162 + break;
1163 + }
1164 + }
1165 +
1166 if (cfg->fc_nlflags & NLM_F_REPLACE) {
1167 struct fib_info *fi_drop;
1168 u8 state;
1169
1170 - if (fi->fib_treeref > 1)
1171 + fa = fa_first;
1172 + if (fa_match) {
1173 + if (fa == fa_match)
1174 + err = 0;
1175 goto out;
1176 -
1177 + }
1178 err = -ENOBUFS;
1179 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1180 if (new_fa == NULL)
1181 @@ -1228,7 +1253,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1182 new_fa->fa_type = cfg->fc_type;
1183 new_fa->fa_scope = cfg->fc_scope;
1184 state = fa->fa_state;
1185 - new_fa->fa_state &= ~FA_S_ACCESSED;
1186 + new_fa->fa_state = state & ~FA_S_ACCESSED;
1187
1188 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1189 alias_free_mem_rcu(fa);
1190 @@ -1245,20 +1270,11 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1191 * uses the same scope, type, and nexthop
1192 * information.
1193 */
1194 - fa_orig = fa;
1195 - list_for_each_entry(fa, fa_orig->fa_list.prev, fa_list) {
1196 - if (fa->fa_tos != tos)
1197 - break;
1198 - if (fa->fa_info->fib_priority != fi->fib_priority)
1199 - break;
1200 - if (fa->fa_type == cfg->fc_type &&
1201 - fa->fa_scope == cfg->fc_scope &&
1202 - fa->fa_info == fi) {
1203 - goto out;
1204 - }
1205 - }
1206 + if (fa_match)
1207 + goto out;
1208 +
1209 if (!(cfg->fc_nlflags & NLM_F_APPEND))
1210 - fa = fa_orig;
1211 + fa = fa_first;
1212 }
1213 err = -ENOENT;
1214 if (!(cfg->fc_nlflags & NLM_F_CREATE))
1215 @@ -1614,9 +1630,8 @@ static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg)
1216 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1217
1218 fa_to_delete = NULL;
1219 - fa_head = fa->fa_list.prev;
1220 -
1221 - list_for_each_entry(fa, fa_head, fa_list) {
1222 + fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1223 + list_for_each_entry_continue(fa, fa_head, fa_list) {
1224 struct fib_info *fi = fa->fa_info;
1225
1226 if (fa->fa_tos != tos)
1227 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
1228 index e468e7a..6d2979c 100644
1229 --- a/net/ipv4/inet_diag.c
1230 +++ b/net/ipv4/inet_diag.c
1231 @@ -259,8 +259,10 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
1232 const struct inet_diag_handler *handler;
1233
1234 handler = inet_diag_lock_handler(nlh->nlmsg_type);
1235 - if (!handler)
1236 - return -ENOENT;
1237 + if (IS_ERR(handler)) {
1238 + err = PTR_ERR(handler);
1239 + goto unlock;
1240 + }
1241
1242 hashinfo = handler->idiag_hashinfo;
1243 err = -EINVAL;
1244 @@ -708,8 +710,8 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
1245 struct inet_hashinfo *hashinfo;
1246
1247 handler = inet_diag_lock_handler(cb->nlh->nlmsg_type);
1248 - if (!handler)
1249 - goto no_handler;
1250 + if (IS_ERR(handler))
1251 + goto unlock;
1252
1253 hashinfo = handler->idiag_hashinfo;
1254
1255 @@ -838,7 +840,6 @@ done:
1256 cb->args[2] = num;
1257 unlock:
1258 inet_diag_unlock_handler(handler);
1259 -no_handler:
1260 return skb->len;
1261 }
1262
1263 diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
1264 index bc9e575..61c60cf 100644
1265 --- a/net/ipv4/ip_output.c
1266 +++ b/net/ipv4/ip_output.c
1267 @@ -462,6 +462,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
1268 if (skb_shinfo(skb)->frag_list) {
1269 struct sk_buff *frag;
1270 int first_len = skb_pagelen(skb);
1271 + int truesizes = 0;
1272
1273 if (first_len - hlen > mtu ||
1274 ((first_len - hlen) & 7) ||
1275 @@ -485,7 +486,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
1276 sock_hold(skb->sk);
1277 frag->sk = skb->sk;
1278 frag->destructor = sock_wfree;
1279 - skb->truesize -= frag->truesize;
1280 + truesizes += frag->truesize;
1281 }
1282 }
1283
1284 @@ -496,6 +497,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
1285 frag = skb_shinfo(skb)->frag_list;
1286 skb_shinfo(skb)->frag_list = NULL;
1287 skb->data_len = first_len - skb_headlen(skb);
1288 + skb->truesize -= truesizes;
1289 skb->len = first_len;
1290 iph->tot_len = htons(first_len);
1291 iph->frag_off = htons(IP_MF);
1292 diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
1293 index 2c44a94..80cab8c 100644
1294 --- a/net/ipv4/ipcomp.c
1295 +++ b/net/ipv4/ipcomp.c
1296 @@ -74,6 +74,7 @@ out:
1297
1298 static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
1299 {
1300 + int nexthdr;
1301 int err = -ENOMEM;
1302 struct ip_comp_hdr *ipch;
1303
1304 @@ -84,13 +85,15 @@ static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
1305
1306 /* Remove ipcomp header and decompress original payload */
1307 ipch = (void *)skb->data;
1308 + nexthdr = ipch->nexthdr;
1309 +
1310 skb->transport_header = skb->network_header + sizeof(*ipch);
1311 __skb_pull(skb, sizeof(*ipch));
1312 err = ipcomp_decompress(x, skb);
1313 if (err)
1314 goto out;
1315
1316 - err = ipch->nexthdr;
1317 + err = nexthdr;
1318
1319 out:
1320 return err;
1321 diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
1322 index bec6fe8..16e04b7 100644
1323 --- a/net/ipv4/sysctl_net_ipv4.c
1324 +++ b/net/ipv4/sysctl_net_ipv4.c
1325 @@ -248,7 +248,7 @@ static int strategy_allowed_congestion_control(ctl_table *table, int __user *nam
1326
1327 tcp_get_available_congestion_control(tbl.data, tbl.maxlen);
1328 ret = sysctl_string(&tbl, name, nlen, oldval, oldlenp, newval, newlen);
1329 - if (ret == 0 && newval && newlen)
1330 + if (ret == 1 && newval && newlen)
1331 ret = tcp_set_allowed_congestion_control(tbl.data);
1332 kfree(tbl.data);
1333
1334 diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
1335 index 3268451..152f83d 100644
1336 --- a/net/ipv4/xfrm4_tunnel.c
1337 +++ b/net/ipv4/xfrm4_tunnel.c
1338 @@ -50,7 +50,7 @@ static struct xfrm_type ipip_type = {
1339
1340 static int xfrm_tunnel_rcv(struct sk_buff *skb)
1341 {
1342 - return xfrm4_rcv_spi(skb, IPPROTO_IP, ip_hdr(skb)->saddr);
1343 + return xfrm4_rcv_spi(skb, IPPROTO_IPIP, ip_hdr(skb)->saddr);
1344 }
1345
1346 static int xfrm_tunnel_err(struct sk_buff *skb, u32 info)
1347 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
1348 index 3bef30e..2f59baa 100644
1349 --- a/net/ipv6/ip6_output.c
1350 +++ b/net/ipv6/ip6_output.c
1351 @@ -609,6 +609,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
1352
1353 if (skb_shinfo(skb)->frag_list) {
1354 int first_len = skb_pagelen(skb);
1355 + int truesizes = 0;
1356
1357 if (first_len - hlen > mtu ||
1358 ((first_len - hlen) & 7) ||
1359 @@ -631,7 +632,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
1360 sock_hold(skb->sk);
1361 frag->sk = skb->sk;
1362 frag->destructor = sock_wfree;
1363 - skb->truesize -= frag->truesize;
1364 + truesizes += frag->truesize;
1365 }
1366 }
1367
1368 @@ -662,6 +663,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
1369
1370 first_len = skb_pagelen(skb);
1371 skb->data_len = first_len - skb_headlen(skb);
1372 + skb->truesize -= truesizes;
1373 skb->len = first_len;
1374 ipv6_hdr(skb)->payload_len = htons(first_len -
1375 sizeof(struct ipv6hdr));
1376 diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
1377 index 0cd4056..1c5b09f 100644
1378 --- a/net/ipv6/ipcomp6.c
1379 +++ b/net/ipv6/ipcomp6.c
1380 @@ -64,6 +64,7 @@ static LIST_HEAD(ipcomp6_tfms_list);
1381
1382 static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb)
1383 {
1384 + int nexthdr;
1385 int err = -ENOMEM;
1386 struct ip_comp_hdr *ipch;
1387 int plen, dlen;
1388 @@ -79,6 +80,8 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb)
1389
1390 /* Remove ipcomp header and decompress original payload */
1391 ipch = (void *)skb->data;
1392 + nexthdr = ipch->nexthdr;
1393 +
1394 skb->transport_header = skb->network_header + sizeof(*ipch);
1395 __skb_pull(skb, sizeof(*ipch));
1396
1397 @@ -108,7 +111,7 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb)
1398 skb->truesize += dlen - plen;
1399 __skb_put(skb, dlen - plen);
1400 skb_copy_to_linear_data(skb, scratch, dlen);
1401 - err = ipch->nexthdr;
1402 + err = nexthdr;
1403
1404 out_put_cpu:
1405 put_cpu();
1406 diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
1407 index 7a3f64c..17ef459 100644
1408 --- a/net/netfilter/nf_conntrack_proto_tcp.c
1409 +++ b/net/netfilter/nf_conntrack_proto_tcp.c
1410 @@ -135,7 +135,7 @@ enum tcp_bit_set {
1411 * CLOSE_WAIT: ACK seen (after FIN)
1412 * LAST_ACK: FIN seen (after FIN)
1413 * TIME_WAIT: last ACK seen
1414 - * CLOSE: closed connection
1415 + * CLOSE: closed connection (RST)
1416 *
1417 * LISTEN state is not used.
1418 *
1419 @@ -834,8 +834,21 @@ static int tcp_packet(struct nf_conn *conntrack,
1420 case TCP_CONNTRACK_SYN_SENT:
1421 if (old_state < TCP_CONNTRACK_TIME_WAIT)
1422 break;
1423 - if ((conntrack->proto.tcp.seen[!dir].flags &
1424 - IP_CT_TCP_FLAG_CLOSE_INIT)
1425 + /* RFC 1122: "When a connection is closed actively,
1426 + * it MUST linger in TIME-WAIT state for a time 2xMSL
1427 + * (Maximum Segment Lifetime). However, it MAY accept
1428 + * a new SYN from the remote TCP to reopen the connection
1429 + * directly from TIME-WAIT state, if..."
1430 + * We ignore the conditions because we are in the
1431 + * TIME-WAIT state anyway.
1432 + *
1433 + * Handle aborted connections: we and the server
1434 + * think there is an existing connection but the client
1435 + * aborts it and starts a new one.
1436 + */
1437 + if (((conntrack->proto.tcp.seen[dir].flags
1438 + | conntrack->proto.tcp.seen[!dir].flags)
1439 + & IP_CT_TCP_FLAG_CLOSE_INIT)
1440 || (conntrack->proto.tcp.last_dir == dir
1441 && conntrack->proto.tcp.last_index == TCP_RST_SET)) {
1442 /* Attempt to reopen a closed/aborted connection.
1443 @@ -848,18 +861,25 @@ static int tcp_packet(struct nf_conn *conntrack,
1444 }
1445 /* Fall through */
1446 case TCP_CONNTRACK_IGNORE:
1447 - /* Ignored packets:
1448 + /* Ignored packets:
1449 + *
1450 + * Our connection entry may be out of sync, so ignore
1451 + * packets which may signal the real connection between
1452 + * the client and the server.
1453 *
1454 * a) SYN in ORIGINAL
1455 * b) SYN/ACK in REPLY
1456 * c) ACK in reply direction after initial SYN in original.
1457 + *
1458 + * If the ignored packet is invalid, the receiver will send
1459 + * a RST we'll catch below.
1460 */
1461 if (index == TCP_SYNACK_SET
1462 && conntrack->proto.tcp.last_index == TCP_SYN_SET
1463 && conntrack->proto.tcp.last_dir != dir
1464 && ntohl(th->ack_seq) ==
1465 conntrack->proto.tcp.last_end) {
1466 - /* This SYN/ACK acknowledges a SYN that we earlier
1467 + /* b) This SYN/ACK acknowledges a SYN that we earlier
1468 * ignored as invalid. This means that the client and
1469 * the server are both in sync, while the firewall is
1470 * not. We kill this session and block the SYN/ACK so
1471 @@ -884,7 +904,7 @@ static int tcp_packet(struct nf_conn *conntrack,
1472 write_unlock_bh(&tcp_lock);
1473 if (LOG_INVALID(IPPROTO_TCP))
1474 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
1475 - "nf_ct_tcp: invalid packed ignored ");
1476 + "nf_ct_tcp: invalid packet ignored ");
1477 return NF_ACCEPT;
1478 case TCP_CONNTRACK_MAX:
1479 /* Invalid packet */
1480 @@ -938,8 +958,7 @@ static int tcp_packet(struct nf_conn *conntrack,
1481
1482 conntrack->proto.tcp.state = new_state;
1483 if (old_state != new_state
1484 - && (new_state == TCP_CONNTRACK_FIN_WAIT
1485 - || new_state == TCP_CONNTRACK_CLOSE))
1486 + && new_state == TCP_CONNTRACK_FIN_WAIT)
1487 conntrack->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
1488 timeout = conntrack->proto.tcp.retrans >= nf_ct_tcp_max_retrans
1489 && *tcp_timeouts[new_state] > nf_ct_tcp_timeout_max_retrans
1490 diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
1491 index ceda889..4d5ce77 100644
1492 --- a/net/sched/em_meta.c
1493 +++ b/net/sched/em_meta.c
1494 @@ -719,11 +719,13 @@ static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
1495
1496 static inline void meta_delete(struct meta_match *meta)
1497 {
1498 - struct meta_type_ops *ops = meta_type_ops(&meta->lvalue);
1499 + if (meta) {
1500 + struct meta_type_ops *ops = meta_type_ops(&meta->lvalue);
1501
1502 - if (ops && ops->destroy) {
1503 - ops->destroy(&meta->lvalue);
1504 - ops->destroy(&meta->rvalue);
1505 + if (ops && ops->destroy) {
1506 + ops->destroy(&meta->lvalue);
1507 + ops->destroy(&meta->rvalue);
1508 + }
1509 }
1510
1511 kfree(meta);
1512 diff --git a/net/sched/ematch.c b/net/sched/ematch.c
1513 index f3a104e..c856031 100644
1514 --- a/net/sched/ematch.c
1515 +++ b/net/sched/ematch.c
1516 @@ -305,10 +305,9 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct rtattr *rta,
1517 struct tcf_ematch_tree_hdr *tree_hdr;
1518 struct tcf_ematch *em;
1519
1520 - if (!rta) {
1521 - memset(tree, 0, sizeof(*tree));
1522 + memset(tree, 0, sizeof(*tree));
1523 + if (!rta)
1524 return 0;
1525 - }
1526
1527 if (rtattr_parse_nested(tb, TCA_EMATCH_TREE_MAX, rta) < 0)
1528 goto errout;
1529 diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
1530 index 4bf715d..3a16aba 100644
1531 --- a/security/selinux/ss/services.c
1532 +++ b/security/selinux/ss/services.c
1533 @@ -2629,7 +2629,6 @@ int security_netlbl_sid_to_secattr(u32 sid, struct netlbl_lsm_secattr *secattr)
1534
1535 netlbl_sid_to_secattr_failure:
1536 POLICY_RDUNLOCK;
1537 - netlbl_secattr_destroy(secattr);
1538 return rc;
1539 }
1540 #endif /* CONFIG_NETLABEL */