Magellan Linux

Contents of /trunk/kernel26-alx/patches-2.6.17-r5/0105-2.6.17.12-all-fixes.patch

Revision 199
Fri May 18 11:04:36 2007 UTC by niro
File size: 51157 bytes
-import

1 diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
2 index 8255a9b..7e8a4d1 100644
3 --- a/arch/ia64/sn/kernel/xpc_channel.c
4 +++ b/arch/ia64/sn/kernel/xpc_channel.c
5 @@ -279,8 +279,8 @@ xpc_pull_remote_cachelines(struct xpc_pa
6 return part->reason;
7 }
8
9 - bte_ret = xp_bte_copy((u64) src, (u64) ia64_tpa((u64) dst),
10 - (u64) cnt, (BTE_NORMAL | BTE_WACQUIRE), NULL);
11 + bte_ret = xp_bte_copy((u64) src, (u64) dst, (u64) cnt,
12 + (BTE_NORMAL | BTE_WACQUIRE), NULL);
13 if (bte_ret == BTE_SUCCESS) {
14 return xpcSuccess;
15 }
16 diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
17 index 99b123a..440039f 100644
18 --- a/arch/ia64/sn/kernel/xpc_main.c
19 +++ b/arch/ia64/sn/kernel/xpc_main.c
20 @@ -1052,6 +1052,8 @@ xpc_do_exit(enum xpc_retval reason)
21 if (xpc_sysctl) {
22 unregister_sysctl_table(xpc_sysctl);
23 }
24 +
25 + kfree(xpc_remote_copy_buffer_base);
26 }
27
28
29 @@ -1212,24 +1214,20 @@ xpc_init(void)
30 partid_t partid;
31 struct xpc_partition *part;
32 pid_t pid;
33 + size_t buf_size;
34
35
36 if (!ia64_platform_is("sn2")) {
37 return -ENODEV;
38 }
39
40 - /*
41 - * xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng
42 - * various portions of a partition's reserved page. Its size is based
43 - * on the size of the reserved page header and part_nasids mask. So we
44 - * need to ensure that the other items will fit as well.
45 - */
46 - if (XPC_RP_VARS_SIZE > XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES) {
47 - dev_err(xpc_part, "xpc_remote_copy_buffer is not big enough\n");
48 - return -EPERM;
49 - }
50 - DBUG_ON((u64) xpc_remote_copy_buffer !=
51 - L1_CACHE_ALIGN((u64) xpc_remote_copy_buffer));
52 +
53 + buf_size = max(XPC_RP_VARS_SIZE,
54 + XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
55 + xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
56 + GFP_KERNEL, &xpc_remote_copy_buffer_base);
57 + if (xpc_remote_copy_buffer == NULL)
58 + return -ENOMEM;
59
60 snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
61 snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");
62 @@ -1293,6 +1291,8 @@ xpc_init(void)
63 if (xpc_sysctl) {
64 unregister_sysctl_table(xpc_sysctl);
65 }
66 +
67 + kfree(xpc_remote_copy_buffer_base);
68 return -EBUSY;
69 }
70
71 @@ -1311,6 +1311,8 @@ xpc_init(void)
72 if (xpc_sysctl) {
73 unregister_sysctl_table(xpc_sysctl);
74 }
75 +
76 + kfree(xpc_remote_copy_buffer_base);
77 return -EBUSY;
78 }
79
80 @@ -1362,6 +1364,8 @@ xpc_init(void)
81 if (xpc_sysctl) {
82 unregister_sysctl_table(xpc_sysctl);
83 }
84 +
85 + kfree(xpc_remote_copy_buffer_base);
86 return -EBUSY;
87 }
88
89 diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
90 index 2a89cfc..57c723f 100644
91 --- a/arch/ia64/sn/kernel/xpc_partition.c
92 +++ b/arch/ia64/sn/kernel/xpc_partition.c
93 @@ -71,19 +71,15 @@ struct xpc_partition xpc_partitions[XP_M
94 * Generic buffer used to store a local copy of portions of a remote
95 * partition's reserved page (either its header and part_nasids mask,
96 * or its vars).
97 - *
98 - * xpc_discovery runs only once and is a seperate thread that is
99 - * very likely going to be processing in parallel with receiving
100 - * interrupts.
101 */
102 -char ____cacheline_aligned xpc_remote_copy_buffer[XPC_RP_HEADER_SIZE +
103 - XP_NASID_MASK_BYTES];
104 +char *xpc_remote_copy_buffer;
105 +void *xpc_remote_copy_buffer_base;
106
107
108 /*
109 * Guarantee that the kmalloc'd memory is cacheline aligned.
110 */
111 -static void *
112 +void *
113 xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
114 {
115 /* see if kmalloc will give us cachline aligned memory by default */
116 @@ -148,7 +144,7 @@ xpc_get_rsvd_page_pa(int nasid)
117 }
118 }
119
120 - bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_len,
121 + bte_res = xp_bte_copy(rp_pa, buf, buf_len,
122 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
123 if (bte_res != BTE_SUCCESS) {
124 dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
125 @@ -447,7 +443,7 @@ xpc_check_remote_hb(void)
126
127 /* pull the remote_hb cache line */
128 bres = xp_bte_copy(part->remote_vars_pa,
129 - ia64_tpa((u64) remote_vars),
130 + (u64) remote_vars,
131 XPC_RP_VARS_SIZE,
132 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
133 if (bres != BTE_SUCCESS) {
134 @@ -498,8 +494,7 @@ xpc_get_remote_rp(int nasid, u64 *discov
135
136
137 /* pull over the reserved page header and part_nasids mask */
138 -
139 - bres = xp_bte_copy(*remote_rp_pa, ia64_tpa((u64) remote_rp),
140 + bres = xp_bte_copy(*remote_rp_pa, (u64) remote_rp,
141 XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
142 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
143 if (bres != BTE_SUCCESS) {
144 @@ -554,11 +549,8 @@ xpc_get_remote_vars(u64 remote_vars_pa,
145 return xpcVarsNotSet;
146 }
147
148 -
149 /* pull over the cross partition variables */
150 -
151 - bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars),
152 - XPC_RP_VARS_SIZE,
153 + bres = xp_bte_copy(remote_vars_pa, (u64) remote_vars, XPC_RP_VARS_SIZE,
154 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
155 if (bres != BTE_SUCCESS) {
156 return xpc_map_bte_errors(bres);
157 @@ -1239,7 +1231,7 @@ xpc_initiate_partid_to_nasids(partid_t p
158
159 part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa);
160
161 - bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask),
162 + bte_res = xp_bte_copy(part_nasid_pa, (u64) nasid_mask,
163 xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
164
165 return xpc_map_bte_errors(bte_res);
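
The XPC hunks above replace a statically aligned copy buffer with one obtained from xpc_kmalloc_cacheline_aligned(), which returns an aligned pointer and hands the raw allocation back through *base so every exit path can kfree(xpc_remote_copy_buffer_base). A minimal userspace sketch of the alignment trick, assuming a 64-byte line and illustrative names:

    #include <stdint.h>
    #include <stdlib.h>

    #define CACHELINE 64   /* stand-in for L1_CACHE_BYTES */

    static uintptr_t cache_align(uintptr_t p)
    {
        return (p + CACHELINE - 1) & ~(uintptr_t)(CACHELINE - 1);
    }

    /* Return an aligned pointer; the raw allocation goes back through
     * *base so the caller can free(*base) later, mirroring the
     * kfree(xpc_remote_copy_buffer_base) calls added above. */
    static void *alloc_cacheline_aligned(size_t size, void **base)
    {
        *base = malloc(size + CACHELINE - 1);
        if (*base == NULL)
            return NULL;
        return (void *)cache_align((uintptr_t)*base);
    }

The in-tree helper can first try a plain kmalloc() and keep the result when it happens to be aligned already; the sketch always over-allocates for brevity.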
166 diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
167 index 8cb0620..af9d81d 100644
168 --- a/arch/sparc64/mm/generic.c
169 +++ b/arch/sparc64/mm/generic.c
170 @@ -69,6 +69,8 @@ static inline void io_remap_pte_range(st
171 } else
172 offset += PAGE_SIZE;
173
174 + if (pte_write(entry))
175 + entry = pte_mkdirty(entry);
176 do {
177 BUG_ON(!pte_none(*pte));
178 set_pte_at(mm, address, pte, entry);
179 diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
180 index 3e677c4..9914a78 100644
181 --- a/drivers/ide/pci/via82cxxx.c
182 +++ b/drivers/ide/pci/via82cxxx.c
183 @@ -6,7 +6,7 @@
184 *
185 * vt82c576, vt82c586, vt82c586a, vt82c586b, vt82c596a, vt82c596b,
186 * vt82c686, vt82c686a, vt82c686b, vt8231, vt8233, vt8233c, vt8233a,
187 - * vt8235, vt8237
188 + * vt8235, vt8237, vt8237a
189 *
190 * Copyright (c) 2000-2002 Vojtech Pavlik
191 *
192 @@ -82,6 +82,7 @@ static struct via_isa_bridge {
193 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
194 { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
195 { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
196 + { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
197 { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
198 { "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
199 { "vt8233c", PCI_DEVICE_ID_VIA_8233C_0, 0x00, 0x2f, VIA_UDMA_100 },
200 diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
201 index cc07bbe..34a7593 100644
202 --- a/drivers/md/dm-exception-store.c
203 +++ b/drivers/md/dm-exception-store.c
204 @@ -91,7 +91,6 @@ struct pstore {
205 struct dm_snapshot *snap; /* up pointer to my snapshot */
206 int version;
207 int valid;
208 - uint32_t chunk_size;
209 uint32_t exceptions_per_area;
210
211 /*
212 @@ -133,7 +132,7 @@ static int alloc_area(struct pstore *ps)
213 int r = -ENOMEM;
214 size_t len;
215
216 - len = ps->chunk_size << SECTOR_SHIFT;
217 + len = ps->snap->chunk_size << SECTOR_SHIFT;
218
219 /*
220 * Allocate the chunk_size block of memory that will hold
221 @@ -160,8 +159,8 @@ static int chunk_io(struct pstore *ps, u
222 unsigned long bits;
223
224 where.bdev = ps->snap->cow->bdev;
225 - where.sector = ps->chunk_size * chunk;
226 - where.count = ps->chunk_size;
227 + where.sector = ps->snap->chunk_size * chunk;
228 + where.count = ps->snap->chunk_size;
229
230 return dm_io_sync_vm(1, &where, rw, ps->area, &bits);
231 }
232 @@ -188,7 +187,7 @@ static int area_io(struct pstore *ps, ui
233
234 static int zero_area(struct pstore *ps, uint32_t area)
235 {
236 - memset(ps->area, 0, ps->chunk_size << SECTOR_SHIFT);
237 + memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
238 return area_io(ps, area, WRITE);
239 }
240
241 @@ -196,6 +195,7 @@ static int read_header(struct pstore *ps
242 {
243 int r;
244 struct disk_header *dh;
245 + chunk_t chunk_size;
246
247 r = chunk_io(ps, 0, READ);
248 if (r)
249 @@ -210,8 +210,29 @@ static int read_header(struct pstore *ps
250 *new_snapshot = 0;
251 ps->valid = le32_to_cpu(dh->valid);
252 ps->version = le32_to_cpu(dh->version);
253 - ps->chunk_size = le32_to_cpu(dh->chunk_size);
254 -
255 + chunk_size = le32_to_cpu(dh->chunk_size);
256 + if (ps->snap->chunk_size != chunk_size) {
257 + DMWARN("chunk size %llu in device metadata overrides "
258 + "table chunk size of %llu.",
259 + (unsigned long long)chunk_size,
260 + (unsigned long long)ps->snap->chunk_size);
261 +
262 + /* We had a bogus chunk_size. Fix stuff up. */
263 + dm_io_put(sectors_to_pages(ps->snap->chunk_size));
264 + free_area(ps);
265 +
266 + ps->snap->chunk_size = chunk_size;
267 + ps->snap->chunk_mask = chunk_size - 1;
268 + ps->snap->chunk_shift = ffs(chunk_size) - 1;
269 +
270 + r = alloc_area(ps);
271 + if (r)
272 + return r;
273 +
274 + r = dm_io_get(sectors_to_pages(chunk_size));
275 + if (r)
276 + return r;
277 + }
278 } else {
279 DMWARN("Invalid/corrupt snapshot");
280 r = -ENXIO;
281 @@ -224,13 +245,13 @@ static int write_header(struct pstore *p
282 {
283 struct disk_header *dh;
284
285 - memset(ps->area, 0, ps->chunk_size << SECTOR_SHIFT);
286 + memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
287
288 dh = (struct disk_header *) ps->area;
289 dh->magic = cpu_to_le32(SNAP_MAGIC);
290 dh->valid = cpu_to_le32(ps->valid);
291 dh->version = cpu_to_le32(ps->version);
292 - dh->chunk_size = cpu_to_le32(ps->chunk_size);
293 + dh->chunk_size = cpu_to_le32(ps->snap->chunk_size);
294
295 return chunk_io(ps, 0, WRITE);
296 }
297 @@ -365,7 +386,7 @@ static void persistent_destroy(struct ex
298 {
299 struct pstore *ps = get_info(store);
300
301 - dm_io_put(sectors_to_pages(ps->chunk_size));
302 + dm_io_put(sectors_to_pages(ps->snap->chunk_size));
303 vfree(ps->callbacks);
304 free_area(ps);
305 kfree(ps);
306 @@ -384,6 +405,16 @@ static int persistent_read_metadata(stru
307 return r;
308
309 /*
310 + * Now we know correct chunk_size, complete the initialisation.
311 + */
312 + ps->exceptions_per_area = (ps->snap->chunk_size << SECTOR_SHIFT) /
313 + sizeof(struct disk_exception);
314 + ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
315 + sizeof(*ps->callbacks));
316 + if (!ps->callbacks)
317 + return -ENOMEM;
318 +
319 + /*
320 * Do we need to setup a new snapshot ?
321 */
322 if (new_snapshot) {
323 @@ -533,9 +564,6 @@ int dm_create_persistent(struct exceptio
324 ps->snap = store->snap;
325 ps->valid = 1;
326 ps->version = SNAPSHOT_DISK_VERSION;
327 - ps->chunk_size = chunk_size;
328 - ps->exceptions_per_area = (chunk_size << SECTOR_SHIFT) /
329 - sizeof(struct disk_exception);
330 ps->next_free = 2; /* skipping the header and first area */
331 ps->current_committed = 0;
332
333 @@ -543,18 +571,9 @@ int dm_create_persistent(struct exceptio
334 if (r)
335 goto bad;
336
337 - /*
338 - * Allocate space for all the callbacks.
339 - */
340 ps->callback_count = 0;
341 atomic_set(&ps->pending_count, 0);
342 - ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
343 - sizeof(*ps->callbacks));
344 -
345 - if (!ps->callbacks) {
346 - r = -ENOMEM;
347 - goto bad;
348 - }
349 + ps->callbacks = NULL;
350
351 store->destroy = persistent_destroy;
352 store->read_metadata = persistent_read_metadata;
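
read_header() now lets the chunk size recorded in the on-disk snapshot metadata override the one from the table, recomputing chunk_mask and chunk_shift to match. For a power-of-two size the three values are tied together: ffs() gives the 1-based index of the lowest set bit, so ffs(size) - 1 is log2(size). The arithmetic, checked standalone:

    #include <stdio.h>
    #include <stdint.h>
    #include <strings.h>   /* ffs() */

    int main(void)
    {
        uint32_t chunk_size  = 16;                  /* sectors; power of two */
        uint32_t chunk_mask  = chunk_size - 1;      /* offset within a chunk */
        int      chunk_shift = ffs(chunk_size) - 1; /* sector -> chunk: >> shift */
        uint64_t sector      = 100;

        printf("sector %llu -> chunk %llu, offset %llu\n",
               (unsigned long long)sector,
               (unsigned long long)(sector >> chunk_shift),
               (unsigned long long)(sector & chunk_mask));
        return 0;
    }

Deferring the exceptions_per_area and callback-array setup into persistent_read_metadata() follows directly: neither can be sized until the authoritative chunk size is known.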
353 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
354 index 8edd643..f7e7436 100644
355 --- a/drivers/md/dm-ioctl.c
356 +++ b/drivers/md/dm-ioctl.c
357 @@ -102,8 +102,10 @@ static struct hash_cell *__get_name_cell
358 unsigned int h = hash_str(str);
359
360 list_for_each_entry (hc, _name_buckets + h, name_list)
361 - if (!strcmp(hc->name, str))
362 + if (!strcmp(hc->name, str)) {
363 + dm_get(hc->md);
364 return hc;
365 + }
366
367 return NULL;
368 }
369 @@ -114,8 +116,10 @@ static struct hash_cell *__get_uuid_cell
370 unsigned int h = hash_str(str);
371
372 list_for_each_entry (hc, _uuid_buckets + h, uuid_list)
373 - if (!strcmp(hc->uuid, str))
374 + if (!strcmp(hc->uuid, str)) {
375 + dm_get(hc->md);
376 return hc;
377 + }
378
379 return NULL;
380 }
381 @@ -191,7 +195,7 @@ static int unregister_with_devfs(struct
382 */
383 static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md)
384 {
385 - struct hash_cell *cell;
386 + struct hash_cell *cell, *hc;
387
388 /*
389 * Allocate the new cells.
390 @@ -204,14 +208,19 @@ static int dm_hash_insert(const char *na
391 * Insert the cell into both hash tables.
392 */
393 down_write(&_hash_lock);
394 - if (__get_name_cell(name))
395 + hc = __get_name_cell(name);
396 + if (hc) {
397 + dm_put(hc->md);
398 goto bad;
399 + }
400
401 list_add(&cell->name_list, _name_buckets + hash_str(name));
402
403 if (uuid) {
404 - if (__get_uuid_cell(uuid)) {
405 + hc = __get_uuid_cell(uuid);
406 + if (hc) {
407 list_del(&cell->name_list);
408 + dm_put(hc->md);
409 goto bad;
410 }
411 list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
412 @@ -289,6 +298,7 @@ static int dm_hash_rename(const char *ol
413 if (hc) {
414 DMWARN("asked to rename to an already existing name %s -> %s",
415 old, new);
416 + dm_put(hc->md);
417 up_write(&_hash_lock);
418 kfree(new_name);
419 return -EBUSY;
420 @@ -328,6 +338,7 @@ static int dm_hash_rename(const char *ol
421 dm_table_put(table);
422 }
423
424 + dm_put(hc->md);
425 up_write(&_hash_lock);
426 kfree(old_name);
427 return 0;
428 @@ -611,10 +622,8 @@ static struct hash_cell *__find_device_h
429 return __get_name_cell(param->name);
430
431 md = dm_get_md(huge_decode_dev(param->dev));
432 - if (md) {
433 + if (md)
434 mdptr = dm_get_mdptr(md);
435 - dm_put(md);
436 - }
437
438 return mdptr;
439 }
440 @@ -628,7 +637,6 @@ static struct mapped_device *find_device
441 hc = __find_device_hash_cell(param);
442 if (hc) {
443 md = hc->md;
444 - dm_get(md);
445
446 /*
447 * Sneakily write in both the name and the uuid
448 @@ -653,6 +661,7 @@ static struct mapped_device *find_device
449 static int dev_remove(struct dm_ioctl *param, size_t param_size)
450 {
451 struct hash_cell *hc;
452 + struct mapped_device *md;
453
454 down_write(&_hash_lock);
455 hc = __find_device_hash_cell(param);
456 @@ -663,8 +672,11 @@ static int dev_remove(struct dm_ioctl *p
457 return -ENXIO;
458 }
459
460 + md = hc->md;
461 +
462 __hash_remove(hc);
463 up_write(&_hash_lock);
464 + dm_put(md);
465 param->data_size = 0;
466 return 0;
467 }
468 @@ -790,7 +802,6 @@ static int do_resume(struct dm_ioctl *pa
469 }
470
471 md = hc->md;
472 - dm_get(md);
473
474 new_map = hc->new_map;
475 hc->new_map = NULL;
476 @@ -1078,6 +1089,7 @@ static int table_clear(struct dm_ioctl *
477 {
478 int r;
479 struct hash_cell *hc;
480 + struct mapped_device *md;
481
482 down_write(&_hash_lock);
483
484 @@ -1096,7 +1108,9 @@ static int table_clear(struct dm_ioctl *
485 param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
486
487 r = __dev_status(hc->md, param);
488 + md = hc->md;
489 up_write(&_hash_lock);
490 + dm_put(md);
491 return r;
492 }
493
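The common thread in the dm-ioctl hunks is an ownership rule: __get_name_cell() and __get_uuid_cell() now take a reference on the mapped device (dm_get()) before returning, so a caller that only probed for a duplicate must dm_put() what it found, while callers that keep the result drop the explicit dm_get() they used to perform. A toy sketch of the convention, with hypothetical names:

    #include <stdio.h>

    struct obj { int ref; int key; };

    /* Lookup and reference in one step (under the table lock in the real
     * code); every non-NULL return obliges the caller to put_obj(). */
    static struct obj *get_obj(struct obj *tbl, int n, int key)
    {
        for (int i = 0; i < n; i++)
            if (tbl[i].key == key) {
                tbl[i].ref++;
                return &tbl[i];
            }
        return NULL;
    }

    static void put_obj(struct obj *o)
    {
        if (--o->ref == 0)
            printf("key %d freed\n", o->key);
    }

    int main(void)
    {
        struct obj tbl[] = { { 1, 7 } };
        struct obj *o = get_obj(tbl, 1, 7);

        if (o)          /* duplicate probe: the reference must still drop */
            put_obj(o);
        return 0;
    }

Taking the reference inside the lookup closes the window in which another thread could free the device between finding it and pinning it.
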
494 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
495 index d12cf3e..2cab46e 100644
496 --- a/drivers/md/dm-raid1.c
497 +++ b/drivers/md/dm-raid1.c
498 @@ -106,12 +106,42 @@ struct region {
499 struct bio_list delayed_bios;
500 };
501
502 +
503 +/*-----------------------------------------------------------------
504 + * Mirror set structures.
505 + *---------------------------------------------------------------*/
506 +struct mirror {
507 + atomic_t error_count;
508 + struct dm_dev *dev;
509 + sector_t offset;
510 +};
511 +
512 +struct mirror_set {
513 + struct dm_target *ti;
514 + struct list_head list;
515 + struct region_hash rh;
516 + struct kcopyd_client *kcopyd_client;
517 +
518 + spinlock_t lock; /* protects the next two lists */
519 + struct bio_list reads;
520 + struct bio_list writes;
521 +
522 + /* recovery */
523 + region_t nr_regions;
524 + int in_sync;
525 +
526 + struct mirror *default_mirror; /* Default mirror */
527 +
528 + unsigned int nr_mirrors;
529 + struct mirror mirror[0];
530 +};
531 +
532 /*
533 * Conversion fns
534 */
535 static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
536 {
537 - return bio->bi_sector >> rh->region_shift;
538 + return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
539 }
540
541 static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
542 @@ -223,7 +253,9 @@ static struct region *__rh_alloc(struct
543 struct region *reg, *nreg;
544
545 read_unlock(&rh->hash_lock);
546 - nreg = mempool_alloc(rh->region_pool, GFP_NOIO);
547 + nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
548 + if (unlikely(!nreg))
549 + nreg = kmalloc(sizeof(struct region), GFP_NOIO);
550 nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
551 RH_CLEAN : RH_NOSYNC;
552 nreg->rh = rh;
553 @@ -541,35 +573,6 @@ static void rh_start_recovery(struct reg
554 wake();
555 }
556
557 -/*-----------------------------------------------------------------
558 - * Mirror set structures.
559 - *---------------------------------------------------------------*/
560 -struct mirror {
561 - atomic_t error_count;
562 - struct dm_dev *dev;
563 - sector_t offset;
564 -};
565 -
566 -struct mirror_set {
567 - struct dm_target *ti;
568 - struct list_head list;
569 - struct region_hash rh;
570 - struct kcopyd_client *kcopyd_client;
571 -
572 - spinlock_t lock; /* protects the next two lists */
573 - struct bio_list reads;
574 - struct bio_list writes;
575 -
576 - /* recovery */
577 - region_t nr_regions;
578 - int in_sync;
579 -
580 - struct mirror *default_mirror; /* Default mirror */
581 -
582 - unsigned int nr_mirrors;
583 - struct mirror mirror[0];
584 -};
585 -
586 /*
587 * Every mirror should look like this one.
588 */
589 @@ -1115,7 +1118,7 @@ static int mirror_map(struct dm_target *
590 struct mirror *m;
591 struct mirror_set *ms = ti->private;
592
593 - map_context->ll = bio->bi_sector >> ms->rh.region_shift;
594 + map_context->ll = bio_to_region(&ms->rh, bio);
595
596 if (rw == WRITE) {
597 queue_bio(ms, bio, rw);
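
Besides hoisting the mirror_set definitions so bio_to_region() can reach rh->ms->ti, the functional change is that region numbers are now computed relative to the target's first sector rather than the whole device's, and mirror_map() reuses the same helper instead of open-coding the shift. The arithmetic, standalone:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t ti_begin     = 1024;        /* target's first sector (ti->begin) */
        unsigned region_shift = 5;           /* 32-sector regions */
        uint64_t bi_sector    = 1024 + 100;  /* a bio inside the target */

        /* Old, buggy form indexed from sector 0: bi_sector >> region_shift. */
        uint64_t region = (bi_sector - ti_begin) >> region_shift;

        printf("region %llu\n", (unsigned long long)region);  /* 3, not 35 */
        return 0;
    }

The mempool change in __rh_alloc() is a separate robustness tweak: a non-blocking GFP_ATOMIC pool allocation is tried first, with a blocking kmalloc() as the fallback.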
598 diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
599 index 08312b4..b84bc1a 100644
600 --- a/drivers/md/dm-snap.c
601 +++ b/drivers/md/dm-snap.c
602 @@ -530,7 +530,7 @@ static int snapshot_ctr(struct dm_target
603 }
604
605 ti->private = s;
606 - ti->split_io = chunk_size;
607 + ti->split_io = s->chunk_size;
608
609 return 0;
610
611 @@ -1204,7 +1204,7 @@ static int origin_status(struct dm_targe
612
613 static struct target_type origin_target = {
614 .name = "snapshot-origin",
615 - .version = {1, 1, 0},
616 + .version = {1, 4, 0},
617 .module = THIS_MODULE,
618 .ctr = origin_ctr,
619 .dtr = origin_dtr,
620 @@ -1215,7 +1215,7 @@ static struct target_type origin_target
621
622 static struct target_type snapshot_target = {
623 .name = "snapshot",
624 - .version = {1, 1, 0},
625 + .version = {1, 4, 0},
626 .module = THIS_MODULE,
627 .ctr = snapshot_ctr,
628 .dtr = snapshot_dtr,
629 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
630 index 4d710b7..dfd0378 100644
631 --- a/drivers/md/dm.c
632 +++ b/drivers/md/dm.c
633 @@ -26,6 +26,7 @@ static const char *_name = DM_NAME;
634 static unsigned int major = 0;
635 static unsigned int _major = 0;
636
637 +static DEFINE_SPINLOCK(_minor_lock);
638 /*
639 * One of these is allocated per bio.
640 */
641 @@ -54,12 +55,15 @@ union map_info *dm_get_mapinfo(struct bi
642 return NULL;
643 }
644
645 +#define MINOR_ALLOCED ((void *)-1)
646 +
647 /*
648 * Bits for the md->flags field.
649 */
650 #define DMF_BLOCK_IO 0
651 #define DMF_SUSPENDED 1
652 #define DMF_FROZEN 2
653 +#define DMF_FREEING 3
654
655 struct mapped_device {
656 struct rw_semaphore io_lock;
657 @@ -218,9 +222,23 @@ static int dm_blk_open(struct inode *ino
658 {
659 struct mapped_device *md;
660
661 + spin_lock(&_minor_lock);
662 +
663 md = inode->i_bdev->bd_disk->private_data;
664 + if (!md)
665 + goto out;
666 +
667 + if (test_bit(DMF_FREEING, &md->flags)) {
668 + md = NULL;
669 + goto out;
670 + }
671 +
672 dm_get(md);
673 - return 0;
674 +
675 +out:
676 + spin_unlock(&_minor_lock);
677 +
678 + return md ? 0 : -ENXIO;
679 }
680
681 static int dm_blk_close(struct inode *inode, struct file *file)
682 @@ -744,14 +762,13 @@ static int dm_any_congested(void *conges
683 /*-----------------------------------------------------------------
684 * An IDR is used to keep track of allocated minor numbers.
685 *---------------------------------------------------------------*/
686 -static DEFINE_MUTEX(_minor_lock);
687 static DEFINE_IDR(_minor_idr);
688
689 static void free_minor(unsigned int minor)
690 {
691 - mutex_lock(&_minor_lock);
692 + spin_lock(&_minor_lock);
693 idr_remove(&_minor_idr, minor);
694 - mutex_unlock(&_minor_lock);
695 + spin_unlock(&_minor_lock);
696 }
697
698 /*
699 @@ -764,23 +781,20 @@ static int specific_minor(struct mapped_
700 if (minor >= (1 << MINORBITS))
701 return -EINVAL;
702
703 - mutex_lock(&_minor_lock);
704 + r = idr_pre_get(&_minor_idr, GFP_KERNEL);
705 + if (!r)
706 + return -ENOMEM;
707 +
708 + spin_lock(&_minor_lock);
709
710 if (idr_find(&_minor_idr, minor)) {
711 r = -EBUSY;
712 goto out;
713 }
714
715 - r = idr_pre_get(&_minor_idr, GFP_KERNEL);
716 - if (!r) {
717 - r = -ENOMEM;
718 - goto out;
719 - }
720 -
721 - r = idr_get_new_above(&_minor_idr, md, minor, &m);
722 - if (r) {
723 + r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
724 + if (r)
725 goto out;
726 - }
727
728 if (m != minor) {
729 idr_remove(&_minor_idr, m);
730 @@ -789,7 +803,7 @@ static int specific_minor(struct mapped_
731 }
732
733 out:
734 - mutex_unlock(&_minor_lock);
735 + spin_unlock(&_minor_lock);
736 return r;
737 }
738
739 @@ -798,15 +812,13 @@ static int next_free_minor(struct mapped
740 int r;
741 unsigned int m;
742
743 - mutex_lock(&_minor_lock);
744 -
745 r = idr_pre_get(&_minor_idr, GFP_KERNEL);
746 - if (!r) {
747 - r = -ENOMEM;
748 - goto out;
749 - }
750 + if (!r)
751 + return -ENOMEM;
752
753 - r = idr_get_new(&_minor_idr, md, &m);
754 + spin_lock(&_minor_lock);
755 +
756 + r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
757 if (r) {
758 goto out;
759 }
760 @@ -820,7 +832,7 @@ static int next_free_minor(struct mapped
761 *minor = m;
762
763 out:
764 - mutex_unlock(&_minor_lock);
765 + spin_unlock(&_minor_lock);
766 return r;
767 }
768
769 @@ -833,12 +845,16 @@ static struct mapped_device *alloc_dev(u
770 {
771 int r;
772 struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
773 + void *old_md;
774
775 if (!md) {
776 DMWARN("unable to allocate device, out of memory.");
777 return NULL;
778 }
779
780 + if (!try_module_get(THIS_MODULE))
781 + goto bad0;
782 +
783 /* get a minor number for the dev */
784 r = persistent ? specific_minor(md, minor) : next_free_minor(md, &minor);
785 if (r < 0)
786 @@ -875,6 +891,10 @@ static struct mapped_device *alloc_dev(u
787 if (!md->disk)
788 goto bad4;
789
790 + atomic_set(&md->pending, 0);
791 + init_waitqueue_head(&md->wait);
792 + init_waitqueue_head(&md->eventq);
793 +
794 md->disk->major = _major;
795 md->disk->first_minor = minor;
796 md->disk->fops = &dm_blk_dops;
797 @@ -884,9 +904,12 @@ static struct mapped_device *alloc_dev(u
798 add_disk(md->disk);
799 format_dev_t(md->name, MKDEV(_major, minor));
800
801 - atomic_set(&md->pending, 0);
802 - init_waitqueue_head(&md->wait);
803 - init_waitqueue_head(&md->eventq);
804 + /* Populate the mapping, nobody knows we exist yet */
805 + spin_lock(&_minor_lock);
806 + old_md = idr_replace(&_minor_idr, md, minor);
807 + spin_unlock(&_minor_lock);
808 +
809 + BUG_ON(old_md != MINOR_ALLOCED);
810
811 return md;
812
813 @@ -898,6 +921,8 @@ static struct mapped_device *alloc_dev(u
814 blk_cleanup_queue(md->queue);
815 free_minor(minor);
816 bad1:
817 + module_put(THIS_MODULE);
818 + bad0:
819 kfree(md);
820 return NULL;
821 }
822 @@ -914,8 +939,14 @@ static void free_dev(struct mapped_devic
823 mempool_destroy(md->io_pool);
824 del_gendisk(md->disk);
825 free_minor(minor);
826 +
827 + spin_lock(&_minor_lock);
828 + md->disk->private_data = NULL;
829 + spin_unlock(&_minor_lock);
830 +
831 put_disk(md->disk);
832 blk_cleanup_queue(md->queue);
833 + module_put(THIS_MODULE);
834 kfree(md);
835 }
836
837 @@ -1015,13 +1046,18 @@ static struct mapped_device *dm_find_md(
838 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
839 return NULL;
840
841 - mutex_lock(&_minor_lock);
842 + spin_lock(&_minor_lock);
843
844 md = idr_find(&_minor_idr, minor);
845 - if (!md || (dm_disk(md)->first_minor != minor))
846 + if (md && (md == MINOR_ALLOCED ||
847 + (dm_disk(md)->first_minor != minor) ||
848 + test_bit(DMF_FREEING, &md->flags))) {
849 md = NULL;
850 + goto out;
851 + }
852
853 - mutex_unlock(&_minor_lock);
854 +out:
855 + spin_unlock(&_minor_lock);
856
857 return md;
858 }
859 @@ -1055,8 +1091,13 @@ void dm_put(struct mapped_device *md)
860 {
861 struct dm_table *map;
862
863 - if (atomic_dec_and_test(&md->holders)) {
864 + BUG_ON(test_bit(DMF_FREEING, &md->flags));
865 +
866 + if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
867 map = dm_get_table(md);
868 + idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
869 + set_bit(DMF_FREEING, &md->flags);
870 + spin_unlock(&_minor_lock);
871 if (!dm_suspended(md)) {
872 dm_table_presuspend_targets(map);
873 dm_table_postsuspend_targets(map);
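
The dm.c changes revolve around one idea: a minor number is reserved in the IDR with the MINOR_ALLOCED sentinel while the device is still being built, and the real pointer is published with idr_replace() only after initialisation completes; lookups treat the sentinel, and any device marked DMF_FREEING, as absent. A userspace sketch of reserve-then-publish, with a plain array standing in for the IDR and the _minor_lock locking elided:

    #include <stdio.h>

    #define SLOTS   16
    #define ALLOCED ((void *)-1)   /* reserved, object not yet usable */

    static void *slot[SLOTS];      /* stand-in for _minor_idr */

    static int reserve_minor(void)
    {
        for (int m = 0; m < SLOTS; m++)
            if (slot[m] == NULL) {
                slot[m] = ALLOCED;         /* occupied, but invisible */
                return m;
            }
        return -1;
    }

    static void publish_minor(int m, void *md)
    {
        slot[m] = md;   /* the patch asserts BUG_ON(old != MINOR_ALLOCED) */
    }

    static void *find_md(int m)
    {
        void *md = slot[m];
        return md == ALLOCED ? NULL : md;  /* half-built devices stay hidden */
    }

    int main(void)
    {
        int m = reserve_minor();
        printf("before publish: %p\n", find_md(m));
        publish_minor(m, (void *)0x1000);
        printf("after publish:  %p\n", find_md(m));
        return 0;
    }

Teardown runs the dance in reverse: dm_put() uses atomic_dec_and_lock() so the final reference swaps the sentinel back and sets DMF_FREEING under the same lock, which is why dm_blk_open() and dm_find_md() can never hand out a dying device. The old mutex becomes a spinlock because atomic_dec_and_lock() only works with spinlocks.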
874 diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
875 index 72ca553..cc66193 100644
876 --- a/drivers/net/sky2.c
877 +++ b/drivers/net/sky2.c
878 @@ -51,7 +51,7 @@ #endif
879 #include "sky2.h"
880
881 #define DRV_NAME "sky2"
882 -#define DRV_VERSION "1.4"
883 +#define DRV_VERSION "1.6.1"
884 #define PFX DRV_NAME " "
885
886 /*
887 @@ -321,7 +321,7 @@ static void sky2_phy_init(struct sky2_hw
888 }
889
890 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
891 - if (hw->copper) {
892 + if (sky2_is_copper(hw)) {
893 if (hw->chip_id == CHIP_ID_YUKON_FE) {
894 /* enable automatic crossover */
895 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
896 @@ -338,25 +338,37 @@ static void sky2_phy_init(struct sky2_hw
897 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
898 }
899 }
900 - gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
901 } else {
902 /* workaround for deviation #4.88 (CRC errors) */
903 /* disable Automatic Crossover */
904
905 ctrl &= ~PHY_M_PC_MDIX_MSK;
906 - gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
907 + }
908
909 - if (hw->chip_id == CHIP_ID_YUKON_XL) {
910 - /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
911 - gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
912 - ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
913 - ctrl &= ~PHY_M_MAC_MD_MSK;
914 - ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
915 - gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
916 + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
917 +
918 + /* special setup for PHY 88E1112 Fiber */
919 + if (hw->chip_id == CHIP_ID_YUKON_XL && !sky2_is_copper(hw)) {
920 + pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
921 +
922 + /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
923 + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
924 + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
925 + ctrl &= ~PHY_M_MAC_MD_MSK;
926 + ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
927 + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
928
929 + if (hw->pmd_type == 'P') {
930 /* select page 1 to access Fiber registers */
931 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
932 +
933 + /* for SFP-module set SIGDET polarity to low */
934 + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
935 + ctrl |= PHY_M_FIB_SIGD_POL;
936 + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
937 }
938 +
939 + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
940 }
941
942 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
943 @@ -373,7 +385,7 @@ static void sky2_phy_init(struct sky2_hw
944 adv = PHY_AN_CSMA;
945
946 if (sky2->autoneg == AUTONEG_ENABLE) {
947 - if (hw->copper) {
948 + if (sky2_is_copper(hw)) {
949 if (sky2->advertising & ADVERTISED_1000baseT_Full)
950 ct1000 |= PHY_M_1000C_AFD;
951 if (sky2->advertising & ADVERTISED_1000baseT_Half)
952 @@ -386,8 +398,12 @@ static void sky2_phy_init(struct sky2_hw
953 adv |= PHY_M_AN_10_FD;
954 if (sky2->advertising & ADVERTISED_10baseT_Half)
955 adv |= PHY_M_AN_10_HD;
956 - } else /* special defines for FIBER (88E1011S only) */
957 - adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
958 + } else { /* special defines for FIBER (88E1040S only) */
959 + if (sky2->advertising & ADVERTISED_1000baseT_Full)
960 + adv |= PHY_M_AN_1000X_AFD;
961 + if (sky2->advertising & ADVERTISED_1000baseT_Half)
962 + adv |= PHY_M_AN_1000X_AHD;
963 + }
964
965 /* Set Flow-control capabilities */
966 if (sky2->tx_pause && sky2->rx_pause)
967 @@ -949,14 +965,14 @@ #endif
968 /*
969 * It appears the hardware has a bug in the FIFO logic that
970 * cause it to hang if the FIFO gets overrun and the receive buffer
971 - * is not aligned. ALso alloc_skb() won't align properly if slab
972 + * is not aligned. Also dev_alloc_skb() won't align properly if slab
973 * debugging is enabled.
974 */
975 static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask)
976 {
977 struct sk_buff *skb;
978
979 - skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask);
980 + skb = __dev_alloc_skb(size + RX_SKB_ALIGN, gfp_mask);
981 if (likely(skb)) {
982 unsigned long p = (unsigned long) skb->data;
983 skb_reserve(skb, ALIGN(p, RX_SKB_ALIGN) - p);
984 @@ -1497,7 +1513,7 @@ static int sky2_down(struct net_device *
985
986 static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
987 {
988 - if (!hw->copper)
989 + if (!sky2_is_copper(hw))
990 return SPEED_1000;
991
992 if (hw->chip_id == CHIP_ID_YUKON_FE)
993 @@ -1855,7 +1871,7 @@ static struct sk_buff *sky2_receive(stru
994 goto oversize;
995
996 if (length < copybreak) {
997 - skb = alloc_skb(length + 2, GFP_ATOMIC);
998 + skb = dev_alloc_skb(length + 2);
999 if (!skb)
1000 goto resubmit;
1001
1002 @@ -2016,6 +2032,9 @@ #endif
1003 }
1004 }
1005
1006 + /* Fully processed status ring so clear irq */
1007 + sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
1008 +
1009 exit_loop:
1010 return work_done;
1011 }
1012 @@ -2218,9 +2237,6 @@ static int sky2_poll(struct net_device *
1013 *budget -= work_done;
1014 dev0->quota -= work_done;
1015
1016 - if (status & Y2_IS_STAT_BMU)
1017 - sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
1018 -
1019 if (sky2_more_work(hw))
1020 return 1;
1021
1022 @@ -2287,7 +2303,7 @@ static inline u32 sky2_clk2us(const stru
1023 static int __devinit sky2_reset(struct sky2_hw *hw)
1024 {
1025 u16 status;
1026 - u8 t8, pmd_type;
1027 + u8 t8;
1028 int i;
1029
1030 sky2_write8(hw, B0_CTST, CS_RST_CLR);
1031 @@ -2333,9 +2349,7 @@ static int __devinit sky2_reset(struct s
1032 sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
1033
1034
1035 - pmd_type = sky2_read8(hw, B2_PMD_TYP);
1036 - hw->copper = !(pmd_type == 'L' || pmd_type == 'S');
1037 -
1038 + hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
1039 hw->ports = 1;
1040 t8 = sky2_read8(hw, B2_Y2_HW_RES);
1041 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
1042 @@ -2432,21 +2446,22 @@ static int __devinit sky2_reset(struct s
1043
1044 static u32 sky2_supported_modes(const struct sky2_hw *hw)
1045 {
1046 - u32 modes;
1047 - if (hw->copper) {
1048 - modes = SUPPORTED_10baseT_Half
1049 - | SUPPORTED_10baseT_Full
1050 - | SUPPORTED_100baseT_Half
1051 - | SUPPORTED_100baseT_Full
1052 - | SUPPORTED_Autoneg | SUPPORTED_TP;
1053 + if (sky2_is_copper(hw)) {
1054 + u32 modes = SUPPORTED_10baseT_Half
1055 + | SUPPORTED_10baseT_Full
1056 + | SUPPORTED_100baseT_Half
1057 + | SUPPORTED_100baseT_Full
1058 + | SUPPORTED_Autoneg | SUPPORTED_TP;
1059
1060 if (hw->chip_id != CHIP_ID_YUKON_FE)
1061 modes |= SUPPORTED_1000baseT_Half
1062 - | SUPPORTED_1000baseT_Full;
1063 + | SUPPORTED_1000baseT_Full;
1064 + return modes;
1065 } else
1066 - modes = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
1067 - | SUPPORTED_Autoneg;
1068 - return modes;
1069 + return SUPPORTED_1000baseT_Half
1070 + | SUPPORTED_1000baseT_Full
1071 + | SUPPORTED_Autoneg
1072 + | SUPPORTED_FIBRE;
1073 }
1074
1075 static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1076 @@ -2457,7 +2472,7 @@ static int sky2_get_settings(struct net_
1077 ecmd->transceiver = XCVR_INTERNAL;
1078 ecmd->supported = sky2_supported_modes(hw);
1079 ecmd->phy_address = PHY_ADDR_MARV;
1080 - if (hw->copper) {
1081 + if (sky2_is_copper(hw)) {
1082 ecmd->supported = SUPPORTED_10baseT_Half
1083 | SUPPORTED_10baseT_Full
1084 | SUPPORTED_100baseT_Half
1085 @@ -2466,12 +2481,14 @@ static int sky2_get_settings(struct net_
1086 | SUPPORTED_1000baseT_Full
1087 | SUPPORTED_Autoneg | SUPPORTED_TP;
1088 ecmd->port = PORT_TP;
1089 - } else
1090 + ecmd->speed = sky2->speed;
1091 + } else {
1092 + ecmd->speed = SPEED_1000;
1093 ecmd->port = PORT_FIBRE;
1094 + }
1095
1096 ecmd->advertising = sky2->advertising;
1097 ecmd->autoneg = sky2->autoneg;
1098 - ecmd->speed = sky2->speed;
1099 ecmd->duplex = sky2->duplex;
1100 return 0;
1101 }
1102 @@ -3184,6 +3201,8 @@ static int __devinit sky2_test_msi(struc
1103 struct pci_dev *pdev = hw->pdev;
1104 int err;
1105
1106 + init_waitqueue_head (&hw->msi_wait);
1107 +
1108 sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
1109
1110 err = request_irq(pdev->irq, sky2_test_intr, SA_SHIRQ, DRV_NAME, hw);
1111 @@ -3193,10 +3212,8 @@ static int __devinit sky2_test_msi(struc
1112 return err;
1113 }
1114
1115 - init_waitqueue_head (&hw->msi_wait);
1116 -
1117 sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
1118 - wmb();
1119 + sky2_read8(hw, B0_CTST);
1120
1121 wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10);
1122
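Two of the sky2 changes deserve a note. First, sky2_alloc_skb() works around the receive-FIFO hang by over-allocating RX_SKB_ALIGN bytes and reserving just enough to round skb->data up to the boundary; the pointer arithmetic, isolated:

    #include <stdio.h>
    #include <stdint.h>

    #define RX_SKB_ALIGN 8
    #define ALIGN_UP(p, a) (((p) + (a) - 1) & ~(uintptr_t)((a) - 1))

    int main(void)
    {
        char buf[64 + RX_SKB_ALIGN];   /* size + RX_SKB_ALIGN, as in the driver */
        uintptr_t p = (uintptr_t)buf;
        size_t reserve = ALIGN_UP(p, RX_SKB_ALIGN) - p;  /* skb_reserve() amount */

        printf("reserve %zu bytes; data at %p\n", reserve, (void *)(p + reserve));
        return 0;
    }

Second, clearing the status-ring IRQ moves from sky2_poll() into the status-processing loop itself, after the ring has been fully drained, so the clear can no longer race with freshly posted status entries; and the MSI test now flushes the self-IRQ trigger with a register read-back instead of wmb(), since wmb() only orders CPU stores and does not push posted PCI writes out to the device.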
1123 diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
1124 index 8a0bc55..9516c1f 100644
1125 --- a/drivers/net/sky2.h
1126 +++ b/drivers/net/sky2.h
1127 @@ -1318,6 +1318,14 @@ enum {
1128 };
1129
1130 /* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
1131 +/***** PHY_MARV_PHY_CTRL (page 1) 16 bit r/w Fiber Specific Ctrl *****/
1132 +enum {
1133 + PHY_M_FIB_FORCE_LNK = 1<<10,/* Force Link Good */
1134 + PHY_M_FIB_SIGD_POL = 1<<9, /* SIGDET Polarity */
1135 + PHY_M_FIB_TX_DIS = 1<<3, /* Transmitter Disable */
1136 +};
1137 +
1138 +/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
1139 /***** PHY_MARV_PHY_CTRL (page 2) 16 bit r/w MAC Specific Ctrl *****/
1140 enum {
1141 PHY_M_MAC_MD_MSK = 7<<7, /* Bit 9.. 7: Mode Select Mask */
1142 @@ -1566,7 +1574,7 @@ enum {
1143
1144 GMR_FS_ANY_ERR = GMR_FS_RX_FF_OV | GMR_FS_CRC_ERR |
1145 GMR_FS_FRAGMENT | GMR_FS_LONG_ERR |
1146 - GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC |
1147 + GMR_FS_MII_ERR | GMR_FS_BAD_FC |
1148 GMR_FS_UN_SIZE | GMR_FS_JABBER,
1149 };
1150
1151 @@ -1879,7 +1887,7 @@ struct sky2_hw {
1152 int pm_cap;
1153 u8 chip_id;
1154 u8 chip_rev;
1155 - u8 copper;
1156 + u8 pmd_type;
1157 u8 ports;
1158
1159 struct sky2_status_le *st_le;
1160 @@ -1891,6 +1899,11 @@ struct sky2_hw {
1161 wait_queue_head_t msi_wait;
1162 };
1163
1164 +static inline int sky2_is_copper(const struct sky2_hw *hw)
1165 +{
1166 + return !(hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P');
1167 +}
1168 +
1169 /* Register accessor for memory mapped device */
1170 static inline u32 sky2_read32(const struct sky2_hw *hw, unsigned reg)
1171 {
1172 diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
1173 index 862c226..38b605d 100644
1174 --- a/drivers/net/tg3.c
1175 +++ b/drivers/net/tg3.c
1176 @@ -69,8 +69,8 @@ #include "tg3.h"
1177
1178 #define DRV_MODULE_NAME "tg3"
1179 #define PFX DRV_MODULE_NAME ": "
1180 -#define DRV_MODULE_VERSION "3.59"
1181 -#define DRV_MODULE_RELDATE "June 8, 2006"
1182 +#define DRV_MODULE_VERSION "3.59.1"
1183 +#define DRV_MODULE_RELDATE "August 25, 2006"
1184
1185 #define TG3_DEF_MAC_MODE 0
1186 #define TG3_DEF_RX_MODE 0
1187 @@ -11381,11 +11381,15 @@ #if TG3_TSO_SUPPORT != 0
1188 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
1189 }
1190
1191 - /* TSO is on by default on chips that support hardware TSO.
1192 + /* TSO is on by default on chips that support HW_TSO_2.
1193 + * Some HW_TSO_1 capable chips have bugs that can lead to
1194 + * tx timeouts in some cases when TSO is enabled.
1195 * Firmware TSO on older chips gives lower performance, so it
1196 * is off by default, but can be enabled using ethtool.
1197 */
1198 - if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
1199 + if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
1200 + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
1201 + tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2))
1202 dev->features |= NETIF_F_TSO;
1203
1204 #endif
1205 diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
1206 index ff0faab..cd68f46 100644
1207 --- a/drivers/net/tg3.h
1208 +++ b/drivers/net/tg3.h
1209 @@ -125,6 +125,7 @@ #define CHIPREV_ID_5705_A3 0x3003
1210 #define CHIPREV_ID_5750_A0 0x4000
1211 #define CHIPREV_ID_5750_A1 0x4001
1212 #define CHIPREV_ID_5750_A3 0x4003
1213 +#define CHIPREV_ID_5750_C2 0x4202
1214 #define CHIPREV_ID_5752_A0_HW 0x5000
1215 #define CHIPREV_ID_5752_A0 0x6000
1216 #define CHIPREV_ID_5752_A1 0x6001
1217 diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
1218 index f7b77ce..54fe4e4 100644
1219 --- a/drivers/net/wireless/spectrum_cs.c
1220 +++ b/drivers/net/wireless/spectrum_cs.c
1221 @@ -245,7 +245,7 @@ spectrum_reset(struct pcmcia_device *lin
1222 u_int save_cor;
1223
1224 /* Doing it if hardware is gone is guaranteed crash */
1225 - if (pcmcia_dev_present(link))
1226 + if (!pcmcia_dev_present(link))
1227 return -ENODEV;
1228
1229 /* Save original COR value */
1230 diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
1231 index 27909bc..5f44354 100644
1232 --- a/drivers/usb/host/uhci-q.c
1233 +++ b/drivers/usb/host/uhci-q.c
1234 @@ -264,7 +264,7 @@ static void uhci_fixup_toggles(struct uh
1235 * need to change any toggles in this URB */
1236 td = list_entry(urbp->td_list.next, struct uhci_td, list);
1237 if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
1238 - td = list_entry(urbp->td_list.next, struct uhci_td,
1239 + td = list_entry(urbp->td_list.prev, struct uhci_td,
1240 list);
1241 toggle = uhci_toggle(td_token(td)) ^ 1;
1242
1243 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
1244 index 537893a..997a202 100644
1245 --- a/fs/binfmt_elf.c
1246 +++ b/fs/binfmt_elf.c
1247 @@ -86,7 +86,7 @@ static struct linux_binfmt elf_format =
1248 .min_coredump = ELF_EXEC_PAGESIZE
1249 };
1250
1251 -#define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
1252 +#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
1253
1254 static int set_brk(unsigned long start, unsigned long end)
1255 {
1256 @@ -389,7 +389,7 @@ static unsigned long load_elf_interp(str
1257 * <= p_memsize so it is only necessary to check p_memsz.
1258 */
1259 k = load_addr + eppnt->p_vaddr;
1260 - if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
1261 + if (BAD_ADDR(k) || eppnt->p_filesz > eppnt->p_memsz ||
1262 eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
1263 error = -ENOMEM;
1264 goto out_close;
1265 @@ -876,7 +876,7 @@ static int load_elf_binary(struct linux_
1266 * allowed task size. Note that p_filesz must always be
1267 * <= p_memsz so it is only necessary to check p_memsz.
1268 */
1269 - if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
1270 + if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
1271 elf_ppnt->p_memsz > TASK_SIZE ||
1272 TASK_SIZE - elf_ppnt->p_memsz < k) {
1273 /* set_brk can never work. Avoid overflows. */
1274 @@ -930,10 +930,9 @@ static int load_elf_binary(struct linux_
1275 interpreter,
1276 &interp_load_addr);
1277 if (BAD_ADDR(elf_entry)) {
1278 - printk(KERN_ERR "Unable to load interpreter %.128s\n",
1279 - elf_interpreter);
1280 force_sig(SIGSEGV, current);
1281 - retval = -ENOEXEC; /* Nobody gets to see this, but.. */
1282 + retval = IS_ERR((void *)elf_entry) ?
1283 + (int)elf_entry : -EINVAL;
1284 goto out_free_dentry;
1285 }
1286 reloc_func_desc = interp_load_addr;
1287 @@ -944,8 +943,8 @@ static int load_elf_binary(struct linux_
1288 } else {
1289 elf_entry = loc->elf_ex.e_entry;
1290 if (BAD_ADDR(elf_entry)) {
1291 - send_sig(SIGSEGV, current, 0);
1292 - retval = -ENOEXEC; /* Nobody gets to see this, but.. */
1293 + force_sig(SIGSEGV, current);
1294 + retval = -EINVAL;
1295 goto out_free_dentry;
1296 }
1297 }
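
Two distinct fixes here: BAD_ADDR() now rejects addresses equal to TASK_SIZE (>= rather than >) and is used consistently for the mapping checks, and a failed interpreter load propagates the real error encoded in elf_entry instead of a blanket -ENOEXEC. The latter leans on the kernel's ERR_PTR convention, where the top 4095 values of the address space encode negative errnos; a userspace sketch:

    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define PTR_ERR(ptr) ((long)(ptr))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    /* Hypothetical loader: an entry address on success, an encoded
     * errno on failure, in one return value. */
    static void *load_interp(int fail)
    {
        return fail ? ERR_PTR(-12 /* ENOMEM */) : (void *)0x400000;
    }

    int main(void)
    {
        void *entry = load_interp(1);

        if (IS_ERR(entry))
            printf("load failed: errno %ld\n", -PTR_ERR(entry));
        else
            printf("entry at %p\n", entry);
        return 0;
    }

Encoded errors live in the last page of the address space, so they are BAD_ADDR() by construction; that is what lets load_elf_binary() test BAD_ADDR(elf_entry) first and then recover the errno via the IS_ERR() cast.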
1298 diff --git a/fs/ext2/super.c b/fs/ext2/super.c
1299 index 7e30bae..e51d1a8 100644
1300 --- a/fs/ext2/super.c
1301 +++ b/fs/ext2/super.c
1302 @@ -252,6 +252,46 @@ #ifdef CONFIG_QUOTA
1303 #endif
1304 };
1305
1306 +static struct dentry *ext2_get_dentry(struct super_block *sb, void *vobjp)
1307 +{
1308 + __u32 *objp = vobjp;
1309 + unsigned long ino = objp[0];
1310 + __u32 generation = objp[1];
1311 + struct inode *inode;
1312 + struct dentry *result;
1313 +
1314 + if (ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb))
1315 + return ERR_PTR(-ESTALE);
1316 + if (ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
1317 + return ERR_PTR(-ESTALE);
1318 +
1319 + /* iget isn't really right if the inode is currently unallocated!!
1320 + * ext2_read_inode currently does appropriate checks, but
1321 + * it might be "neater" to call ext2_get_inode first and check
1322 + * if the inode is valid.....
1323 + */
1324 + inode = iget(sb, ino);
1325 + if (inode == NULL)
1326 + return ERR_PTR(-ENOMEM);
1327 + if (is_bad_inode(inode)
1328 + || (generation && inode->i_generation != generation)
1329 + ) {
1330 + /* we didn't find the right inode.. */
1331 + iput(inode);
1332 + return ERR_PTR(-ESTALE);
1333 + }
1334 + /* now to find a dentry.
1335 + * If possible, get a well-connected one
1336 + */
1337 + result = d_alloc_anon(inode);
1338 + if (!result) {
1339 + iput(inode);
1340 + return ERR_PTR(-ENOMEM);
1341 + }
1342 + return result;
1343 +}
1344 +
1345 +
1346 /* Yes, most of these are left as NULL!!
1347 * A NULL value implies the default, which works with ext2-like file
1348 * systems, but can be improved upon.
1349 @@ -259,6 +299,7 @@ #endif
1350 */
1351 static struct export_operations ext2_export_ops = {
1352 .get_parent = ext2_get_parent,
1353 + .get_dentry = ext2_get_dentry,
1354 };
1355
1356 static unsigned long get_sb_block(void **data)
1357 diff --git a/fs/locks.c b/fs/locks.c
1358 index ab61a8b..529a0ed 100644
1359 --- a/fs/locks.c
1360 +++ b/fs/locks.c
1361 @@ -1389,8 +1389,9 @@ static int __setlease(struct file *filp,
1362 if (!leases_enable)
1363 goto out;
1364
1365 - error = lease_alloc(filp, arg, &fl);
1366 - if (error)
1367 + error = -ENOMEM;
1368 + fl = locks_alloc_lock();
1369 + if (fl == NULL)
1370 goto out;
1371
1372 locks_copy_lock(fl, lease);
1373 @@ -1398,6 +1399,7 @@ static int __setlease(struct file *filp,
1374 locks_insert_lock(before, fl);
1375
1376 *flp = fl;
1377 + error = 0;
1378 out:
1379 return error;
1380 }
1381 diff --git a/include/asm-ia64/mman.h b/include/asm-ia64/mman.h
1382 index df1b20e..b242f95 100644
1383 --- a/include/asm-ia64/mman.h
1384 +++ b/include/asm-ia64/mman.h
1385 @@ -9,10 +9,12 @@ #define _ASM_IA64_MMAN_H
1386 */
1387
1388 #ifdef __KERNEL__
1389 +#ifndef __ASSEMBLY__
1390 #define arch_mmap_check ia64_map_check_rgn
1391 int ia64_map_check_rgn(unsigned long addr, unsigned long len,
1392 unsigned long flags);
1393 #endif
1394 +#endif
1395
1396 #include <asm-generic/mman.h>
1397
1398 diff --git a/include/asm-ia64/sn/xp.h b/include/asm-ia64/sn/xp.h
1399 index 9bd2f9b..6f807e0 100644
1400 --- a/include/asm-ia64/sn/xp.h
1401 +++ b/include/asm-ia64/sn/xp.h
1402 @@ -60,23 +60,37 @@ #define XP_NASID_MASK_WORDS ((XP_MAX_PHY
1403 * the bte_copy() once in the hope that the failure was due to a temporary
1404 * aberration (i.e., the link going down temporarily).
1405 *
1406 - * See bte_copy for definition of the input parameters.
1407 + * src - physical address of the source of the transfer.
1408 + * vdst - virtual address of the destination of the transfer.
1409 + * len - number of bytes to transfer from source to destination.
1410 + * mode - see bte_copy() for definition.
1411 + * notification - see bte_copy() for definition.
1412 *
1413 * Note: xp_bte_copy() should never be called while holding a spinlock.
1414 */
1415 static inline bte_result_t
1416 -xp_bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
1417 +xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
1418 {
1419 bte_result_t ret;
1420 + u64 pdst = ia64_tpa(vdst);
1421
1422
1423 - ret = bte_copy(src, dest, len, mode, notification);
1424 + /*
1425 + * Ensure that the physically mapped memory is contiguous.
1426 + *
1427 + * We do this by ensuring that the memory is from region 7 only.
1428 + * If the need should arise to use memory from one of the other
1429 + * regions, then modify the BUG_ON() statement to ensure that the
1430 + * memory from that region is always physically contiguous.
1431 + */
1432 + BUG_ON(REGION_NUMBER(vdst) != RGN_KERNEL);
1433
1434 + ret = bte_copy(src, pdst, len, mode, notification);
1435 if (ret != BTE_SUCCESS) {
1436 if (!in_interrupt()) {
1437 cond_resched();
1438 }
1439 - ret = bte_copy(src, dest, len, mode, notification);
1440 + ret = bte_copy(src, pdst, len, mode, notification);
1441 }
1442
1443 return ret;
1444 diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
1445 index aa3b8ac..b454ad4 100644
1446 --- a/include/asm-ia64/sn/xpc.h
1447 +++ b/include/asm-ia64/sn/xpc.h
1448 @@ -684,7 +684,9 @@ extern struct xpc_vars *xpc_vars;
1449 extern struct xpc_rsvd_page *xpc_rsvd_page;
1450 extern struct xpc_vars_part *xpc_vars_part;
1451 extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
1452 -extern char xpc_remote_copy_buffer[];
1453 +extern char *xpc_remote_copy_buffer;
1454 +extern void *xpc_remote_copy_buffer_base;
1455 +extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **);
1456 extern struct xpc_rsvd_page *xpc_rsvd_page_init(void);
1457 extern void xpc_allow_IPI_ops(void);
1458 extern void xpc_restrict_IPI_ops(void);
1459 diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
1460 index a75b84b..178a97e 100644
1461 --- a/include/linux/netfilter_bridge.h
1462 +++ b/include/linux/netfilter_bridge.h
1463 @@ -47,18 +47,26 @@ #define BRNF_DONT_TAKE_PARENT 0x04
1464 #define BRNF_BRIDGED 0x08
1465 #define BRNF_NF_BRIDGE_PREROUTING 0x10
1466
1467 -
1468 /* Only used in br_forward.c */
1469 -static inline
1470 -void nf_bridge_maybe_copy_header(struct sk_buff *skb)
1471 +static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
1472 {
1473 + int err;
1474 +
1475 if (skb->nf_bridge) {
1476 if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
1477 + err = skb_cow(skb, 18);
1478 + if (err)
1479 + return err;
1480 memcpy(skb->data - 18, skb->nf_bridge->data, 18);
1481 skb_push(skb, 4);
1482 - } else
1483 + } else {
1484 + err = skb_cow(skb, 16);
1485 + if (err)
1486 + return err;
1487 memcpy(skb->data - 16, skb->nf_bridge->data, 16);
1488 + }
1489 }
1490 + return 0;
1491 }
1492
1493 /* This is called by the IP fragmenting code and it ensures there is
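
nf_bridge_maybe_copy_header() used to write the saved MAC header into headroom it never verified; it now calls skb_cow() first, which guarantees writable headroom of the requested size (18 bytes for VLAN-tagged frames, 16 otherwise) or fails, and the error is passed up so br_dev_queue_push_xmit() can drop the packet. The shape of that contract in a self-contained sketch, with buf and ensure_headroom as illustrative stand-ins for sk_buff and skb_cow():

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    struct buf { char storage[64]; char *data; };

    static int ensure_headroom(struct buf *b, size_t need)
    {
        /* skb_cow() reallocates a shared or cramped skb; a fixed
         * buffer can only report failure. */
        return (size_t)(b->data - b->storage) >= need ? 0 : -ENOMEM;
    }

    static int copy_header(struct buf *b, const char *hdr, size_t len)
    {
        int err = ensure_headroom(b, len);

        if (err)
            return err;   /* caller frees the buffer, as br_forward.c now does */
        memcpy(b->data - len, hdr, len);
        return 0;
    }

    int main(void)
    {
        struct buf b;
        char hdr[18] = "saved MAC header";

        b.data = b.storage + 18;
        printf("copy 18: %d, copy 32: %d\n",
               copy_header(&b, hdr, sizeof(hdr)),
               copy_header(&b, hdr, 32));
        return 0;
    }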
1494 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
1495 index 2c31bb0..a1ce843 100644
1496 --- a/include/linux/skbuff.h
1497 +++ b/include/linux/skbuff.h
1498 @@ -1009,6 +1009,21 @@ static inline int pskb_trim(struct sk_bu
1499 }
1500
1501 /**
1502 + * pskb_trim_unique - remove end from a paged unique (not cloned) buffer
1503 + * @skb: buffer to alter
1504 + * @len: new length
1505 + *
1506 + * This is identical to pskb_trim except that the caller knows that
1507 + * the skb is not cloned so we should never get an error due to out-
1508 + * of-memory.
1509 + */
1510 +static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
1511 +{
1512 + int err = pskb_trim(skb, len);
1513 + BUG_ON(err);
1514 +}
1515 +
1516 +/**
1517 * skb_orphan - orphan a buffer
1518 * @skb: buffer to orphan
1519 *
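
pskb_trim_unique() documents an invariant rather than handling an error: pskb_trim() can only fail when it must reallocate, i.e. on a cloned skb, so when the caller knows it holds the only reference the error path is dead code and BUG_ON() makes that assumption loud. The same wrapper shape in standalone form, with trim() as a hypothetical fallible primitive:

    #include <assert.h>
    #include <stdio.h>

    struct buffer { unsigned int len; int shared; };

    static int trim(struct buffer *b, unsigned int len)
    {
        if (b->shared)
            return -1;      /* the only failure mode, like a cloned skb */
        b->len = len;
        return 0;
    }

    /* For buffers the caller owns outright, failure is impossible;
     * assert() plays the role of the kernel's BUG_ON(). */
    static void trim_unique(struct buffer *b, unsigned int len)
    {
        int err = trim(b, len);
        assert(err == 0);
    }

    int main(void)
    {
        struct buffer b = { 100, 0 };
        trim_unique(&b, 60);
        printf("len %u\n", b.len);
        return 0;
    }

The ip_output.c and ip6_output.c hunks further down switch their skb_trim() calls to this wrapper because the buffer being trimmed may carry paged data, which needs the pskb path, while the skb is known to be unshared at that point.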
1520 diff --git a/kernel/futex.c b/kernel/futex.c
1521 index 5699c51..225af28 100644
1522 --- a/kernel/futex.c
1523 +++ b/kernel/futex.c
1524 @@ -593,6 +593,7 @@ static int unqueue_me(struct futex_q *q)
1525 /* In the common case we don't take the spinlock, which is nice. */
1526 retry:
1527 lock_ptr = q->lock_ptr;
1528 + barrier();
1529 if (lock_ptr != 0) {
1530 spin_lock(lock_ptr);
1531 /*
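
The one-line futex fix inserts a compiler barrier between loading q->lock_ptr and testing it, so the compiler cannot treat the local variable as an alias for the field and re-read it (another thread may concurrently NULL it) between the test and the use. A sketch of the idiom, spelling out the GCC-style barrier the kernel's barrier() expands to:

    #define barrier() __asm__ __volatile__("" ::: "memory")

    struct futex_q { void *lock_ptr; };

    /* Snapshot a pointer that a concurrent waker may clear. Without
     * the barrier the compiler may load q->lock_ptr twice, once for
     * the caller's NULL test and once for the dereference,
     * reintroducing the race the local copy was meant to close. */
    static void *snapshot_lock_ptr(struct futex_q *q)
    {
        void *lock_ptr = q->lock_ptr;

        barrier();
        return lock_ptr;
    }

    int main(void)
    {
        struct futex_q q = { (void *)1 };
        return snapshot_lock_ptr(&q) ? 0 : 1;
    }

Later kernels express the same "read exactly once" requirement per access with ACCESS_ONCE()/READ_ONCE() rather than a blanket barrier.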
1532 diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
1533 index dcfb5d7..51cacd1 100644
1534 --- a/kernel/stop_machine.c
1535 +++ b/kernel/stop_machine.c
1536 @@ -111,7 +111,6 @@ static int stop_machine(void)
1537 /* If some failed, kill them all. */
1538 if (ret < 0) {
1539 stopmachine_set_state(STOPMACHINE_EXIT);
1540 - up(&stopmachine_mutex);
1541 return ret;
1542 }
1543
1544 diff --git a/lib/ts_bm.c b/lib/ts_bm.c
1545 index c4c1ac5..7917265 100644
1546 --- a/lib/ts_bm.c
1547 +++ b/lib/ts_bm.c
1548 @@ -112,15 +112,14 @@ static int subpattern(u8 *pattern, int i
1549 return ret;
1550 }
1551
1552 -static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern,
1553 - unsigned int len)
1554 +static void compute_prefix_tbl(struct ts_bm *bm)
1555 {
1556 int i, j, g;
1557
1558 for (i = 0; i < ASIZE; i++)
1559 - bm->bad_shift[i] = len;
1560 - for (i = 0; i < len - 1; i++)
1561 - bm->bad_shift[pattern[i]] = len - 1 - i;
1562 + bm->bad_shift[i] = bm->patlen;
1563 + for (i = 0; i < bm->patlen - 1; i++)
1564 + bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i;
1565
1566 /* Compute the good shift array, used to match reocurrences
1567 * of a subpattern */
1568 @@ -151,8 +150,8 @@ static struct ts_config *bm_init(const v
1569 bm = ts_config_priv(conf);
1570 bm->patlen = len;
1571 bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len;
1572 - compute_prefix_tbl(bm, pattern, len);
1573 memcpy(bm->pattern, pattern, len);
1574 + compute_prefix_tbl(bm);
1575
1576 return conf;
1577 }
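
The ts_bm bug was one of ordering: parts of the shift tables were derived from bm->pattern before memcpy() had filled it in; the fix copies the pattern first and has compute_prefix_tbl() read everything from the struct's own copy. The bad-character half of that table, extracted and runnable:

    #include <stdio.h>

    #define ASIZE 256

    struct bm {
        const unsigned char *pattern;
        unsigned int patlen;
        unsigned int bad_shift[ASIZE];
    };

    /* Boyer-Moore bad-character shifts: bytes absent from the pattern
     * permit a full patlen skip; bytes present (except in the final
     * position) permit proportionally smaller skips. */
    static void compute_bad_shift(struct bm *bm)
    {
        unsigned int i;

        for (i = 0; i < ASIZE; i++)
            bm->bad_shift[i] = bm->patlen;
        for (i = 0; i < bm->patlen - 1; i++)
            bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i;
    }

    int main(void)
    {
        struct bm bm = { (const unsigned char *)"needle", 6, { 0 } };

        compute_bad_shift(&bm);
        printf("shift['e']=%u shift['x']=%u\n",
               bm.bad_shift['e'], bm.bad_shift['x']);  /* 3 and 6 */
        return 0;
    }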
1578 diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
1579 index 56f3aa4..ddb7e1c 100644
1580 --- a/net/bridge/br_forward.c
1581 +++ b/net/bridge/br_forward.c
1582 @@ -43,11 +43,15 @@ int br_dev_queue_push_xmit(struct sk_buf
1583 else {
1584 #ifdef CONFIG_BRIDGE_NETFILTER
1585 /* ip_refrag calls ip_fragment, doesn't copy the MAC header. */
1586 - nf_bridge_maybe_copy_header(skb);
1587 + if (nf_bridge_maybe_copy_header(skb))
1588 + kfree_skb(skb);
1589 + else
1590 #endif
1591 - skb_push(skb, ETH_HLEN);
1592 + {
1593 + skb_push(skb, ETH_HLEN);
1594
1595 - dev_queue_xmit(skb);
1596 + dev_queue_xmit(skb);
1597 + }
1598 }
1599
1600 return 0;
1601 diff --git a/net/core/pktgen.c b/net/core/pktgen.c
1602 index c23e9c0..b916b7f 100644
1603 --- a/net/core/pktgen.c
1604 +++ b/net/core/pktgen.c
1605 @@ -2149,6 +2149,8 @@ static struct sk_buff *fill_packet_ipv4(
1606 skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32);
1607 skb->dev = odev;
1608 skb->pkt_type = PACKET_HOST;
1609 + skb->nh.iph = iph;
1610 + skb->h.uh = udph;
1611
1612 if (pkt_dev->nfrags <= 0)
1613 pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
1614 @@ -2460,6 +2462,8 @@ static struct sk_buff *fill_packet_ipv6(
1615 skb->protocol = protocol;
1616 skb->dev = odev;
1617 skb->pkt_type = PACKET_HOST;
1618 + skb->nh.ipv6h = iph;
1619 + skb->h.uh = udph;
1620
1621 if (pkt_dev->nfrags <= 0)
1622 pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
1623 diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
1624 index cff9c3a..d987a27 100644
1625 --- a/net/ipv4/ip_output.c
1626 +++ b/net/ipv4/ip_output.c
1627 @@ -946,7 +946,7 @@ alloc_new_skb:
1628 skb_prev->csum = csum_sub(skb_prev->csum,
1629 skb->csum);
1630 data += fraggap;
1631 - skb_trim(skb_prev, maxfraglen);
1632 + pskb_trim_unique(skb_prev, maxfraglen);
1633 }
1634
1635 copy = datalen - transhdrlen - fraggap;
1636 @@ -1139,7 +1139,7 @@ ssize_t ip_append_page(struct sock *sk,
1637 data, fraggap, 0);
1638 skb_prev->csum = csum_sub(skb_prev->csum,
1639 skb->csum);
1640 - skb_trim(skb_prev, maxfraglen);
1641 + pskb_trim_unique(skb_prev, maxfraglen);
1642 }
1643
1644 /*
1645 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
1646 index f33c9dd..e0657b9 100644
1647 --- a/net/ipv4/tcp_output.c
1648 +++ b/net/ipv4/tcp_output.c
1649 @@ -197,6 +197,7 @@ void tcp_select_initial_window(int __spa
1650 * See RFC1323 for an explanation of the limit to 14
1651 */
1652 space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
1653 + space = min_t(u32, space, *window_clamp);
1654 while (space > 65535 && (*rcv_wscale) < 14) {
1655 space >>= 1;
1656 (*rcv_wscale)++;
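
tcp_select_initial_window() derives the window-scale factor from the largest buffer the socket could ever see; without the added min_t() clamp it could choose a bigger rcv_wscale than window_clamp will ever require. The loop's arithmetic, standalone:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t space        = 4u << 20;    /* from the rmem limits */
        uint32_t window_clamp = 256u << 10;  /* largest window we will offer */
        unsigned rcv_wscale   = 0;

        if (space > window_clamp)            /* the fix */
            space = window_clamp;

        while (space > 65535 && rcv_wscale < 14) {   /* RFC 1323 cap */
            space >>= 1;
            rcv_wscale++;
        }
        printf("wscale %u, residual window %u\n", rcv_wscale, space);
        return 0;
    }

With the clamp this settles on wscale 3; without it the 4 MB limit pushes wscale to 7 even though the connection never advertises more than 256 KB, coarsening every advertised window by an extra factor of 16.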
1657 diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
1658 index a18d425..9ca783d 100644
1659 --- a/net/ipv6/exthdrs.c
1660 +++ b/net/ipv6/exthdrs.c
1661 @@ -635,14 +635,17 @@ ipv6_renew_options(struct sock *sk, stru
1662 struct ipv6_txoptions *opt2;
1663 int err;
1664
1665 - if (newtype != IPV6_HOPOPTS && opt->hopopt)
1666 - tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
1667 - if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
1668 - tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
1669 - if (newtype != IPV6_RTHDR && opt->srcrt)
1670 - tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
1671 - if (newtype != IPV6_DSTOPTS && opt->dst1opt)
1672 - tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
1673 + if (opt) {
1674 + if (newtype != IPV6_HOPOPTS && opt->hopopt)
1675 + tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
1676 + if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
1677 + tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
1678 + if (newtype != IPV6_RTHDR && opt->srcrt)
1679 + tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
1680 + if (newtype != IPV6_DSTOPTS && opt->dst1opt)
1681 + tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
1682 + }
1683 +
1684 if (newopt && newoptlen)
1685 tot_len += CMSG_ALIGN(newoptlen);
1686
1687 @@ -659,25 +662,25 @@ ipv6_renew_options(struct sock *sk, stru
1688 opt2->tot_len = tot_len;
1689 p = (char *)(opt2 + 1);
1690
1691 - err = ipv6_renew_option(opt->hopopt, newopt, newoptlen,
1692 + err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen,
1693 newtype != IPV6_HOPOPTS,
1694 &opt2->hopopt, &p);
1695 if (err)
1696 goto out;
1697
1698 - err = ipv6_renew_option(opt->dst0opt, newopt, newoptlen,
1699 + err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen,
1700 newtype != IPV6_RTHDRDSTOPTS,
1701 &opt2->dst0opt, &p);
1702 if (err)
1703 goto out;
1704
1705 - err = ipv6_renew_option(opt->srcrt, newopt, newoptlen,
1706 + err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen,
1707 newtype != IPV6_RTHDR,
1708 - (struct ipv6_opt_hdr **)opt2->srcrt, &p);
1709 + (struct ipv6_opt_hdr **)&opt2->srcrt, &p);
1710 if (err)
1711 goto out;
1712
1713 - err = ipv6_renew_option(opt->dst1opt, newopt, newoptlen,
1714 + err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
1715 newtype != IPV6_DSTOPTS,
1716 &opt2->dst1opt, &p);
1717 if (err)
1718 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
1719 index e460489..56eddb3 100644
1720 --- a/net/ipv6/ip6_output.c
1721 +++ b/net/ipv6/ip6_output.c
1722 @@ -1047,7 +1047,7 @@ alloc_new_skb:
1723 skb_prev->csum = csum_sub(skb_prev->csum,
1724 skb->csum);
1725 data += fraggap;
1726 - skb_trim(skb_prev, maxfraglen);
1727 + pskb_trim_unique(skb_prev, maxfraglen);
1728 }
1729 copy = datalen - transhdrlen - fraggap;
1730 if (copy < 0) {
1731 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
1732 index 600eb59..5b1c837 100644
1733 --- a/net/sctp/socket.c
1734 +++ b/net/sctp/socket.c
1735 @@ -1246,9 +1246,13 @@ SCTP_STATIC void sctp_close(struct sock
1736 }
1737 }
1738
1739 - if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)
1740 - sctp_primitive_ABORT(asoc, NULL);
1741 - else
1742 + if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1743 + struct sctp_chunk *chunk;
1744 +
1745 + chunk = sctp_make_abort_user(asoc, NULL, 0);
1746 + if (chunk)
1747 + sctp_primitive_ABORT(asoc, chunk);
1748 + } else
1749 sctp_primitive_SHUTDOWN(asoc, NULL);
1750 }
1751