Magellan Linux

Contents of /trunk/kernel-lts/patches-3.4/0170-3.4.71-all-fixes.patch

Revision 2335
Thu Dec 12 08:43:33 2013 UTC by niro
File size: 56983 bytes
-linux-3.4.71
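
Several of the hunks below (arch/ia64/include/asm/processor.h, fs/exec.c, include/linux/sched.h, include/linux/binfmts.h and kernel/ptrace.c) revolve around one hardening idea: get_dumpable() returns a tri-state value (SUID_DUMP_DISABLE = 0, SUID_DUMP_USER = 1, SUID_DUMP_ROOT = 2), so callers that gate privilege decisions must compare against SUID_DUMP_USER rather than treat the result as a boolean. The stand-alone C sketch below only illustrates that distinction; the constants mirror the patched sched.h, while get_dumpable_stub(), may_attach_old() and may_attach_new() are illustrative stand-ins, not kernel API.

#include <stdio.h>

/* Tri-state dumpable values, mirroring the patched include/linux/sched.h */
#define SUID_DUMP_DISABLE 0   /* no setuid dumping             */
#define SUID_DUMP_USER    1   /* dump as the user of the task  */
#define SUID_DUMP_ROOT    2   /* dump as root                  */

/* Illustrative stand-in for the kernel's get_dumpable(mm): low two bits. */
static int get_dumpable_stub(int mm_flags)
{
        return mm_flags & 0x3;
}

/* Boolean-style check used before the patch: SUID_DUMP_ROOT (2) passes. */
static int may_attach_old(int dumpable)
{
        return dumpable != 0;
}

/* Patched check: only SUID_DUMP_USER grants access without extra caps. */
static int may_attach_new(int dumpable)
{
        return dumpable == SUID_DUMP_USER;
}

int main(void)
{
        int dumpable = get_dumpable_stub(SUID_DUMP_ROOT);

        printf("old boolean check lets SUID_DUMP_ROOT through: %d\n",
               may_attach_old(dumpable));   /* prints 1 */
        printf("patched check rejects it:                      %d\n",
               may_attach_new(dumpable));   /* prints 0 */
        return 0;
}

Built as an ordinary userspace program this prints 1 then 0 for a root-only-dumpable task, which is the behavioral change the kernel/ptrace.c hunk encodes: such a task no longer satisfies the unprivileged attach check unless the tracer has the required capability.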
1 diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h
2 index ac12ae2b9286..db9a16c704f3 100644
3 --- a/arch/cris/include/asm/io.h
4 +++ b/arch/cris/include/asm/io.h
5 @@ -3,6 +3,7 @@
6
7 #include <asm/page.h> /* for __va, __pa */
8 #include <arch/io.h>
9 +#include <asm-generic/iomap.h>
10 #include <linux/kernel.h>
11
12 struct cris_io_operations
13 diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
14 index 483f6c6a4238..2d0cb8e8eedd 100644
15 --- a/arch/ia64/include/asm/processor.h
16 +++ b/arch/ia64/include/asm/processor.h
17 @@ -322,7 +322,7 @@ struct thread_struct {
18 regs->loadrs = 0; \
19 regs->r8 = get_dumpable(current->mm); /* set "don't zap registers" flag */ \
20 regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
21 - if (unlikely(!get_dumpable(current->mm))) { \
22 + if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) { \
23 /* \
24 * Zap scratch regs to avoid leaking bits between processes with different \
25 * uid/privileges. \
26 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
27 index 45eb998557f8..e6de787956ce 100644
28 --- a/arch/powerpc/kernel/signal_32.c
29 +++ b/arch/powerpc/kernel/signal_32.c
30 @@ -459,7 +459,15 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
31 if (copy_vsx_to_user(&frame->mc_vsregs, current))
32 return 1;
33 msr |= MSR_VSX;
34 - }
35 + } else if (!ctx_has_vsx_region)
36 + /*
37 + * With a small context structure we can't hold the VSX
38 + * registers, hence clear the MSR value to indicate the state
39 + * was not saved.
40 + */
41 + msr &= ~MSR_VSX;
42 +
43 +
44 #endif /* CONFIG_VSX */
45 #ifdef CONFIG_SPE
46 /* save spe registers */
47 diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
48 index cfe0069bcfc8..fcf89bff1177 100644
49 --- a/arch/powerpc/kernel/vio.c
50 +++ b/arch/powerpc/kernel/vio.c
51 @@ -1342,12 +1342,12 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
52
53 dn = dev->of_node;
54 if (!dn) {
55 - strcat(buf, "\n");
56 + strcpy(buf, "\n");
57 return strlen(buf);
58 }
59 cp = of_get_property(dn, "compatible", NULL);
60 if (!cp) {
61 - strcat(buf, "\n");
62 + strcpy(buf, "\n");
63 return strlen(buf);
64 }
65
66 diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
67 index fbdd74dac3ac..5da8e8df5922 100644
68 --- a/arch/powerpc/platforms/powernv/pci-ioda.c
69 +++ b/arch/powerpc/platforms/powernv/pci-ioda.c
70 @@ -613,13 +613,23 @@ static int __devinit pnv_ioda_configure_pe(struct pnv_phb *phb,
71 rid_end = pe->rid + 1;
72 }
73
74 - /* Associate PE in PELT */
75 + /*
76 + * Associate PE in PELT. We need add the PE into the
77 + * corresponding PELT-V as well. Otherwise, the error
78 + * originated from the PE might contribute to other
79 + * PEs.
80 + */
81 rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
82 bcomp, dcomp, fcomp, OPAL_MAP_PE);
83 if (rc) {
84 pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
85 return -ENXIO;
86 }
87 +
88 + rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
89 + pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
90 + if (rc)
91 + pe_warn(pe, "OPAL error %d adding self to PELTV\n", rc);
92 opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
93 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
94
95 diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
96 index 5d8cf0d6796c..b316ffe8ab59 100644
97 --- a/arch/x86/kernel/microcode_amd.c
98 +++ b/arch/x86/kernel/microcode_amd.c
99 @@ -338,7 +338,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device)
100 snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
101
102 if (request_firmware(&fw, (const char *)fw_name, device)) {
103 - pr_err("failed to load file %s\n", fw_name);
104 + pr_debug("failed to load file %s\n", fw_name);
105 goto out;
106 }
107
108 diff --git a/block/blk-core.c b/block/blk-core.c
109 index 85fd41003434..a02cfb7e4123 100644
110 --- a/block/blk-core.c
111 +++ b/block/blk-core.c
112 @@ -2041,6 +2041,7 @@ void blk_start_request(struct request *req)
113 if (unlikely(blk_bidi_rq(req)))
114 req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
115
116 + BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
117 blk_add_timer(req);
118 }
119 EXPORT_SYMBOL(blk_start_request);
120 diff --git a/block/blk-settings.c b/block/blk-settings.c
121 index d3234fc494ad..b74cc58bc038 100644
122 --- a/block/blk-settings.c
123 +++ b/block/blk-settings.c
124 @@ -143,6 +143,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
125 lim->discard_zeroes_data = 1;
126 lim->max_segments = USHRT_MAX;
127 lim->max_hw_sectors = UINT_MAX;
128 + lim->max_segment_size = UINT_MAX;
129
130 lim->max_sectors = BLK_DEF_MAX_SECTORS;
131 }
132 diff --git a/block/blk-timeout.c b/block/blk-timeout.c
133 index 780354888958..b1182ea52427 100644
134 --- a/block/blk-timeout.c
135 +++ b/block/blk-timeout.c
136 @@ -90,8 +90,8 @@ static void blk_rq_timed_out(struct request *req)
137 __blk_complete_request(req);
138 break;
139 case BLK_EH_RESET_TIMER:
140 - blk_clear_rq_complete(req);
141 blk_add_timer(req);
142 + blk_clear_rq_complete(req);
143 break;
144 case BLK_EH_NOT_HANDLED:
145 /*
146 @@ -173,7 +173,6 @@ void blk_add_timer(struct request *req)
147 return;
148
149 BUG_ON(!list_empty(&req->timeout_list));
150 - BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
151
152 /*
153 * Some LLDs, like scsi, peek at the timeout to prevent a
154 diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
155 index 6ddd99e6114b..c21f761b65b5 100644
156 --- a/crypto/ansi_cprng.c
157 +++ b/crypto/ansi_cprng.c
158 @@ -230,11 +230,11 @@ remainder:
159 */
160 if (byte_count < DEFAULT_BLK_SZ) {
161 empty_rbuf:
162 - for (; ctx->rand_data_valid < DEFAULT_BLK_SZ;
163 - ctx->rand_data_valid++) {
164 + while (ctx->rand_data_valid < DEFAULT_BLK_SZ) {
165 *ptr = ctx->rand_data[ctx->rand_data_valid];
166 ptr++;
167 byte_count--;
168 + ctx->rand_data_valid++;
169 if (byte_count == 0)
170 goto done;
171 }
172 diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
173 index 9ba8c73cea16..fe2f9d95d0f8 100644
174 --- a/drivers/acpi/acpica/exoparg1.c
175 +++ b/drivers/acpi/acpica/exoparg1.c
176 @@ -970,10 +970,17 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
177 */
178 return_desc =
179 *(operand[0]->reference.where);
180 - if (return_desc) {
181 - acpi_ut_add_reference
182 - (return_desc);
183 + if (!return_desc) {
184 + /*
185 + * Element is NULL, do not allow the dereference.
186 + * This provides compatibility with other ACPI
187 + * implementations.
188 + */
189 + return_ACPI_STATUS
190 + (AE_AML_UNINITIALIZED_ELEMENT);
191 }
192 +
193 + acpi_ut_add_reference(return_desc);
194 break;
195
196 default:
197 @@ -998,11 +1005,40 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
198 acpi_namespace_node
199 *)
200 return_desc);
201 - }
202 + if (!return_desc) {
203 + break;
204 + }
205
206 - /* Add another reference to the object! */
207 + /*
208 + * June 2013:
209 + * buffer_fields/field_units require additional resolution
210 + */
211 + switch (return_desc->common.type) {
212 + case ACPI_TYPE_BUFFER_FIELD:
213 + case ACPI_TYPE_LOCAL_REGION_FIELD:
214 + case ACPI_TYPE_LOCAL_BANK_FIELD:
215 + case ACPI_TYPE_LOCAL_INDEX_FIELD:
216
217 - acpi_ut_add_reference(return_desc);
218 + status =
219 + acpi_ex_read_data_from_field
220 + (walk_state, return_desc,
221 + &temp_desc);
222 + if (ACPI_FAILURE(status)) {
223 + goto cleanup;
224 + }
225 +
226 + return_desc = temp_desc;
227 + break;
228 +
229 + default:
230 +
231 + /* Add another reference to the object */
232 +
233 + acpi_ut_add_reference
234 + (return_desc);
235 + break;
236 + }
237 + }
238 break;
239
240 default:
241 diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
242 index c6cf843cc4c9..9806f4be7b88 100644
243 --- a/drivers/acpi/acpica/exstore.c
244 +++ b/drivers/acpi/acpica/exstore.c
245 @@ -57,6 +57,11 @@ acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
246 union acpi_operand_object *dest_desc,
247 struct acpi_walk_state *walk_state);
248
249 +static acpi_status
250 +acpi_ex_store_direct_to_node(union acpi_operand_object *source_desc,
251 + struct acpi_namespace_node *node,
252 + struct acpi_walk_state *walk_state);
253 +
254 /*******************************************************************************
255 *
256 * FUNCTION: acpi_ex_store
257 @@ -376,7 +381,11 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
258 * When storing into an object the data is converted to the
259 * target object type then stored in the object. This means
260 * that the target object type (for an initialized target) will
261 - * not be changed by a store operation.
262 + * not be changed by a store operation. A copy_object can change
263 + * the target type, however.
264 + *
265 + * The implicit_conversion flag is set to NO/FALSE only when
266 + * storing to an arg_x -- as per the rules of the ACPI spec.
267 *
268 * Assumes parameters are already validated.
269 *
270 @@ -400,7 +409,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
271 target_type = acpi_ns_get_type(node);
272 target_desc = acpi_ns_get_attached_object(node);
273
274 - ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Storing %p(%s) into node %p(%s)\n",
275 + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Storing %p (%s) to node %p (%s)\n",
276 source_desc,
277 acpi_ut_get_object_type_name(source_desc), node,
278 acpi_ut_get_type_name(target_type)));
279 @@ -414,46 +423,31 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
280 return_ACPI_STATUS(status);
281 }
282
283 - /* If no implicit conversion, drop into the default case below */
284 -
285 - if ((!implicit_conversion) ||
286 - ((walk_state->opcode == AML_COPY_OP) &&
287 - (target_type != ACPI_TYPE_LOCAL_REGION_FIELD) &&
288 - (target_type != ACPI_TYPE_LOCAL_BANK_FIELD) &&
289 - (target_type != ACPI_TYPE_LOCAL_INDEX_FIELD))) {
290 - /*
291 - * Force execution of default (no implicit conversion). Note:
292 - * copy_object does not perform an implicit conversion, as per the ACPI
293 - * spec -- except in case of region/bank/index fields -- because these
294 - * objects must retain their original type permanently.
295 - */
296 - target_type = ACPI_TYPE_ANY;
297 - }
298 -
299 /* Do the actual store operation */
300
301 switch (target_type) {
302 - case ACPI_TYPE_BUFFER_FIELD:
303 - case ACPI_TYPE_LOCAL_REGION_FIELD:
304 - case ACPI_TYPE_LOCAL_BANK_FIELD:
305 - case ACPI_TYPE_LOCAL_INDEX_FIELD:
306 -
307 - /* For fields, copy the source data to the target field. */
308 -
309 - status = acpi_ex_write_data_to_field(source_desc, target_desc,
310 - &walk_state->result_obj);
311 - break;
312 -
313 case ACPI_TYPE_INTEGER:
314 case ACPI_TYPE_STRING:
315 case ACPI_TYPE_BUFFER:
316
317 /*
318 - * These target types are all of type Integer/String/Buffer, and
319 - * therefore support implicit conversion before the store.
320 - *
321 - * Copy and/or convert the source object to a new target object
322 + * The simple data types all support implicit source operand
323 + * conversion before the store.
324 */
325 +
326 + if ((walk_state->opcode == AML_COPY_OP) || !implicit_conversion) {
327 + /*
328 + * However, copy_object and Stores to arg_x do not perform
329 + * an implicit conversion, as per the ACPI specification.
330 + * A direct store is performed instead.
331 + */
332 + status = acpi_ex_store_direct_to_node(source_desc, node,
333 + walk_state);
334 + break;
335 + }
336 +
337 + /* Store with implicit source operand conversion support */
338 +
339 status =
340 acpi_ex_store_object_to_object(source_desc, target_desc,
341 &new_desc, walk_state);
342 @@ -467,13 +461,12 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
343 * the Name's type to that of the value being stored in it.
344 * source_desc reference count is incremented by attach_object.
345 *
346 - * Note: This may change the type of the node if an explicit store
347 - * has been performed such that the node/object type has been
348 - * changed.
349 + * Note: This may change the type of the node if an explicit
350 + * store has been performed such that the node/object type
351 + * has been changed.
352 */
353 - status =
354 - acpi_ns_attach_object(node, new_desc,
355 - new_desc->common.type);
356 + status = acpi_ns_attach_object(node, new_desc,
357 + new_desc->common.type);
358
359 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
360 "Store %s into %s via Convert/Attach\n",
361 @@ -484,19 +477,83 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
362 }
363 break;
364
365 + case ACPI_TYPE_BUFFER_FIELD:
366 + case ACPI_TYPE_LOCAL_REGION_FIELD:
367 + case ACPI_TYPE_LOCAL_BANK_FIELD:
368 + case ACPI_TYPE_LOCAL_INDEX_FIELD:
369 + /*
370 + * For all fields, always write the source data to the target
371 + * field. Any required implicit source operand conversion is
372 + * performed in the function below as necessary. Note, field
373 + * objects must retain their original type permanently.
374 + */
375 + status = acpi_ex_write_data_to_field(source_desc, target_desc,
376 + &walk_state->result_obj);
377 + break;
378 +
379 default:
380 + /*
381 + * No conversions for all other types. Directly store a copy of
382 + * the source object. This is the ACPI spec-defined behavior for
383 + * the copy_object operator.
384 + *
385 + * NOTE: For the Store operator, this is a departure from the
386 + * ACPI spec, which states "If conversion is impossible, abort
387 + * the running control method". Instead, this code implements
388 + * "If conversion is impossible, treat the Store operation as
389 + * a CopyObject".
390 + */
391 + status = acpi_ex_store_direct_to_node(source_desc, node,
392 + walk_state);
393 + break;
394 + }
395
396 - ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
397 - "Storing %s (%p) directly into node (%p) with no implicit conversion\n",
398 - acpi_ut_get_object_type_name(source_desc),
399 - source_desc, node));
400 + return_ACPI_STATUS(status);
401 +}
402
403 - /* No conversions for all other types. Just attach the source object */
404 +/*******************************************************************************
405 + *
406 + * FUNCTION: acpi_ex_store_direct_to_node
407 + *
408 + * PARAMETERS: source_desc - Value to be stored
409 + * node - Named object to receive the value
410 + * walk_state - Current walk state
411 + *
412 + * RETURN: Status
413 + *
414 + * DESCRIPTION: "Store" an object directly to a node. This involves a copy
415 + * and an attach.
416 + *
417 + ******************************************************************************/
418
419 - status = acpi_ns_attach_object(node, source_desc,
420 - source_desc->common.type);
421 - break;
422 +static acpi_status
423 +acpi_ex_store_direct_to_node(union acpi_operand_object *source_desc,
424 + struct acpi_namespace_node *node,
425 + struct acpi_walk_state *walk_state)
426 +{
427 + acpi_status status;
428 + union acpi_operand_object *new_desc;
429 +
430 + ACPI_FUNCTION_TRACE(ex_store_direct_to_node);
431 +
432 + ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
433 + "Storing [%s] (%p) directly into node [%s] (%p)"
434 + " with no implicit conversion\n",
435 + acpi_ut_get_object_type_name(source_desc),
436 + source_desc, acpi_ut_get_type_name(node->type),
437 + node));
438 +
439 + /* Copy the source object to a new object */
440 +
441 + status =
442 + acpi_ut_copy_iobject_to_iobject(source_desc, &new_desc, walk_state);
443 + if (ACPI_FAILURE(status)) {
444 + return_ACPI_STATUS(status);
445 }
446
447 + /* Attach the new object to the node */
448 +
449 + status = acpi_ns_attach_object(node, new_desc, new_desc->common.type);
450 + acpi_ut_remove_reference(new_desc);
451 return_ACPI_STATUS(status);
452 }
453 diff --git a/drivers/block/brd.c b/drivers/block/brd.c
454 index 4e8213aa02fd..a7d70e2a8d74 100644
455 --- a/drivers/block/brd.c
456 +++ b/drivers/block/brd.c
457 @@ -546,7 +546,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
458
459 mutex_lock(&brd_devices_mutex);
460 brd = brd_init_one(MINOR(dev) >> part_shift);
461 - kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
462 + kobj = brd ? get_disk(brd->brd_disk) : NULL;
463 mutex_unlock(&brd_devices_mutex);
464
465 *part = 0;
466 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
467 index 179b5b408cb3..a4ddbae2e100 100644
468 --- a/drivers/block/loop.c
469 +++ b/drivers/block/loop.c
470 @@ -1743,7 +1743,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
471 if (err < 0)
472 err = loop_add(&lo, MINOR(dev) >> part_shift);
473 if (err < 0)
474 - kobj = ERR_PTR(err);
475 + kobj = NULL;
476 else
477 kobj = get_disk(lo->lo_disk);
478 mutex_unlock(&loop_index_mutex);
479 diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
480 index 22b14a68e35e..1f4f94103c55 100644
481 --- a/drivers/hwmon/lm90.c
482 +++ b/drivers/hwmon/lm90.c
483 @@ -278,7 +278,7 @@ static const struct lm90_params lm90_params[] = {
484 [max6696] = {
485 .flags = LM90_HAVE_EMERGENCY
486 | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3,
487 - .alert_alarms = 0x187c,
488 + .alert_alarms = 0x1c7c,
489 .max_convrate = 6,
490 .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
491 },
492 @@ -1504,19 +1504,22 @@ static void lm90_alert(struct i2c_client *client, unsigned int flag)
493 if ((alarms & 0x7f) == 0 && (alarms2 & 0xfe) == 0) {
494 dev_info(&client->dev, "Everything OK\n");
495 } else {
496 - if (alarms & 0x61)
497 + if ((alarms & 0x61) || (alarms2 & 0x80))
498 dev_warn(&client->dev,
499 "temp%d out of range, please check!\n", 1);
500 - if (alarms & 0x1a)
501 + if ((alarms & 0x1a) || (alarms2 & 0x20))
502 dev_warn(&client->dev,
503 "temp%d out of range, please check!\n", 2);
504 if (alarms & 0x04)
505 dev_warn(&client->dev,
506 "temp%d diode open, please check!\n", 2);
507
508 - if (alarms2 & 0x18)
509 + if (alarms2 & 0x5a)
510 dev_warn(&client->dev,
511 "temp%d out of range, please check!\n", 3);
512 + if (alarms2 & 0x04)
513 + dev_warn(&client->dev,
514 + "temp%d diode open, please check!\n", 3);
515
516 /*
517 * Disable ALERT# output, because these chips don't implement
518 diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
519 index 21a3d77ea7e2..77405b4e8636 100644
520 --- a/drivers/net/can/c_can/c_can.c
521 +++ b/drivers/net/can/c_can/c_can.c
522 @@ -760,9 +760,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
523 msg_ctrl_save = priv->read_reg(priv,
524 &priv->regs->ifregs[0].msg_cntrl);
525
526 - if (msg_ctrl_save & IF_MCONT_EOB)
527 - return num_rx_pkts;
528 -
529 if (msg_ctrl_save & IF_MCONT_MSGLST) {
530 c_can_handle_lost_msg_obj(dev, 0, msg_obj);
531 num_rx_pkts++;
532 @@ -770,6 +767,9 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
533 continue;
534 }
535
536 + if (msg_ctrl_save & IF_MCONT_EOB)
537 + return num_rx_pkts;
538 +
539 if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
540 continue;
541
542 diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
543 index 1b4404725b8c..effb3b71ce75 100644
544 --- a/drivers/net/ethernet/realtek/8139cp.c
545 +++ b/drivers/net/ethernet/realtek/8139cp.c
546 @@ -1232,6 +1232,7 @@ static void cp_tx_timeout(struct net_device *dev)
547 cp_clean_rings(cp);
548 rc = cp_init_rings(cp);
549 cp_start_hw(cp);
550 + cp_enable_irq(cp);
551
552 netif_wake_queue(dev);
553
554 diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
555 index a06cc283e23d..0b48430d6ad0 100644
556 --- a/drivers/net/wireless/libertas/debugfs.c
557 +++ b/drivers/net/wireless/libertas/debugfs.c
558 @@ -913,7 +913,10 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
559 char *p2;
560 struct debug_data *d = f->private_data;
561
562 - pdata = kmalloc(cnt, GFP_KERNEL);
563 + if (cnt == 0)
564 + return 0;
565 +
566 + pdata = kmalloc(cnt + 1, GFP_KERNEL);
567 if (pdata == NULL)
568 return 0;
569
570 @@ -922,6 +925,7 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
571 kfree(pdata);
572 return 0;
573 }
574 + pdata[cnt] = '\0';
575
576 p0 = pdata;
577 for (i = 0; i < num_of_items; i++) {
578 diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
579 index 65cb4250259f..6eec862fea28 100644
580 --- a/drivers/net/wireless/rt2x00/rt2800usb.c
581 +++ b/drivers/net/wireless/rt2x00/rt2800usb.c
582 @@ -143,6 +143,8 @@ static bool rt2800usb_txstatus_timeout(struct rt2x00_dev *rt2x00dev)
583 return false;
584 }
585
586 +#define TXSTATUS_READ_INTERVAL 1000000
587 +
588 static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
589 int urb_status, u32 tx_status)
590 {
591 @@ -170,8 +172,9 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
592 queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
593
594 if (rt2800usb_txstatus_pending(rt2x00dev)) {
595 - /* Read register after 250 us */
596 - hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 250000),
597 + /* Read register after 1 ms */
598 + hrtimer_start(&rt2x00dev->txstatus_timer,
599 + ktime_set(0, TXSTATUS_READ_INTERVAL),
600 HRTIMER_MODE_REL);
601 return false;
602 }
603 @@ -196,8 +199,9 @@ static void rt2800usb_async_read_tx_status(struct rt2x00_dev *rt2x00dev)
604 if (test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
605 return;
606
607 - /* Read TX_STA_FIFO register after 500 us */
608 - hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 500000),
609 + /* Read TX_STA_FIFO register after 2 ms */
610 + hrtimer_start(&rt2x00dev->txstatus_timer,
611 + ktime_set(0, 2*TXSTATUS_READ_INTERVAL),
612 HRTIMER_MODE_REL);
613 }
614
615 diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
616 index a8885f060060..6701f2d71274 100644
617 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c
618 +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
619 @@ -771,6 +771,9 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop)
620 struct rt2x00_dev *rt2x00dev = hw->priv;
621 struct data_queue *queue;
622
623 + if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
624 + return;
625 +
626 tx_queue_for_each(rt2x00dev, queue)
627 rt2x00queue_flush_queue(queue, drop);
628 }
629 diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
630 index 0bd38da4ada0..05fbc3ded9b5 100644
631 --- a/drivers/scsi/aacraid/commctrl.c
632 +++ b/drivers/scsi/aacraid/commctrl.c
633 @@ -508,7 +508,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
634 goto cleanup;
635 }
636
637 - if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))) {
638 + if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
639 + (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
640 rcode = -EINVAL;
641 goto cleanup;
642 }
643 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
644 index a783d533a1a6..715075291834 100644
645 --- a/drivers/uio/uio.c
646 +++ b/drivers/uio/uio.c
647 @@ -650,16 +650,30 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
648 {
649 struct uio_device *idev = vma->vm_private_data;
650 int mi = uio_find_mem_index(vma);
651 + struct uio_mem *mem;
652 if (mi < 0)
653 return -EINVAL;
654 + mem = idev->info->mem + mi;
655 +
656 + if (vma->vm_end - vma->vm_start > mem->size)
657 + return -EINVAL;
658
659 vma->vm_flags |= VM_IO | VM_RESERVED;
660
661 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
662
663 + /*
664 + * We cannot use the vm_iomap_memory() helper here,
665 + * because vma->vm_pgoff is the map index we looked
666 + * up above in uio_find_mem_index(), rather than an
667 + * actual page offset into the mmap.
668 + *
669 + * So we just do the physical mmap without a page
670 + * offset.
671 + */
672 return remap_pfn_range(vma,
673 vma->vm_start,
674 - idev->info->mem[mi].addr >> PAGE_SHIFT,
675 + mem->addr >> PAGE_SHIFT,
676 vma->vm_end - vma->vm_start,
677 vma->vm_page_prot);
678 }
679 diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
680 index c8542356898b..91293b68df5a 100644
681 --- a/drivers/usb/serial/mos7840.c
682 +++ b/drivers/usb/serial/mos7840.c
683 @@ -1664,7 +1664,11 @@ static int mos7840_tiocmget(struct tty_struct *tty)
684 return -ENODEV;
685
686 status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
687 + if (status != 1)
688 + return -EIO;
689 status = mos7840_get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr);
690 + if (status != 1)
691 + return -EIO;
692 result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0)
693 | ((mcr & MCR_RTS) ? TIOCM_RTS : 0)
694 | ((mcr & MCR_LOOPBACK) ? TIOCM_LOOP : 0)
695 diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
696 index ffbce4525468..612c1c7cb31b 100644
697 --- a/drivers/video/au1100fb.c
698 +++ b/drivers/video/au1100fb.c
699 @@ -375,39 +375,15 @@ void au1100fb_fb_rotate(struct fb_info *fbi, int angle)
700 int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
701 {
702 struct au1100fb_device *fbdev;
703 - unsigned int len;
704 - unsigned long start=0, off;
705
706 fbdev = to_au1100fb_device(fbi);
707
708 - if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
709 - return -EINVAL;
710 - }
711 -
712 - start = fbdev->fb_phys & PAGE_MASK;
713 - len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
714 -
715 - off = vma->vm_pgoff << PAGE_SHIFT;
716 -
717 - if ((vma->vm_end - vma->vm_start + off) > len) {
718 - return -EINVAL;
719 - }
720 -
721 - off += start;
722 - vma->vm_pgoff = off >> PAGE_SHIFT;
723 -
724 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
725 pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
726
727 vma->vm_flags |= VM_IO;
728
729 - if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
730 - vma->vm_end - vma->vm_start,
731 - vma->vm_page_prot)) {
732 - return -EAGAIN;
733 - }
734 -
735 - return 0;
736 + return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
737 }
738
739 static struct fb_ops au1100fb_ops =
740 diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
741 index 7ca79f02056e..117be3d9b854 100644
742 --- a/drivers/video/au1200fb.c
743 +++ b/drivers/video/au1200fb.c
744 @@ -1233,36 +1233,15 @@ static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
745 * method mainly to allow the use of the TLB streaming flag (CCA=6)
746 */
747 static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
748 -
749 {
750 - unsigned int len;
751 - unsigned long start=0, off;
752 struct au1200fb_device *fbdev = info->par;
753
754 - if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
755 - return -EINVAL;
756 - }
757 -
758 - start = fbdev->fb_phys & PAGE_MASK;
759 - len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
760 -
761 - off = vma->vm_pgoff << PAGE_SHIFT;
762 -
763 - if ((vma->vm_end - vma->vm_start + off) > len) {
764 - return -EINVAL;
765 - }
766 -
767 - off += start;
768 - vma->vm_pgoff = off >> PAGE_SHIFT;
769 -
770 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
771 pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
772
773 vma->vm_flags |= VM_IO;
774
775 - return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
776 - vma->vm_end - vma->vm_start,
777 - vma->vm_page_prot);
778 + return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
779
780 return 0;
781 }
782 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
783 index 7e6c52d8a207..c91f6d1bf64f 100644
784 --- a/fs/configfs/dir.c
785 +++ b/fs/configfs/dir.c
786 @@ -56,10 +56,19 @@ static void configfs_d_iput(struct dentry * dentry,
787 struct configfs_dirent *sd = dentry->d_fsdata;
788
789 if (sd) {
790 - BUG_ON(sd->s_dentry != dentry);
791 /* Coordinate with configfs_readdir */
792 spin_lock(&configfs_dirent_lock);
793 - sd->s_dentry = NULL;
794 + /* Coordinate with configfs_attach_attr where will increase
795 + * sd->s_count and update sd->s_dentry to new allocated one.
796 + * Only set sd->dentry to null when this dentry is the only
797 + * sd owner.
798 + * If not do so, configfs_d_iput may run just after
799 + * configfs_attach_attr and set sd->s_dentry to null
800 + * even it's still in use.
801 + */
802 + if (atomic_read(&sd->s_count) <= 2)
803 + sd->s_dentry = NULL;
804 +
805 spin_unlock(&configfs_dirent_lock);
806 configfs_put(sd);
807 }
808 @@ -426,8 +435,11 @@ static int configfs_attach_attr(struct configfs_dirent * sd, struct dentry * den
809 struct configfs_attribute * attr = sd->s_element;
810 int error;
811
812 + spin_lock(&configfs_dirent_lock);
813 dentry->d_fsdata = configfs_get(sd);
814 sd->s_dentry = dentry;
815 + spin_unlock(&configfs_dirent_lock);
816 +
817 error = configfs_create(dentry, (attr->ca_mode & S_IALLUGO) | S_IFREG,
818 configfs_init_file);
819 if (error) {
820 diff --git a/fs/dcache.c b/fs/dcache.c
821 index 9d39de40909d..09e2eda55c57 100644
822 --- a/fs/dcache.c
823 +++ b/fs/dcache.c
824 @@ -2513,7 +2513,6 @@ static int prepend_path(const struct path *path,
825 bool slash = false;
826 int error = 0;
827
828 - br_read_lock(vfsmount_lock);
829 while (dentry != root->dentry || vfsmnt != root->mnt) {
830 struct dentry * parent;
831
832 @@ -2543,8 +2542,6 @@ static int prepend_path(const struct path *path,
833 if (!error && !slash)
834 error = prepend(buffer, buflen, "/", 1);
835
836 -out:
837 - br_read_unlock(vfsmount_lock);
838 return error;
839
840 global_root:
841 @@ -2561,7 +2558,7 @@ global_root:
842 error = prepend(buffer, buflen, "/", 1);
843 if (!error)
844 error = real_mount(vfsmnt)->mnt_ns ? 1 : 2;
845 - goto out;
846 + return error;
847 }
848
849 /**
850 @@ -2588,9 +2585,11 @@ char *__d_path(const struct path *path,
851 int error;
852
853 prepend(&res, &buflen, "\0", 1);
854 + br_read_lock(vfsmount_lock);
855 write_seqlock(&rename_lock);
856 error = prepend_path(path, root, &res, &buflen);
857 write_sequnlock(&rename_lock);
858 + br_read_unlock(vfsmount_lock);
859
860 if (error < 0)
861 return ERR_PTR(error);
862 @@ -2607,9 +2606,11 @@ char *d_absolute_path(const struct path *path,
863 int error;
864
865 prepend(&res, &buflen, "\0", 1);
866 + br_read_lock(vfsmount_lock);
867 write_seqlock(&rename_lock);
868 error = prepend_path(path, &root, &res, &buflen);
869 write_sequnlock(&rename_lock);
870 + br_read_unlock(vfsmount_lock);
871
872 if (error > 1)
873 error = -EINVAL;
874 @@ -2673,11 +2674,13 @@ char *d_path(const struct path *path, char *buf, int buflen)
875 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
876
877 get_fs_root(current->fs, &root);
878 + br_read_lock(vfsmount_lock);
879 write_seqlock(&rename_lock);
880 error = path_with_deleted(path, &root, &res, &buflen);
881 + write_sequnlock(&rename_lock);
882 + br_read_unlock(vfsmount_lock);
883 if (error < 0)
884 res = ERR_PTR(error);
885 - write_sequnlock(&rename_lock);
886 path_put(&root);
887 return res;
888 }
889 @@ -2832,6 +2835,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
890 get_fs_root_and_pwd(current->fs, &root, &pwd);
891
892 error = -ENOENT;
893 + br_read_lock(vfsmount_lock);
894 write_seqlock(&rename_lock);
895 if (!d_unlinked(pwd.dentry)) {
896 unsigned long len;
897 @@ -2841,6 +2845,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
898 prepend(&cwd, &buflen, "\0", 1);
899 error = prepend_path(&pwd, &root, &cwd, &buflen);
900 write_sequnlock(&rename_lock);
901 + br_read_unlock(vfsmount_lock);
902
903 if (error < 0)
904 goto out;
905 @@ -2861,6 +2866,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
906 }
907 } else {
908 write_sequnlock(&rename_lock);
909 + br_read_unlock(vfsmount_lock);
910 }
911
912 out:
913 diff --git a/fs/exec.c b/fs/exec.c
914 index 0ea0b4c476d8..5b9dfbe84b19 100644
915 --- a/fs/exec.c
916 +++ b/fs/exec.c
917 @@ -909,11 +909,13 @@ static int de_thread(struct task_struct *tsk)
918
919 sig->notify_count = -1; /* for exit_notify() */
920 for (;;) {
921 + threadgroup_change_begin(tsk);
922 write_lock_irq(&tasklist_lock);
923 if (likely(leader->exit_state))
924 break;
925 __set_current_state(TASK_UNINTERRUPTIBLE);
926 write_unlock_irq(&tasklist_lock);
927 + threadgroup_change_end(tsk);
928 schedule();
929 }
930
931 @@ -969,6 +971,7 @@ static int de_thread(struct task_struct *tsk)
932 if (unlikely(leader->ptrace))
933 __wake_up_parent(leader, leader->parent);
934 write_unlock_irq(&tasklist_lock);
935 + threadgroup_change_end(tsk);
936
937 release_task(leader);
938 }
939 @@ -2024,6 +2027,12 @@ static int __get_dumpable(unsigned long mm_flags)
940 return (ret >= 2) ? 2 : ret;
941 }
942
943 +/*
944 + * This returns the actual value of the suid_dumpable flag. For things
945 + * that are using this for checking for privilege transitions, it must
946 + * test against SUID_DUMP_USER rather than treating it as a boolean
947 + * value.
948 + */
949 int get_dumpable(struct mm_struct *mm)
950 {
951 return __get_dumpable(mm->flags);
952 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
953 index edf411988bf3..9bb4e5c541b0 100644
954 --- a/fs/nfs/inode.c
955 +++ b/fs/nfs/inode.c
956 @@ -224,6 +224,8 @@ nfs_find_actor(struct inode *inode, void *opaque)
957
958 if (NFS_FILEID(inode) != fattr->fileid)
959 return 0;
960 + if ((S_IFMT & inode->i_mode) != (S_IFMT & fattr->mode))
961 + return 0;
962 if (nfs_compare_fh(NFS_FH(inode), fh))
963 return 0;
964 if (is_bad_inode(inode) || NFS_STALE(inode))
965 diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
966 index a7a043d272da..51017baa67af 100644
967 --- a/fs/nfs/nfs3proc.c
968 +++ b/fs/nfs/nfs3proc.c
969 @@ -24,14 +24,14 @@
970
971 #define NFSDBG_FACILITY NFSDBG_PROC
972
973 -/* A wrapper to handle the EJUKEBOX and EKEYEXPIRED error messages */
974 +/* A wrapper to handle the EJUKEBOX error messages */
975 static int
976 nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
977 {
978 int res;
979 do {
980 res = rpc_call_sync(clnt, msg, flags);
981 - if (res != -EJUKEBOX && res != -EKEYEXPIRED)
982 + if (res != -EJUKEBOX)
983 break;
984 freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
985 res = -ERESTARTSYS;
986 @@ -44,7 +44,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
987 static int
988 nfs3_async_handle_jukebox(struct rpc_task *task, struct inode *inode)
989 {
990 - if (task->tk_status != -EJUKEBOX && task->tk_status != -EKEYEXPIRED)
991 + if (task->tk_status != -EJUKEBOX)
992 return 0;
993 if (task->tk_status == -EJUKEBOX)
994 nfs_inc_stats(inode, NFSIOS_DELAY);
995 diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
996 index 5acfd9ea8a31..4d64d5b85e56 100644
997 --- a/fs/nfs/nfs4filelayout.c
998 +++ b/fs/nfs/nfs4filelayout.c
999 @@ -122,7 +122,6 @@ static int filelayout_async_handle_error(struct rpc_task *task,
1000 break;
1001 case -NFS4ERR_DELAY:
1002 case -NFS4ERR_GRACE:
1003 - case -EKEYEXPIRED:
1004 rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX);
1005 break;
1006 case -NFS4ERR_RETRY_UNCACHED_REP:
1007 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
1008 index d121c67f87d0..cabddb5da071 100644
1009 --- a/fs/nfs/nfs4proc.c
1010 +++ b/fs/nfs/nfs4proc.c
1011 @@ -319,7 +319,6 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
1012 }
1013 case -NFS4ERR_GRACE:
1014 case -NFS4ERR_DELAY:
1015 - case -EKEYEXPIRED:
1016 ret = nfs4_delay(server->client, &exception->timeout);
1017 if (ret != 0)
1018 break;
1019 @@ -1352,13 +1351,6 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
1020 nfs_inode_find_state_and_recover(state->inode,
1021 stateid);
1022 nfs4_schedule_stateid_recovery(server, state);
1023 - case -EKEYEXPIRED:
1024 - /*
1025 - * User RPCSEC_GSS context has expired.
1026 - * We cannot recover this stateid now, so
1027 - * skip it and allow recovery thread to
1028 - * proceed.
1029 - */
1030 case -ENOMEM:
1031 err = 0;
1032 goto out;
1033 @@ -3924,7 +3916,6 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
1034 case -NFS4ERR_DELAY:
1035 nfs_inc_server_stats(server, NFSIOS_DELAY);
1036 case -NFS4ERR_GRACE:
1037 - case -EKEYEXPIRED:
1038 rpc_delay(task, NFS4_POLL_RETRY_MAX);
1039 task->tk_status = 0;
1040 return -EAGAIN;
1041 @@ -4216,6 +4207,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
1042 status = 0;
1043 }
1044 request->fl_ops->fl_release_private(request);
1045 + request->fl_ops = NULL;
1046 out:
1047 return status;
1048 }
1049 @@ -4871,15 +4863,6 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
1050 nfs4_schedule_stateid_recovery(server, state);
1051 err = 0;
1052 goto out;
1053 - case -EKEYEXPIRED:
1054 - /*
1055 - * User RPCSEC_GSS context has expired.
1056 - * We cannot recover this stateid now, so
1057 - * skip it and allow recovery thread to
1058 - * proceed.
1059 - */
1060 - err = 0;
1061 - goto out;
1062 case -ENOMEM:
1063 case -NFS4ERR_DENIED:
1064 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
1065 diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
1066 index 7f0fcfc1fe9d..e46579471ccc 100644
1067 --- a/fs/nfs/nfs4state.c
1068 +++ b/fs/nfs/nfs4state.c
1069 @@ -1298,14 +1298,6 @@ restart:
1070 /* Mark the file as being 'closed' */
1071 state->state = 0;
1072 break;
1073 - case -EKEYEXPIRED:
1074 - /*
1075 - * User RPCSEC_GSS context has expired.
1076 - * We cannot recover this stateid now, so
1077 - * skip it and allow recovery thread to
1078 - * proceed.
1079 - */
1080 - break;
1081 case -NFS4ERR_ADMIN_REVOKED:
1082 case -NFS4ERR_STALE_STATEID:
1083 case -NFS4ERR_BAD_STATEID:
1084 @@ -1458,14 +1450,6 @@ static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
1085 nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
1086 }
1087
1088 -static void nfs4_warn_keyexpired(const char *s)
1089 -{
1090 - printk_ratelimited(KERN_WARNING "Error: state manager"
1091 - " encountered RPCSEC_GSS session"
1092 - " expired against NFSv4 server %s.\n",
1093 - s);
1094 -}
1095 -
1096 static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
1097 {
1098 switch (error) {
1099 @@ -1497,10 +1481,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
1100 set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
1101 /* Zero session reset errors */
1102 break;
1103 - case -EKEYEXPIRED:
1104 - /* Nothing we can do */
1105 - nfs4_warn_keyexpired(clp->cl_hostname);
1106 - break;
1107 default:
1108 return error;
1109 }
1110 @@ -1745,7 +1725,6 @@ static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
1111 break;
1112
1113 case -EKEYEXPIRED:
1114 - nfs4_warn_keyexpired(clp->cl_hostname);
1115 case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
1116 * in nfs4_exchange_id */
1117 default:
1118 diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
1119 index b63b6f4d14fb..af9947e35fcb 100644
1120 --- a/fs/nfs/proc.c
1121 +++ b/fs/nfs/proc.c
1122 @@ -47,39 +47,6 @@
1123 #define NFSDBG_FACILITY NFSDBG_PROC
1124
1125 /*
1126 - * wrapper to handle the -EKEYEXPIRED error message. This should generally
1127 - * only happen if using krb5 auth and a user's TGT expires. NFSv2 doesn't
1128 - * support the NFSERR_JUKEBOX error code, but we handle this situation in the
1129 - * same way that we handle that error with NFSv3.
1130 - */
1131 -static int
1132 -nfs_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
1133 -{
1134 - int res;
1135 - do {
1136 - res = rpc_call_sync(clnt, msg, flags);
1137 - if (res != -EKEYEXPIRED)
1138 - break;
1139 - freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
1140 - res = -ERESTARTSYS;
1141 - } while (!fatal_signal_pending(current));
1142 - return res;
1143 -}
1144 -
1145 -#define rpc_call_sync(clnt, msg, flags) nfs_rpc_wrapper(clnt, msg, flags)
1146 -
1147 -static int
1148 -nfs_async_handle_expired_key(struct rpc_task *task)
1149 -{
1150 - if (task->tk_status != -EKEYEXPIRED)
1151 - return 0;
1152 - task->tk_status = 0;
1153 - rpc_restart_call(task);
1154 - rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1155 - return 1;
1156 -}
1157 -
1158 -/*
1159 * Bare-bones access to getattr: this is for nfs_read_super.
1160 */
1161 static int
1162 @@ -365,8 +332,6 @@ static void nfs_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlink
1163
1164 static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir)
1165 {
1166 - if (nfs_async_handle_expired_key(task))
1167 - return 0;
1168 nfs_mark_for_revalidate(dir);
1169 return 1;
1170 }
1171 @@ -386,8 +351,6 @@ static int
1172 nfs_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
1173 struct inode *new_dir)
1174 {
1175 - if (nfs_async_handle_expired_key(task))
1176 - return 0;
1177 nfs_mark_for_revalidate(old_dir);
1178 nfs_mark_for_revalidate(new_dir);
1179 return 1;
1180 @@ -641,9 +604,6 @@ nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
1181
1182 static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data)
1183 {
1184 - if (nfs_async_handle_expired_key(task))
1185 - return -EAGAIN;
1186 -
1187 nfs_invalidate_atime(data->inode);
1188 if (task->tk_status >= 0) {
1189 nfs_refresh_inode(data->inode, data->res.fattr);
1190 @@ -668,9 +628,6 @@ static void nfs_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_dat
1191
1192 static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data)
1193 {
1194 - if (nfs_async_handle_expired_key(task))
1195 - return -EAGAIN;
1196 -
1197 if (task->tk_status >= 0)
1198 nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr);
1199 return 0;
1200 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
1201 index f03160106b95..026a873e3f6c 100644
1202 --- a/fs/nfsd/vfs.c
1203 +++ b/fs/nfsd/vfs.c
1204 @@ -297,41 +297,12 @@ commit_metadata(struct svc_fh *fhp)
1205 }
1206
1207 /*
1208 - * Set various file attributes.
1209 - * N.B. After this call fhp needs an fh_put
1210 + * Go over the attributes and take care of the small differences between
1211 + * NFS semantics and what Linux expects.
1212 */
1213 -__be32
1214 -nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
1215 - int check_guard, time_t guardtime)
1216 +static void
1217 +nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
1218 {
1219 - struct dentry *dentry;
1220 - struct inode *inode;
1221 - int accmode = NFSD_MAY_SATTR;
1222 - umode_t ftype = 0;
1223 - __be32 err;
1224 - int host_err;
1225 - int size_change = 0;
1226 -
1227 - if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
1228 - accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
1229 - if (iap->ia_valid & ATTR_SIZE)
1230 - ftype = S_IFREG;
1231 -
1232 - /* Get inode */
1233 - err = fh_verify(rqstp, fhp, ftype, accmode);
1234 - if (err)
1235 - goto out;
1236 -
1237 - dentry = fhp->fh_dentry;
1238 - inode = dentry->d_inode;
1239 -
1240 - /* Ignore any mode updates on symlinks */
1241 - if (S_ISLNK(inode->i_mode))
1242 - iap->ia_valid &= ~ATTR_MODE;
1243 -
1244 - if (!iap->ia_valid)
1245 - goto out;
1246 -
1247 /*
1248 * NFSv2 does not differentiate between "set-[ac]time-to-now"
1249 * which only requires access, and "set-[ac]time-to-X" which
1250 @@ -341,8 +312,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
1251 * convert to "set to now" instead of "set to explicit time"
1252 *
1253 * We only call inode_change_ok as the last test as technically
1254 - * it is not an interface that we should be using. It is only
1255 - * valid if the filesystem does not define it's own i_op->setattr.
1256 + * it is not an interface that we should be using.
1257 */
1258 #define BOTH_TIME_SET (ATTR_ATIME_SET | ATTR_MTIME_SET)
1259 #define MAX_TOUCH_TIME_ERROR (30*60)
1260 @@ -368,30 +338,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
1261 iap->ia_valid &= ~BOTH_TIME_SET;
1262 }
1263 }
1264 -
1265 - /*
1266 - * The size case is special.
1267 - * It changes the file as well as the attributes.
1268 - */
1269 - if (iap->ia_valid & ATTR_SIZE) {
1270 - if (iap->ia_size < inode->i_size) {
1271 - err = nfsd_permission(rqstp, fhp->fh_export, dentry,
1272 - NFSD_MAY_TRUNC|NFSD_MAY_OWNER_OVERRIDE);
1273 - if (err)
1274 - goto out;
1275 - }
1276 -
1277 - host_err = get_write_access(inode);
1278 - if (host_err)
1279 - goto out_nfserr;
1280 -
1281 - size_change = 1;
1282 - host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
1283 - if (host_err) {
1284 - put_write_access(inode);
1285 - goto out_nfserr;
1286 - }
1287 - }
1288
1289 /* sanitize the mode change */
1290 if (iap->ia_valid & ATTR_MODE) {
1291 @@ -414,32 +360,111 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
1292 iap->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID);
1293 }
1294 }
1295 +}
1296
1297 - /* Change the attributes. */
1298 +static __be32
1299 +nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
1300 + struct iattr *iap)
1301 +{
1302 + struct inode *inode = fhp->fh_dentry->d_inode;
1303 + int host_err;
1304
1305 - iap->ia_valid |= ATTR_CTIME;
1306 + if (iap->ia_size < inode->i_size) {
1307 + __be32 err;
1308
1309 - err = nfserr_notsync;
1310 - if (!check_guard || guardtime == inode->i_ctime.tv_sec) {
1311 - host_err = nfsd_break_lease(inode);
1312 - if (host_err)
1313 - goto out_nfserr;
1314 - fh_lock(fhp);
1315 + err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
1316 + NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
1317 + if (err)
1318 + return err;
1319 + }
1320
1321 - host_err = notify_change(dentry, iap);
1322 - err = nfserrno(host_err);
1323 - fh_unlock(fhp);
1324 + host_err = get_write_access(inode);
1325 + if (host_err)
1326 + goto out_nfserrno;
1327 +
1328 + host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
1329 + if (host_err)
1330 + goto out_put_write_access;
1331 + return 0;
1332 +
1333 +out_put_write_access:
1334 + put_write_access(inode);
1335 +out_nfserrno:
1336 + return nfserrno(host_err);
1337 +}
1338 +
1339 +/*
1340 + * Set various file attributes. After this call fhp needs an fh_put.
1341 + */
1342 +__be32
1343 +nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
1344 + int check_guard, time_t guardtime)
1345 +{
1346 + struct dentry *dentry;
1347 + struct inode *inode;
1348 + int accmode = NFSD_MAY_SATTR;
1349 + umode_t ftype = 0;
1350 + __be32 err;
1351 + int host_err;
1352 + int size_change = 0;
1353 +
1354 + if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
1355 + accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
1356 + if (iap->ia_valid & ATTR_SIZE)
1357 + ftype = S_IFREG;
1358 +
1359 + /* Get inode */
1360 + err = fh_verify(rqstp, fhp, ftype, accmode);
1361 + if (err)
1362 + goto out;
1363 +
1364 + dentry = fhp->fh_dentry;
1365 + inode = dentry->d_inode;
1366 +
1367 + /* Ignore any mode updates on symlinks */
1368 + if (S_ISLNK(inode->i_mode))
1369 + iap->ia_valid &= ~ATTR_MODE;
1370 +
1371 + if (!iap->ia_valid)
1372 + goto out;
1373 +
1374 + nfsd_sanitize_attrs(inode, iap);
1375 +
1376 + /*
1377 + * The size case is special, it changes the file in addition to the
1378 + * attributes.
1379 + */
1380 + if (iap->ia_valid & ATTR_SIZE) {
1381 + err = nfsd_get_write_access(rqstp, fhp, iap);
1382 + if (err)
1383 + goto out;
1384 + size_change = 1;
1385 }
1386 +
1387 + iap->ia_valid |= ATTR_CTIME;
1388 +
1389 + if (check_guard && guardtime != inode->i_ctime.tv_sec) {
1390 + err = nfserr_notsync;
1391 + goto out_put_write_access;
1392 + }
1393 +
1394 + host_err = nfsd_break_lease(inode);
1395 + if (host_err)
1396 + goto out_put_write_access_nfserror;
1397 +
1398 + fh_lock(fhp);
1399 + host_err = notify_change(dentry, iap);
1400 + fh_unlock(fhp);
1401 +
1402 +out_put_write_access_nfserror:
1403 + err = nfserrno(host_err);
1404 +out_put_write_access:
1405 if (size_change)
1406 put_write_access(inode);
1407 if (!err)
1408 commit_metadata(fhp);
1409 out:
1410 return err;
1411 -
1412 -out_nfserr:
1413 - err = nfserrno(host_err);
1414 - goto out;
1415 }
1416
1417 #if defined(CONFIG_NFSD_V2_ACL) || \
1418 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
1419 index 205c92280838..6c61f119f608 100644
1420 --- a/fs/proc/inode.c
1421 +++ b/fs/proc/inode.c
1422 @@ -443,12 +443,10 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
1423
1424 struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
1425 {
1426 - struct inode * inode;
1427 + struct inode *inode = new_inode_pseudo(sb);
1428
1429 - inode = iget_locked(sb, de->low_ino);
1430 - if (!inode)
1431 - return NULL;
1432 - if (inode->i_state & I_NEW) {
1433 + if (inode) {
1434 + inode->i_ino = de->low_ino;
1435 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
1436 PROC_I(inode)->fd = 0;
1437 PROC_I(inode)->pde = de;
1438 @@ -477,9 +475,7 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
1439 inode->i_fop = de->proc_fops;
1440 }
1441 }
1442 - unlock_new_inode(inode);
1443 - } else
1444 - pde_put(de);
1445 + }
1446 return inode;
1447 }
1448
1449 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
1450 index 5bab59b1034e..424b381c96f1 100644
1451 --- a/include/linux/binfmts.h
1452 +++ b/include/linux/binfmts.h
1453 @@ -113,9 +113,6 @@ extern void setup_new_exec(struct linux_binprm * bprm);
1454 extern void would_dump(struct linux_binprm *, struct file *);
1455
1456 extern int suid_dumpable;
1457 -#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
1458 -#define SUID_DUMP_USER 1 /* Dump as user of process */
1459 -#define SUID_DUMP_ROOT 2 /* Dump as root */
1460
1461 /* Stack area protections */
1462 #define EXSTACK_DEFAULT 0 /* Whatever the arch defaults to */
1463 diff --git a/include/linux/fs.h b/include/linux/fs.h
1464 index 25c40b9f848a..210c347425e8 100644
1465 --- a/include/linux/fs.h
1466 +++ b/include/linux/fs.h
1467 @@ -915,9 +915,11 @@ static inline loff_t i_size_read(const struct inode *inode)
1468 static inline void i_size_write(struct inode *inode, loff_t i_size)
1469 {
1470 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
1471 + preempt_disable();
1472 write_seqcount_begin(&inode->i_size_seqcount);
1473 inode->i_size = i_size;
1474 write_seqcount_end(&inode->i_size_seqcount);
1475 + preempt_enable();
1476 #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
1477 preempt_disable();
1478 inode->i_size = i_size;
1479 diff --git a/include/linux/sched.h b/include/linux/sched.h
1480 index 3dd0efbb70f2..e132a2d24740 100644
1481 --- a/include/linux/sched.h
1482 +++ b/include/linux/sched.h
1483 @@ -404,6 +404,10 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
1484 extern void set_dumpable(struct mm_struct *mm, int value);
1485 extern int get_dumpable(struct mm_struct *mm);
1486
1487 +#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
1488 +#define SUID_DUMP_USER 1 /* Dump as user of process */
1489 +#define SUID_DUMP_ROOT 2 /* Dump as root */
1490 +
1491 /* mm flags */
1492 /* dumpable bits */
1493 #define MMF_DUMPABLE 0 /* core dump is permitted */
1494 @@ -2466,27 +2470,18 @@ static inline void threadgroup_change_end(struct task_struct *tsk)
1495 *
1496 * Lock the threadgroup @tsk belongs to. No new task is allowed to enter
1497 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
1498 - * perform exec. This is useful for cases where the threadgroup needs to
1499 - * stay stable across blockable operations.
1500 + * change ->group_leader/pid. This is useful for cases where the threadgroup
1501 + * needs to stay stable across blockable operations.
1502 *
1503 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
1504 * synchronization. While held, no new task will be added to threadgroup
1505 * and no existing live task will have its PF_EXITING set.
1506 *
1507 - * During exec, a task goes and puts its thread group through unusual
1508 - * changes. After de-threading, exclusive access is assumed to resources
1509 - * which are usually shared by tasks in the same group - e.g. sighand may
1510 - * be replaced with a new one. Also, the exec'ing task takes over group
1511 - * leader role including its pid. Exclude these changes while locked by
1512 - * grabbing cred_guard_mutex which is used to synchronize exec path.
1513 + * de_thread() does threadgroup_change_{begin|end}() when a non-leader
1514 + * sub-thread becomes a new leader.
1515 */
1516 static inline void threadgroup_lock(struct task_struct *tsk)
1517 {
1518 - /*
1519 - * exec uses exit for de-threading nesting group_rwsem inside
1520 - * cred_guard_mutex. Grab cred_guard_mutex first.
1521 - */
1522 - mutex_lock(&tsk->signal->cred_guard_mutex);
1523 down_write(&tsk->signal->group_rwsem);
1524 }
1525
1526 @@ -2499,7 +2494,6 @@ static inline void threadgroup_lock(struct task_struct *tsk)
1527 static inline void threadgroup_unlock(struct task_struct *tsk)
1528 {
1529 up_write(&tsk->signal->group_rwsem);
1530 - mutex_unlock(&tsk->signal->cred_guard_mutex);
1531 }
1532 #else
1533 static inline void threadgroup_change_begin(struct task_struct *tsk) {}
1534 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
1535 index daf4394d1aba..a1432369be50 100644
1536 --- a/kernel/ptrace.c
1537 +++ b/kernel/ptrace.c
1538 @@ -254,7 +254,8 @@ ok:
1539 smp_rmb();
1540 if (task->mm)
1541 dumpable = get_dumpable(task->mm);
1542 - if (!dumpable && !ptrace_has_cap(task_user_ns(task), mode))
1543 + if (dumpable != SUID_DUMP_USER &&
1544 + !ptrace_has_cap(task_user_ns(task), mode))
1545 return -EPERM;
1546
1547 return security_ptrace_access_check(task, mode);
1548 diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
1549 index fee3752ae8f6..d01adb77449c 100644
1550 --- a/kernel/trace/trace_event_perf.c
1551 +++ b/kernel/trace/trace_event_perf.c
1552 @@ -26,7 +26,7 @@ static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
1553 {
1554 /* The ftrace function trace is allowed only for root. */
1555 if (ftrace_event_is_function(tp_event) &&
1556 - perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
1557 + perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
1558 return -EPERM;
1559
1560 /* No tracing, just counting, so no obvious leak */
1561 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
1562 index a28a2111297e..f21486a2ac48 100644
1563 --- a/net/sunrpc/clnt.c
1564 +++ b/net/sunrpc/clnt.c
1565 @@ -1338,6 +1338,7 @@ call_refreshresult(struct rpc_task *task)
1566 rpc_delay(task, 3*HZ);
1567 case -EAGAIN:
1568 status = -EACCES;
1569 + case -EKEYEXPIRED:
1570 if (!task->tk_cred_retry)
1571 break;
1572 task->tk_cred_retry--;
1573 diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
1574 index 79064471cd01..31f981d700a3 100644
1575 --- a/net/sunrpc/xprtsock.c
1576 +++ b/net/sunrpc/xprtsock.c
1577 @@ -390,8 +390,10 @@ static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen,
1578 return kernel_sendmsg(sock, &msg, NULL, 0, 0);
1579 }
1580
1581 -static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more)
1582 +static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy)
1583 {
1584 + ssize_t (*do_sendpage)(struct socket *sock, struct page *page,
1585 + int offset, size_t size, int flags);
1586 struct page **ppage;
1587 unsigned int remainder;
1588 int err, sent = 0;
1589 @@ -400,6 +402,9 @@ static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned i
1590 base += xdr->page_base;
1591 ppage = xdr->pages + (base >> PAGE_SHIFT);
1592 base &= ~PAGE_MASK;
1593 + do_sendpage = sock->ops->sendpage;
1594 + if (!zerocopy)
1595 + do_sendpage = sock_no_sendpage;
1596 for(;;) {
1597 unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
1598 int flags = XS_SENDMSG_FLAGS;
1599 @@ -407,7 +412,7 @@ static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned i
1600 remainder -= len;
1601 if (remainder != 0 || more)
1602 flags |= MSG_MORE;
1603 - err = sock->ops->sendpage(sock, *ppage, base, len, flags);
1604 + err = do_sendpage(sock, *ppage, base, len, flags);
1605 if (remainder == 0 || err != len)
1606 break;
1607 sent += err;
1608 @@ -428,9 +433,10 @@ static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned i
1609 * @addrlen: UDP only -- length of destination address
1610 * @xdr: buffer containing this request
1611 * @base: starting position in the buffer
1612 + * @zerocopy: true if it is safe to use sendpage()
1613 *
1614 */
1615 -static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
1616 +static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy)
1617 {
1618 unsigned int remainder = xdr->len - base;
1619 int err, sent = 0;
1620 @@ -458,7 +464,7 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
1621 if (base < xdr->page_len) {
1622 unsigned int len = xdr->page_len - base;
1623 remainder -= len;
1624 - err = xs_send_pagedata(sock, xdr, base, remainder != 0);
1625 + err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy);
1626 if (remainder == 0 || err != len)
1627 goto out;
1628 sent += err;
1629 @@ -561,7 +567,7 @@ static int xs_local_send_request(struct rpc_task *task)
1630 req->rq_svec->iov_base, req->rq_svec->iov_len);
1631
1632 status = xs_sendpages(transport->sock, NULL, 0,
1633 - xdr, req->rq_bytes_sent);
1634 + xdr, req->rq_bytes_sent, true);
1635 dprintk("RPC: %s(%u) = %d\n",
1636 __func__, xdr->len - req->rq_bytes_sent, status);
1637 if (likely(status >= 0)) {
1638 @@ -617,7 +623,7 @@ static int xs_udp_send_request(struct rpc_task *task)
1639 status = xs_sendpages(transport->sock,
1640 xs_addr(xprt),
1641 xprt->addrlen, xdr,
1642 - req->rq_bytes_sent);
1643 + req->rq_bytes_sent, true);
1644
1645 dprintk("RPC: xs_udp_send_request(%u) = %d\n",
1646 xdr->len - req->rq_bytes_sent, status);
1647 @@ -688,6 +694,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
1648 struct rpc_xprt *xprt = req->rq_xprt;
1649 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1650 struct xdr_buf *xdr = &req->rq_snd_buf;
1651 + bool zerocopy = true;
1652 int status;
1653
1654 xs_encode_stream_record_marker(&req->rq_snd_buf);
1655 @@ -695,13 +702,20 @@ static int xs_tcp_send_request(struct rpc_task *task)
1656 xs_pktdump("packet data:",
1657 req->rq_svec->iov_base,
1658 req->rq_svec->iov_len);
1659 + /* Don't use zero copy if this is a resend. If the RPC call
1660 + * completes while the socket holds a reference to the pages,
1661 + * then we may end up resending corrupted data.
1662 + */
1663 + if (task->tk_flags & RPC_TASK_SENT)
1664 + zerocopy = false;
1665
1666 /* Continue transmitting the packet/record. We must be careful
1667 * to cope with writespace callbacks arriving _after_ we have
1668 * called sendmsg(). */
1669 while (1) {
1670 status = xs_sendpages(transport->sock,
1671 - NULL, 0, xdr, req->rq_bytes_sent);
1672 + NULL, 0, xdr, req->rq_bytes_sent,
1673 + zerocopy);
1674
1675 dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
1676 xdr->len - req->rq_bytes_sent, status);
1677 diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
1678 index d8edff209bf3..d6aab27c8584 100644
1679 --- a/security/integrity/ima/ima_policy.c
1680 +++ b/security/integrity/ima/ima_policy.c
1681 @@ -62,7 +62,6 @@ static struct ima_measure_rule_entry default_rules[] = {
1682 {.action = DONT_MEASURE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC},
1683 {.action = DONT_MEASURE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC},
1684 {.action = DONT_MEASURE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC},
1685 - {.action = DONT_MEASURE,.fsmagic = RAMFS_MAGIC,.flags = IMA_FSMAGIC},
1686 {.action = DONT_MEASURE,.fsmagic = SECURITYFS_MAGIC,.flags = IMA_FSMAGIC},
1687 {.action = DONT_MEASURE,.fsmagic = SELINUX_MAGIC,.flags = IMA_FSMAGIC},
1688 {.action = MEASURE,.func = FILE_MMAP,.mask = MAY_EXEC,
1689 diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
1690 index 29cc8e162b02..a7d6a52a4f81 100644
1691 --- a/sound/isa/msnd/msnd_pinnacle.c
1692 +++ b/sound/isa/msnd/msnd_pinnacle.c
1693 @@ -73,9 +73,11 @@
1694 #ifdef MSND_CLASSIC
1695 # include "msnd_classic.h"
1696 # define LOGNAME "msnd_classic"
1697 +# define DEV_NAME "msnd-classic"
1698 #else
1699 # include "msnd_pinnacle.h"
1700 # define LOGNAME "snd_msnd_pinnacle"
1701 +# define DEV_NAME "msnd-pinnacle"
1702 #endif
1703
1704 static void __devinit set_default_audio_parameters(struct snd_msnd *chip)
1705 @@ -1068,8 +1070,6 @@ static int __devexit snd_msnd_isa_remove(struct device *pdev, unsigned int dev)
1706 return 0;
1707 }
1708
1709 -#define DEV_NAME "msnd-pinnacle"
1710 -
1711 static struct isa_driver snd_msnd_driver = {
1712 .match = snd_msnd_isa_match,
1713 .probe = snd_msnd_isa_probe,
1714 diff --git a/sound/usb/6fire/chip.c b/sound/usb/6fire/chip.c
1715 index fc8cc823e438..f8033485db93 100644
1716 --- a/sound/usb/6fire/chip.c
1717 +++ b/sound/usb/6fire/chip.c
1718 @@ -101,7 +101,7 @@ static int __devinit usb6fire_chip_probe(struct usb_interface *intf,
1719 usb_set_intfdata(intf, chips[i]);
1720 mutex_unlock(&register_mutex);
1721 return 0;
1722 - } else if (regidx < 0)
1723 + } else if (!devices[i] && regidx < 0)
1724 regidx = i;
1725 }
1726 if (regidx < 0) {