Contents of /trunk/kernel-alx-legacy/patches-4.9/0421-4.9.322-all-fixes.patch
Revision 3723
Mon Oct 24 14:08:34 2022 UTC by niro
File size: 46717 byte(s)
-linux-4.9.322
1 | diff --git a/Makefile b/Makefile |
2 | index 4b9b412d5fb82..bd4c898a9940e 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 4 |
7 | PATCHLEVEL = 9 |
8 | -SUBLEVEL = 321 |
9 | +SUBLEVEL = 322 |
10 | EXTRAVERSION = |
11 | NAME = Roaring Lionus |
12 | |
13 | diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c |
14 | index b4ec8d1b0befd..dc8f41deea1e1 100644 |
15 | --- a/arch/arm/xen/p2m.c |
16 | +++ b/arch/arm/xen/p2m.c |
17 | @@ -61,11 +61,12 @@ out: |
18 | |
19 | unsigned long __pfn_to_mfn(unsigned long pfn) |
20 | { |
21 | - struct rb_node *n = phys_to_mach.rb_node; |
22 | + struct rb_node *n; |
23 | struct xen_p2m_entry *entry; |
24 | unsigned long irqflags; |
25 | |
26 | read_lock_irqsave(&p2m_lock, irqflags); |
27 | + n = phys_to_mach.rb_node; |
28 | while (n) { |
29 | entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); |
30 | if (entry->pfn <= pfn && |
31 | @@ -151,10 +152,11 @@ bool __set_phys_to_machine_multi(unsigned long pfn, |
32 | int rc; |
33 | unsigned long irqflags; |
34 | struct xen_p2m_entry *p2m_entry; |
35 | - struct rb_node *n = phys_to_mach.rb_node; |
36 | + struct rb_node *n; |
37 | |
38 | if (mfn == INVALID_P2M_ENTRY) { |
39 | write_lock_irqsave(&p2m_lock, irqflags); |
40 | + n = phys_to_mach.rb_node; |
41 | while (n) { |
42 | p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); |
43 | if (p2m_entry->pfn <= pfn && |
44 | diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h |
45 | index b7067590f15c9..8cb3c929641af 100644 |
46 | --- a/arch/powerpc/include/asm/ppc-opcode.h |
47 | +++ b/arch/powerpc/include/asm/ppc-opcode.h |
48 | @@ -134,6 +134,7 @@ |
49 | #define PPC_INST_COPY 0x7c00060c |
50 | #define PPC_INST_COPY_FIRST 0x7c20060c |
51 | #define PPC_INST_CP_ABORT 0x7c00068c |
52 | +#define PPC_INST_DARN 0x7c0005e6 |
53 | #define PPC_INST_DCBA 0x7c0005ec |
54 | #define PPC_INST_DCBA_MASK 0xfc0007fe |
55 | #define PPC_INST_DCBAL 0x7c2005ec |
56 | @@ -328,6 +329,9 @@ |
57 | |
58 | /* Deal with instructions that older assemblers aren't aware of */ |
59 | #define PPC_CP_ABORT stringify_in_c(.long PPC_INST_CP_ABORT) |
60 | +#define PPC_DARN(t, l) stringify_in_c(.long PPC_INST_DARN | \ |
61 | + ___PPC_RT(t) | \ |
62 | + (((l) & 0x3) << 16)) |
63 | #define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \ |
64 | __PPC_RA(a) | __PPC_RB(b)) |
65 | #define PPC_DCBZL(a, b) stringify_in_c(.long PPC_INST_DCBZL | \ |
66 | diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h |
67 | index da7c843ac7f15..e98e14a5db4dd 100644 |
68 | --- a/arch/powerpc/platforms/powernv/powernv.h |
69 | +++ b/arch/powerpc/platforms/powernv/powernv.h |
70 | @@ -27,4 +27,6 @@ extern void opal_event_shutdown(void); |
71 | |
72 | bool cpu_core_split_required(void); |
73 | |
74 | +void pnv_rng_init(void); |
75 | + |
76 | #endif /* _POWERNV_H */ |
77 | diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c |
78 | index 5dcbdea1afac0..dc13ed3f6c2b2 100644 |
79 | --- a/arch/powerpc/platforms/powernv/rng.c |
80 | +++ b/arch/powerpc/platforms/powernv/rng.c |
81 | @@ -16,11 +16,14 @@ |
82 | #include <linux/slab.h> |
83 | #include <linux/smp.h> |
84 | #include <asm/archrandom.h> |
85 | +#include <asm/cputable.h> |
86 | #include <asm/io.h> |
87 | #include <asm/prom.h> |
88 | #include <asm/machdep.h> |
89 | #include <asm/smp.h> |
90 | +#include "powernv.h" |
91 | |
92 | +#define DARN_ERR 0xFFFFFFFFFFFFFFFFul |
93 | |
94 | struct powernv_rng { |
95 | void __iomem *regs; |
96 | @@ -30,7 +33,6 @@ struct powernv_rng { |
97 | |
98 | static DEFINE_PER_CPU(struct powernv_rng *, powernv_rng); |
99 | |
100 | - |
101 | int powernv_hwrng_present(void) |
102 | { |
103 | struct powernv_rng *rng; |
104 | @@ -45,7 +47,11 @@ static unsigned long rng_whiten(struct powernv_rng *rng, unsigned long val) |
105 | unsigned long parity; |
106 | |
107 | /* Calculate the parity of the value */ |
108 | - asm ("popcntd %0,%1" : "=r" (parity) : "r" (val)); |
109 | + asm (".machine push; \ |
110 | + .machine power7; \ |
111 | + popcntd %0,%1; \ |
112 | + .machine pop;" |
113 | + : "=r" (parity) : "r" (val)); |
114 | |
115 | /* xor our value with the previous mask */ |
116 | val ^= rng->mask; |
117 | @@ -67,6 +73,38 @@ int powernv_get_random_real_mode(unsigned long *v) |
118 | return 1; |
119 | } |
120 | |
121 | +static int powernv_get_random_darn(unsigned long *v) |
122 | +{ |
123 | + unsigned long val; |
124 | + |
125 | + /* Using DARN with L=1 - 64-bit conditioned random number */ |
126 | + asm volatile(PPC_DARN(%0, 1) : "=r"(val)); |
127 | + |
128 | + if (val == DARN_ERR) |
129 | + return 0; |
130 | + |
131 | + *v = val; |
132 | + |
133 | + return 1; |
134 | +} |
135 | + |
136 | +static int __init initialise_darn(void) |
137 | +{ |
138 | + unsigned long val; |
139 | + int i; |
140 | + |
141 | + if (!cpu_has_feature(CPU_FTR_ARCH_300)) |
142 | + return -ENODEV; |
143 | + |
144 | + for (i = 0; i < 10; i++) { |
145 | + if (powernv_get_random_darn(&val)) { |
146 | + ppc_md.get_random_seed = powernv_get_random_darn; |
147 | + return 0; |
148 | + } |
149 | + } |
150 | + return -EIO; |
151 | +} |
152 | + |
153 | int powernv_get_random_long(unsigned long *v) |
154 | { |
155 | struct powernv_rng *rng; |
156 | @@ -88,7 +126,7 @@ static __init void rng_init_per_cpu(struct powernv_rng *rng, |
157 | |
158 | chip_id = of_get_ibm_chip_id(dn); |
159 | if (chip_id == -1) |
160 | - pr_warn("No ibm,chip-id found for %s.\n", dn->full_name); |
161 | + pr_warn("No ibm,chip-id found for %pOF.\n", dn); |
162 | |
163 | for_each_possible_cpu(cpu) { |
164 | if (per_cpu(powernv_rng, cpu) == NULL || |
165 | @@ -126,30 +164,55 @@ static __init int rng_create(struct device_node *dn) |
166 | |
167 | rng_init_per_cpu(rng, dn); |
168 | |
169 | - pr_info_once("Registering arch random hook.\n"); |
170 | - |
171 | ppc_md.get_random_seed = powernv_get_random_long; |
172 | |
173 | return 0; |
174 | } |
175 | |
176 | -static __init int rng_init(void) |
177 | +static int __init pnv_get_random_long_early(unsigned long *v) |
178 | { |
179 | struct device_node *dn; |
180 | - int rc; |
181 | + |
182 | + if (!slab_is_available()) |
183 | + return 0; |
184 | + |
185 | + if (cmpxchg(&ppc_md.get_random_seed, pnv_get_random_long_early, |
186 | + NULL) != pnv_get_random_long_early) |
187 | + return 0; |
188 | |
189 | for_each_compatible_node(dn, NULL, "ibm,power-rng") { |
190 | - rc = rng_create(dn); |
191 | - if (rc) { |
192 | - pr_err("Failed creating rng for %s (%d).\n", |
193 | - dn->full_name, rc); |
194 | + if (rng_create(dn)) |
195 | continue; |
196 | - } |
197 | - |
198 | /* Create devices for hwrng driver */ |
199 | of_platform_device_create(dn, NULL, NULL); |
200 | } |
201 | |
202 | + if (!ppc_md.get_random_seed) |
203 | + return 0; |
204 | + return ppc_md.get_random_seed(v); |
205 | +} |
206 | + |
207 | +void __init pnv_rng_init(void) |
208 | +{ |
209 | + struct device_node *dn; |
210 | + |
211 | + /* Prefer darn over the rest. */ |
212 | + if (!initialise_darn()) |
213 | + return; |
214 | + |
215 | + dn = of_find_compatible_node(NULL, NULL, "ibm,power-rng"); |
216 | + if (dn) |
217 | + ppc_md.get_random_seed = pnv_get_random_long_early; |
218 | + |
219 | + of_node_put(dn); |
220 | +} |
221 | + |
222 | +static int __init pnv_rng_late_init(void) |
223 | +{ |
224 | + unsigned long v; |
225 | + /* In case it wasn't called during init for some other reason. */ |
226 | + if (ppc_md.get_random_seed == pnv_get_random_long_early) |
227 | + pnv_get_random_long_early(&v); |
228 | return 0; |
229 | } |
230 | -machine_subsys_initcall(powernv, rng_init); |
231 | +machine_subsys_initcall(powernv, pnv_rng_late_init); |
232 | diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c |
233 | index b77d5eed95205..e97b714d30d76 100644 |
234 | --- a/arch/powerpc/platforms/powernv/setup.c |
235 | +++ b/arch/powerpc/platforms/powernv/setup.c |
236 | @@ -168,6 +168,8 @@ static void __init pnv_setup_arch(void) |
237 | powersave_nap = 1; |
238 | |
239 | /* XXX PMCS */ |
240 | + |
241 | + pnv_rng_init(); |
242 | } |
243 | |
244 | static void __init pnv_init(void) |
245 | diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c |
246 | index 17ea0ba50278d..9c907c1a4a1c0 100644 |
247 | --- a/drivers/block/xen-blkfront.c |
248 | +++ b/drivers/block/xen-blkfront.c |
249 | @@ -144,6 +144,10 @@ static unsigned int xen_blkif_max_ring_order; |
250 | module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO); |
251 | MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring"); |
252 | |
253 | +static bool __read_mostly xen_blkif_trusted = true; |
254 | +module_param_named(trusted, xen_blkif_trusted, bool, 0644); |
255 | +MODULE_PARM_DESC(trusted, "Is the backend trusted"); |
256 | + |
257 | #define BLK_RING_SIZE(info) \ |
258 | __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages) |
259 | |
260 | @@ -206,6 +210,7 @@ struct blkfront_info |
261 | unsigned int discard_granularity; |
262 | unsigned int discard_alignment; |
263 | unsigned int feature_persistent:1; |
264 | + unsigned int bounce:1; |
265 | /* Number of 4KB segments handled */ |
266 | unsigned int max_indirect_segments; |
267 | int is_ready; |
268 | @@ -296,8 +301,8 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num) |
269 | if (!gnt_list_entry) |
270 | goto out_of_memory; |
271 | |
272 | - if (info->feature_persistent) { |
273 | - granted_page = alloc_page(GFP_NOIO); |
274 | + if (info->bounce) { |
275 | + granted_page = alloc_page(GFP_NOIO | __GFP_ZERO); |
276 | if (!granted_page) { |
277 | kfree(gnt_list_entry); |
278 | goto out_of_memory; |
279 | @@ -316,7 +321,7 @@ out_of_memory: |
280 | list_for_each_entry_safe(gnt_list_entry, n, |
281 | &rinfo->grants, node) { |
282 | list_del(&gnt_list_entry->node); |
283 | - if (info->feature_persistent) |
284 | + if (info->bounce) |
285 | __free_page(gnt_list_entry->page); |
286 | kfree(gnt_list_entry); |
287 | i--; |
288 | @@ -362,7 +367,7 @@ static struct grant *get_grant(grant_ref_t *gref_head, |
289 | /* Assign a gref to this page */ |
290 | gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); |
291 | BUG_ON(gnt_list_entry->gref == -ENOSPC); |
292 | - if (info->feature_persistent) |
293 | + if (info->bounce) |
294 | grant_foreign_access(gnt_list_entry, info); |
295 | else { |
296 | /* Grant access to the GFN passed by the caller */ |
297 | @@ -386,7 +391,7 @@ static struct grant *get_indirect_grant(grant_ref_t *gref_head, |
298 | /* Assign a gref to this page */ |
299 | gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); |
300 | BUG_ON(gnt_list_entry->gref == -ENOSPC); |
301 | - if (!info->feature_persistent) { |
302 | + if (!info->bounce) { |
303 | struct page *indirect_page; |
304 | |
305 | /* Fetch a pre-allocated page to use for indirect grefs */ |
306 | @@ -701,7 +706,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri |
307 | .grant_idx = 0, |
308 | .segments = NULL, |
309 | .rinfo = rinfo, |
310 | - .need_copy = rq_data_dir(req) && info->feature_persistent, |
311 | + .need_copy = rq_data_dir(req) && info->bounce, |
312 | }; |
313 | |
314 | /* |
315 | @@ -1015,11 +1020,12 @@ static void xlvbd_flush(struct blkfront_info *info) |
316 | { |
317 | blk_queue_write_cache(info->rq, info->feature_flush ? true : false, |
318 | info->feature_fua ? true : false); |
319 | - pr_info("blkfront: %s: %s %s %s %s %s\n", |
320 | + pr_info("blkfront: %s: %s %s %s %s %s %s %s\n", |
321 | info->gd->disk_name, flush_info(info), |
322 | "persistent grants:", info->feature_persistent ? |
323 | "enabled;" : "disabled;", "indirect descriptors:", |
324 | - info->max_indirect_segments ? "enabled;" : "disabled;"); |
325 | + info->max_indirect_segments ? "enabled;" : "disabled;", |
326 | + "bounce buffer:", info->bounce ? "enabled" : "disabled;"); |
327 | } |
328 | |
329 | static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset) |
330 | @@ -1254,7 +1260,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) |
331 | if (!list_empty(&rinfo->indirect_pages)) { |
332 | struct page *indirect_page, *n; |
333 | |
334 | - BUG_ON(info->feature_persistent); |
335 | + BUG_ON(info->bounce); |
336 | list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) { |
337 | list_del(&indirect_page->lru); |
338 | __free_page(indirect_page); |
339 | @@ -1271,7 +1277,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) |
340 | continue; |
341 | |
342 | rinfo->persistent_gnts_c--; |
343 | - if (info->feature_persistent) |
344 | + if (info->bounce) |
345 | __free_page(persistent_gnt->page); |
346 | kfree(persistent_gnt); |
347 | } |
348 | @@ -1291,7 +1297,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) |
349 | for (j = 0; j < segs; j++) { |
350 | persistent_gnt = rinfo->shadow[i].grants_used[j]; |
351 | gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); |
352 | - if (info->feature_persistent) |
353 | + if (info->bounce) |
354 | __free_page(persistent_gnt->page); |
355 | kfree(persistent_gnt); |
356 | } |
357 | @@ -1481,7 +1487,7 @@ static int blkif_completion(unsigned long *id, |
358 | data.s = s; |
359 | num_sg = s->num_sg; |
360 | |
361 | - if (bret->operation == BLKIF_OP_READ && info->feature_persistent) { |
362 | + if (bret->operation == BLKIF_OP_READ && info->bounce) { |
363 | for_each_sg(s->sg, sg, num_sg, i) { |
364 | BUG_ON(sg->offset + sg->length > PAGE_SIZE); |
365 | |
366 | @@ -1540,7 +1546,7 @@ static int blkif_completion(unsigned long *id, |
367 | * Add the used indirect page back to the list of |
368 | * available pages for indirect grefs. |
369 | */ |
370 | - if (!info->feature_persistent) { |
371 | + if (!info->bounce) { |
372 | indirect_page = s->indirect_grants[i]->page; |
373 | list_add(&indirect_page->lru, &rinfo->indirect_pages); |
374 | } |
375 | @@ -1729,7 +1735,7 @@ static int setup_blkring(struct xenbus_device *dev, |
376 | for (i = 0; i < info->nr_ring_pages; i++) |
377 | rinfo->ring_ref[i] = GRANT_INVALID_REF; |
378 | |
379 | - sring = alloc_pages_exact(ring_size, GFP_NOIO); |
380 | + sring = alloc_pages_exact(ring_size, GFP_NOIO | __GFP_ZERO); |
381 | if (!sring) { |
382 | xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); |
383 | return -ENOMEM; |
384 | @@ -1822,6 +1828,13 @@ static int talk_to_blkback(struct xenbus_device *dev, |
385 | int err; |
386 | unsigned int i, max_page_order = 0; |
387 | unsigned int ring_page_order = 0; |
388 | + unsigned int trusted; |
389 | + |
390 | + /* Check if backend is trusted. */ |
391 | + err = xenbus_scanf(XBT_NIL, dev->nodename, "trusted", "%u", &trusted); |
392 | + if (err < 0) |
393 | + trusted = 1; |
394 | + info->bounce = !xen_blkif_trusted || !trusted; |
395 | |
396 | err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, |
397 | "max-ring-page-order", "%u", &max_page_order); |
398 | @@ -2301,17 +2314,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) |
399 | if (err) |
400 | goto out_of_memory; |
401 | |
402 | - if (!info->feature_persistent && info->max_indirect_segments) { |
403 | + if (!info->bounce && info->max_indirect_segments) { |
404 | /* |
405 | - * We are using indirect descriptors but not persistent |
406 | - * grants, we need to allocate a set of pages that can be |
407 | + * We are using indirect descriptors but don't have a bounce |
408 | + * buffer, we need to allocate a set of pages that can be |
409 | * used for mapping indirect grefs |
410 | */ |
411 | int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info); |
412 | |
413 | BUG_ON(!list_empty(&rinfo->indirect_pages)); |
414 | for (i = 0; i < num; i++) { |
415 | - struct page *indirect_page = alloc_page(GFP_NOIO); |
416 | + struct page *indirect_page = alloc_page(GFP_NOIO | |
417 | + __GFP_ZERO); |
418 | if (!indirect_page) |
419 | goto out_of_memory; |
420 | list_add(&indirect_page->lru, &rinfo->indirect_pages); |
421 | @@ -2409,6 +2423,8 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) |
422 | info->feature_persistent = 0; |
423 | else |
424 | info->feature_persistent = persistent; |
425 | + if (info->feature_persistent) |
426 | + info->bounce = true; |
427 | |
428 | err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, |
429 | "feature-max-indirect-segments", "%u", |
430 | diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c |
431 | index 1f643782ce047..c9cfc958e853b 100644 |
432 | --- a/drivers/hwmon/ibmaem.c |
433 | +++ b/drivers/hwmon/ibmaem.c |
434 | @@ -563,7 +563,7 @@ static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle) |
435 | |
436 | res = platform_device_add(data->pdev); |
437 | if (res) |
438 | - goto ipmi_err; |
439 | + goto dev_add_err; |
440 | |
441 | platform_set_drvdata(data->pdev, data); |
442 | |
443 | @@ -611,7 +611,9 @@ hwmon_reg_err: |
444 | ipmi_destroy_user(data->ipmi.user); |
445 | ipmi_err: |
446 | platform_set_drvdata(data->pdev, NULL); |
447 | - platform_device_unregister(data->pdev); |
448 | + platform_device_del(data->pdev); |
449 | +dev_add_err: |
450 | + platform_device_put(data->pdev); |
451 | dev_err: |
452 | ida_simple_remove(&aem_ida, data->id); |
453 | id_err: |
454 | @@ -703,7 +705,7 @@ static int aem_init_aem2_inst(struct aem_ipmi_data *probe, |
455 | |
456 | res = platform_device_add(data->pdev); |
457 | if (res) |
458 | - goto ipmi_err; |
459 | + goto dev_add_err; |
460 | |
461 | platform_set_drvdata(data->pdev, data); |
462 | |
463 | @@ -751,7 +753,9 @@ hwmon_reg_err: |
464 | ipmi_destroy_user(data->ipmi.user); |
465 | ipmi_err: |
466 | platform_set_drvdata(data->pdev, NULL); |
467 | - platform_device_unregister(data->pdev); |
468 | + platform_device_del(data->pdev); |
469 | +dev_add_err: |
470 | + platform_device_put(data->pdev); |
471 | dev_err: |
472 | ida_simple_remove(&aem_ida, data->id); |
473 | id_err: |
474 | diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c |
475 | index 1e9321410bbb6..b396e78b1b6d3 100644 |
476 | --- a/drivers/md/raid5.c |
477 | +++ b/drivers/md/raid5.c |
478 | @@ -7322,6 +7322,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) |
479 | */ |
480 | if (rdev->saved_raid_disk >= 0 && |
481 | rdev->saved_raid_disk >= first && |
482 | + rdev->saved_raid_disk <= last && |
483 | conf->disks[rdev->saved_raid_disk].rdev == NULL) |
484 | first = rdev->saved_raid_disk; |
485 | |
486 | diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c |
487 | index 578d8e12e2d21..8ec0671f97113 100644 |
488 | --- a/drivers/net/bonding/bond_3ad.c |
489 | +++ b/drivers/net/bonding/bond_3ad.c |
490 | @@ -2163,7 +2163,8 @@ void bond_3ad_unbind_slave(struct slave *slave) |
491 | temp_aggregator->num_of_ports--; |
492 | if (__agg_active_ports(temp_aggregator) == 0) { |
493 | select_new_active_agg = temp_aggregator->is_active; |
494 | - ad_clear_agg(temp_aggregator); |
495 | + if (temp_aggregator->num_of_ports == 0) |
496 | + ad_clear_agg(temp_aggregator); |
497 | if (select_new_active_agg) { |
498 | netdev_info(bond->dev, "Removing an active aggregator\n"); |
499 | /* select new active aggregator */ |
500 | diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c |
501 | index 1f8fbd7776fb4..df81b525156fe 100644 |
502 | --- a/drivers/net/bonding/bond_alb.c |
503 | +++ b/drivers/net/bonding/bond_alb.c |
504 | @@ -1264,12 +1264,12 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled) |
505 | return res; |
506 | |
507 | if (rlb_enabled) { |
508 | - bond->alb_info.rlb_enabled = 1; |
509 | res = rlb_initialize(bond); |
510 | if (res) { |
511 | tlb_deinitialize(bond); |
512 | return res; |
513 | } |
514 | + bond->alb_info.rlb_enabled = 1; |
515 | } else { |
516 | bond->alb_info.rlb_enabled = 0; |
517 | } |
518 | diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c |
519 | index b306210b02b7b..b91c7c7b253c6 100644 |
520 | --- a/drivers/net/caif/caif_virtio.c |
521 | +++ b/drivers/net/caif/caif_virtio.c |
522 | @@ -727,13 +727,21 @@ static int cfv_probe(struct virtio_device *vdev) |
523 | /* Carrier is off until netdevice is opened */ |
524 | netif_carrier_off(netdev); |
525 | |
526 | + /* serialize netdev register + virtio_device_ready() with ndo_open() */ |
527 | + rtnl_lock(); |
528 | + |
529 | /* register Netdev */ |
530 | - err = register_netdev(netdev); |
531 | + err = register_netdevice(netdev); |
532 | if (err) { |
533 | + rtnl_unlock(); |
534 | dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err); |
535 | goto err; |
536 | } |
537 | |
538 | + virtio_device_ready(vdev); |
539 | + |
540 | + rtnl_unlock(); |
541 | + |
542 | debugfs_init(cfv); |
543 | |
544 | return 0; |
545 | diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c |
546 | index 738d10fc595c0..48938d00ff7e8 100644 |
547 | --- a/drivers/net/usb/ax88179_178a.c |
548 | +++ b/drivers/net/usb/ax88179_178a.c |
549 | @@ -1373,6 +1373,42 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) |
550 | * are bundled into this buffer and where we can find an array of |
551 | * per-packet metadata (which contains elements encoded into u16). |
552 | */ |
553 | + |
554 | + /* SKB contents for current firmware: |
555 | + * <packet 1> <padding> |
556 | + * ... |
557 | + * <packet N> <padding> |
558 | + * <per-packet metadata entry 1> <dummy header> |
559 | + * ... |
560 | + * <per-packet metadata entry N> <dummy header> |
561 | + * <padding2> <rx_hdr> |
562 | + * |
563 | + * where: |
564 | + * <packet N> contains pkt_len bytes: |
565 | + * 2 bytes of IP alignment pseudo header |
566 | + * packet received |
567 | + * <per-packet metadata entry N> contains 4 bytes: |
568 | + * pkt_len and fields AX_RXHDR_* |
569 | + * <padding> 0-7 bytes to terminate at |
570 | + * 8 bytes boundary (64-bit). |
571 | + * <padding2> 4 bytes to make rx_hdr terminate at |
572 | + * 8 bytes boundary (64-bit) |
573 | + * <dummy-header> contains 4 bytes: |
574 | + * pkt_len=0 and AX_RXHDR_DROP_ERR |
575 | + * <rx-hdr> contains 4 bytes: |
576 | + * pkt_cnt and hdr_off (offset of |
577 | + * <per-packet metadata entry 1>) |
578 | + * |
579 | + * pkt_cnt is number of entrys in the per-packet metadata. |
580 | + * In current firmware there is 2 entrys per packet. |
581 | + * The first points to the packet and the |
582 | + * second is a dummy header. |
583 | + * This was done probably to align fields in 64-bit and |
584 | + * maintain compatibility with old firmware. |
585 | + * This code assumes that <dummy header> and <padding2> are |
586 | + * optional. |
587 | + */ |
588 | + |
589 | if (skb->len < 4) |
590 | return 0; |
591 | skb_trim(skb, skb->len - 4); |
592 | @@ -1387,51 +1423,66 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) |
593 | /* Make sure that the bounds of the metadata array are inside the SKB |
594 | * (and in front of the counter at the end). |
595 | */ |
596 | - if (pkt_cnt * 2 + hdr_off > skb->len) |
597 | + if (pkt_cnt * 4 + hdr_off > skb->len) |
598 | return 0; |
599 | pkt_hdr = (u32 *)(skb->data + hdr_off); |
600 | |
601 | /* Packets must not overlap the metadata array */ |
602 | skb_trim(skb, hdr_off); |
603 | |
604 | - for (; ; pkt_cnt--, pkt_hdr++) { |
605 | + for (; pkt_cnt > 0; pkt_cnt--, pkt_hdr++) { |
606 | + u16 pkt_len_plus_padd; |
607 | u16 pkt_len; |
608 | |
609 | le32_to_cpus(pkt_hdr); |
610 | pkt_len = (*pkt_hdr >> 16) & 0x1fff; |
611 | + pkt_len_plus_padd = (pkt_len + 7) & 0xfff8; |
612 | |
613 | - if (pkt_len > skb->len) |
614 | + /* Skip dummy header used for alignment |
615 | + */ |
616 | + if (pkt_len == 0) |
617 | + continue; |
618 | + |
619 | + if (pkt_len_plus_padd > skb->len) |
620 | return 0; |
621 | |
622 | /* Check CRC or runt packet */ |
623 | - if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) && |
624 | - pkt_len >= 2 + ETH_HLEN) { |
625 | - bool last = (pkt_cnt == 0); |
626 | - |
627 | - if (last) { |
628 | - ax_skb = skb; |
629 | - } else { |
630 | - ax_skb = skb_clone(skb, GFP_ATOMIC); |
631 | - if (!ax_skb) |
632 | - return 0; |
633 | - } |
634 | - ax_skb->len = pkt_len; |
635 | - /* Skip IP alignment pseudo header */ |
636 | - skb_pull(ax_skb, 2); |
637 | - skb_set_tail_pointer(ax_skb, ax_skb->len); |
638 | - ax_skb->truesize = pkt_len + sizeof(struct sk_buff); |
639 | - ax88179_rx_checksum(ax_skb, pkt_hdr); |
640 | + if ((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) || |
641 | + pkt_len < 2 + ETH_HLEN) { |
642 | + dev->net->stats.rx_errors++; |
643 | + skb_pull(skb, pkt_len_plus_padd); |
644 | + continue; |
645 | + } |
646 | |
647 | - if (last) |
648 | - return 1; |
649 | + /* last packet */ |
650 | + if (pkt_len_plus_padd == skb->len) { |
651 | + skb_trim(skb, pkt_len); |
652 | |
653 | - usbnet_skb_return(dev, ax_skb); |
654 | + /* Skip IP alignment pseudo header */ |
655 | + skb_pull(skb, 2); |
656 | + |
657 | + skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd); |
658 | + ax88179_rx_checksum(skb, pkt_hdr); |
659 | + return 1; |
660 | } |
661 | |
662 | - /* Trim this packet away from the SKB */ |
663 | - if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8)) |
664 | + ax_skb = skb_clone(skb, GFP_ATOMIC); |
665 | + if (!ax_skb) |
666 | return 0; |
667 | + skb_trim(ax_skb, pkt_len); |
668 | + |
669 | + /* Skip IP alignment pseudo header */ |
670 | + skb_pull(ax_skb, 2); |
671 | + |
672 | + skb->truesize = pkt_len_plus_padd + |
673 | + SKB_DATA_ALIGN(sizeof(struct sk_buff)); |
674 | + ax88179_rx_checksum(ax_skb, pkt_hdr); |
675 | + usbnet_skb_return(dev, ax_skb); |
676 | + |
677 | + skb_pull(skb, pkt_len_plus_padd); |
678 | } |
679 | + |
680 | + return 0; |
681 | } |
682 | |
683 | static struct sk_buff * |
684 | diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c |
685 | index 003c53a5bb336..db37cf9281e1b 100644 |
686 | --- a/drivers/net/usb/qmi_wwan.c |
687 | +++ b/drivers/net/usb/qmi_wwan.c |
688 | @@ -928,10 +928,16 @@ static const struct usb_device_id products[] = { |
689 | {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ |
690 | {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ |
691 | {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ |
692 | + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */ |
693 | + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */ |
694 | {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ |
695 | {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ |
696 | {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ |
697 | {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ |
698 | + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1230, 2)}, /* Telit LE910Cx */ |
699 | + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */ |
700 | + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */ |
701 | + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */ |
702 | {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ |
703 | {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */ |
704 | {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ |
705 | diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c |
706 | index 6a004742ec71a..4b7a9672d92b7 100644 |
707 | --- a/drivers/net/usb/usbnet.c |
708 | +++ b/drivers/net/usb/usbnet.c |
709 | @@ -1951,8 +1951,8 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, |
710 | " value=0x%04x index=0x%04x size=%d\n", |
711 | cmd, reqtype, value, index, size); |
712 | |
713 | - if (data) { |
714 | - buf = kmalloc(size, GFP_KERNEL); |
715 | + if (size) { |
716 | + buf = kmalloc(size, GFP_NOIO); |
717 | if (!buf) |
718 | goto out; |
719 | } |
720 | @@ -1960,8 +1960,13 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, |
721 | err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), |
722 | cmd, reqtype, value, index, buf, size, |
723 | USB_CTRL_GET_TIMEOUT); |
724 | - if (err > 0 && err <= size) |
725 | - memcpy(data, buf, err); |
726 | + if (err > 0 && err <= size) { |
727 | + if (data) |
728 | + memcpy(data, buf, err); |
729 | + else |
730 | + netdev_dbg(dev->net, |
731 | + "Huh? Data requested but thrown away.\n"); |
732 | + } |
733 | kfree(buf); |
734 | out: |
735 | return err; |
736 | @@ -1979,10 +1984,16 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, |
737 | cmd, reqtype, value, index, size); |
738 | |
739 | if (data) { |
740 | - buf = kmemdup(data, size, GFP_KERNEL); |
741 | + buf = kmemdup(data, size, GFP_NOIO); |
742 | if (!buf) |
743 | goto out; |
744 | - } |
745 | + } else { |
746 | + if (size) { |
747 | + WARN_ON_ONCE(1); |
748 | + err = -EINVAL; |
749 | + goto out; |
750 | + } |
751 | + } |
752 | |
753 | err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), |
754 | cmd, reqtype, value, index, buf, size, |
755 | diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c |
756 | index 82dcd44b3e5e2..cae036f5299a8 100644 |
757 | --- a/drivers/net/xen-netfront.c |
758 | +++ b/drivers/net/xen-netfront.c |
759 | @@ -62,6 +62,10 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644); |
760 | MODULE_PARM_DESC(max_queues, |
761 | "Maximum number of queues per virtual interface"); |
762 | |
763 | +static bool __read_mostly xennet_trusted = true; |
764 | +module_param_named(trusted, xennet_trusted, bool, 0644); |
765 | +MODULE_PARM_DESC(trusted, "Is the backend trusted"); |
766 | + |
767 | #define XENNET_TIMEOUT (5 * HZ) |
768 | |
769 | static const struct ethtool_ops xennet_ethtool_ops; |
770 | @@ -162,6 +166,9 @@ struct netfront_info { |
771 | /* Is device behaving sane? */ |
772 | bool broken; |
773 | |
774 | + /* Should skbs be bounced into a zeroed buffer? */ |
775 | + bool bounce; |
776 | + |
777 | atomic_t rx_gso_checksum_fixup; |
778 | }; |
779 | |
780 | @@ -260,7 +267,7 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) |
781 | if (unlikely(!skb)) |
782 | return NULL; |
783 | |
784 | - page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); |
785 | + page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO); |
786 | if (!page) { |
787 | kfree_skb(skb); |
788 | return NULL; |
789 | @@ -591,6 +598,34 @@ static void xennet_mark_tx_pending(struct netfront_queue *queue) |
790 | queue->tx_link[i] = TX_PENDING; |
791 | } |
792 | |
793 | +struct sk_buff *bounce_skb(const struct sk_buff *skb) |
794 | +{ |
795 | + unsigned int headerlen = skb_headroom(skb); |
796 | + /* Align size to allocate full pages and avoid contiguous data leaks */ |
797 | + unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len, |
798 | + XEN_PAGE_SIZE); |
799 | + struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO); |
800 | + |
801 | + if (!n) |
802 | + return NULL; |
803 | + |
804 | + if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) { |
805 | + WARN_ONCE(1, "misaligned skb allocated\n"); |
806 | + kfree_skb(n); |
807 | + return NULL; |
808 | + } |
809 | + |
810 | + /* Set the data pointer */ |
811 | + skb_reserve(n, headerlen); |
812 | + /* Set the tail pointer and length */ |
813 | + skb_put(n, skb->len); |
814 | + |
815 | + BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); |
816 | + |
817 | + skb_copy_header(n, skb); |
818 | + return n; |
819 | +} |
820 | + |
821 | #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1) |
822 | |
823 | static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) |
824 | @@ -643,9 +678,13 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) |
825 | |
826 | /* The first req should be at least ETH_HLEN size or the packet will be |
827 | * dropped by netback. |
828 | + * |
829 | + * If the backend is not trusted bounce all data to zeroed pages to |
830 | + * avoid exposing contiguous data on the granted page not belonging to |
831 | + * the skb. |
832 | */ |
833 | - if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) { |
834 | - nskb = skb_copy(skb, GFP_ATOMIC); |
835 | + if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) { |
836 | + nskb = bounce_skb(skb); |
837 | if (!nskb) |
838 | goto drop; |
839 | dev_kfree_skb_any(skb); |
840 | @@ -1962,9 +2001,16 @@ static int talk_to_netback(struct xenbus_device *dev, |
841 | unsigned int max_queues = 0; |
842 | struct netfront_queue *queue = NULL; |
843 | unsigned int num_queues = 1; |
844 | + unsigned int trusted; |
845 | |
846 | info->netdev->irq = 0; |
847 | |
848 | + /* Check if backend is trusted. */ |
849 | + err = xenbus_scanf(XBT_NIL, dev->nodename, "trusted", "%u", &trusted); |
850 | + if (err < 0) |
851 | + trusted = 1; |
852 | + info->bounce = !xennet_trusted || !trusted; |
853 | + |
854 | /* Check if backend supports multiple queues */ |
855 | err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, |
856 | "multi-queue-max-queues", "%u", &max_queues); |
857 | @@ -2129,6 +2175,9 @@ static int xennet_connect(struct net_device *dev) |
858 | err = talk_to_netback(np->xbdev, np); |
859 | if (err) |
860 | return err; |
861 | + if (np->bounce) |
862 | + dev_info(&np->xbdev->dev, |
863 | + "bouncing transmitted data to zeroed pages\n"); |
864 | |
865 | /* talk_to_netback() sets the correct number of queues */ |
866 | num_queues = dev->real_num_tx_queues; |
867 | diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c |
868 | index 78b7aa835c81d..bb546cabe8090 100644 |
869 | --- a/drivers/nfc/nfcmrvl/i2c.c |
870 | +++ b/drivers/nfc/nfcmrvl/i2c.c |
871 | @@ -186,9 +186,9 @@ static int nfcmrvl_i2c_parse_dt(struct device_node *node, |
872 | pdata->irq_polarity = IRQF_TRIGGER_RISING; |
873 | |
874 | ret = irq_of_parse_and_map(node, 0); |
875 | - if (ret < 0) { |
876 | - pr_err("Unable to get irq, error: %d\n", ret); |
877 | - return ret; |
878 | + if (!ret) { |
879 | + pr_err("Unable to get irq\n"); |
880 | + return -EINVAL; |
881 | } |
882 | pdata->irq = ret; |
883 | |
884 | diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c |
885 | index fc8e78a29d77a..d66c83efdd7d5 100644 |
886 | --- a/drivers/nfc/nfcmrvl/spi.c |
887 | +++ b/drivers/nfc/nfcmrvl/spi.c |
888 | @@ -130,9 +130,9 @@ static int nfcmrvl_spi_parse_dt(struct device_node *node, |
889 | } |
890 | |
891 | ret = irq_of_parse_and_map(node, 0); |
892 | - if (ret < 0) { |
893 | - pr_err("Unable to get irq, error: %d\n", ret); |
894 | - return ret; |
895 | + if (!ret) { |
896 | + pr_err("Unable to get irq\n"); |
897 | + return -EINVAL; |
898 | } |
899 | pdata->irq = ret; |
900 | |
901 | diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c |
902 | index 7eab97585f22b..a4f1a981e2dd1 100644 |
903 | --- a/drivers/nfc/nxp-nci/i2c.c |
904 | +++ b/drivers/nfc/nxp-nci/i2c.c |
905 | @@ -180,6 +180,9 @@ static int nxp_nci_i2c_nci_read(struct nxp_nci_i2c_phy *phy, |
906 | memcpy(skb_put(*skb, NCI_CTRL_HDR_SIZE), (void *) &header, |
907 | NCI_CTRL_HDR_SIZE); |
908 | |
909 | + if (!header.plen) |
910 | + return 0; |
911 | + |
912 | r = i2c_master_recv(client, skb_put(*skb, header.plen), header.plen); |
913 | if (r != header.plen) { |
914 | nfc_err(&client->dev, |
915 | diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c |
916 | index 69d59102ff1be..2c3248e71e9c1 100644 |
917 | --- a/drivers/xen/gntdev.c |
918 | +++ b/drivers/xen/gntdev.c |
919 | @@ -57,6 +57,7 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by " |
920 | |
921 | static atomic_t pages_mapped = ATOMIC_INIT(0); |
922 | |
923 | +/* True in PV mode, false otherwise */ |
924 | static int use_ptemod; |
925 | #define populate_freeable_maps use_ptemod |
926 | |
927 | @@ -92,11 +93,16 @@ struct grant_map { |
928 | struct gnttab_unmap_grant_ref *unmap_ops; |
929 | struct gnttab_map_grant_ref *kmap_ops; |
930 | struct gnttab_unmap_grant_ref *kunmap_ops; |
931 | + bool *being_removed; |
932 | struct page **pages; |
933 | unsigned long pages_vm_start; |
934 | + /* Number of live grants */ |
935 | + atomic_t live_grants; |
936 | + /* Needed to avoid allocation in unmap_grant_pages */ |
937 | + struct gntab_unmap_queue_data unmap_data; |
938 | }; |
939 | |
940 | -static int unmap_grant_pages(struct grant_map *map, int offset, int pages); |
941 | +static void unmap_grant_pages(struct grant_map *map, int offset, int pages); |
942 | |
943 | /* ------------------------------------------------------------------ */ |
944 | |
945 | @@ -127,6 +133,7 @@ static void gntdev_free_map(struct grant_map *map) |
946 | kfree(map->unmap_ops); |
947 | kfree(map->kmap_ops); |
948 | kfree(map->kunmap_ops); |
949 | + kfree(map->being_removed); |
950 | kfree(map); |
951 | } |
952 | |
953 | @@ -145,12 +152,15 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count) |
954 | add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL); |
955 | add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL); |
956 | add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL); |
957 | + add->being_removed = |
958 | + kcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL); |
959 | if (NULL == add->grants || |
960 | NULL == add->map_ops || |
961 | NULL == add->unmap_ops || |
962 | NULL == add->kmap_ops || |
963 | NULL == add->kunmap_ops || |
964 | - NULL == add->pages) |
965 | + NULL == add->pages || |
966 | + NULL == add->being_removed) |
967 | goto err; |
968 | |
969 | if (gnttab_alloc_pages(count, add->pages)) |
970 | @@ -215,6 +225,34 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map) |
971 | return; |
972 | |
973 | atomic_sub(map->count, &pages_mapped); |
974 | + if (map->pages && !use_ptemod) { |
975 | + /* |
976 | + * Increment the reference count. This ensures that the |
977 | + * subsequent call to unmap_grant_pages() will not wind up |
978 | + * re-entering itself. It *can* wind up calling |
979 | + * gntdev_put_map() recursively, but such calls will be with a |
980 | + * reference count greater than 1, so they will return before |
981 | + * this code is reached. The recursion depth is thus limited to |
982 | + * 1. |
983 | + */ |
984 | + atomic_set(&map->users, 1); |
985 | + |
986 | + /* |
987 | + * Unmap the grants. This may or may not be asynchronous, so it |
988 | + * is possible that the reference count is 1 on return, but it |
989 | + * could also be greater than 1. |
990 | + */ |
991 | + unmap_grant_pages(map, 0, map->count); |
992 | + |
993 | + /* Check if the memory now needs to be freed */ |
994 | + if (!atomic_dec_and_test(&map->users)) |
995 | + return; |
996 | + |
997 | + /* |
998 | + * All pages have been returned to the hypervisor, so free the |
999 | + * map. |
1000 | + */ |
1001 | + } |
1002 | |
1003 | if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) { |
1004 | notify_remote_via_evtchn(map->notify.event); |
1005 | @@ -272,6 +310,7 @@ static int set_grant_ptes_as_special(pte_t *pte, pgtable_t token, |
1006 | |
1007 | static int map_grant_pages(struct grant_map *map) |
1008 | { |
1009 | + size_t alloced = 0; |
1010 | int i, err = 0; |
1011 | |
1012 | if (!use_ptemod) { |
1013 | @@ -320,85 +359,107 @@ static int map_grant_pages(struct grant_map *map) |
1014 | map->pages, map->count); |
1015 | |
1016 | for (i = 0; i < map->count; i++) { |
1017 | - if (map->map_ops[i].status == GNTST_okay) |
1018 | + if (map->map_ops[i].status == GNTST_okay) { |
1019 | map->unmap_ops[i].handle = map->map_ops[i].handle; |
1020 | - else if (!err) |
1021 | + if (!use_ptemod) |
1022 | + alloced++; |
1023 | + } else if (!err) |
1024 | err = -EINVAL; |
1025 | |
1026 | if (map->flags & GNTMAP_device_map) |
1027 | map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr; |
1028 | |
1029 | if (use_ptemod) { |
1030 | - if (map->kmap_ops[i].status == GNTST_okay) |
1031 | + if (map->kmap_ops[i].status == GNTST_okay) { |
1032 | + if (map->map_ops[i].status == GNTST_okay) |
1033 | + alloced++; |
1034 | map->kunmap_ops[i].handle = map->kmap_ops[i].handle; |
1035 | - else if (!err) |
1036 | + } else if (!err) |
1037 | err = -EINVAL; |
1038 | } |
1039 | } |
1040 | + atomic_add(alloced, &map->live_grants); |
1041 | return err; |
1042 | } |
1043 | |
1044 | -static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) |
1045 | +static void __unmap_grant_pages_done(int result, |
1046 | + struct gntab_unmap_queue_data *data) |
1047 | { |
1048 | - int i, err = 0; |
1049 | - struct gntab_unmap_queue_data unmap_data; |
1050 | + unsigned int i; |
1051 | + struct grant_map *map = data->data; |
1052 | + unsigned int offset = data->unmap_ops - map->unmap_ops; |
1053 | + |
1054 | + for (i = 0; i < data->count; i++) { |
1055 | + WARN_ON(map->unmap_ops[offset+i].status); |
1056 | + pr_debug("unmap handle=%d st=%d\n", |
1057 | + map->unmap_ops[offset+i].handle, |
1058 | + map->unmap_ops[offset+i].status); |
1059 | + map->unmap_ops[offset+i].handle = -1; |
1060 | + } |
1061 | + /* |
1062 | + * Decrease the live-grant counter. This must happen after the loop to |
1063 | + * prevent premature reuse of the grants by gnttab_mmap(). |
1064 | + */ |
1065 | + atomic_sub(data->count, &map->live_grants); |
1066 | |
1067 | + /* Release reference taken by unmap_grant_pages */ |
1068 | + gntdev_put_map(NULL, map); |
1069 | +} |
1070 | + |
1071 | +static void __unmap_grant_pages(struct grant_map *map, int offset, int pages) |
1072 | +{ |
1073 | if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { |
1074 | int pgno = (map->notify.addr >> PAGE_SHIFT); |
1075 | + |
1076 | if (pgno >= offset && pgno < offset + pages) { |
1077 | /* No need for kmap, pages are in lowmem */ |
1078 | uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno])); |
1079 | + |
1080 | tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; |
1081 | map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; |
1082 | } |
1083 | } |
1084 | |
1085 | - unmap_data.unmap_ops = map->unmap_ops + offset; |
1086 | - unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL; |
1087 | - unmap_data.pages = map->pages + offset; |
1088 | - unmap_data.count = pages; |
1089 | + map->unmap_data.unmap_ops = map->unmap_ops + offset; |
1090 | + map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL; |
1091 | + map->unmap_data.pages = map->pages + offset; |
1092 | + map->unmap_data.count = pages; |
1093 | + map->unmap_data.done = __unmap_grant_pages_done; |
1094 | + map->unmap_data.data = map; |
1095 | + atomic_inc(&map->users); /* to keep map alive during async call below */ |
1096 | |
1097 | - err = gnttab_unmap_refs_sync(&unmap_data); |
1098 | - if (err) |
1099 | - return err; |
1100 | - |
1101 | - for (i = 0; i < pages; i++) { |
1102 | - if (map->unmap_ops[offset+i].status) |
1103 | - err = -EINVAL; |
1104 | - pr_debug("unmap handle=%d st=%d\n", |
1105 | - map->unmap_ops[offset+i].handle, |
1106 | - map->unmap_ops[offset+i].status); |
1107 | - map->unmap_ops[offset+i].handle = -1; |
1108 | - } |
1109 | - return err; |
1110 | + gnttab_unmap_refs_async(&map->unmap_data); |
1111 | } |
1112 | |
1113 | -static int unmap_grant_pages(struct grant_map *map, int offset, int pages) |
1114 | +static void unmap_grant_pages(struct grant_map *map, int offset, int pages) |
1115 | { |
1116 | - int range, err = 0; |
1117 | + int range; |
1118 | + |
1119 | + if (atomic_read(&map->live_grants) == 0) |
1120 | + return; /* Nothing to do */ |
1121 | |
1122 | pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages); |
1123 | |
1124 | /* It is possible the requested range will have a "hole" where we |
1125 | * already unmapped some of the grants. Only unmap valid ranges. |
1126 | */ |
1127 | - while (pages && !err) { |
1128 | - while (pages && map->unmap_ops[offset].handle == -1) { |
1129 | + while (pages) { |
1130 | + while (pages && map->being_removed[offset]) { |
1131 | offset++; |
1132 | pages--; |
1133 | } |
1134 | range = 0; |
1135 | while (range < pages) { |
1136 | - if (map->unmap_ops[offset+range].handle == -1) |
1137 | + if (map->being_removed[offset + range]) |
1138 | break; |
1139 | + map->being_removed[offset + range] = true; |
1140 | range++; |
1141 | } |
1142 | - err = __unmap_grant_pages(map, offset, range); |
1143 | + if (range) |
1144 | + __unmap_grant_pages(map, offset, range); |
1145 | offset += range; |
1146 | pages -= range; |
1147 | } |
1148 | - |
1149 | - return err; |
1150 | } |
1151 | |
1152 | /* ------------------------------------------------------------------ */ |
1153 | @@ -454,7 +515,6 @@ static void unmap_if_in_range(struct grant_map *map, |
1154 | unsigned long start, unsigned long end) |
1155 | { |
1156 | unsigned long mstart, mend; |
1157 | - int err; |
1158 | |
1159 | if (!map->vma) |
1160 | return; |
1161 | @@ -468,10 +528,9 @@ static void unmap_if_in_range(struct grant_map *map, |
1162 | map->index, map->count, |
1163 | map->vma->vm_start, map->vma->vm_end, |
1164 | start, end, mstart, mend); |
1165 | - err = unmap_grant_pages(map, |
1166 | + unmap_grant_pages(map, |
1167 | (mstart - map->vma->vm_start) >> PAGE_SHIFT, |
1168 | (mend - mstart) >> PAGE_SHIFT); |
1169 | - WARN_ON(err); |
1170 | } |
1171 | |
1172 | static void mn_invl_range_start(struct mmu_notifier *mn, |
1173 | @@ -503,7 +562,6 @@ static void mn_release(struct mmu_notifier *mn, |
1174 | { |
1175 | struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn); |
1176 | struct grant_map *map; |
1177 | - int err; |
1178 | |
1179 | mutex_lock(&priv->lock); |
1180 | list_for_each_entry(map, &priv->maps, next) { |
1181 | @@ -512,8 +570,7 @@ static void mn_release(struct mmu_notifier *mn, |
1182 | pr_debug("map %d+%d (%lx %lx)\n", |
1183 | map->index, map->count, |
1184 | map->vma->vm_start, map->vma->vm_end); |
1185 | - err = unmap_grant_pages(map, /* offset */ 0, map->count); |
1186 | - WARN_ON(err); |
1187 | + unmap_grant_pages(map, /* offset */ 0, map->count); |
1188 | } |
1189 | list_for_each_entry(map, &priv->freeable_maps, next) { |
1190 | if (!map->vma) |
1191 | @@ -521,8 +578,7 @@ static void mn_release(struct mmu_notifier *mn, |
1192 | pr_debug("map %d+%d (%lx %lx)\n", |
1193 | map->index, map->count, |
1194 | map->vma->vm_start, map->vma->vm_end); |
1195 | - err = unmap_grant_pages(map, /* offset */ 0, map->count); |
1196 | - WARN_ON(err); |
1197 | + unmap_grant_pages(map, /* offset */ 0, map->count); |
1198 | } |
1199 | mutex_unlock(&priv->lock); |
1200 | } |
1201 | @@ -1012,6 +1068,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) |
1202 | goto unlock_out; |
1203 | } |
1204 | |
1205 | + if (atomic_read(&map->live_grants)) { |
1206 | + err = -EAGAIN; |
1207 | + goto unlock_out; |
1208 | + } |
1209 | atomic_inc(&map->users); |
1210 | |
1211 | vma->vm_ops = &gntdev_vmops; |
1212 | diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h |
1213 | index dab550cf29c12..827d69d66394c 100644 |
1214 | --- a/include/linux/skbuff.h |
1215 | +++ b/include/linux/skbuff.h |
1216 | @@ -975,6 +975,7 @@ static inline struct sk_buff *alloc_skb_head(gfp_t priority) |
1217 | struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); |
1218 | int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); |
1219 | struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); |
1220 | +void skb_copy_header(struct sk_buff *new, const struct sk_buff *old); |
1221 | struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority); |
1222 | struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, |
1223 | gfp_t gfp_mask, bool fclone); |
1224 | diff --git a/net/core/skbuff.c b/net/core/skbuff.c |
1225 | index 41d328a93790f..22b216629f9bc 100644 |
1226 | --- a/net/core/skbuff.c |
1227 | +++ b/net/core/skbuff.c |
1228 | @@ -1071,7 +1071,7 @@ static void skb_headers_offset_update(struct sk_buff *skb, int off) |
1229 | skb->inner_mac_header += off; |
1230 | } |
1231 | |
1232 | -static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) |
1233 | +void skb_copy_header(struct sk_buff *new, const struct sk_buff *old) |
1234 | { |
1235 | __copy_skb_header(new, old); |
1236 | |
1237 | @@ -1079,6 +1079,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) |
1238 | skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; |
1239 | skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; |
1240 | } |
1241 | +EXPORT_SYMBOL(skb_copy_header); |
1242 | |
1243 | static inline int skb_alloc_rx_flag(const struct sk_buff *skb) |
1244 | { |
1245 | @@ -1122,7 +1123,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) |
1246 | if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)) |
1247 | BUG(); |
1248 | |
1249 | - copy_skb_header(n, skb); |
1250 | + skb_copy_header(n, skb); |
1251 | return n; |
1252 | } |
1253 | EXPORT_SYMBOL(skb_copy); |
1254 | @@ -1185,7 +1186,7 @@ struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, |
1255 | skb_clone_fraglist(n); |
1256 | } |
1257 | |
1258 | - copy_skb_header(n, skb); |
1259 | + skb_copy_header(n, skb); |
1260 | out: |
1261 | return n; |
1262 | } |
1263 | @@ -1356,7 +1357,7 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb, |
1264 | skb->len + head_copy_len)) |
1265 | BUG(); |
1266 | |
1267 | - copy_skb_header(n, skb); |
1268 | + skb_copy_header(n, skb); |
1269 | |
1270 | skb_headers_offset_update(n, newheadroom - oldheadroom); |
1271 | |
1272 | diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c |
1273 | index 07e545fd2a3d8..560aedccfa1ac 100644 |
1274 | --- a/net/ipv6/sit.c |
1275 | +++ b/net/ipv6/sit.c |
1276 | @@ -308,9 +308,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t, |
1277 | kcalloc(cmax, sizeof(*kp), GFP_KERNEL) : |
1278 | NULL; |
1279 | |
1280 | - rcu_read_lock(); |
1281 | - |
1282 | - ca = t->prl_count < cmax ? t->prl_count : cmax; |
1283 | + ca = min(t->prl_count, cmax); |
1284 | |
1285 | if (!kp) { |
1286 | /* We don't try hard to allocate much memory for |
1287 | @@ -325,7 +323,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t, |
1288 | } |
1289 | } |
1290 | |
1291 | - c = 0; |
1292 | + rcu_read_lock(); |
1293 | for_each_prl_rcu(t->prl) { |
1294 | if (c >= cmax) |
1295 | break; |
1296 | @@ -337,7 +335,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t, |
1297 | if (kprl.addr != htonl(INADDR_ANY)) |
1298 | break; |
1299 | } |
1300 | -out: |
1301 | + |
1302 | rcu_read_unlock(); |
1303 | |
1304 | len = sizeof(*kp) * c; |
1305 | @@ -346,7 +344,7 @@ out: |
1306 | ret = -EFAULT; |
1307 | |
1308 | kfree(kp); |
1309 | - |
1310 | +out: |
1311 | return ret; |
1312 | } |
1313 | |
1314 | diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c |
1315 | index a3dface3e6e68..8a41596846413 100644 |
1316 | --- a/net/netfilter/nft_set_hash.c |
1317 | +++ b/net/netfilter/nft_set_hash.c |
1318 | @@ -121,6 +121,7 @@ static bool nft_hash_update(struct nft_set *set, const u32 *key, |
1319 | /* Another cpu may race to insert the element with the same key */ |
1320 | if (prev) { |
1321 | nft_set_elem_destroy(set, he, true); |
1322 | + atomic_dec(&set->nelems); |
1323 | he = prev; |
1324 | } |
1325 | |
1326 | @@ -130,6 +131,7 @@ out: |
1327 | |
1328 | err2: |
1329 | nft_set_elem_destroy(set, he, true); |
1330 | + atomic_dec(&set->nelems); |
1331 | err1: |
1332 | return false; |
1333 | } |
1334 | diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c |
1335 | index bc5469d6d9cb5..a7b3448969361 100644 |
1336 | --- a/net/rose/rose_timer.c |
1337 | +++ b/net/rose/rose_timer.c |
1338 | @@ -34,95 +34,95 @@ static void rose_idletimer_expiry(unsigned long); |
1339 | |
1340 | void rose_start_heartbeat(struct sock *sk) |
1341 | { |
1342 | - del_timer(&sk->sk_timer); |
1343 | + sk_stop_timer(sk, &sk->sk_timer); |
1344 | |
1345 | sk->sk_timer.data = (unsigned long)sk; |
1346 | sk->sk_timer.function = &rose_heartbeat_expiry; |
1347 | sk->sk_timer.expires = jiffies + 5 * HZ; |
1348 | |
1349 | - add_timer(&sk->sk_timer); |
1350 | + sk_reset_timer(sk, &sk->sk_timer, sk->sk_timer.expires); |
1351 | } |
1352 | |
1353 | void rose_start_t1timer(struct sock *sk) |
1354 | { |
1355 | struct rose_sock *rose = rose_sk(sk); |
1356 | |
1357 | - del_timer(&rose->timer); |
1358 | + sk_stop_timer(sk, &rose->timer); |
1359 | |
1360 | rose->timer.data = (unsigned long)sk; |
1361 | rose->timer.function = &rose_timer_expiry; |
1362 | rose->timer.expires = jiffies + rose->t1; |
1363 | |
1364 | - add_timer(&rose->timer); |
1365 | + sk_reset_timer(sk, &rose->timer, rose->timer.expires); |
1366 | } |
1367 | |
1368 | void rose_start_t2timer(struct sock *sk) |
1369 | { |
1370 | struct rose_sock *rose = rose_sk(sk); |
1371 | |
1372 | - del_timer(&rose->timer); |
1373 | + sk_stop_timer(sk, &rose->timer); |
1374 | |
1375 | rose->timer.data = (unsigned long)sk; |
1376 | rose->timer.function = &rose_timer_expiry; |
1377 | rose->timer.expires = jiffies + rose->t2; |
1378 | |
1379 | - add_timer(&rose->timer); |
1380 | + sk_reset_timer(sk, &rose->timer, rose->timer.expires); |
1381 | } |
1382 | |
1383 | void rose_start_t3timer(struct sock *sk) |
1384 | { |
1385 | struct rose_sock *rose = rose_sk(sk); |
1386 | |
1387 | - del_timer(&rose->timer); |
1388 | + sk_stop_timer(sk, &rose->timer); |
1389 | |
1390 | rose->timer.data = (unsigned long)sk; |
1391 | rose->timer.function = &rose_timer_expiry; |
1392 | rose->timer.expires = jiffies + rose->t3; |
1393 | |
1394 | - add_timer(&rose->timer); |
1395 | + sk_reset_timer(sk, &rose->timer, rose->timer.expires); |
1396 | } |
1397 | |
1398 | void rose_start_hbtimer(struct sock *sk) |
1399 | { |
1400 | struct rose_sock *rose = rose_sk(sk); |
1401 | |
1402 | - del_timer(&rose->timer); |
1403 | + sk_stop_timer(sk, &rose->timer); |
1404 | |
1405 | rose->timer.data = (unsigned long)sk; |
1406 | rose->timer.function = &rose_timer_expiry; |
1407 | rose->timer.expires = jiffies + rose->hb; |
1408 | |
1409 | - add_timer(&rose->timer); |
1410 | + sk_reset_timer(sk, &rose->timer, rose->timer.expires); |
1411 | } |
1412 | |
1413 | void rose_start_idletimer(struct sock *sk) |
1414 | { |
1415 | struct rose_sock *rose = rose_sk(sk); |
1416 | |
1417 | - del_timer(&rose->idletimer); |
1418 | + sk_stop_timer(sk, &rose->timer); |
1419 | |
1420 | if (rose->idle > 0) { |
1421 | rose->idletimer.data = (unsigned long)sk; |
1422 | rose->idletimer.function = &rose_idletimer_expiry; |
1423 | rose->idletimer.expires = jiffies + rose->idle; |
1424 | |
1425 | - add_timer(&rose->idletimer); |
1426 | + sk_reset_timer(sk, &rose->idletimer, rose->idletimer.expires); |
1427 | } |
1428 | } |
1429 | |
1430 | void rose_stop_heartbeat(struct sock *sk) |
1431 | { |
1432 | - del_timer(&sk->sk_timer); |
1433 | + sk_stop_timer(sk, &sk->sk_timer); |
1434 | } |
1435 | |
1436 | void rose_stop_timer(struct sock *sk) |
1437 | { |
1438 | - del_timer(&rose_sk(sk)->timer); |
1439 | + sk_stop_timer(sk, &rose_sk(sk)->timer); |
1440 | } |
1441 | |
1442 | void rose_stop_idletimer(struct sock *sk) |
1443 | { |
1444 | - del_timer(&rose_sk(sk)->idletimer); |
1445 | + sk_stop_timer(sk, &rose_sk(sk)->idletimer); |
1446 | } |
1447 | |
1448 | static void rose_heartbeat_expiry(unsigned long param) |
1449 | @@ -139,6 +139,7 @@ static void rose_heartbeat_expiry(unsigned long param) |
1450 | (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { |
1451 | bh_unlock_sock(sk); |
1452 | rose_destroy_socket(sk); |
1453 | + sock_put(sk); |
1454 | return; |
1455 | } |
1456 | break; |
1457 | @@ -161,6 +162,7 @@ static void rose_heartbeat_expiry(unsigned long param) |
1458 | |
1459 | rose_start_heartbeat(sk); |
1460 | bh_unlock_sock(sk); |
1461 | + sock_put(sk); |
1462 | } |
1463 | |
1464 | static void rose_timer_expiry(unsigned long param) |
1465 | @@ -190,6 +192,7 @@ static void rose_timer_expiry(unsigned long param) |
1466 | break; |
1467 | } |
1468 | bh_unlock_sock(sk); |
1469 | + sock_put(sk); |
1470 | } |
1471 | |
1472 | static void rose_idletimer_expiry(unsigned long param) |
1473 | @@ -213,4 +216,5 @@ static void rose_idletimer_expiry(unsigned long param) |
1474 | sock_set_flag(sk, SOCK_DEAD); |
1475 | } |
1476 | bh_unlock_sock(sk); |
1477 | + sock_put(sk); |
1478 | } |
1479 | diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c |
1480 | index 06b4b76edd9dc..e0b217f3aae98 100644 |
1481 | --- a/net/sunrpc/xdr.c |
1482 | +++ b/net/sunrpc/xdr.c |
1483 | @@ -544,7 +544,7 @@ static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr, |
1484 | */ |
1485 | xdr->p = (void *)p + frag2bytes; |
1486 | space_left = xdr->buf->buflen - xdr->buf->len; |
1487 | - if (space_left - nbytes >= PAGE_SIZE) |
1488 | + if (space_left - frag1bytes >= PAGE_SIZE) |
1489 | xdr->end = (void *)p + PAGE_SIZE; |
1490 | else |
1491 | xdr->end = (void *)p + space_left - frag1bytes; |