Magellan Linux

Contents of /trunk/kernel-alx/patches-4.4/0110-4.4.11-all-fixes.patch

Revision 2802
Mon May 30 12:46:26 2016 UTC by niro
File size: 121850 bytes
-linux-4.4.11
1 diff --git a/Makefile b/Makefile
2 index 5b5f462f834c..aad86274b61b 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 4
8 -SUBLEVEL = 10
9 +SUBLEVEL = 11
10 EXTRAVERSION =
11 NAME = Blurry Fish Butt
12
13 diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
14 index 0827d594b1f0..cd0cd5fd09a3 100644
15 --- a/arch/arm/boot/dts/at91sam9x5.dtsi
16 +++ b/arch/arm/boot/dts/at91sam9x5.dtsi
17 @@ -106,7 +106,7 @@
18
19 pmc: pmc@fffffc00 {
20 compatible = "atmel,at91sam9x5-pmc", "syscon";
21 - reg = <0xfffffc00 0x100>;
22 + reg = <0xfffffc00 0x200>;
23 interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
24 interrupt-controller;
25 #address-cells = <1>;
26 diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
27 index d29ad9545b41..081b2ad99d73 100644
28 --- a/arch/s390/include/asm/mmu.h
29 +++ b/arch/s390/include/asm/mmu.h
30 @@ -11,7 +11,7 @@ typedef struct {
31 spinlock_t list_lock;
32 struct list_head pgtable_list;
33 struct list_head gmap_list;
34 - unsigned long asce_bits;
35 + unsigned long asce;
36 unsigned long asce_limit;
37 unsigned long vdso_base;
38 /* The mmu context allocates 4K page tables. */
39 diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
40 index e485817f7b1a..22877c9440ea 100644
41 --- a/arch/s390/include/asm/mmu_context.h
42 +++ b/arch/s390/include/asm/mmu_context.h
43 @@ -26,12 +26,28 @@ static inline int init_new_context(struct task_struct *tsk,
44 mm->context.has_pgste = 0;
45 mm->context.use_skey = 0;
46 #endif
47 - if (mm->context.asce_limit == 0) {
48 + switch (mm->context.asce_limit) {
49 + case 1UL << 42:
50 + /*
51 + * forked 3-level task, fall through to set new asce with new
52 + * mm->pgd
53 + */
54 + case 0:
55 /* context created by exec, set asce limit to 4TB */
56 - mm->context.asce_bits = _ASCE_TABLE_LENGTH |
57 - _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
58 mm->context.asce_limit = STACK_TOP_MAX;
59 - } else if (mm->context.asce_limit == (1UL << 31)) {
60 + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
61 + _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
62 + break;
63 + case 1UL << 53:
64 + /* forked 4-level task, set new asce with new mm->pgd */
65 + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
66 + _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
67 + break;
68 + case 1UL << 31:
69 + /* forked 2-level compat task, set new asce with new mm->pgd */
70 + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
71 + _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
72 + /* pgd_alloc() did not increase mm->nr_pmds */
73 mm_inc_nr_pmds(mm);
74 }
75 crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
76 @@ -42,7 +58,7 @@ static inline int init_new_context(struct task_struct *tsk,
77
78 static inline void set_user_asce(struct mm_struct *mm)
79 {
80 - S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
81 + S390_lowcore.user_asce = mm->context.asce;
82 if (current->thread.mm_segment.ar4)
83 __ctl_load(S390_lowcore.user_asce, 7, 7);
84 set_cpu_flag(CIF_ASCE);
85 @@ -71,7 +87,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
86 {
87 int cpu = smp_processor_id();
88
89 - S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
90 + S390_lowcore.user_asce = next->context.asce;
91 if (prev == next)
92 return;
93 if (MACHINE_HAS_TLB_LC)
94 diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
95 index d7cc79fb6191..5991cdcb5b40 100644
96 --- a/arch/s390/include/asm/pgalloc.h
97 +++ b/arch/s390/include/asm/pgalloc.h
98 @@ -56,8 +56,8 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
99 return _REGION2_ENTRY_EMPTY;
100 }
101
102 -int crst_table_upgrade(struct mm_struct *, unsigned long limit);
103 -void crst_table_downgrade(struct mm_struct *, unsigned long limit);
104 +int crst_table_upgrade(struct mm_struct *);
105 +void crst_table_downgrade(struct mm_struct *);
106
107 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
108 {
109 diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
110 index b16c3d0a1b9f..c1ea67db8404 100644
111 --- a/arch/s390/include/asm/processor.h
112 +++ b/arch/s390/include/asm/processor.h
113 @@ -163,7 +163,7 @@ extern __vector128 init_task_fpu_regs[__NUM_VXRS];
114 regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
115 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
116 regs->gprs[15] = new_stackp; \
117 - crst_table_downgrade(current->mm, 1UL << 31); \
118 + crst_table_downgrade(current->mm); \
119 execve_tail(); \
120 } while (0)
121
122 diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
123 index ca148f7c3eaa..a2e6ef32e054 100644
124 --- a/arch/s390/include/asm/tlbflush.h
125 +++ b/arch/s390/include/asm/tlbflush.h
126 @@ -110,8 +110,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
127 static inline void __tlb_flush_kernel(void)
128 {
129 if (MACHINE_HAS_IDTE)
130 - __tlb_flush_idte((unsigned long) init_mm.pgd |
131 - init_mm.context.asce_bits);
132 + __tlb_flush_idte(init_mm.context.asce);
133 else
134 __tlb_flush_global();
135 }
136 @@ -133,8 +132,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
137 static inline void __tlb_flush_kernel(void)
138 {
139 if (MACHINE_HAS_TLB_LC)
140 - __tlb_flush_idte_local((unsigned long) init_mm.pgd |
141 - init_mm.context.asce_bits);
142 + __tlb_flush_idte_local(init_mm.context.asce);
143 else
144 __tlb_flush_local();
145 }
146 @@ -148,8 +146,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
147 * only ran on the local cpu.
148 */
149 if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
150 - __tlb_flush_asce(mm, (unsigned long) mm->pgd |
151 - mm->context.asce_bits);
152 + __tlb_flush_asce(mm, mm->context.asce);
153 else
154 __tlb_flush_full(mm);
155 }
156 diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
157 index c722400c7697..feff9caf89b5 100644
158 --- a/arch/s390/mm/init.c
159 +++ b/arch/s390/mm/init.c
160 @@ -89,7 +89,8 @@ void __init paging_init(void)
161 asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
162 pgd_type = _REGION3_ENTRY_EMPTY;
163 }
164 - S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
165 + init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
166 + S390_lowcore.kernel_asce = init_mm.context.asce;
167 clear_table((unsigned long *) init_mm.pgd, pgd_type,
168 sizeof(unsigned long)*2048);
169 vmem_map_init();
170 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
171 index ea01477b4aa6..f2b6b1d9c804 100644
172 --- a/arch/s390/mm/mmap.c
173 +++ b/arch/s390/mm/mmap.c
174 @@ -174,7 +174,7 @@ int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
175 if (!(flags & MAP_FIXED))
176 addr = 0;
177 if ((addr + len) >= TASK_SIZE)
178 - return crst_table_upgrade(current->mm, 1UL << 53);
179 + return crst_table_upgrade(current->mm);
180 return 0;
181 }
182
183 @@ -191,7 +191,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
184 return area;
185 if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
186 /* Upgrade the page table to 4 levels and retry. */
187 - rc = crst_table_upgrade(mm, 1UL << 53);
188 + rc = crst_table_upgrade(mm);
189 if (rc)
190 return (unsigned long) rc;
191 area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
192 @@ -213,7 +213,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
193 return area;
194 if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
195 /* Upgrade the page table to 4 levels and retry. */
196 - rc = crst_table_upgrade(mm, 1UL << 53);
197 + rc = crst_table_upgrade(mm);
198 if (rc)
199 return (unsigned long) rc;
200 area = arch_get_unmapped_area_topdown(filp, addr, len,
201 diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
202 index 54ef3bc01b43..471a370a527b 100644
203 --- a/arch/s390/mm/pgtable.c
204 +++ b/arch/s390/mm/pgtable.c
205 @@ -49,81 +49,52 @@ static void __crst_table_upgrade(void *arg)
206 __tlb_flush_local();
207 }
208
209 -int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
210 +int crst_table_upgrade(struct mm_struct *mm)
211 {
212 unsigned long *table, *pgd;
213 - unsigned long entry;
214 - int flush;
215
216 - BUG_ON(limit > (1UL << 53));
217 - flush = 0;
218 -repeat:
219 + /* upgrade should only happen from 3 to 4 levels */
220 + BUG_ON(mm->context.asce_limit != (1UL << 42));
221 +
222 table = crst_table_alloc(mm);
223 if (!table)
224 return -ENOMEM;
225 +
226 spin_lock_bh(&mm->page_table_lock);
227 - if (mm->context.asce_limit < limit) {
228 - pgd = (unsigned long *) mm->pgd;
229 - if (mm->context.asce_limit <= (1UL << 31)) {
230 - entry = _REGION3_ENTRY_EMPTY;
231 - mm->context.asce_limit = 1UL << 42;
232 - mm->context.asce_bits = _ASCE_TABLE_LENGTH |
233 - _ASCE_USER_BITS |
234 - _ASCE_TYPE_REGION3;
235 - } else {
236 - entry = _REGION2_ENTRY_EMPTY;
237 - mm->context.asce_limit = 1UL << 53;
238 - mm->context.asce_bits = _ASCE_TABLE_LENGTH |
239 - _ASCE_USER_BITS |
240 - _ASCE_TYPE_REGION2;
241 - }
242 - crst_table_init(table, entry);
243 - pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
244 - mm->pgd = (pgd_t *) table;
245 - mm->task_size = mm->context.asce_limit;
246 - table = NULL;
247 - flush = 1;
248 - }
249 + pgd = (unsigned long *) mm->pgd;
250 + crst_table_init(table, _REGION2_ENTRY_EMPTY);
251 + pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
252 + mm->pgd = (pgd_t *) table;
253 + mm->context.asce_limit = 1UL << 53;
254 + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
255 + _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
256 + mm->task_size = mm->context.asce_limit;
257 spin_unlock_bh(&mm->page_table_lock);
258 - if (table)
259 - crst_table_free(mm, table);
260 - if (mm->context.asce_limit < limit)
261 - goto repeat;
262 - if (flush)
263 - on_each_cpu(__crst_table_upgrade, mm, 0);
264 +
265 + on_each_cpu(__crst_table_upgrade, mm, 0);
266 return 0;
267 }
268
269 -void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
270 +void crst_table_downgrade(struct mm_struct *mm)
271 {
272 pgd_t *pgd;
273
274 + /* downgrade should only happen from 3 to 2 levels (compat only) */
275 + BUG_ON(mm->context.asce_limit != (1UL << 42));
276 +
277 if (current->active_mm == mm) {
278 clear_user_asce();
279 __tlb_flush_mm(mm);
280 }
281 - while (mm->context.asce_limit > limit) {
282 - pgd = mm->pgd;
283 - switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
284 - case _REGION_ENTRY_TYPE_R2:
285 - mm->context.asce_limit = 1UL << 42;
286 - mm->context.asce_bits = _ASCE_TABLE_LENGTH |
287 - _ASCE_USER_BITS |
288 - _ASCE_TYPE_REGION3;
289 - break;
290 - case _REGION_ENTRY_TYPE_R3:
291 - mm->context.asce_limit = 1UL << 31;
292 - mm->context.asce_bits = _ASCE_TABLE_LENGTH |
293 - _ASCE_USER_BITS |
294 - _ASCE_TYPE_SEGMENT;
295 - break;
296 - default:
297 - BUG();
298 - }
299 - mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
300 - mm->task_size = mm->context.asce_limit;
301 - crst_table_free(mm, (unsigned long *) pgd);
302 - }
303 +
304 + pgd = mm->pgd;
305 + mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
306 + mm->context.asce_limit = 1UL << 31;
307 + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
308 + _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
309 + mm->task_size = mm->context.asce_limit;
310 + crst_table_free(mm, (unsigned long *) pgd);
311 +
312 if (current->active_mm == mm)
313 set_user_asce(mm);
314 }
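
The s390 hunks above stop storing bare `asce_bits` (which every user had to OR with `__pa(mm->pgd)` on the fly) and instead keep a fully composed `asce`, recomputed whenever the pgd or the paging depth changes — on fork the child gets a new pgd, so a stale combination would point the hardware at the wrong table. A minimal userspace sketch of the composition; the `TYPE_*`, `TABLE_LENGTH`, and `USER_BITS` values are placeholders for the kernel's `_ASCE_*` constants, not the real bit patterns:

    /* Illustrative sketch only: the bit values below are placeholders. */
    #include <stdio.h>

    #define TABLE_LENGTH  0x3UL   /* stands in for _ASCE_TABLE_LENGTH */
    #define USER_BITS     0x40UL  /* stands in for _ASCE_USER_BITS */
    #define TYPE_SEGMENT  0x0UL   /* 2-level table, limit 1UL << 31 */
    #define TYPE_REGION3  0x4UL   /* 3-level table, limit 1UL << 42 */
    #define TYPE_REGION2  0x8UL   /* 4-level table, limit 1UL << 53 */

    /* Compose a full ASCE from the top-level table origin plus the
     * type matching the address-space limit, as init_new_context()
     * and crst_table_upgrade()/downgrade() now do in one place. */
    static unsigned long make_asce(unsigned long pgd_phys, unsigned long limit)
    {
        unsigned long type = (limit == 1UL << 31) ? TYPE_SEGMENT :
                             (limit == 1UL << 42) ? TYPE_REGION3 :
                                                    TYPE_REGION2;
        return pgd_phys | TABLE_LENGTH | USER_BITS | type;
    }

    int main(void)
    {
        /* A forked child with a fresh pgd gets a freshly composed asce. */
        printf("asce = %#lx\n", make_asce(0x12340000UL, 1UL << 42));
        return 0;
    }
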
315 diff --git a/crypto/ahash.c b/crypto/ahash.c
316 index d19b52324cf5..dac1c24e9c3e 100644
317 --- a/crypto/ahash.c
318 +++ b/crypto/ahash.c
319 @@ -69,8 +69,9 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
320 struct scatterlist *sg;
321
322 sg = walk->sg;
323 - walk->pg = sg_page(sg);
324 walk->offset = sg->offset;
325 + walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
326 + walk->offset = offset_in_page(walk->offset);
327 walk->entrylen = sg->length;
328
329 if (walk->entrylen > walk->total)
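
The ahash fix above copes with scatterlist entries whose `offset` exceeds a page: the page pointer advances by whole pages and only the in-page remainder is kept as the offset. The same normalization in a standalone sketch, assuming a 4 KiB page:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Split a byte offset into (page index, in-page offset), mirroring
     * walk->pg += offset >> PAGE_SHIFT; offset = offset_in_page(offset). */
    static void normalize(unsigned long offset,
                          unsigned long *page_idx, unsigned long *in_page)
    {
        *page_idx = offset >> PAGE_SHIFT;
        *in_page  = offset & (PAGE_SIZE - 1);
    }

    int main(void)
    {
        unsigned long pg, off;

        normalize(9000, &pg, &off);                /* 9000 = 2*4096 + 808 */
        printf("page %lu, offset %lu\n", pg, off); /* page 2, offset 808 */
        return 0;
    }
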
330 diff --git a/crypto/testmgr.c b/crypto/testmgr.c
331 index ae8c57fd8bc7..d4944318ca1f 100644
332 --- a/crypto/testmgr.c
333 +++ b/crypto/testmgr.c
334 @@ -1849,6 +1849,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
335 static int do_test_rsa(struct crypto_akcipher *tfm,
336 struct akcipher_testvec *vecs)
337 {
338 + char *xbuf[XBUFSIZE];
339 struct akcipher_request *req;
340 void *outbuf_enc = NULL;
341 void *outbuf_dec = NULL;
342 @@ -1857,9 +1858,12 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
343 int err = -ENOMEM;
344 struct scatterlist src, dst, src_tab[2];
345
346 + if (testmgr_alloc_buf(xbuf))
347 + return err;
348 +
349 req = akcipher_request_alloc(tfm, GFP_KERNEL);
350 if (!req)
351 - return err;
352 + goto free_xbuf;
353
354 init_completion(&result.completion);
355
356 @@ -1877,9 +1881,14 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
357 if (!outbuf_enc)
358 goto free_req;
359
360 + if (WARN_ON(vecs->m_size > PAGE_SIZE))
361 + goto free_all;
362 +
363 + memcpy(xbuf[0], vecs->m, vecs->m_size);
364 +
365 sg_init_table(src_tab, 2);
366 - sg_set_buf(&src_tab[0], vecs->m, 8);
367 - sg_set_buf(&src_tab[1], vecs->m + 8, vecs->m_size - 8);
368 + sg_set_buf(&src_tab[0], xbuf[0], 8);
369 + sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8);
370 sg_init_one(&dst, outbuf_enc, out_len_max);
371 akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
372 out_len_max);
373 @@ -1898,7 +1907,7 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
374 goto free_all;
375 }
376 /* verify that encrypted message is equal to expected */
377 - if (memcmp(vecs->c, sg_virt(req->dst), vecs->c_size)) {
378 + if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) {
379 pr_err("alg: rsa: encrypt test failed. Invalid output\n");
380 err = -EINVAL;
381 goto free_all;
382 @@ -1913,7 +1922,13 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
383 err = -ENOMEM;
384 goto free_all;
385 }
386 - sg_init_one(&src, vecs->c, vecs->c_size);
387 +
388 + if (WARN_ON(vecs->c_size > PAGE_SIZE))
389 + goto free_all;
390 +
391 + memcpy(xbuf[0], vecs->c, vecs->c_size);
392 +
393 + sg_init_one(&src, xbuf[0], vecs->c_size);
394 sg_init_one(&dst, outbuf_dec, out_len_max);
395 init_completion(&result.completion);
396 akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
397 @@ -1940,6 +1955,8 @@ free_all:
398 kfree(outbuf_enc);
399 free_req:
400 akcipher_request_free(req);
401 +free_xbuf:
402 + testmgr_free_buf(xbuf);
403 return err;
404 }
405
406 diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
407 index 7e58f6560399..4a36e415e938 100644
408 --- a/drivers/base/regmap/regmap-spmi.c
409 +++ b/drivers/base/regmap/regmap-spmi.c
410 @@ -142,7 +142,7 @@ static int regmap_spmi_ext_read(void *context,
411 while (val_size) {
412 len = min_t(size_t, val_size, 8);
413
414 - err = spmi_ext_register_readl(context, addr, val, val_size);
415 + err = spmi_ext_register_readl(context, addr, val, len);
416 if (err)
417 goto err_out;
418
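
The regmap-spmi change is a one-word fix to a classic chunked-transfer bug: the loop computed a per-iteration length `len = min(val_size, 8)` but then asked the hardware for the full remaining `val_size`. A sketch of the corrected loop shape, with `dev_read()` standing in for `spmi_ext_register_readl()`:

    #include <stdint.h>
    #include <string.h>

    /* Stand-in for spmi_ext_register_readl(): fills `len` bytes. */
    static int dev_read(uint16_t addr, uint8_t *buf, size_t len)
    {
        memset(buf, (uint8_t)addr, len);   /* fake register data */
        return 0;
    }

    static int read_chunked(uint16_t addr, uint8_t *val, size_t val_size)
    {
        while (val_size) {
            size_t len = val_size < 8 ? val_size : 8;  /* min(val_size, 8) */

            /* The bug passed val_size here, overrunning `val` on the
             * first iteration whenever val_size > 8. */
            if (dev_read(addr, val, len))
                return -1;

            addr += len;
            val += len;
            val_size -= len;
        }
        return 0;
    }

    int main(void)
    {
        uint8_t buf[20];
        return read_chunked(0x100, buf, sizeof(buf));
    }
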
419 diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
420 index 3f76bd495bcb..b9178d0a3093 100644
421 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h
422 +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
423 @@ -145,6 +145,8 @@ int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
424 void adf_disable_aer(struct adf_accel_dev *accel_dev);
425 int adf_init_aer(void);
426 void adf_exit_aer(void);
427 +int adf_init_pf_wq(void);
428 +void adf_exit_pf_wq(void);
429 int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
430 void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
431 int adf_send_admin_init(struct adf_accel_dev *accel_dev);
432 diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
433 index 473d36d91644..e7480f373532 100644
434 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
435 +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
436 @@ -469,12 +469,17 @@ static int __init adf_register_ctl_device_driver(void)
437 if (adf_init_aer())
438 goto err_aer;
439
440 + if (adf_init_pf_wq())
441 + goto err_pf_wq;
442 +
443 if (qat_crypto_register())
444 goto err_crypto_register;
445
446 return 0;
447
448 err_crypto_register:
449 + adf_exit_pf_wq();
450 +err_pf_wq:
451 adf_exit_aer();
452 err_aer:
453 adf_chr_drv_destroy();
454 @@ -487,6 +492,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
455 {
456 adf_chr_drv_destroy();
457 adf_exit_aer();
458 + adf_exit_pf_wq();
459 qat_crypto_unregister();
460 adf_clean_vf_map(false);
461 mutex_destroy(&adf_ctl_lock);
462 diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
463 index 1117a8b58280..38a0415e767d 100644
464 --- a/drivers/crypto/qat/qat_common/adf_sriov.c
465 +++ b/drivers/crypto/qat/qat_common/adf_sriov.c
466 @@ -119,11 +119,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
467 int i;
468 u32 reg;
469
470 - /* Workqueue for PF2VF responses */
471 - pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
472 - if (!pf2vf_resp_wq)
473 - return -ENOMEM;
474 -
475 for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
476 i++, vf_info++) {
477 /* This ptr will be populated when VFs will be created */
478 @@ -216,11 +211,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
479
480 kfree(accel_dev->pf.vf_info);
481 accel_dev->pf.vf_info = NULL;
482 -
483 - if (pf2vf_resp_wq) {
484 - destroy_workqueue(pf2vf_resp_wq);
485 - pf2vf_resp_wq = NULL;
486 - }
487 }
488 EXPORT_SYMBOL_GPL(adf_disable_sriov);
489
490 @@ -304,3 +294,19 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
491 return numvfs;
492 }
493 EXPORT_SYMBOL_GPL(adf_sriov_configure);
494 +
495 +int __init adf_init_pf_wq(void)
496 +{
497 + /* Workqueue for PF2VF responses */
498 + pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
499 +
500 + return !pf2vf_resp_wq ? -ENOMEM : 0;
501 +}
502 +
503 +void adf_exit_pf_wq(void)
504 +{
505 + if (pf2vf_resp_wq) {
506 + destroy_workqueue(pf2vf_resp_wq);
507 + pf2vf_resp_wq = NULL;
508 + }
509 +}
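
The QAT patch moves workqueue creation out of adf_enable_sriov(), which can run repeatedly, into one-time module init, and threads it through the existing goto ladder so teardown happens in exact reverse order of setup. The error-unwind pattern in a generic, self-contained sketch:

    #include <stdio.h>

    static int init_a(void) { return 0; }
    static int init_b(void) { return 0; }
    static int init_c(void) { return -1; }  /* pretend this step fails */
    static void exit_a(void) { puts("exit_a"); }
    static void exit_b(void) { puts("exit_b"); }

    /* Acquire a, then b, then c; on failure unwind in reverse, the way
     * adf_register_ctl_device_driver() slots in the pf_wq step. */
    static int module_init_demo(void)
    {
        if (init_a())
            goto err_a;
        if (init_b())
            goto err_b;
        if (init_c())
            goto err_c;
        return 0;

    err_c:
        exit_b();
    err_b:
        exit_a();
    err_a:
        return -1;
    }

    int main(void)
    {
        return module_init_demo() ? 1 : 0;   /* prints exit_b, exit_a */
    }
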
510 diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
511 index 6a2c76e367a5..97d1ed20418b 100644
512 --- a/drivers/gpu/drm/i915/intel_crt.c
513 +++ b/drivers/gpu/drm/i915/intel_crt.c
514 @@ -248,8 +248,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
515 pipe_config->has_pch_encoder = true;
516
517 /* LPT FDI RX only supports 8bpc. */
518 - if (HAS_PCH_LPT(dev))
519 + if (HAS_PCH_LPT(dev)) {
520 + if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
521 + DRM_DEBUG_KMS("LPT only supports 24bpp\n");
522 + return false;
523 + }
524 +
525 pipe_config->pipe_bpp = 24;
526 + }
527
528 /* FDI must always be 2.7 GHz */
529 if (HAS_DDI(dev)) {
530 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
531 index f091ad12d694..0a68d2ec89dc 100644
532 --- a/drivers/gpu/drm/i915/intel_pm.c
533 +++ b/drivers/gpu/drm/i915/intel_pm.c
534 @@ -6620,6 +6620,12 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
535 misccpctl = I915_READ(GEN7_MISCCPCTL);
536 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
537 I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
538 + /*
539 + * Wait at least 100 clocks before re-enabling clock gating. See
540 + * the definition of L3SQCREG1 in BSpec.
541 + */
542 + POSTING_READ(GEN8_L3SQCREG1);
543 + udelay(1);
544 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
545
546 /*
547 diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
548 index dac78ad24b31..79bab6fd76bb 100644
549 --- a/drivers/gpu/drm/radeon/atombios_crtc.c
550 +++ b/drivers/gpu/drm/radeon/atombios_crtc.c
551 @@ -1739,6 +1739,7 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
552 static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
553 {
554 struct drm_device *dev = crtc->dev;
555 + struct radeon_device *rdev = dev->dev_private;
556 struct drm_crtc *test_crtc;
557 struct radeon_crtc *test_radeon_crtc;
558
559 @@ -1748,6 +1749,10 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
560 test_radeon_crtc = to_radeon_crtc(test_crtc);
561 if (test_radeon_crtc->encoder &&
562 ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
563 + /* PPLL2 is exclusive to UNIPHYA on DCE61 */
564 + if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
565 + test_radeon_crtc->pll_id == ATOM_PPLL2)
566 + continue;
567 /* for DP use the same PLL for all */
568 if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
569 return test_radeon_crtc->pll_id;
570 @@ -1769,6 +1774,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
571 {
572 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
573 struct drm_device *dev = crtc->dev;
574 + struct radeon_device *rdev = dev->dev_private;
575 struct drm_crtc *test_crtc;
576 struct radeon_crtc *test_radeon_crtc;
577 u32 adjusted_clock, test_adjusted_clock;
578 @@ -1784,6 +1790,10 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
579 test_radeon_crtc = to_radeon_crtc(test_crtc);
580 if (test_radeon_crtc->encoder &&
581 !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
582 + /* PPLL2 is exclusive to UNIPHYA on DCE61 */
583 + if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
584 + test_radeon_crtc->pll_id == ATOM_PPLL2)
585 + continue;
586 /* check if we are already driving this connector with another crtc */
587 if (test_radeon_crtc->connector == radeon_crtc->connector) {
588 /* if we are, return that pll */
589 diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
590 index 3b0c229d7dcd..db64e0062689 100644
591 --- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
592 +++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
593 @@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
594
595 tmp &= AUX_HPD_SEL(0x7);
596 tmp |= AUX_HPD_SEL(chan->rec.hpd);
597 - tmp |= AUX_EN | AUX_LS_READ_EN;
598 + tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
599
600 WREG32(AUX_CONTROL + aux_offset[instance], tmp);
601
602 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
603 index 721d63f5b461..fd17443aeacd 100644
604 --- a/drivers/infiniband/hw/mlx5/main.c
605 +++ b/drivers/infiniband/hw/mlx5/main.c
606 @@ -405,8 +405,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
607 struct mlx5_ib_dev *dev = to_mdev(ibdev);
608 struct mlx5_core_dev *mdev = dev->mdev;
609 struct mlx5_hca_vport_context *rep;
610 - int max_mtu;
611 - int oper_mtu;
612 + u16 max_mtu;
613 + u16 oper_mtu;
614 int err;
615 u8 ib_link_width_oper;
616 u8 vl_hw_cap;
617 diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
618 index a806ba3818f7..8d6326d7e7be 100644
619 --- a/drivers/input/misc/max8997_haptic.c
620 +++ b/drivers/input/misc/max8997_haptic.c
621 @@ -255,12 +255,14 @@ static int max8997_haptic_probe(struct platform_device *pdev)
622 struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
623 const struct max8997_platform_data *pdata =
624 dev_get_platdata(iodev->dev);
625 - const struct max8997_haptic_platform_data *haptic_pdata =
626 - pdata->haptic_pdata;
627 + const struct max8997_haptic_platform_data *haptic_pdata = NULL;
628 struct max8997_haptic *chip;
629 struct input_dev *input_dev;
630 int error;
631
632 + if (pdata)
633 + haptic_pdata = pdata->haptic_pdata;
634 +
635 if (!haptic_pdata) {
636 dev_err(&pdev->dev, "no haptic platform data\n");
637 return -EINVAL;
638 diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
639 index 6c441be8f893..502984c724ff 100644
640 --- a/drivers/media/v4l2-core/videobuf2-v4l2.c
641 +++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
642 @@ -67,11 +67,6 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
643 return 0;
644 }
645
646 -static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
647 -{
648 - return __verify_planes_array(vb, pb);
649 -}
650 -
651 /**
652 * __verify_length() - Verify that the bytesused value for each plane fits in
653 * the plane length and that the data offset doesn't exceed the bytesused value.
654 @@ -437,7 +432,6 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
655 }
656
657 static const struct vb2_buf_ops v4l2_buf_ops = {
658 - .verify_planes_array = __verify_planes_array_core,
659 .fill_user_buffer = __fill_v4l2_buffer,
660 .fill_vb2_buffer = __fill_vb2_buffer,
661 .set_timestamp = __set_timestamp,
662 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
663 index 8f76f4558a88..2ff465848b65 100644
664 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
665 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
666 @@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
667
668 err = -EIO;
669
670 - netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
671 + netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
672 netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
673
674 /* Init PHY as early as possible due to power saving issue */
675 diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
676 index 206b6a71a545..d1c217eaf417 100644
677 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
678 +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
679 @@ -550,6 +550,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
680 nicvf_config_vlan_stripping(nic, nic->netdev->features);
681
682 /* Enable Receive queue */
683 + memset(&rq_cfg, 0, sizeof(struct rq_cfg));
684 rq_cfg.ena = 1;
685 rq_cfg.tcp_ena = 0;
686 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
687 @@ -582,6 +583,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
688 qidx, (u64)(cq->dmem.phys_base));
689
690 /* Enable Completion queue */
691 + memset(&cq_cfg, 0, sizeof(struct cq_cfg));
692 cq_cfg.ena = 1;
693 cq_cfg.reset = 0;
694 cq_cfg.caching = 0;
695 @@ -630,6 +632,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
696 qidx, (u64)(sq->dmem.phys_base));
697
698 /* Enable send queue & set queue size */
699 + memset(&sq_cfg, 0, sizeof(struct sq_cfg));
700 sq_cfg.ena = 1;
701 sq_cfg.reset = 0;
702 sq_cfg.ldwb = 0;
703 @@ -666,6 +669,7 @@ static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
704
705 /* Enable RBDR & set queue size */
706 /* Buffer size should be in multiples of 128 bytes */
707 + memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
708 rbdr_cfg.ena = 1;
709 rbdr_cfg.reset = 0;
710 rbdr_cfg.ldwb = 0;
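
Each nicvf hunk zeroes a stack configuration struct before setting fields and writing it to a register as a raw u64; without the memset, padding and untouched bitfields would carry stack garbage into the device. The pattern in miniature (the `rq_cfg` layout here is a stand-in, not the real register format):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for the nicvf rq_cfg register layout. */
    struct rq_cfg {
        uint64_t ena     : 1;
        uint64_t tcp_ena : 1;
        uint64_t rsvd    : 62;
    };

    static uint64_t pack(void)
    {
        struct rq_cfg cfg;
        uint64_t raw;

        /* Zero everything first; only then set the fields we mean to set.
         * Skipping this memset would send indeterminate bits to hardware. */
        memset(&cfg, 0, sizeof(cfg));
        cfg.ena = 1;
        cfg.tcp_ena = 0;

        memcpy(&raw, &cfg, sizeof(raw)); /* like *(u64 *)&rq_cfg in the driver */
        return raw;
    }

    int main(void)
    {
        printf("reg = %#llx\n", (unsigned long long)pack());
        return 0;
    }
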
711 diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
712 index b2a32209ffbf..f6147ffc7fbc 100644
713 --- a/drivers/net/ethernet/freescale/fec_main.c
714 +++ b/drivers/net/ethernet/freescale/fec_main.c
715 @@ -1557,9 +1557,15 @@ fec_enet_rx(struct net_device *ndev, int budget)
716 struct fec_enet_private *fep = netdev_priv(ndev);
717
718 for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
719 - clear_bit(queue_id, &fep->work_rx);
720 - pkt_received += fec_enet_rx_queue(ndev,
721 + int ret;
722 +
723 + ret = fec_enet_rx_queue(ndev,
724 budget - pkt_received, queue_id);
725 +
726 + if (ret < budget - pkt_received)
727 + clear_bit(queue_id, &fep->work_rx);
728 +
729 + pkt_received += ret;
730 }
731 return pkt_received;
732 }
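
The FEC receive fix stops clearing a queue's work-pending bit up front; the bit is now cleared only when the queue drained within the remaining budget, so a queue that hit the budget limit keeps its bit and gets polled again. A toy model of that bookkeeping:

    #include <stdio.h>

    #define NQUEUES 3

    static int backlog[NQUEUES] = { 5, 9, 2 };

    /* Receive up to `budget` packets from one queue; returns how many. */
    static int rx_queue(int q, int budget)
    {
        int n = backlog[q] < budget ? backlog[q] : budget;
        backlog[q] -= n;
        return n;
    }

    static int poll(unsigned long *work_pending, int budget)
    {
        int received = 0;

        for (int q = 0; q < NQUEUES; q++) {
            if (!(*work_pending & (1UL << q)))
                continue;

            int n = rx_queue(q, budget - received);

            /* Only drop the pending bit if the queue was fully drained
             * within the budget; otherwise keep it for the next poll. */
            if (n < budget - received)
                *work_pending &= ~(1UL << q);

            received += n;
        }
        return received;
    }

    int main(void)
    {
        unsigned long pending = 0x7;
        printf("first pass:  %d\n", poll(&pending, 8));
        printf("second pass: %d\n", poll(&pending, 8));
        return 0;
    }
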
733 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
734 index e7a5000aa12c..bbff8ec6713e 100644
735 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
736 +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
737 @@ -704,7 +704,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
738
739 if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
740 return -1;
741 - hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));
742 + hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
743
744 csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
745 sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
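
The mlx4 one-liner is an endianness fix: the Internet checksum adds 16-bit words as they sit in memory, and the IPv6 pseudo-header carries the next-header value in the low byte of a big-endian 16-bit field. `htons(nexthdr)` produces that byte layout on every host, while `nexthdr << 8` only happens to match it on little-endian machines — which is why the bug went unnoticed on x86. A quick demonstration:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint8_t nexthdr = 6;                 /* IPPROTO_TCP */
        uint16_t shifted = nexthdr << 8;
        uint16_t swapped = htons(nexthdr);
        uint8_t b1[2], b2[2];

        memcpy(b1, &shifted, 2);
        memcpy(b2, &swapped, 2);

        /* Checksumming wants memory bytes {0x00, 0x06}. htons() gives
         * that everywhere; the shift only on little-endian hosts. */
        printf("nexthdr << 8   -> %02x %02x\n", b1[0], b1[1]);
        printf("htons(nexthdr) -> %02x %02x\n", b2[0], b2[1]);
        return 0;
    }

On a little-endian host both lines print `00 06`; on a big-endian host the shifted form prints `06 00` and corrupts the checksum.
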
746 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
747 index 4421bf5463f6..e4019a803a9c 100644
748 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
749 +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
750 @@ -400,7 +400,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
751 u32 packets = 0;
752 u32 bytes = 0;
753 int factor = priv->cqe_factor;
754 - u64 timestamp = 0;
755 int done = 0;
756 int budget = priv->tx_work_limit;
757 u32 last_nr_txbb;
758 @@ -440,9 +439,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
759 new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
760
761 do {
762 + u64 timestamp = 0;
763 +
764 txbbs_skipped += last_nr_txbb;
765 ring_index = (ring_index + last_nr_txbb) & size_mask;
766 - if (ring->tx_info[ring_index].ts_requested)
767 +
768 + if (unlikely(ring->tx_info[ring_index].ts_requested))
769 timestamp = mlx4_en_get_cqe_ts(cqe);
770
771 /* free next descriptor */
772 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
773 index 1203d892e842..cbd17e25beeb 100644
774 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
775 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
776 @@ -1372,7 +1372,7 @@ static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
777 {
778 struct mlx5e_priv *priv = netdev_priv(netdev);
779 struct mlx5_core_dev *mdev = priv->mdev;
780 - int hw_mtu;
781 + u16 hw_mtu;
782 int err;
783
784 err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
785 @@ -1891,22 +1891,27 @@ static int mlx5e_set_features(struct net_device *netdev,
786 return err;
787 }
788
789 +#define MXL5_HW_MIN_MTU 64
790 +#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
791 +
792 static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
793 {
794 struct mlx5e_priv *priv = netdev_priv(netdev);
795 struct mlx5_core_dev *mdev = priv->mdev;
796 bool was_opened;
797 - int max_mtu;
798 + u16 max_mtu;
799 + u16 min_mtu;
800 int err = 0;
801
802 mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
803
804 max_mtu = MLX5E_HW2SW_MTU(max_mtu);
805 + min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
806
807 - if (new_mtu > max_mtu) {
808 + if (new_mtu > max_mtu || new_mtu < min_mtu) {
809 netdev_err(netdev,
810 - "%s: Bad MTU (%d) > (%d) Max\n",
811 - __func__, new_mtu, max_mtu);
812 + "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
813 + __func__, new_mtu, min_mtu, max_mtu);
814 return -EINVAL;
815 }
816
817 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
818 index a87e773e93f3..53a793bc2e3d 100644
819 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
820 +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
821 @@ -246,8 +246,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
822 }
823 EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
824
825 -static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
826 - int *max_mtu, int *oper_mtu, u8 port)
827 +static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
828 + u16 *max_mtu, u16 *oper_mtu, u8 port)
829 {
830 u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
831 u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
832 @@ -267,7 +267,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
833 *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
834 }
835
836 -int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
837 +int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
838 {
839 u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
840 u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
841 @@ -282,14 +282,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
842 }
843 EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
844
845 -void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
846 +void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
847 u8 port)
848 {
849 mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
850 }
851 EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
852
853 -void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
854 +void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
855 u8 port)
856 {
857 mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
858 diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
859 index bdd83d95ec0a..96a5028621c8 100644
860 --- a/drivers/net/usb/cdc_mbim.c
861 +++ b/drivers/net/usb/cdc_mbim.c
862 @@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
863 { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
864 .driver_info = (unsigned long)&cdc_mbim_info,
865 },
866 - /* Huawei E3372 fails unless NDP comes after the IP packets */
867 - { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
868 +
869 + /* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
870 + * (12d1:157d), are known to fail unless the NDP is placed
871 + * after the IP packets. Applying the quirk to all Huawei
872 + * devices is broader than necessary, but harmless.
873 + */
874 + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
875 .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
876 },
877 /* default entry */
878 diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
879 index 33edd07d9149..b3235fd2950c 100644
880 --- a/drivers/pinctrl/pinctrl-at91-pio4.c
881 +++ b/drivers/pinctrl/pinctrl-at91-pio4.c
882 @@ -717,9 +717,11 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
883 break;
884 case PIN_CONFIG_BIAS_PULL_UP:
885 conf |= ATMEL_PIO_PUEN_MASK;
886 + conf &= (~ATMEL_PIO_PDEN_MASK);
887 break;
888 case PIN_CONFIG_BIAS_PULL_DOWN:
889 conf |= ATMEL_PIO_PDEN_MASK;
890 + conf &= (~ATMEL_PIO_PUEN_MASK);
891 break;
892 case PIN_CONFIG_DRIVE_OPEN_DRAIN:
893 if (arg == 0)
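
The at91-pio4 fix makes the two bias options mutually exclusive: requesting a pull-up now clears the pull-down enable and vice versa, instead of potentially leaving both resistors switched in. The two-line idiom, with placeholder masks standing in for `ATMEL_PIO_PUEN_MASK`/`ATMEL_PIO_PDEN_MASK`:

    #include <stdio.h>

    #define PUEN_MASK (1U << 9)    /* placeholder pull-up enable bit */
    #define PDEN_MASK (1U << 10)   /* placeholder pull-down enable bit */

    static unsigned set_pull_up(unsigned conf)
    {
        conf |= PUEN_MASK;      /* enable pull-up */
        conf &= ~PDEN_MASK;     /* and make sure pull-down is off */
        return conf;
    }

    static unsigned set_pull_down(unsigned conf)
    {
        conf |= PDEN_MASK;
        conf &= ~PUEN_MASK;
        return conf;
    }

    int main(void)
    {
        unsigned conf = set_pull_down(0);
        conf = set_pull_up(conf);      /* previously left PDEN set too */
        printf("conf = %#x\n", conf);  /* only PUEN remains set */
        return 0;
    }
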
894 diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
895 index f2e1a39ce0f3..5cf4a97e0304 100644
896 --- a/drivers/regulator/axp20x-regulator.c
897 +++ b/drivers/regulator/axp20x-regulator.c
898 @@ -221,10 +221,10 @@ static const struct regulator_desc axp22x_regulators[] = {
899 AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
900 AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
901 AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
902 - AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 1800, 3300, 100,
903 + AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 700, 3300, 100,
904 AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
905 AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
906 - AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 1800, 3300, 100,
907 + AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 700, 3300, 100,
908 AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
909 AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
910 AXP_DESC_FIXED(AXP22X, RTC_LDO, "rtc_ldo", "ips", 3000),
911 diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
912 index 72fc3c32db49..b6d831b84e1d 100644
913 --- a/drivers/regulator/s2mps11.c
914 +++ b/drivers/regulator/s2mps11.c
915 @@ -305,7 +305,7 @@ static struct regulator_ops s2mps11_buck_ops = {
916 .enable_mask = S2MPS11_ENABLE_MASK \
917 }
918
919 -#define regulator_desc_s2mps11_buck6_10(num, min, step) { \
920 +#define regulator_desc_s2mps11_buck67810(num, min, step) { \
921 .name = "BUCK"#num, \
922 .id = S2MPS11_BUCK##num, \
923 .ops = &s2mps11_buck_ops, \
924 @@ -321,6 +321,22 @@ static struct regulator_ops s2mps11_buck_ops = {
925 .enable_mask = S2MPS11_ENABLE_MASK \
926 }
927
928 +#define regulator_desc_s2mps11_buck9 { \
929 + .name = "BUCK9", \
930 + .id = S2MPS11_BUCK9, \
931 + .ops = &s2mps11_buck_ops, \
932 + .type = REGULATOR_VOLTAGE, \
933 + .owner = THIS_MODULE, \
934 + .min_uV = MIN_3000_MV, \
935 + .uV_step = STEP_25_MV, \
936 + .n_voltages = S2MPS11_BUCK9_N_VOLTAGES, \
937 + .ramp_delay = S2MPS11_RAMP_DELAY, \
938 + .vsel_reg = S2MPS11_REG_B9CTRL2, \
939 + .vsel_mask = S2MPS11_BUCK9_VSEL_MASK, \
940 + .enable_reg = S2MPS11_REG_B9CTRL1, \
941 + .enable_mask = S2MPS11_ENABLE_MASK \
942 +}
943 +
944 static const struct regulator_desc s2mps11_regulators[] = {
945 regulator_desc_s2mps11_ldo(1, STEP_25_MV),
946 regulator_desc_s2mps11_ldo(2, STEP_50_MV),
947 @@ -365,11 +381,11 @@ static const struct regulator_desc s2mps11_regulators[] = {
948 regulator_desc_s2mps11_buck1_4(3),
949 regulator_desc_s2mps11_buck1_4(4),
950 regulator_desc_s2mps11_buck5,
951 - regulator_desc_s2mps11_buck6_10(6, MIN_600_MV, STEP_6_25_MV),
952 - regulator_desc_s2mps11_buck6_10(7, MIN_600_MV, STEP_6_25_MV),
953 - regulator_desc_s2mps11_buck6_10(8, MIN_600_MV, STEP_6_25_MV),
954 - regulator_desc_s2mps11_buck6_10(9, MIN_3000_MV, STEP_25_MV),
955 - regulator_desc_s2mps11_buck6_10(10, MIN_750_MV, STEP_12_5_MV),
956 + regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
957 + regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
958 + regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
959 + regulator_desc_s2mps11_buck9,
960 + regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
961 };
962
963 static struct regulator_ops s2mps14_reg_ops;
964 diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
965 index 5d0ec42a9317..634254a52301 100644
966 --- a/drivers/scsi/qla1280.c
967 +++ b/drivers/scsi/qla1280.c
968 @@ -4214,7 +4214,7 @@ static struct scsi_host_template qla1280_driver_template = {
969 .eh_bus_reset_handler = qla1280_eh_bus_reset,
970 .eh_host_reset_handler = qla1280_eh_adapter_reset,
971 .bios_param = qla1280_biosparam,
972 - .can_queue = 0xfffff,
973 + .can_queue = MAX_OUTSTANDING_COMMANDS,
974 .this_id = -1,
975 .sg_tablesize = SG_ALL,
976 .use_clustering = ENABLE_CLUSTERING,
977 diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
978 index b25dc71b0ea9..73c8ea0b1360 100644
979 --- a/drivers/spi/spi-pxa2xx.c
980 +++ b/drivers/spi/spi-pxa2xx.c
981 @@ -111,7 +111,7 @@ static const struct lpss_config lpss_platforms[] = {
982 .reg_general = -1,
983 .reg_ssp = 0x20,
984 .reg_cs_ctrl = 0x24,
985 - .reg_capabilities = 0xfc,
986 + .reg_capabilities = -1,
987 .rx_threshold = 1,
988 .tx_threshold_lo = 32,
989 .tx_threshold_hi = 56,
990 diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
991 index 64318fcfacf2..5044c6198332 100644
992 --- a/drivers/spi/spi-ti-qspi.c
993 +++ b/drivers/spi/spi-ti-qspi.c
994 @@ -94,6 +94,7 @@ struct ti_qspi {
995 #define QSPI_FLEN(n) ((n - 1) << 0)
996 #define QSPI_WLEN_MAX_BITS 128
997 #define QSPI_WLEN_MAX_BYTES 16
998 +#define QSPI_WLEN_MASK QSPI_WLEN(QSPI_WLEN_MAX_BITS)
999
1000 /* STATUS REGISTER */
1001 #define BUSY 0x01
1002 @@ -224,16 +225,16 @@ static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
1003 return -ETIMEDOUT;
1004 }
1005
1006 -static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
1007 +static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
1008 + int count)
1009 {
1010 - int wlen, count, xfer_len;
1011 + int wlen, xfer_len;
1012 unsigned int cmd;
1013 const u8 *txbuf;
1014 u32 data;
1015
1016 txbuf = t->tx_buf;
1017 cmd = qspi->cmd | QSPI_WR_SNGL;
1018 - count = t->len;
1019 wlen = t->bits_per_word >> 3; /* in bytes */
1020 xfer_len = wlen;
1021
1022 @@ -293,9 +294,10 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
1023 return 0;
1024 }
1025
1026 -static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
1027 +static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
1028 + int count)
1029 {
1030 - int wlen, count;
1031 + int wlen;
1032 unsigned int cmd;
1033 u8 *rxbuf;
1034
1035 @@ -312,7 +314,6 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
1036 cmd |= QSPI_RD_SNGL;
1037 break;
1038 }
1039 - count = t->len;
1040 wlen = t->bits_per_word >> 3; /* in bytes */
1041
1042 while (count) {
1043 @@ -343,12 +344,13 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
1044 return 0;
1045 }
1046
1047 -static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
1048 +static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
1049 + int count)
1050 {
1051 int ret;
1052
1053 if (t->tx_buf) {
1054 - ret = qspi_write_msg(qspi, t);
1055 + ret = qspi_write_msg(qspi, t, count);
1056 if (ret) {
1057 dev_dbg(qspi->dev, "Error while writing\n");
1058 return ret;
1059 @@ -356,7 +358,7 @@ static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
1060 }
1061
1062 if (t->rx_buf) {
1063 - ret = qspi_read_msg(qspi, t);
1064 + ret = qspi_read_msg(qspi, t, count);
1065 if (ret) {
1066 dev_dbg(qspi->dev, "Error while reading\n");
1067 return ret;
1068 @@ -373,7 +375,8 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
1069 struct spi_device *spi = m->spi;
1070 struct spi_transfer *t;
1071 int status = 0, ret;
1072 - int frame_length;
1073 + unsigned int frame_len_words, transfer_len_words;
1074 + int wlen;
1075
1076 /* setup device control reg */
1077 qspi->dc = 0;
1078 @@ -385,30 +388,38 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
1079 if (spi->mode & SPI_CS_HIGH)
1080 qspi->dc |= QSPI_CSPOL(spi->chip_select);
1081
1082 - frame_length = (m->frame_length << 3) / spi->bits_per_word;
1083 -
1084 - frame_length = clamp(frame_length, 0, QSPI_FRAME);
1085 + frame_len_words = 0;
1086 + list_for_each_entry(t, &m->transfers, transfer_list)
1087 + frame_len_words += t->len / (t->bits_per_word >> 3);
1088 + frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);
1089
1090 /* setup command reg */
1091 qspi->cmd = 0;
1092 qspi->cmd |= QSPI_EN_CS(spi->chip_select);
1093 - qspi->cmd |= QSPI_FLEN(frame_length);
1094 + qspi->cmd |= QSPI_FLEN(frame_len_words);
1095
1096 ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
1097
1098 mutex_lock(&qspi->list_lock);
1099
1100 list_for_each_entry(t, &m->transfers, transfer_list) {
1101 - qspi->cmd |= QSPI_WLEN(t->bits_per_word);
1102 + qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
1103 + QSPI_WLEN(t->bits_per_word));
1104 +
1105 + wlen = t->bits_per_word >> 3;
1106 + transfer_len_words = min(t->len / wlen, frame_len_words);
1107
1108 - ret = qspi_transfer_msg(qspi, t);
1109 + ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
1110 if (ret) {
1111 dev_dbg(qspi->dev, "transfer message failed\n");
1112 mutex_unlock(&qspi->list_lock);
1113 return -EINVAL;
1114 }
1115
1116 - m->actual_length += t->len;
1117 + m->actual_length += transfer_len_words * wlen;
1118 + frame_len_words -= transfer_len_words;
1119 + if (frame_len_words == 0)
1120 + break;
1121 }
1122
1123 mutex_unlock(&qspi->list_lock);
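
The ti-qspi rework sizes the frame in words instead of assuming a single word length for the whole message: it sums `len / (bits_per_word / 8)` across the transfer list, clamps to the controller maximum, and re-masks `QSPI_WLEN` per transfer. The word-count arithmetic in isolation:

    #include <stdio.h>

    #define QSPI_FRAME 4096   /* controller's max frame length, per the driver */

    struct xfer { unsigned len; unsigned bits_per_word; };

    static unsigned frame_len_words(const struct xfer *t, int n)
    {
        unsigned words = 0;

        for (int i = 0; i < n; i++)
            words += t[i].len / (t[i].bits_per_word >> 3);

        return words < QSPI_FRAME ? words : QSPI_FRAME;  /* the min_t() */
    }

    int main(void)
    {
        /* Mixed word sizes: 6 bytes of 8-bit + 8 bytes of 32-bit
         * = 6 words + 2 words. */
        struct xfer msg[] = { { 6, 8 }, { 8, 32 } };

        printf("frame = %u words\n", frame_len_words(msg, 2));  /* 8 */
        return 0;
    }
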
1124 diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
1125 index 735d7522a3a9..204659a5f6db 100644
1126 --- a/fs/isofs/rock.c
1127 +++ b/fs/isofs/rock.c
1128 @@ -203,6 +203,8 @@ int get_rock_ridge_filename(struct iso_directory_record *de,
1129 int retnamlen = 0;
1130 int truncate = 0;
1131 int ret = 0;
1132 + char *p;
1133 + int len;
1134
1135 if (!ISOFS_SB(inode->i_sb)->s_rock)
1136 return 0;
1137 @@ -267,12 +269,17 @@ repeat:
1138 rr->u.NM.flags);
1139 break;
1140 }
1141 - if ((strlen(retname) + rr->len - 5) >= 254) {
1142 + len = rr->len - 5;
1143 + if (retnamlen + len >= 254) {
1144 truncate = 1;
1145 break;
1146 }
1147 - strncat(retname, rr->u.NM.name, rr->len - 5);
1148 - retnamlen += rr->len - 5;
1149 + p = memchr(rr->u.NM.name, '\0', len);
1150 + if (unlikely(p))
1151 + len = p - rr->u.NM.name;
1152 + memcpy(retname + retnamlen, rr->u.NM.name, len);
1153 + retnamlen += len;
1154 + retname[retnamlen] = '\0';
1155 break;
1156 case SIG('R', 'E'):
1157 kfree(rs.buffer);
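
The Rock Ridge fix swaps `strncat()` — which trusts both a terminating NUL and `strlen()` of the destination — for explicit length bookkeeping: bound-check first, stop at any embedded NUL in the on-disk name via `memchr()`, copy with `memcpy()`, and terminate once. A userspace sketch of that safe-append discipline:

    #include <stdio.h>
    #include <string.h>

    #define NAME_MAX_LEN 254

    /* Append `len` bytes of `src` (not NUL-terminated, and possibly
     * containing a stray NUL) to dst, tracking length in *dst_len. */
    static int name_append(char *dst, int *dst_len, const char *src, int len)
    {
        const char *p = memchr(src, '\0', len);

        if (p)                            /* stop at an embedded NUL */
            len = p - src;
        if (*dst_len + len >= NAME_MAX_LEN)
            return -1;                    /* would overflow: truncate */

        memcpy(dst + *dst_len, src, len);
        *dst_len += len;
        dst[*dst_len] = '\0';
        return 0;
    }

    int main(void)
    {
        char name[NAME_MAX_LEN + 1];
        int n = 0;

        name_append(name, &n, "foo", 3);
        name_append(name, &n, "bar\0junk", 8);  /* copies only "bar" */
        printf("%s (%d)\n", name, n);           /* "foobar" (6) */
        return 0;
    }
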
1158 diff --git a/fs/namei.c b/fs/namei.c
1159 index d8ee4da93650..209ca7737cb2 100644
1160 --- a/fs/namei.c
1161 +++ b/fs/namei.c
1162 @@ -2906,22 +2906,10 @@ no_open:
1163 dentry = lookup_real(dir, dentry, nd->flags);
1164 if (IS_ERR(dentry))
1165 return PTR_ERR(dentry);
1166 -
1167 - if (create_error) {
1168 - int open_flag = op->open_flag;
1169 -
1170 - error = create_error;
1171 - if ((open_flag & O_EXCL)) {
1172 - if (!dentry->d_inode)
1173 - goto out;
1174 - } else if (!dentry->d_inode) {
1175 - goto out;
1176 - } else if ((open_flag & O_TRUNC) &&
1177 - d_is_reg(dentry)) {
1178 - goto out;
1179 - }
1180 - /* will fail later, go on to get the right error */
1181 - }
1182 + }
1183 + if (create_error && !dentry->d_inode) {
1184 + error = create_error;
1185 + goto out;
1186 }
1187 looked_up:
1188 path->dentry = dentry;
1189 @@ -4195,7 +4183,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1190 bool new_is_dir = false;
1191 unsigned max_links = new_dir->i_sb->s_max_links;
1192
1193 - if (source == target)
1194 + /*
1195 + * Check source == target.
1196 + * On overlayfs need to look at underlying inodes.
1197 + */
1198 + if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0))
1199 return 0;
1200
1201 error = may_delete(old_dir, old_dentry, is_dir);
1202 diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
1203 index 0cdf497c91ef..2162434728c0 100644
1204 --- a/fs/ocfs2/acl.c
1205 +++ b/fs/ocfs2/acl.c
1206 @@ -322,3 +322,90 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
1207 brelse(di_bh);
1208 return acl;
1209 }
1210 +
1211 +int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
1212 +{
1213 + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1214 + struct posix_acl *acl;
1215 + int ret;
1216 +
1217 + if (S_ISLNK(inode->i_mode))
1218 + return -EOPNOTSUPP;
1219 +
1220 + if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
1221 + return 0;
1222 +
1223 + acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
1224 + if (IS_ERR(acl) || !acl)
1225 + return PTR_ERR(acl);
1226 + ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
1227 + if (ret)
1228 + return ret;
1229 + ret = ocfs2_set_acl(NULL, inode, NULL, ACL_TYPE_ACCESS,
1230 + acl, NULL, NULL);
1231 + posix_acl_release(acl);
1232 + return ret;
1233 +}
1234 +
1235 +/*
1236 + * Initialize the ACLs of a new inode. If parent directory has default ACL,
1237 + * then clone to new inode. Called from ocfs2_mknod.
1238 + */
1239 +int ocfs2_init_acl(handle_t *handle,
1240 + struct inode *inode,
1241 + struct inode *dir,
1242 + struct buffer_head *di_bh,
1243 + struct buffer_head *dir_bh,
1244 + struct ocfs2_alloc_context *meta_ac,
1245 + struct ocfs2_alloc_context *data_ac)
1246 +{
1247 + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1248 + struct posix_acl *acl = NULL;
1249 + int ret = 0, ret2;
1250 + umode_t mode;
1251 +
1252 + if (!S_ISLNK(inode->i_mode)) {
1253 + if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
1254 + acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
1255 + dir_bh);
1256 + if (IS_ERR(acl))
1257 + return PTR_ERR(acl);
1258 + }
1259 + if (!acl) {
1260 + mode = inode->i_mode & ~current_umask();
1261 + ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
1262 + if (ret) {
1263 + mlog_errno(ret);
1264 + goto cleanup;
1265 + }
1266 + }
1267 + }
1268 + if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
1269 + if (S_ISDIR(inode->i_mode)) {
1270 + ret = ocfs2_set_acl(handle, inode, di_bh,
1271 + ACL_TYPE_DEFAULT, acl,
1272 + meta_ac, data_ac);
1273 + if (ret)
1274 + goto cleanup;
1275 + }
1276 + mode = inode->i_mode;
1277 + ret = __posix_acl_create(&acl, GFP_NOFS, &mode);
1278 + if (ret < 0)
1279 + return ret;
1280 +
1281 + ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
1282 + if (ret2) {
1283 + mlog_errno(ret2);
1284 + ret = ret2;
1285 + goto cleanup;
1286 + }
1287 + if (ret > 0) {
1288 + ret = ocfs2_set_acl(handle, inode,
1289 + di_bh, ACL_TYPE_ACCESS,
1290 + acl, meta_ac, data_ac);
1291 + }
1292 + }
1293 +cleanup:
1294 + posix_acl_release(acl);
1295 + return ret;
1296 +}
1297 diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h
1298 index 3fce68d08625..2783a75b3999 100644
1299 --- a/fs/ocfs2/acl.h
1300 +++ b/fs/ocfs2/acl.h
1301 @@ -35,5 +35,10 @@ int ocfs2_set_acl(handle_t *handle,
1302 struct posix_acl *acl,
1303 struct ocfs2_alloc_context *meta_ac,
1304 struct ocfs2_alloc_context *data_ac);
1305 +extern int ocfs2_acl_chmod(struct inode *, struct buffer_head *);
1306 +extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *,
1307 + struct buffer_head *, struct buffer_head *,
1308 + struct ocfs2_alloc_context *,
1309 + struct ocfs2_alloc_context *);
1310
1311 #endif /* OCFS2_ACL_H */
1312 diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
1313 index 0e5b4515f92e..77d30cbd944d 100644
1314 --- a/fs/ocfs2/file.c
1315 +++ b/fs/ocfs2/file.c
1316 @@ -1268,20 +1268,20 @@ bail_unlock_rw:
1317 if (size_change)
1318 ocfs2_rw_unlock(inode, 1);
1319 bail:
1320 - brelse(bh);
1321
1322 /* Release quota pointers in case we acquired them */
1323 for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
1324 dqput(transfer_to[qtype]);
1325
1326 if (!status && attr->ia_valid & ATTR_MODE) {
1327 - status = posix_acl_chmod(inode, inode->i_mode);
1328 + status = ocfs2_acl_chmod(inode, bh);
1329 if (status < 0)
1330 mlog_errno(status);
1331 }
1332 if (inode_locked)
1333 ocfs2_inode_unlock(inode, 1);
1334
1335 + brelse(bh);
1336 return status;
1337 }
1338
1339 diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
1340 index 3123408da935..62af9554541d 100644
1341 --- a/fs/ocfs2/namei.c
1342 +++ b/fs/ocfs2/namei.c
1343 @@ -259,7 +259,6 @@ static int ocfs2_mknod(struct inode *dir,
1344 struct ocfs2_dir_lookup_result lookup = { NULL, };
1345 sigset_t oldset;
1346 int did_block_signals = 0;
1347 - struct posix_acl *default_acl = NULL, *acl = NULL;
1348 struct ocfs2_dentry_lock *dl = NULL;
1349
1350 trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
1351 @@ -367,12 +366,6 @@ static int ocfs2_mknod(struct inode *dir,
1352 goto leave;
1353 }
1354
1355 - status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
1356 - if (status) {
1357 - mlog_errno(status);
1358 - goto leave;
1359 - }
1360 -
1361 handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
1362 S_ISDIR(mode),
1363 xattr_credits));
1364 @@ -421,16 +414,8 @@ static int ocfs2_mknod(struct inode *dir,
1365 inc_nlink(dir);
1366 }
1367
1368 - if (default_acl) {
1369 - status = ocfs2_set_acl(handle, inode, new_fe_bh,
1370 - ACL_TYPE_DEFAULT, default_acl,
1371 - meta_ac, data_ac);
1372 - }
1373 - if (!status && acl) {
1374 - status = ocfs2_set_acl(handle, inode, new_fe_bh,
1375 - ACL_TYPE_ACCESS, acl,
1376 - meta_ac, data_ac);
1377 - }
1378 + status = ocfs2_init_acl(handle, inode, dir, new_fe_bh, parent_fe_bh,
1379 + meta_ac, data_ac);
1380
1381 if (status < 0) {
1382 mlog_errno(status);
1383 @@ -472,10 +457,6 @@ static int ocfs2_mknod(struct inode *dir,
1384 d_instantiate(dentry, inode);
1385 status = 0;
1386 leave:
1387 - if (default_acl)
1388 - posix_acl_release(default_acl);
1389 - if (acl)
1390 - posix_acl_release(acl);
1391 if (status < 0 && did_quota_inode)
1392 dquot_free_inode(inode);
1393 if (handle)
1394 diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
1395 index 252119860e6c..6a0c55d7dff0 100644
1396 --- a/fs/ocfs2/refcounttree.c
1397 +++ b/fs/ocfs2/refcounttree.c
1398 @@ -4248,20 +4248,12 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
1399 struct inode *inode = d_inode(old_dentry);
1400 struct buffer_head *old_bh = NULL;
1401 struct inode *new_orphan_inode = NULL;
1402 - struct posix_acl *default_acl, *acl;
1403 - umode_t mode;
1404
1405 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
1406 return -EOPNOTSUPP;
1407
1408 - mode = inode->i_mode;
1409 - error = posix_acl_create(dir, &mode, &default_acl, &acl);
1410 - if (error) {
1411 - mlog_errno(error);
1412 - return error;
1413 - }
1414
1415 - error = ocfs2_create_inode_in_orphan(dir, mode,
1416 + error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
1417 &new_orphan_inode);
1418 if (error) {
1419 mlog_errno(error);
1420 @@ -4300,16 +4292,11 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
1421 /* If the security isn't preserved, we need to re-initialize them. */
1422 if (!preserve) {
1423 error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
1424 - &new_dentry->d_name,
1425 - default_acl, acl);
1426 + &new_dentry->d_name);
1427 if (error)
1428 mlog_errno(error);
1429 }
1430 out:
1431 - if (default_acl)
1432 - posix_acl_release(default_acl);
1433 - if (acl)
1434 - posix_acl_release(acl);
1435 if (!error) {
1436 error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
1437 new_dentry);
1438 diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
1439 index e9164f09841b..877830b05e12 100644
1440 --- a/fs/ocfs2/xattr.c
1441 +++ b/fs/ocfs2/xattr.c
1442 @@ -7197,12 +7197,10 @@ out:
1443 */
1444 int ocfs2_init_security_and_acl(struct inode *dir,
1445 struct inode *inode,
1446 - const struct qstr *qstr,
1447 - struct posix_acl *default_acl,
1448 - struct posix_acl *acl)
1449 + const struct qstr *qstr)
1450 {
1451 - struct buffer_head *dir_bh = NULL;
1452 int ret = 0;
1453 + struct buffer_head *dir_bh = NULL;
1454
1455 ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
1456 if (ret) {
1457 @@ -7215,11 +7213,9 @@ int ocfs2_init_security_and_acl(struct inode *dir,
1458 mlog_errno(ret);
1459 goto leave;
1460 }
1461 -
1462 - if (!ret && default_acl)
1463 - ret = ocfs2_iop_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
1464 - if (!ret && acl)
1465 - ret = ocfs2_iop_set_acl(inode, acl, ACL_TYPE_ACCESS);
1466 + ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
1467 + if (ret)
1468 + mlog_errno(ret);
1469
1470 ocfs2_inode_unlock(dir, 0);
1471 brelse(dir_bh);
1472 diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
1473 index f10d5b93c366..1633cc15ea1f 100644
1474 --- a/fs/ocfs2/xattr.h
1475 +++ b/fs/ocfs2/xattr.h
1476 @@ -94,7 +94,5 @@ int ocfs2_reflink_xattrs(struct inode *old_inode,
1477 bool preserve_security);
1478 int ocfs2_init_security_and_acl(struct inode *dir,
1479 struct inode *inode,
1480 - const struct qstr *qstr,
1481 - struct posix_acl *default_acl,
1482 - struct posix_acl *acl);
1483 + const struct qstr *qstr);
1484 #endif /* OCFS2_XATTR_H */
1485 diff --git a/fs/open.c b/fs/open.c
1486 index 6a24f988d253..157b9940dd73 100644
1487 --- a/fs/open.c
1488 +++ b/fs/open.c
1489 @@ -840,16 +840,12 @@ EXPORT_SYMBOL(file_path);
1490 int vfs_open(const struct path *path, struct file *file,
1491 const struct cred *cred)
1492 {
1493 - struct dentry *dentry = path->dentry;
1494 - struct inode *inode = dentry->d_inode;
1495 + struct inode *inode = vfs_select_inode(path->dentry, file->f_flags);
1496
1497 - file->f_path = *path;
1498 - if (dentry->d_flags & DCACHE_OP_SELECT_INODE) {
1499 - inode = dentry->d_op->d_select_inode(dentry, file->f_flags);
1500 - if (IS_ERR(inode))
1501 - return PTR_ERR(inode);
1502 - }
1503 + if (IS_ERR(inode))
1504 + return PTR_ERR(inode);
1505
1506 + file->f_path = *path;
1507 return do_dentry_open(file, inode, NULL, cred);
1508 }
1509
1510 diff --git a/include/linux/bpf.h b/include/linux/bpf.h
1511 index 83d1926c61e4..67bc2da5d233 100644
1512 --- a/include/linux/bpf.h
1513 +++ b/include/linux/bpf.h
1514 @@ -165,12 +165,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
1515 void bpf_register_map_type(struct bpf_map_type_list *tl);
1516
1517 struct bpf_prog *bpf_prog_get(u32 ufd);
1518 +struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
1519 void bpf_prog_put(struct bpf_prog *prog);
1520 void bpf_prog_put_rcu(struct bpf_prog *prog);
1521
1522 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
1523 struct bpf_map *__bpf_map_get(struct fd f);
1524 -void bpf_map_inc(struct bpf_map *map, bool uref);
1525 +struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
1526 void bpf_map_put_with_uref(struct bpf_map *map);
1527 void bpf_map_put(struct bpf_map *map);
1528
1529 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
1530 index f513dd855cb2..d81746d3b2da 100644
1531 --- a/include/linux/dcache.h
1532 +++ b/include/linux/dcache.h
1533 @@ -592,4 +592,16 @@ static inline struct dentry *d_real(struct dentry *dentry)
1534 return dentry;
1535 }
1536
1537 +static inline struct inode *vfs_select_inode(struct dentry *dentry,
1538 + unsigned open_flags)
1539 +{
1540 + struct inode *inode = d_inode(dentry);
1541 +
1542 + if (inode && unlikely(dentry->d_flags & DCACHE_OP_SELECT_INODE))
1543 + inode = dentry->d_op->d_select_inode(dentry, open_flags);
1544 +
1545 + return inode;
1546 +}
1547 +
1548 +
1549 #endif /* __LINUX_DCACHE_H */
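Taken together, the fs/open.c and dcache.h hunks move the d_select_inode() dance into a reusable vfs_select_inode() helper and delay the file->f_path assignment until the inode is known to be valid. The resulting open path, condensed from the vfs_open() hunk above:

    /* Resolve the backing inode first, so a failing d_select_inode()
     * leaves the struct file untouched. */
    struct inode *inode = vfs_select_inode(path->dentry, file->f_flags);

    if (IS_ERR(inode))
            return PTR_ERR(inode);

    file->f_path = *path;
    return do_dentry_open(file, inode, NULL, cred);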
1550 diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
1551 index b288965e8101..2c14eeca46f0 100644
1552 --- a/include/linux/mfd/samsung/s2mps11.h
1553 +++ b/include/linux/mfd/samsung/s2mps11.h
1554 @@ -173,10 +173,12 @@ enum s2mps11_regulators {
1555
1556 #define S2MPS11_LDO_VSEL_MASK 0x3F
1557 #define S2MPS11_BUCK_VSEL_MASK 0xFF
1558 +#define S2MPS11_BUCK9_VSEL_MASK 0x1F
1559 #define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT)
1560 #define S2MPS11_ENABLE_SHIFT 0x06
1561 #define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1)
1562 #define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
1563 +#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1)
1564 #define S2MPS11_RAMP_DELAY 25000 /* uV/us */
1565
1566 #define S2MPS11_CTRL1_PWRHOLD_MASK BIT(4)
1567 diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
1568 index af3efd9157f0..412aa988c6ad 100644
1569 --- a/include/linux/mlx5/driver.h
1570 +++ b/include/linux/mlx5/driver.h
1571 @@ -792,9 +792,9 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
1572 int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
1573 enum mlx5_port_status *status);
1574
1575 -int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
1576 -void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
1577 -void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
1578 +int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
1579 +void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
1580 +void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
1581 u8 port);
1582
1583 int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
1584 diff --git a/include/linux/net.h b/include/linux/net.h
1585 index 0b4ac7da583a..25ef630f1bd6 100644
1586 --- a/include/linux/net.h
1587 +++ b/include/linux/net.h
1588 @@ -245,7 +245,15 @@ do { \
1589 net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
1590 #define net_info_ratelimited(fmt, ...) \
1591 net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
1592 -#if defined(DEBUG)
1593 +#if defined(CONFIG_DYNAMIC_DEBUG)
1594 +#define net_dbg_ratelimited(fmt, ...) \
1595 +do { \
1596 + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
1597 + if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
1598 + net_ratelimit()) \
1599 + __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
1600 +} while (0)
1601 +#elif defined(DEBUG)
1602 #define net_dbg_ratelimited(fmt, ...) \
1603 net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
1604 #else
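With CONFIG_DYNAMIC_DEBUG, net_dbg_ratelimited() now checks the dynamic-debug descriptor before consulting net_ratelimit(), so output appears only for callsites enabled through dynamic debug, and the shared ratelimit budget is consumed only by those callsites. A usage sketch with a hypothetical receive handler:

    /* Hypothetical: prints only once this callsite is enabled, e.g. via
     *   echo 'func my_rx_handler +p' > .../dynamic_debug/control
     * and even then only within the net_ratelimit() budget. */
    static void my_rx_handler(struct sk_buff *skb)
    {
            if (skb->len < ETH_HLEN)
                    net_dbg_ratelimited("runt frame on %s, len %u\n",
                                        skb->dev->name, skb->len);
    }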
1605 diff --git a/include/net/codel.h b/include/net/codel.h
1606 index 267e70210061..d168aca115cc 100644
1607 --- a/include/net/codel.h
1608 +++ b/include/net/codel.h
1609 @@ -162,12 +162,14 @@ struct codel_vars {
1610 * struct codel_stats - contains codel shared variables and stats
1611 * @maxpacket: largest packet we've seen so far
1612 * @drop_count: temp count of dropped packets in dequeue()
1613 + * @drop_len: bytes of dropped packets in dequeue()
1614 * ecn_mark: number of packets we ECN marked instead of dropping
1615 * ce_mark: number of packets CE marked because sojourn time was above ce_threshold
1616 */
1617 struct codel_stats {
1618 u32 maxpacket;
1619 u32 drop_count;
1620 + u32 drop_len;
1621 u32 ecn_mark;
1622 u32 ce_mark;
1623 };
1624 @@ -308,6 +310,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
1625 vars->rec_inv_sqrt);
1626 goto end;
1627 }
1628 + stats->drop_len += qdisc_pkt_len(skb);
1629 qdisc_drop(skb, sch);
1630 stats->drop_count++;
1631 skb = dequeue_func(vars, sch);
1632 @@ -330,6 +333,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
1633 if (params->ecn && INET_ECN_set_ce(skb)) {
1634 stats->ecn_mark++;
1635 } else {
1636 + stats->drop_len += qdisc_pkt_len(skb);
1637 qdisc_drop(skb, sch);
1638 stats->drop_count++;
1639
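The new drop_len counter lets codel accumulate the byte size of packets dropped in dequeue alongside drop_count, feeding the qdisc_tree_reduce_backlog() API introduced below. The consumption pattern, as used by the sch_codel and sch_fq_codel hunks later in this patch:

    /* Deferred reporting: it cannot run while qlen is 0 or HTB crashes,
     * so both counters are flushed on the next non-empty dequeue. */
    if (q->stats.drop_count && sch->q.qlen) {
            qdisc_tree_reduce_backlog(sch, q->stats.drop_count,
                                      q->stats.drop_len);
            q->stats.drop_count = 0;
            q->stats.drop_len = 0;
    }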
1640 diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
1641 index b2a8e6338576..86df0835f6b5 100644
1642 --- a/include/net/sch_generic.h
1643 +++ b/include/net/sch_generic.h
1644 @@ -396,7 +396,8 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
1645 struct Qdisc *qdisc);
1646 void qdisc_reset(struct Qdisc *qdisc);
1647 void qdisc_destroy(struct Qdisc *qdisc);
1648 -void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
1649 +void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
1650 + unsigned int len);
1651 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
1652 const struct Qdisc_ops *ops);
1653 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
1654 @@ -698,6 +699,23 @@ static inline void qdisc_reset_queue(struct Qdisc *sch)
1655 sch->qstats.backlog = 0;
1656 }
1657
1658 +static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
1659 + struct Qdisc **pold)
1660 +{
1661 + struct Qdisc *old;
1662 +
1663 + sch_tree_lock(sch);
1664 + old = *pold;
1665 + *pold = new;
1666 + if (old != NULL) {
1667 + qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
1668 + qdisc_reset(old);
1669 + }
1670 + sch_tree_unlock(sch);
1671 +
1672 + return old;
1673 +}
1674 +
1675 static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
1676 struct sk_buff_head *list)
1677 {
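qdisc_replace() centralizes the graft sequence several schedulers open-coded: swap the child under sch_tree_lock(), then charge the old child's full qlen and byte backlog to the ancestors before resetting it. A ->graft() callback then collapses to nearly a one-liner; a sketch for a hypothetical classful qdisc, mirroring the drr/hfsc conversions below:

    static int foo_graft(struct Qdisc *sch, unsigned long arg,
                         struct Qdisc *new, struct Qdisc **old)
    {
            struct foo_class *cl = (struct foo_class *)arg;  /* hypothetical */

            if (new == NULL)
                    new = &noop_qdisc;
            *old = qdisc_replace(sch, new, &cl->qdisc);
            return 0;
    }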
1678 diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
1679 index 9cf2394f0bcf..752f5dc040a5 100644
1680 --- a/include/uapi/linux/if.h
1681 +++ b/include/uapi/linux/if.h
1682 @@ -19,14 +19,20 @@
1683 #ifndef _LINUX_IF_H
1684 #define _LINUX_IF_H
1685
1686 +#include <linux/libc-compat.h> /* for compatibility with glibc */
1687 #include <linux/types.h> /* for "__kernel_caddr_t" et al */
1688 #include <linux/socket.h> /* for "struct sockaddr" et al */
1689 #include <linux/compiler.h> /* for "__user" et al */
1690
1691 +#if __UAPI_DEF_IF_IFNAMSIZ
1692 #define IFNAMSIZ 16
1693 +#endif /* __UAPI_DEF_IF_IFNAMSIZ */
1694 #define IFALIASZ 256
1695 #include <linux/hdlc/ioctl.h>
1696
1697 +/* For glibc compatibility. An empty enum does not compile. */
1698 +#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && \
1699 + __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0
1700 /**
1701 * enum net_device_flags - &struct net_device flags
1702 *
1703 @@ -68,6 +74,8 @@
1704 * @IFF_ECHO: echo sent packets. Volatile.
1705 */
1706 enum net_device_flags {
1707 +/* for compatibility with glibc net/if.h */
1708 +#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
1709 IFF_UP = 1<<0, /* sysfs */
1710 IFF_BROADCAST = 1<<1, /* volatile */
1711 IFF_DEBUG = 1<<2, /* sysfs */
1712 @@ -84,11 +92,17 @@ enum net_device_flags {
1713 IFF_PORTSEL = 1<<13, /* sysfs */
1714 IFF_AUTOMEDIA = 1<<14, /* sysfs */
1715 IFF_DYNAMIC = 1<<15, /* sysfs */
1716 +#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
1717 +#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
1718 IFF_LOWER_UP = 1<<16, /* volatile */
1719 IFF_DORMANT = 1<<17, /* volatile */
1720 IFF_ECHO = 1<<18, /* volatile */
1721 +#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
1722 };
1723 +#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */
1724
1725 +/* for compatibility with glibc net/if.h */
1726 +#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
1727 #define IFF_UP IFF_UP
1728 #define IFF_BROADCAST IFF_BROADCAST
1729 #define IFF_DEBUG IFF_DEBUG
1730 @@ -105,9 +119,13 @@ enum net_device_flags {
1731 #define IFF_PORTSEL IFF_PORTSEL
1732 #define IFF_AUTOMEDIA IFF_AUTOMEDIA
1733 #define IFF_DYNAMIC IFF_DYNAMIC
1734 +#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
1735 +
1736 +#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
1737 #define IFF_LOWER_UP IFF_LOWER_UP
1738 #define IFF_DORMANT IFF_DORMANT
1739 #define IFF_ECHO IFF_ECHO
1740 +#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
1741
1742 #define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\
1743 IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
1744 @@ -166,6 +184,8 @@ enum {
1745 * being very small might be worth keeping for clean configuration.
1746 */
1747
1748 +/* for compatibility with glibc net/if.h */
1749 +#if __UAPI_DEF_IF_IFMAP
1750 struct ifmap {
1751 unsigned long mem_start;
1752 unsigned long mem_end;
1753 @@ -175,6 +195,7 @@ struct ifmap {
1754 unsigned char port;
1755 /* 3 bytes spare */
1756 };
1757 +#endif /* __UAPI_DEF_IF_IFMAP */
1758
1759 struct if_settings {
1760 unsigned int type; /* Type of physical device or protocol */
1761 @@ -200,6 +221,8 @@ struct if_settings {
1762 * remainder may be interface specific.
1763 */
1764
1765 +/* for compatibility with glibc net/if.h */
1766 +#if __UAPI_DEF_IF_IFREQ
1767 struct ifreq {
1768 #define IFHWADDRLEN 6
1769 union
1770 @@ -223,6 +246,7 @@ struct ifreq {
1771 struct if_settings ifru_settings;
1772 } ifr_ifru;
1773 };
1774 +#endif /* __UAPI_DEF_IF_IFREQ */
1775
1776 #define ifr_name ifr_ifrn.ifrn_name /* interface name */
1777 #define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */
1778 @@ -249,6 +273,8 @@ struct ifreq {
1779 * must know all networks accessible).
1780 */
1781
1782 +/* for compatibility with glibc net/if.h */
1783 +#if __UAPI_DEF_IF_IFCONF
1784 struct ifconf {
1785 int ifc_len; /* size of buffer */
1786 union {
1787 @@ -256,6 +282,8 @@ struct ifconf {
1788 struct ifreq __user *ifcu_req;
1789 } ifc_ifcu;
1790 };
1791 +#endif /* __UAPI_DEF_IF_IFCONF */
1792 +
1793 #define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */
1794 #define ifc_req ifc_ifcu.ifcu_req /* array of structures */
1795
1796 diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
1797 index 7d024ceb075d..d5e38c73377c 100644
1798 --- a/include/uapi/linux/libc-compat.h
1799 +++ b/include/uapi/linux/libc-compat.h
1800 @@ -51,6 +51,40 @@
1801 /* We have included glibc headers... */
1802 #if defined(__GLIBC__)
1803
1804 +/* Coordinate with glibc net/if.h header. */
1805 +#if defined(_NET_IF_H)
1806 +
1807 +/* GLIBC headers included first so don't define anything
1808 + * that would already be defined. */
1809 +
1810 +#define __UAPI_DEF_IF_IFCONF 0
1811 +#define __UAPI_DEF_IF_IFMAP 0
1812 +#define __UAPI_DEF_IF_IFNAMSIZ 0
1813 +#define __UAPI_DEF_IF_IFREQ 0
1814 +/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
1815 +#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0
1816 +/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
1817 +#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
1818 +#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
1819 +#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
1820 +
1821 +#else /* _NET_IF_H */
1822 +
1823 +/* Linux headers included first, and we must define everything
1824 + * we need. The expectation is that glibc will check the
1825 + * __UAPI_DEF_* defines and adjust appropriately. */
1826 +
1827 +#define __UAPI_DEF_IF_IFCONF 1
1828 +#define __UAPI_DEF_IF_IFMAP 1
1829 +#define __UAPI_DEF_IF_IFNAMSIZ 1
1830 +#define __UAPI_DEF_IF_IFREQ 1
1831 +/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
1832 +#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
1833 +/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
1834 +#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
1835 +
1836 +#endif /* _NET_IF_H */
1837 +
1838 /* Coordinate with glibc netinet/in.h header. */
1839 #if defined(_NETINET_IN_H)
1840
1841 @@ -117,6 +151,16 @@
1842 * that we need. */
1843 #else /* !defined(__GLIBC__) */
1844
1845 +/* Definitions for if.h */
1846 +#define __UAPI_DEF_IF_IFCONF 1
1847 +#define __UAPI_DEF_IF_IFMAP 1
1848 +#define __UAPI_DEF_IF_IFNAMSIZ 1
1849 +#define __UAPI_DEF_IF_IFREQ 1
1850 +/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
1851 +#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
1852 +/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
1853 +#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
1854 +
1855 /* Definitions for in.h */
1856 #define __UAPI_DEF_IN_ADDR 1
1857 #define __UAPI_DEF_IN_IPPROTO 1
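The libc-compat.h machinery makes <linux/if.h> and glibc's <net/if.h> coexist: whichever header is included first wins, and the __UAPI_DEF_IF_* guards suppress the second header's duplicate definitions. A userspace sketch of the glibc-first order (the kernel-first order additionally assumes a glibc that honors the __UAPI_DEF_* convention):

    #include <string.h>
    #include <net/if.h>    /* glibc: struct ifreq, struct ifconf, IFF_UP ... */
    #include <linux/if.h>  /* duplicate definitions now elided, no clash */

    int main(void)
    {
            struct ifreq req;               /* a single definition */
            memset(&req, 0, sizeof(req));
            return IFF_UP == 0x1 ? 0 : 1;   /* same value either way */
    }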
1858 diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
1859 index 5a8a797d50b7..d1a7646f79c5 100644
1860 --- a/kernel/bpf/inode.c
1861 +++ b/kernel/bpf/inode.c
1862 @@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
1863 {
1864 switch (type) {
1865 case BPF_TYPE_PROG:
1866 - atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
1867 + raw = bpf_prog_inc(raw);
1868 break;
1869 case BPF_TYPE_MAP:
1870 - bpf_map_inc(raw, true);
1871 + raw = bpf_map_inc(raw, true);
1872 break;
1873 default:
1874 WARN_ON_ONCE(1);
1875 @@ -277,7 +277,8 @@ static void *bpf_obj_do_get(const struct filename *pathname,
1876 goto out;
1877
1878 raw = bpf_any_get(inode->i_private, *type);
1879 - touch_atime(&path);
1880 + if (!IS_ERR(raw))
1881 + touch_atime(&path);
1882
1883 path_put(&path);
1884 return raw;
1885 diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
1886 index 3b39550d8485..4e32cc94edd9 100644
1887 --- a/kernel/bpf/syscall.c
1888 +++ b/kernel/bpf/syscall.c
1889 @@ -181,11 +181,18 @@ struct bpf_map *__bpf_map_get(struct fd f)
1890 return f.file->private_data;
1891 }
1892
1893 -void bpf_map_inc(struct bpf_map *map, bool uref)
1894 +/* prog's and map's refcnt limit */
1895 +#define BPF_MAX_REFCNT 32768
1896 +
1897 +struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
1898 {
1899 - atomic_inc(&map->refcnt);
1900 + if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
1901 + atomic_dec(&map->refcnt);
1902 + return ERR_PTR(-EBUSY);
1903 + }
1904 if (uref)
1905 atomic_inc(&map->usercnt);
1906 + return map;
1907 }
1908
1909 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
1910 @@ -197,7 +204,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
1911 if (IS_ERR(map))
1912 return map;
1913
1914 - bpf_map_inc(map, true);
1915 + map = bpf_map_inc(map, true);
1916 fdput(f);
1917
1918 return map;
1919 @@ -580,6 +587,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
1920 return f.file->private_data;
1921 }
1922
1923 +struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
1924 +{
1925 + if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
1926 + atomic_dec(&prog->aux->refcnt);
1927 + return ERR_PTR(-EBUSY);
1928 + }
1929 + return prog;
1930 +}
1931 +
1932 /* called by sockets/tracing/seccomp before attaching program to an event
1933 * pairs with bpf_prog_put()
1934 */
1935 @@ -592,7 +608,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
1936 if (IS_ERR(prog))
1937 return prog;
1938
1939 - atomic_inc(&prog->aux->refcnt);
1940 + prog = bpf_prog_inc(prog);
1941 fdput(f);
1942
1943 return prog;
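bpf_prog_inc() and bpf_map_inc() now cap the refcount at BPF_MAX_REFCNT (32768) and return ERR_PTR(-EBUSY) instead of silently wrapping, turning a potential refcount overflow into a recoverable error. Every acquire site must therefore check the returned pointer; a sketch of the new calling convention:

    struct bpf_map *m;

    m = bpf_map_inc(map, true);
    if (IS_ERR(m))
            return PTR_ERR(m);   /* -EBUSY once the cap would be exceeded */
    /* ... use m, released later via bpf_map_put_with_uref() ... */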
1944 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
1945 index 2e7f7ab739e4..2cbfba78d3db 100644
1946 --- a/kernel/bpf/verifier.c
1947 +++ b/kernel/bpf/verifier.c
1948 @@ -239,15 +239,6 @@ static const char * const reg_type_str[] = {
1949 [CONST_IMM] = "imm",
1950 };
1951
1952 -static const struct {
1953 - int map_type;
1954 - int func_id;
1955 -} func_limit[] = {
1956 - {BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
1957 - {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
1958 - {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
1959 -};
1960 -
1961 static void print_verifier_state(struct verifier_env *env)
1962 {
1963 enum bpf_reg_type t;
1964 @@ -898,24 +889,44 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
1965
1966 static int check_map_func_compatibility(struct bpf_map *map, int func_id)
1967 {
1968 - bool bool_map, bool_func;
1969 - int i;
1970 -
1971 if (!map)
1972 return 0;
1973
1974 - for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
1975 - bool_map = (map->map_type == func_limit[i].map_type);
1976 - bool_func = (func_id == func_limit[i].func_id);
1977 - /* only when map & func pair match it can continue.
1978 - * don't allow any other map type to be passed into
1979 - * the special func;
1980 - */
1981 - if (bool_func && bool_map != bool_func)
1982 - return -EINVAL;
1983 + /* We need a two way check, first is from map perspective ... */
1984 + switch (map->map_type) {
1985 + case BPF_MAP_TYPE_PROG_ARRAY:
1986 + if (func_id != BPF_FUNC_tail_call)
1987 + goto error;
1988 + break;
1989 + case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1990 + if (func_id != BPF_FUNC_perf_event_read &&
1991 + func_id != BPF_FUNC_perf_event_output)
1992 + goto error;
1993 + break;
1994 + default:
1995 + break;
1996 + }
1997 +
1998 + /* ... and second from the function itself. */
1999 + switch (func_id) {
2000 + case BPF_FUNC_tail_call:
2001 + if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
2002 + goto error;
2003 + break;
2004 + case BPF_FUNC_perf_event_read:
2005 + case BPF_FUNC_perf_event_output:
2006 + if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
2007 + goto error;
2008 + break;
2009 + default:
2010 + break;
2011 }
2012
2013 return 0;
2014 +error:
2015 + verbose("cannot pass map_type %d into func %d\n",
2016 + map->map_type, func_id);
2017 + return -EINVAL;
2018 }
2019
2020 static int check_call(struct verifier_env *env, int func_id)
2021 @@ -1348,6 +1359,7 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
2022 }
2023
2024 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
2025 + BPF_SIZE(insn->code) == BPF_DW ||
2026 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
2027 verbose("BPF_LD_ABS uses reserved fields\n");
2028 return -EINVAL;
2029 @@ -2003,7 +2015,6 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
2030 if (IS_ERR(map)) {
2031 verbose("fd %d is not pointing to valid bpf_map\n",
2032 insn->imm);
2033 - fdput(f);
2034 return PTR_ERR(map);
2035 }
2036
2037 @@ -2023,15 +2034,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
2038 return -E2BIG;
2039 }
2040
2041 - /* remember this map */
2042 - env->used_maps[env->used_map_cnt++] = map;
2043 -
2044 /* hold the map. If the program is rejected by verifier,
2045 * the map will be released by release_maps() or it
2046 * will be used by the valid program until it's unloaded
2047 * and all maps are released in free_bpf_prog_info()
2048 */
2049 - bpf_map_inc(map, false);
2050 + map = bpf_map_inc(map, false);
2051 + if (IS_ERR(map)) {
2052 + fdput(f);
2053 + return PTR_ERR(map);
2054 + }
2055 + env->used_maps[env->used_map_cnt++] = map;
2056 +
2057 fdput(f);
2058 next_insn:
2059 insn++;
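Replacing the func_limit[] table with two switch statements makes the map/helper pairing symmetric: a special map type may only be used with its dedicated helper, and a dedicated helper may only see its map type. The map-side direction is the new part, so e.g. a generic lookup on a prog array no longer slips through. An assumed BPF-C fragment the verifier now rejects:

    /* Assumed snippet: jmp_table is BPF_MAP_TYPE_PROG_ARRAY, which is now
     * restricted to bpf_tail_call(); a plain lookup fails to load with an
     * error like "cannot pass map_type 3 into func ...". */
    u32 key = 0;
    void *slot = bpf_map_lookup_elem(&jmp_table, &key);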
2060 diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
2061 index adfdc0536117..014b69528194 100644
2062 --- a/kernel/events/ring_buffer.c
2063 +++ b/kernel/events/ring_buffer.c
2064 @@ -347,6 +347,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
2065 bool truncated)
2066 {
2067 struct ring_buffer *rb = handle->rb;
2068 + bool wakeup = truncated;
2069 unsigned long aux_head;
2070 u64 flags = 0;
2071
2072 @@ -375,9 +376,16 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
2073 aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
2074
2075 if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
2076 - perf_output_wakeup(handle);
2077 + wakeup = true;
2078 local_add(rb->aux_watermark, &rb->aux_wakeup);
2079 }
2080 +
2081 + if (wakeup) {
2082 + if (truncated)
2083 + handle->event->pending_disable = 1;
2084 + perf_output_wakeup(handle);
2085 + }
2086 +
2087 handle->event = NULL;
2088
2089 local_set(&rb->aux_nest, 0);
2090 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
2091 index 0ec05948a97b..2c2f971f3e75 100644
2092 --- a/kernel/workqueue.c
2093 +++ b/kernel/workqueue.c
2094 @@ -4457,6 +4457,17 @@ static void rebind_workers(struct worker_pool *pool)
2095 pool->attrs->cpumask) < 0);
2096
2097 spin_lock_irq(&pool->lock);
2098 +
2099 + /*
2100 + * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
2101 + * w/o preceding DOWN_PREPARE. Work around it. CPU hotplug is
2102 + * being reworked and this can go away in time.
2103 + */
2104 + if (!(pool->flags & POOL_DISASSOCIATED)) {
2105 + spin_unlock_irq(&pool->lock);
2106 + return;
2107 + }
2108 +
2109 pool->flags &= ~POOL_DISASSOCIATED;
2110
2111 for_each_pool_worker(worker, pool) {
2112 diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
2113 index fc083996e40a..c1ea19478119 100644
2114 --- a/mm/zsmalloc.c
2115 +++ b/mm/zsmalloc.c
2116 @@ -1732,10 +1732,13 @@ static struct page *isolate_source_page(struct size_class *class)
2117 static unsigned long zs_can_compact(struct size_class *class)
2118 {
2119 unsigned long obj_wasted;
2120 + unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
2121 + unsigned long obj_used = zs_stat_get(class, OBJ_USED);
2122
2123 - obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) -
2124 - zs_stat_get(class, OBJ_USED);
2125 + if (obj_allocated <= obj_used)
2126 + return 0;
2127
2128 + obj_wasted = obj_allocated - obj_used;
2129 obj_wasted /= get_maxobj_per_zspage(class->size,
2130 class->pages_per_zspage);
2131
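The zs_stat_get() reads are not synchronized against concurrent allocation, so OBJ_USED can transiently exceed OBJ_ALLOCATED; the old unsigned subtraction then wrapped and made zs_can_compact() report an enormous amount of reclaimable space. A minimal illustration of the wraparound the new guard prevents:

    unsigned long allocated = 5, used = 7;
    unsigned long wasted = allocated - used;
    /* wasted == ULONG_MAX - 1, not -2: unsigned arithmetic wraps, so the
     * compaction logic would see roughly 2^64 "wasted" objects. */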
2132 diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
2133 index 263b4de4de57..60a3dbfca8a1 100644
2134 --- a/net/bridge/br_ioctl.c
2135 +++ b/net/bridge/br_ioctl.c
2136 @@ -21,18 +21,19 @@
2137 #include <asm/uaccess.h>
2138 #include "br_private.h"
2139
2140 -/* called with RTNL */
2141 static int get_bridge_ifindices(struct net *net, int *indices, int num)
2142 {
2143 struct net_device *dev;
2144 int i = 0;
2145
2146 - for_each_netdev(net, dev) {
2147 + rcu_read_lock();
2148 + for_each_netdev_rcu(net, dev) {
2149 if (i >= num)
2150 break;
2151 if (dev->priv_flags & IFF_EBRIDGE)
2152 indices[i++] = dev->ifindex;
2153 }
2154 + rcu_read_unlock();
2155
2156 return i;
2157 }
2158 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
2159 index 03661d97463c..ea9893743a0f 100644
2160 --- a/net/bridge/br_multicast.c
2161 +++ b/net/bridge/br_multicast.c
2162 @@ -1270,6 +1270,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
2163 struct br_ip saddr;
2164 unsigned long max_delay;
2165 unsigned long now = jiffies;
2166 + unsigned int offset = skb_transport_offset(skb);
2167 __be32 group;
2168 int err = 0;
2169
2170 @@ -1280,14 +1281,14 @@ static int br_ip4_multicast_query(struct net_bridge *br,
2171
2172 group = ih->group;
2173
2174 - if (skb->len == sizeof(*ih)) {
2175 + if (skb->len == offset + sizeof(*ih)) {
2176 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
2177
2178 if (!max_delay) {
2179 max_delay = 10 * HZ;
2180 group = 0;
2181 }
2182 - } else if (skb->len >= sizeof(*ih3)) {
2183 + } else if (skb->len >= offset + sizeof(*ih3)) {
2184 ih3 = igmpv3_query_hdr(skb);
2185 if (ih3->nsrcs)
2186 goto out;
2187 @@ -1348,6 +1349,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
2188 struct br_ip saddr;
2189 unsigned long max_delay;
2190 unsigned long now = jiffies;
2191 + unsigned int offset = skb_transport_offset(skb);
2192 const struct in6_addr *group = NULL;
2193 bool is_general_query;
2194 int err = 0;
2195 @@ -1357,8 +1359,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
2196 (port && port->state == BR_STATE_DISABLED))
2197 goto out;
2198
2199 - if (skb->len == sizeof(*mld)) {
2200 - if (!pskb_may_pull(skb, sizeof(*mld))) {
2201 + if (skb->len == offset + sizeof(*mld)) {
2202 + if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
2203 err = -EINVAL;
2204 goto out;
2205 }
2206 @@ -1367,7 +1369,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
2207 if (max_delay)
2208 group = &mld->mld_mca;
2209 } else {
2210 - if (!pskb_may_pull(skb, sizeof(*mld2q))) {
2211 + if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
2212 err = -EINVAL;
2213 goto out;
2214 }
2215 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
2216 index ca966f7de351..87b91ffbdec3 100644
2217 --- a/net/core/rtnetlink.c
2218 +++ b/net/core/rtnetlink.c
2219 @@ -1175,14 +1175,16 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
2220
2221 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
2222 {
2223 - struct rtnl_link_ifmap map = {
2224 - .mem_start = dev->mem_start,
2225 - .mem_end = dev->mem_end,
2226 - .base_addr = dev->base_addr,
2227 - .irq = dev->irq,
2228 - .dma = dev->dma,
2229 - .port = dev->if_port,
2230 - };
2231 + struct rtnl_link_ifmap map;
2232 +
2233 + memset(&map, 0, sizeof(map));
2234 + map.mem_start = dev->mem_start;
2235 + map.mem_end = dev->mem_end;
2236 + map.base_addr = dev->base_addr;
2237 + map.irq = dev->irq;
2238 + map.dma = dev->dma;
2239 + map.port = dev->if_port;
2240 +
2241 if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
2242 return -EMSGSIZE;
2243
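Switching rtnl_fill_link_ifmap() from a designated initializer to memset() plus assignments is an infoleak fix: a partial initializer only guarantees values for the named members, while padding bytes inside struct rtnl_link_ifmap may keep stale stack contents that nla_put() would then copy to userspace. The difference, compactly:

    struct rtnl_link_ifmap a = { .irq = dev->irq };  /* padding: unspecified */

    struct rtnl_link_ifmap b;
    memset(&b, 0, sizeof(b));                        /* padding: zeroed */
    b.irq = dev->irq;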
2244 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2245 index 8616d1147c93..9835d9a8a7a4 100644
2246 --- a/net/core/skbuff.c
2247 +++ b/net/core/skbuff.c
2248 @@ -4427,15 +4427,16 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
2249 __skb_push(skb, offset);
2250 err = __vlan_insert_tag(skb, skb->vlan_proto,
2251 skb_vlan_tag_get(skb));
2252 - if (err)
2253 + if (err) {
2254 + __skb_pull(skb, offset);
2255 return err;
2256 + }
2257 +
2258 skb->protocol = skb->vlan_proto;
2259 skb->mac_len += VLAN_HLEN;
2260 - __skb_pull(skb, offset);
2261
2262 - if (skb->ip_summed == CHECKSUM_COMPLETE)
2263 - skb->csum = csum_add(skb->csum, csum_partial(skb->data
2264 - + (2 * ETH_ALEN), VLAN_HLEN, 0));
2265 + skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
2266 + __skb_pull(skb, offset);
2267 }
2268 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
2269 return 0;
2270 diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
2271 index 607a14f20d88..b1dc096d22f8 100644
2272 --- a/net/decnet/dn_route.c
2273 +++ b/net/decnet/dn_route.c
2274 @@ -1034,10 +1034,13 @@ source_ok:
2275 if (!fld.daddr) {
2276 fld.daddr = fld.saddr;
2277
2278 - err = -EADDRNOTAVAIL;
2279 if (dev_out)
2280 dev_put(dev_out);
2281 + err = -EINVAL;
2282 dev_out = init_net.loopback_dev;
2283 + if (!dev_out->dn_ptr)
2284 + goto out;
2285 + err = -EADDRNOTAVAIL;
2286 dev_hold(dev_out);
2287 if (!fld.daddr) {
2288 fld.daddr =
2289 @@ -1110,6 +1113,8 @@ source_ok:
2290 if (dev_out == NULL)
2291 goto out;
2292 dn_db = rcu_dereference_raw(dev_out->dn_ptr);
2293 + if (!dn_db)
2294 + goto e_inval;
2295 /* Possible improvement - check all devices for local addr */
2296 if (dn_dev_islocal(dev_out, fld.daddr)) {
2297 dev_put(dev_out);
2298 @@ -1151,6 +1156,8 @@ select_source:
2299 dev_put(dev_out);
2300 dev_out = init_net.loopback_dev;
2301 dev_hold(dev_out);
2302 + if (!dev_out->dn_ptr)
2303 + goto e_inval;
2304 fld.flowidn_oif = dev_out->ifindex;
2305 if (res.fi)
2306 dn_fib_info_put(res.fi);
2307 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
2308 index 8a9246deccfe..63566ec54794 100644
2309 --- a/net/ipv4/fib_frontend.c
2310 +++ b/net/ipv4/fib_frontend.c
2311 @@ -904,7 +904,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
2312 if (ifa->ifa_flags & IFA_F_SECONDARY) {
2313 prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
2314 if (!prim) {
2315 - pr_warn("%s: bug: prim == NULL\n", __func__);
2316 + /* if the device has been deleted, we don't perform
2317 + * address promotion
2318 + */
2319 + if (!in_dev->dead)
2320 + pr_warn("%s: bug: prim == NULL\n", __func__);
2321 return;
2322 }
2323 if (iprim && iprim != prim) {
2324 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
2325 index d97268e8ff10..2b68418c7198 100644
2326 --- a/net/ipv4/fib_semantics.c
2327 +++ b/net/ipv4/fib_semantics.c
2328 @@ -975,6 +975,8 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
2329 val = 65535 - 40;
2330 if (type == RTAX_MTU && val > 65535 - 15)
2331 val = 65535 - 15;
2332 + if (type == RTAX_HOPLIMIT && val > 255)
2333 + val = 255;
2334 if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
2335 return -EINVAL;
2336 fi->fib_metrics[type - 1] = val;
2337 diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
2338 index 614521437e30..7dc962b89fa1 100644
2339 --- a/net/ipv4/ip_gre.c
2340 +++ b/net/ipv4/ip_gre.c
2341 @@ -180,6 +180,7 @@ static __be16 tnl_flags_to_gre_flags(__be16 tflags)
2342 return flags;
2343 }
2344
2345 +/* Fills in tpi and returns header length to be pulled. */
2346 static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
2347 bool *csum_err)
2348 {
2349 @@ -239,7 +240,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
2350 return -EINVAL;
2351 }
2352 }
2353 - return iptunnel_pull_header(skb, hdr_len, tpi->proto);
2354 + return hdr_len;
2355 }
2356
2357 static void ipgre_err(struct sk_buff *skb, u32 info,
2358 @@ -342,7 +343,7 @@ static void gre_err(struct sk_buff *skb, u32 info)
2359 struct tnl_ptk_info tpi;
2360 bool csum_err = false;
2361
2362 - if (parse_gre_header(skb, &tpi, &csum_err)) {
2363 + if (parse_gre_header(skb, &tpi, &csum_err) < 0) {
2364 if (!csum_err) /* ignore csum errors. */
2365 return;
2366 }
2367 @@ -420,6 +421,7 @@ static int gre_rcv(struct sk_buff *skb)
2368 {
2369 struct tnl_ptk_info tpi;
2370 bool csum_err = false;
2371 + int hdr_len;
2372
2373 #ifdef CONFIG_NET_IPGRE_BROADCAST
2374 if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
2375 @@ -429,7 +431,10 @@ static int gre_rcv(struct sk_buff *skb)
2376 }
2377 #endif
2378
2379 - if (parse_gre_header(skb, &tpi, &csum_err) < 0)
2380 + hdr_len = parse_gre_header(skb, &tpi, &csum_err);
2381 + if (hdr_len < 0)
2382 + goto drop;
2383 + if (iptunnel_pull_header(skb, hdr_len, tpi.proto) < 0)
2384 goto drop;
2385
2386 if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
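parse_gre_header() no longer pulls the header itself; it only validates and returns the header length, leaving the pull to the caller. That keeps the ICMP error path in gre_err() from consuming bytes off a packet it does not own, and lets gre_rcv() inspect the outer IP header before anything is pulled. The new caller contract, condensed from the gre_rcv() hunk above:

    hdr_len = parse_gre_header(skb, &tpi, &csum_err);
    if (hdr_len < 0)
            goto drop;                              /* malformed header */
    if (iptunnel_pull_header(skb, hdr_len, tpi.proto) < 0)
            goto drop;                              /* pull failed */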
2387 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
2388 index 02c62299d717..b050cf980a57 100644
2389 --- a/net/ipv4/route.c
2390 +++ b/net/ipv4/route.c
2391 @@ -2045,6 +2045,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2392 */
2393 if (fi && res->prefixlen < 4)
2394 fi = NULL;
2395 + } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2396 + (orig_oif != dev_out->ifindex)) {
2397 + /* For local routes that require a particular output interface
2398 + * we do not want to cache the result. Caching the result
2399 + * causes incorrect behaviour when there are multiple source
2400 + * addresses on the interface, the end result being that if the
2401 + * intended recipient is waiting on that interface for the
2402 + * packet he won't receive it because it will be delivered on
2403 + * the loopback interface and the IP_PKTINFO ipi_ifindex will
2404 + * be set to the loopback interface as well.
2405 + */
2406 + fi = NULL;
2407 }
2408
2409 fnhe = NULL;
2410 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
2411 index 9bfc39ff2285..7c9883ab56e5 100644
2412 --- a/net/ipv4/tcp_output.c
2413 +++ b/net/ipv4/tcp_output.c
2414 @@ -2625,8 +2625,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2415 */
2416 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
2417 skb_headroom(skb) >= 0xFFFF)) {
2418 - struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
2419 - GFP_ATOMIC);
2420 + struct sk_buff *nskb;
2421 +
2422 + skb_mstamp_get(&skb->skb_mstamp);
2423 + nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
2424 err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2425 -ENOBUFS;
2426 } else {
2427 diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
2428 index 45f5ae51de65..a234552a7e3d 100644
2429 --- a/net/ipv6/reassembly.c
2430 +++ b/net/ipv6/reassembly.c
2431 @@ -496,10 +496,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
2432 IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
2433
2434 /* Yes, and fold redundant checksum back. 8) */
2435 - if (head->ip_summed == CHECKSUM_COMPLETE)
2436 - head->csum = csum_partial(skb_network_header(head),
2437 - skb_network_header_len(head),
2438 - head->csum);
2439 + skb_postpush_rcsum(head, skb_network_header(head),
2440 + skb_network_header_len(head));
2441
2442 rcu_read_lock();
2443 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
2444 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2445 index 3f164d3aaee2..5af2cca0a46d 100644
2446 --- a/net/ipv6/route.c
2447 +++ b/net/ipv6/route.c
2448 @@ -1727,6 +1727,8 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
2449 } else {
2450 val = nla_get_u32(nla);
2451 }
2452 + if (type == RTAX_HOPLIMIT && val > 255)
2453 + val = 255;
2454 if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
2455 goto err;
2456
2457 diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
2458 index 8dab4e569571..bb8edb9ef506 100644
2459 --- a/net/llc/af_llc.c
2460 +++ b/net/llc/af_llc.c
2461 @@ -626,6 +626,7 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
2462 if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
2463 struct llc_pktinfo info;
2464
2465 + memset(&info, 0, sizeof(info));
2466 info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
2467 llc_pdu_decode_dsap(skb, &info.lpi_sap);
2468 llc_pdu_decode_da(skb, info.lpi_mac);
2469 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
2470 index 3cb3cb831591..86a3c6f0c871 100644
2471 --- a/net/netfilter/nf_conntrack_core.c
2472 +++ b/net/netfilter/nf_conntrack_core.c
2473 @@ -1757,6 +1757,7 @@ void nf_conntrack_init_end(void)
2474
2475 int nf_conntrack_init_net(struct net *net)
2476 {
2477 + static atomic64_t unique_id;
2478 int ret = -ENOMEM;
2479 int cpu;
2480
2481 @@ -1779,7 +1780,8 @@ int nf_conntrack_init_net(struct net *net)
2482 if (!net->ct.stat)
2483 goto err_pcpu_lists;
2484
2485 - net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
2486 + net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%llu",
2487 + (u64)atomic64_inc_return(&unique_id));
2488 if (!net->ct.slabname)
2489 goto err_slabname;
2490
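Naming the per-netns conntrack slab cache after the struct net pointer ("nf_conntrack_%p") exposed a kernel address through the cache name visible in sysfs/slabinfo, and could plausibly collide if a freed netns address were quickly reused; a monotonically increasing counter avoids both. The pattern reduced to its core:

    static atomic64_t unique_id;     /* one counter across all namespaces */

    net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%llu",
                                 (u64)atomic64_inc_return(&unique_id));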
2491 diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
2492 index c88d0f2d3e01..7cb8184ac165 100644
2493 --- a/net/openvswitch/actions.c
2494 +++ b/net/openvswitch/actions.c
2495 @@ -158,9 +158,7 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
2496 new_mpls_lse = (__be32 *)skb_mpls_header(skb);
2497 *new_mpls_lse = mpls->mpls_lse;
2498
2499 - if (skb->ip_summed == CHECKSUM_COMPLETE)
2500 - skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
2501 - MPLS_HLEN, 0));
2502 + skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);
2503
2504 hdr = eth_hdr(skb);
2505 hdr->h_proto = mpls->mpls_ethertype;
2506 @@ -280,7 +278,7 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
2507 ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
2508 mask->eth_dst);
2509
2510 - ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
2511 + skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
2512
2513 ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
2514 ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
2515 @@ -463,7 +461,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
2516 mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
2517
2518 if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
2519 - set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
2520 + set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
2521 true);
2522 memcpy(&flow_key->ipv6.addr.src, masked,
2523 sizeof(flow_key->ipv6.addr.src));
2524 @@ -485,7 +483,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
2525 NULL, &flags)
2526 != NEXTHDR_ROUTING);
2527
2528 - set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
2529 + set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
2530 recalc_csum);
2531 memcpy(&flow_key->ipv6.addr.dst, masked,
2532 sizeof(flow_key->ipv6.addr.dst));
2533 @@ -639,7 +637,7 @@ static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *sk
2534 /* Reconstruct the MAC header. */
2535 skb_push(skb, data->l2_len);
2536 memcpy(skb->data, &data->l2_data, data->l2_len);
2537 - ovs_skb_postpush_rcsum(skb, skb->data, data->l2_len);
2538 + skb_postpush_rcsum(skb, skb->data, data->l2_len);
2539 skb_reset_mac_header(skb);
2540
2541 ovs_vport_send(vport, skb);
2542 diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
2543 index 6b0190b987ec..76fcaf1fd2a9 100644
2544 --- a/net/openvswitch/vport-netdev.c
2545 +++ b/net/openvswitch/vport-netdev.c
2546 @@ -58,7 +58,7 @@ static void netdev_port_receive(struct sk_buff *skb)
2547 return;
2548
2549 skb_push(skb, ETH_HLEN);
2550 - ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
2551 + skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
2552 ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
2553 return;
2554 error:
2555 diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
2556 index 8ea3a96980ac..6e2b62f9d595 100644
2557 --- a/net/openvswitch/vport.h
2558 +++ b/net/openvswitch/vport.h
2559 @@ -184,13 +184,6 @@ static inline struct vport *vport_from_priv(void *priv)
2560 int ovs_vport_receive(struct vport *, struct sk_buff *,
2561 const struct ip_tunnel_info *);
2562
2563 -static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
2564 - const void *start, unsigned int len)
2565 -{
2566 - if (skb->ip_summed == CHECKSUM_COMPLETE)
2567 - skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
2568 -}
2569 -
2570 static inline const char *ovs_vport_name(struct vport *vport)
2571 {
2572 return vport->dev->name;
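All ovs_skb_postpush_rcsum() callers are converted to the generic skb_postpush_rcsum(), and the OVS-private copy is deleted; the skbuff.c and reassembly.c hunks above adopt the same helper for VLAN push and IPv6 reassembly. Semantically the generic helper matches the body being removed here:

    /* Fold just-pushed bytes back into a CHECKSUM_COMPLETE checksum */
    if (skb->ip_summed == CHECKSUM_COMPLETE)
            skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));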
2573 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2574 index da1ae0e13cb5..9cc7b512b472 100644
2575 --- a/net/packet/af_packet.c
2576 +++ b/net/packet/af_packet.c
2577 @@ -3436,6 +3436,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
2578 i->ifindex = mreq->mr_ifindex;
2579 i->alen = mreq->mr_alen;
2580 memcpy(i->addr, mreq->mr_address, i->alen);
2581 + memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
2582 i->count = 1;
2583 i->next = po->mclist;
2584 po->mclist = i;
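packet_mc_add() copies only mr_alen bytes into the fixed-size addr[] array, so the tail of the buffer previously held uninitialized heap data that could later reach userspace (reportedly via the sock_diag multicast-list dump); zeroing the remainder closes the leak. The fix in isolation:

    memcpy(i->addr, mreq->mr_address, i->alen);
    memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);  /* clear tail */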
2585 diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
2586 index af1acf009866..95b560f0b253 100644
2587 --- a/net/sched/sch_api.c
2588 +++ b/net/sched/sch_api.c
2589 @@ -744,14 +744,15 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
2590 return 0;
2591 }
2592
2593 -void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
2594 +void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
2595 + unsigned int len)
2596 {
2597 const struct Qdisc_class_ops *cops;
2598 unsigned long cl;
2599 u32 parentid;
2600 int drops;
2601
2602 - if (n == 0)
2603 + if (n == 0 && len == 0)
2604 return;
2605 drops = max_t(int, n, 0);
2606 rcu_read_lock();
2607 @@ -774,11 +775,12 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
2608 cops->put(sch, cl);
2609 }
2610 sch->q.qlen -= n;
2611 + sch->qstats.backlog -= len;
2612 __qdisc_qstats_drop(sch, drops);
2613 }
2614 rcu_read_unlock();
2615 }
2616 -EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
2617 +EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
2618
2619 static void notify_and_destroy(struct net *net, struct sk_buff *skb,
2620 struct nlmsghdr *n, u32 clid,
2621 diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
2622 index c538d9e4a8f6..baafddf229ce 100644
2623 --- a/net/sched/sch_cbq.c
2624 +++ b/net/sched/sch_cbq.c
2625 @@ -1624,13 +1624,8 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
2626 new->reshape_fail = cbq_reshape_fail;
2627 #endif
2628 }
2629 - sch_tree_lock(sch);
2630 - *old = cl->q;
2631 - cl->q = new;
2632 - qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
2633 - qdisc_reset(*old);
2634 - sch_tree_unlock(sch);
2635
2636 + *old = qdisc_replace(sch, new, &cl->q);
2637 return 0;
2638 }
2639
2640 @@ -1914,7 +1909,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
2641 {
2642 struct cbq_sched_data *q = qdisc_priv(sch);
2643 struct cbq_class *cl = (struct cbq_class *)arg;
2644 - unsigned int qlen;
2645 + unsigned int qlen, backlog;
2646
2647 if (cl->filters || cl->children || cl == &q->link)
2648 return -EBUSY;
2649 @@ -1922,8 +1917,9 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
2650 sch_tree_lock(sch);
2651
2652 qlen = cl->q->q.qlen;
2653 + backlog = cl->q->qstats.backlog;
2654 qdisc_reset(cl->q);
2655 - qdisc_tree_decrease_qlen(cl->q, qlen);
2656 + qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
2657
2658 if (cl->next_alive)
2659 cbq_deactivate_class(cl);
2660 diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
2661 index 5ffb8b8337c7..0a08c860eee4 100644
2662 --- a/net/sched/sch_choke.c
2663 +++ b/net/sched/sch_choke.c
2664 @@ -128,8 +128,8 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
2665 choke_zap_tail_holes(q);
2666
2667 qdisc_qstats_backlog_dec(sch, skb);
2668 + qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
2669 qdisc_drop(skb, sch);
2670 - qdisc_tree_decrease_qlen(sch, 1);
2671 --sch->q.qlen;
2672 }
2673
2674 @@ -456,6 +456,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
2675 old = q->tab;
2676 if (old) {
2677 unsigned int oqlen = sch->q.qlen, tail = 0;
2678 + unsigned dropped = 0;
2679
2680 while (q->head != q->tail) {
2681 struct sk_buff *skb = q->tab[q->head];
2682 @@ -467,11 +468,12 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
2683 ntab[tail++] = skb;
2684 continue;
2685 }
2686 + dropped += qdisc_pkt_len(skb);
2687 qdisc_qstats_backlog_dec(sch, skb);
2688 --sch->q.qlen;
2689 qdisc_drop(skb, sch);
2690 }
2691 - qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
2692 + qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
2693 q->head = 0;
2694 q->tail = tail;
2695 }
2696 diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
2697 index 535007d5f0b5..9b7e2980ee5c 100644
2698 --- a/net/sched/sch_codel.c
2699 +++ b/net/sched/sch_codel.c
2700 @@ -79,12 +79,13 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
2701
2702 skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
2703
2704 - /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
2705 + /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
2706 * or HTB crashes. Defer it for next round.
2707 */
2708 if (q->stats.drop_count && sch->q.qlen) {
2709 - qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
2710 + qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
2711 q->stats.drop_count = 0;
2712 + q->stats.drop_len = 0;
2713 }
2714 if (skb)
2715 qdisc_bstats_update(sch, skb);
2716 @@ -116,7 +117,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
2717 {
2718 struct codel_sched_data *q = qdisc_priv(sch);
2719 struct nlattr *tb[TCA_CODEL_MAX + 1];
2720 - unsigned int qlen;
2721 + unsigned int qlen, dropped = 0;
2722 int err;
2723
2724 if (!opt)
2725 @@ -156,10 +157,11 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
2726 while (sch->q.qlen > sch->limit) {
2727 struct sk_buff *skb = __skb_dequeue(&sch->q);
2728
2729 + dropped += qdisc_pkt_len(skb);
2730 qdisc_qstats_backlog_dec(sch, skb);
2731 qdisc_drop(skb, sch);
2732 }
2733 - qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
2734 + qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
2735
2736 sch_tree_unlock(sch);
2737 return 0;
2738 diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
2739 index f26bdea875c1..d6e3ad43cecb 100644
2740 --- a/net/sched/sch_drr.c
2741 +++ b/net/sched/sch_drr.c
2742 @@ -53,9 +53,10 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
2743 static void drr_purge_queue(struct drr_class *cl)
2744 {
2745 unsigned int len = cl->qdisc->q.qlen;
2746 + unsigned int backlog = cl->qdisc->qstats.backlog;
2747
2748 qdisc_reset(cl->qdisc);
2749 - qdisc_tree_decrease_qlen(cl->qdisc, len);
2750 + qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
2751 }
2752
2753 static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
2754 @@ -226,11 +227,7 @@ static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
2755 new = &noop_qdisc;
2756 }
2757
2758 - sch_tree_lock(sch);
2759 - drr_purge_queue(cl);
2760 - *old = cl->qdisc;
2761 - cl->qdisc = new;
2762 - sch_tree_unlock(sch);
2763 + *old = qdisc_replace(sch, new, &cl->qdisc);
2764 return 0;
2765 }
2766
2767 diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
2768 index f357f34d02d2..d0dff0cd8186 100644
2769 --- a/net/sched/sch_dsmark.c
2770 +++ b/net/sched/sch_dsmark.c
2771 @@ -73,13 +73,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
2772 new = &noop_qdisc;
2773 }
2774
2775 - sch_tree_lock(sch);
2776 - *old = p->q;
2777 - p->q = new;
2778 - qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
2779 - qdisc_reset(*old);
2780 - sch_tree_unlock(sch);
2781 -
2782 + *old = qdisc_replace(sch, new, &p->q);
2783 return 0;
2784 }
2785
2786 @@ -264,6 +258,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
2787 return err;
2788 }
2789
2790 + qdisc_qstats_backlog_inc(sch, skb);
2791 sch->q.qlen++;
2792
2793 return NET_XMIT_SUCCESS;
2794 @@ -286,6 +281,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
2795 return NULL;
2796
2797 qdisc_bstats_update(sch, skb);
2798 + qdisc_qstats_backlog_dec(sch, skb);
2799 sch->q.qlen--;
2800
2801 index = skb->tc_index & (p->indices - 1);
2802 @@ -401,6 +397,7 @@ static void dsmark_reset(struct Qdisc *sch)
2803
2804 pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
2805 qdisc_reset(p->q);
2806 + sch->qstats.backlog = 0;
2807 sch->q.qlen = 0;
2808 }
2809
2810 diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
2811 index 109b2322778f..3c6a47d66a04 100644
2812 --- a/net/sched/sch_fq.c
2813 +++ b/net/sched/sch_fq.c
2814 @@ -662,6 +662,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
2815 struct fq_sched_data *q = qdisc_priv(sch);
2816 struct nlattr *tb[TCA_FQ_MAX + 1];
2817 int err, drop_count = 0;
2818 + unsigned drop_len = 0;
2819 u32 fq_log;
2820
2821 if (!opt)
2822 @@ -736,10 +737,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
2823
2824 if (!skb)
2825 break;
2826 + drop_len += qdisc_pkt_len(skb);
2827 kfree_skb(skb);
2828 drop_count++;
2829 }
2830 - qdisc_tree_decrease_qlen(sch, drop_count);
2831 + qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
2832
2833 sch_tree_unlock(sch);
2834 return err;
2835 diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
2836 index 4c834e93dafb..d3fc8f9dd3d4 100644
2837 --- a/net/sched/sch_fq_codel.c
2838 +++ b/net/sched/sch_fq_codel.c
2839 @@ -175,7 +175,7 @@ static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
2840 static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
2841 {
2842 struct fq_codel_sched_data *q = qdisc_priv(sch);
2843 - unsigned int idx;
2844 + unsigned int idx, prev_backlog;
2845 struct fq_codel_flow *flow;
2846 int uninitialized_var(ret);
2847
2848 @@ -203,6 +203,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
2849 if (++sch->q.qlen <= sch->limit)
2850 return NET_XMIT_SUCCESS;
2851
2852 + prev_backlog = sch->qstats.backlog;
2853 q->drop_overlimit++;
2854 /* Return Congestion Notification only if we dropped a packet
2855 * from this flow.
2856 @@ -211,7 +212,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
2857 return NET_XMIT_CN;
2858
2859 /* As we dropped a packet, better let upper stack know this */
2860 - qdisc_tree_decrease_qlen(sch, 1);
2861 + qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
2862 return NET_XMIT_SUCCESS;
2863 }
2864
2865 @@ -241,6 +242,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
2866 struct fq_codel_flow *flow;
2867 struct list_head *head;
2868 u32 prev_drop_count, prev_ecn_mark;
2869 + unsigned int prev_backlog;
2870
2871 begin:
2872 head = &q->new_flows;
2873 @@ -259,6 +261,7 @@ begin:
2874
2875 prev_drop_count = q->cstats.drop_count;
2876 prev_ecn_mark = q->cstats.ecn_mark;
2877 + prev_backlog = sch->qstats.backlog;
2878
2879 skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
2880 dequeue);
2881 @@ -276,12 +279,14 @@ begin:
2882 }
2883 qdisc_bstats_update(sch, skb);
2884 flow->deficit -= qdisc_pkt_len(skb);
2885 - /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
2886 + /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
2887 * or HTB crashes. Defer it for next round.
2888 */
2889 if (q->cstats.drop_count && sch->q.qlen) {
2890 - qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
2891 + qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
2892 + q->cstats.drop_len);
2893 q->cstats.drop_count = 0;
2894 + q->cstats.drop_len = 0;
2895 }
2896 return skb;
2897 }
2898 @@ -372,11 +377,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
2899 while (sch->q.qlen > sch->limit) {
2900 struct sk_buff *skb = fq_codel_dequeue(sch);
2901
2902 + q->cstats.drop_len += qdisc_pkt_len(skb);
2903 kfree_skb(skb);
2904 q->cstats.drop_count++;
2905 }
2906 - qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
2907 + qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
2908 q->cstats.drop_count = 0;
2909 + q->cstats.drop_len = 0;
2910
2911 sch_tree_unlock(sch);
2912 return 0;
2913 diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
2914 index 16bc83b2842a..aa4725038f94 100644
2915 --- a/net/sched/sch_generic.c
2916 +++ b/net/sched/sch_generic.c
2917 @@ -159,12 +159,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
2918 if (validate)
2919 skb = validate_xmit_skb_list(skb, dev);
2920
2921 - if (skb) {
2922 + if (likely(skb)) {
2923 HARD_TX_LOCK(dev, txq, smp_processor_id());
2924 if (!netif_xmit_frozen_or_stopped(txq))
2925 skb = dev_hard_start_xmit(skb, dev, txq, &ret);
2926
2927 HARD_TX_UNLOCK(dev, txq);
2928 + } else {
2929 + spin_lock(root_lock);
2930 + return qdisc_qlen(q);
2931 }
2932 spin_lock(root_lock);
2933
2934 diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
2935 index b7ebe2c87586..d783d7cc3348 100644
2936 --- a/net/sched/sch_hfsc.c
2937 +++ b/net/sched/sch_hfsc.c
2938 @@ -895,9 +895,10 @@ static void
2939 hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
2940 {
2941 unsigned int len = cl->qdisc->q.qlen;
2942 + unsigned int backlog = cl->qdisc->qstats.backlog;
2943
2944 qdisc_reset(cl->qdisc);
2945 - qdisc_tree_decrease_qlen(cl->qdisc, len);
2946 + qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
2947 }
2948
2949 static void
2950 @@ -1215,11 +1216,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
2951 new = &noop_qdisc;
2952 }
2953
2954 - sch_tree_lock(sch);
2955 - hfsc_purge_queue(sch, cl);
2956 - *old = cl->qdisc;
2957 - cl->qdisc = new;
2958 - sch_tree_unlock(sch);
2959 + *old = qdisc_replace(sch, new, &cl->qdisc);
2960 return 0;
2961 }
2962
2963 diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
2964 index 86b04e31e60b..13d6f83ec491 100644
2965 --- a/net/sched/sch_hhf.c
2966 +++ b/net/sched/sch_hhf.c
2967 @@ -382,6 +382,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
2968 struct hhf_sched_data *q = qdisc_priv(sch);
2969 enum wdrr_bucket_idx idx;
2970 struct wdrr_bucket *bucket;
2971 + unsigned int prev_backlog;
2972
2973 idx = hhf_classify(skb, sch);
2974
2975 @@ -409,6 +410,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
2976 if (++sch->q.qlen <= sch->limit)
2977 return NET_XMIT_SUCCESS;
2978
2979 + prev_backlog = sch->qstats.backlog;
2980 q->drop_overlimit++;
2981 /* Return Congestion Notification only if we dropped a packet from this
2982 * bucket.
2983 @@ -417,7 +419,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
2984 return NET_XMIT_CN;
2985
2986 /* As we dropped a packet, better let upper stack know this. */
2987 - qdisc_tree_decrease_qlen(sch, 1);
2988 + qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
2989 return NET_XMIT_SUCCESS;
2990 }
2991
2992 @@ -527,7 +529,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
2993 {
2994 struct hhf_sched_data *q = qdisc_priv(sch);
2995 struct nlattr *tb[TCA_HHF_MAX + 1];
2996 - unsigned int qlen;
2997 + unsigned int qlen, prev_backlog;
2998 int err;
2999 u64 non_hh_quantum;
3000 u32 new_quantum = q->quantum;
3001 @@ -577,12 +579,14 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
3002 }
3003
3004 qlen = sch->q.qlen;
3005 + prev_backlog = sch->qstats.backlog;
3006 while (sch->q.qlen > sch->limit) {
3007 struct sk_buff *skb = hhf_dequeue(sch);
3008
3009 kfree_skb(skb);
3010 }
3011 - qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
3012 + qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
3013 + prev_backlog - sch->qstats.backlog);
3014
3015 sch_tree_unlock(sch);
3016 return 0;
3017 diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
3018 index 15ccd7f8fb2a..87b02ed3d5f2 100644
3019 --- a/net/sched/sch_htb.c
3020 +++ b/net/sched/sch_htb.c
3021 @@ -600,6 +600,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
3022 htb_activate(q, cl);
3023 }
3024
3025 + qdisc_qstats_backlog_inc(sch, skb);
3026 sch->q.qlen++;
3027 return NET_XMIT_SUCCESS;
3028 }
3029 @@ -889,6 +890,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
3030 ok:
3031 qdisc_bstats_update(sch, skb);
3032 qdisc_unthrottled(sch);
3033 + qdisc_qstats_backlog_dec(sch, skb);
3034 sch->q.qlen--;
3035 return skb;
3036 }
3037 @@ -955,6 +957,7 @@ static unsigned int htb_drop(struct Qdisc *sch)
3038 unsigned int len;
3039 if (cl->un.leaf.q->ops->drop &&
3040 (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
3041 + sch->qstats.backlog -= len;
3042 sch->q.qlen--;
3043 if (!cl->un.leaf.q->q.qlen)
3044 htb_deactivate(q, cl);
3045 @@ -984,12 +987,12 @@ static void htb_reset(struct Qdisc *sch)
3046 }
3047 cl->prio_activity = 0;
3048 cl->cmode = HTB_CAN_SEND;
3049 -
3050 }
3051 }
3052 qdisc_watchdog_cancel(&q->watchdog);
3053 __skb_queue_purge(&q->direct_queue);
3054 sch->q.qlen = 0;
3055 + sch->qstats.backlog = 0;
3056 memset(q->hlevel, 0, sizeof(q->hlevel));
3057 memset(q->row_mask, 0, sizeof(q->row_mask));
3058 for (i = 0; i < TC_HTB_NUMPRIO; i++)
3059 @@ -1163,14 +1166,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
3060 cl->common.classid)) == NULL)
3061 return -ENOBUFS;
3062
3063 - sch_tree_lock(sch);
3064 - *old = cl->un.leaf.q;
3065 - cl->un.leaf.q = new;
3066 - if (*old != NULL) {
3067 - qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
3068 - qdisc_reset(*old);
3069 - }
3070 - sch_tree_unlock(sch);
3071 + *old = qdisc_replace(sch, new, &cl->un.leaf.q);
3072 return 0;
3073 }
3074
3075 @@ -1272,7 +1268,6 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
3076 {
3077 struct htb_sched *q = qdisc_priv(sch);
3078 struct htb_class *cl = (struct htb_class *)arg;
3079 - unsigned int qlen;
3080 struct Qdisc *new_q = NULL;
3081 int last_child = 0;
3082
3083 @@ -1292,9 +1287,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
3084 sch_tree_lock(sch);
3085
3086 if (!cl->level) {
3087 - qlen = cl->un.leaf.q->q.qlen;
3088 + unsigned int qlen = cl->un.leaf.q->q.qlen;
3089 + unsigned int backlog = cl->un.leaf.q->qstats.backlog;
3090 +
3091 qdisc_reset(cl->un.leaf.q);
3092 - qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
3093 + qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
3094 }
3095
3096 /* delete from hash and active; remainder in destroy_class */
3097 @@ -1428,10 +1425,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
3098 sch_tree_lock(sch);
3099 if (parent && !parent->level) {
3100 unsigned int qlen = parent->un.leaf.q->q.qlen;
3101 + unsigned int backlog = parent->un.leaf.q->qstats.backlog;
3102
3103 /* turn parent into inner node */
3104 qdisc_reset(parent->un.leaf.q);
3105 - qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
3106 + qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
3107 qdisc_destroy(parent->un.leaf.q);
3108 if (parent->prio_activity)
3109 htb_deactivate(q, parent);
3110 diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
3111 index 4e904ca0af9d..bcdd54bb101c 100644
3112 --- a/net/sched/sch_multiq.c
3113 +++ b/net/sched/sch_multiq.c
3114 @@ -218,7 +218,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
3115 if (q->queues[i] != &noop_qdisc) {
3116 struct Qdisc *child = q->queues[i];
3117 q->queues[i] = &noop_qdisc;
3118 - qdisc_tree_decrease_qlen(child, child->q.qlen);
3119 + qdisc_tree_reduce_backlog(child, child->q.qlen,
3120 + child->qstats.backlog);
3121 qdisc_destroy(child);
3122 }
3123 }
3124 @@ -238,8 +239,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
3125 q->queues[i] = child;
3126
3127 if (old != &noop_qdisc) {
3128 - qdisc_tree_decrease_qlen(old,
3129 - old->q.qlen);
3130 + qdisc_tree_reduce_backlog(old,
3131 + old->q.qlen,
3132 + old->qstats.backlog);
3133 qdisc_destroy(old);
3134 }
3135 sch_tree_unlock(sch);
3136 @@ -303,13 +305,7 @@ static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
3137 if (new == NULL)
3138 new = &noop_qdisc;
3139
3140 - sch_tree_lock(sch);
3141 - *old = q->queues[band];
3142 - q->queues[band] = new;
3143 - qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
3144 - qdisc_reset(*old);
3145 - sch_tree_unlock(sch);
3146 -
3147 + *old = qdisc_replace(sch, new, &q->queues[band]);
3148 return 0;
3149 }
3150
3151 diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
3152 index 5abd1d9de989..4befe97a9034 100644
3153 --- a/net/sched/sch_netem.c
3154 +++ b/net/sched/sch_netem.c
3155 @@ -395,6 +395,25 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
3156 sch->q.qlen++;
3157 }
3158
3159 +/* netem can't properly corrupt a megapacket (like we get from GSO), so when
3160 + * we statistically choose to corrupt one, we segment it, return the first
3161 + * packet to be corrupted, and re-enqueue the remaining frames
3162 + */
3163 +static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
3164 +{
3165 + struct sk_buff *segs;
3166 + netdev_features_t features = netif_skb_features(skb);
3167 +
3168 + segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
3169 +
3170 + if (IS_ERR_OR_NULL(segs)) {
3171 + qdisc_reshape_fail(skb, sch);
3172 + return NULL;
3173 + }
3174 + consume_skb(skb);
3175 + return segs;
3176 +}
3177 +
3178 /*
3179 * Insert one skb into qdisc.
3180 * Note: parent depends on return value to account for queue length.
3181 @@ -407,7 +426,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
3182 /* We don't fill cb now as skb_unshare() may invalidate it */
3183 struct netem_skb_cb *cb;
3184 struct sk_buff *skb2;
3185 + struct sk_buff *segs = NULL;
3186 + unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
3187 + int nb = 0;
3188 int count = 1;
3189 + int rc = NET_XMIT_SUCCESS;
3190
3191 /* Random duplication */
3192 if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
3193 @@ -453,10 +476,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
3194 * do it now in software before we mangle it.
3195 */
3196 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
3197 + if (skb_is_gso(skb)) {
3198 + segs = netem_segment(skb, sch);
3199 + if (!segs)
3200 + return NET_XMIT_DROP;
3201 + } else {
3202 + segs = skb;
3203 + }
3204 +
3205 + skb = segs;
3206 + segs = segs->next;
3207 +
3208 if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
3209 (skb->ip_summed == CHECKSUM_PARTIAL &&
3210 - skb_checksum_help(skb)))
3211 - return qdisc_drop(skb, sch);
3212 + skb_checksum_help(skb))) {
3213 + rc = qdisc_drop(skb, sch);
3214 + goto finish_segs;
3215 + }
3216
3217 skb->data[prandom_u32() % skb_headlen(skb)] ^=
3218 1<<(prandom_u32() % 8);
3219 @@ -516,6 +552,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
3220 sch->qstats.requeues++;
3221 }
3222
3223 +finish_segs:
3224 + if (segs) {
3225 + while (segs) {
3226 + skb2 = segs->next;
3227 + segs->next = NULL;
3228 + qdisc_skb_cb(segs)->pkt_len = segs->len;
3229 + last_len = segs->len;
3230 + rc = qdisc_enqueue(segs, sch);
3231 + if (rc != NET_XMIT_SUCCESS) {
3232 + if (net_xmit_drop_count(rc))
3233 + qdisc_qstats_drop(sch);
3234 + } else {
3235 + nb++;
3236 + len += last_len;
3237 + }
3238 + segs = skb2;
3239 + }
3240 + sch->q.qlen += nb;
3241 + if (nb > 1)
3242 + qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
3243 + }
3244 return NET_XMIT_SUCCESS;
3245 }
3246
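The finish_segs block re-enqueues each segment on its own, so the ancestors (which accounted for one packet of prev_len bytes when the GSO skb first arrived) are now short by nb - 1 packets and len - prev_len bytes. Passing negative deltas to qdisc_tree_reduce_backlog() corrects both counts upward. A worked example with made-up sizes: one GSO skb with qdisc_pkt_len(skb) == 2962 segments into nb == 2 frames of 1514 bytes each, so len == 3028 and the call becomes

	qdisc_tree_reduce_backlog(sch, 1 - 2, 2962 - 3028)
	== qdisc_tree_reduce_backlog(sch, -1, -66)

meaning every ancestor's qlen grows by one and its backlog by the 66 bytes of replicated headers now queued. tbf_segment() below gets the identical 1 - nb / prev_len - len treatment.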
3247 @@ -598,7 +655,8 @@ deliver:
3248 if (unlikely(err != NET_XMIT_SUCCESS)) {
3249 if (net_xmit_drop_count(err)) {
3250 qdisc_qstats_drop(sch);
3251 - qdisc_tree_decrease_qlen(sch, 1);
3252 + qdisc_tree_reduce_backlog(sch, 1,
3253 + qdisc_pkt_len(skb));
3254 }
3255 }
3256 goto tfifo_dequeue;
3257 @@ -1037,15 +1095,7 @@ static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
3258 {
3259 struct netem_sched_data *q = qdisc_priv(sch);
3260
3261 - sch_tree_lock(sch);
3262 - *old = q->qdisc;
3263 - q->qdisc = new;
3264 - if (*old) {
3265 - qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
3266 - qdisc_reset(*old);
3267 - }
3268 - sch_tree_unlock(sch);
3269 -
3270 + *old = qdisc_replace(sch, new, &q->qdisc);
3271 return 0;
3272 }
3273
3274 diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
3275 index b783a446d884..71ae3b9629f9 100644
3276 --- a/net/sched/sch_pie.c
3277 +++ b/net/sched/sch_pie.c
3278 @@ -183,7 +183,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
3279 {
3280 struct pie_sched_data *q = qdisc_priv(sch);
3281 struct nlattr *tb[TCA_PIE_MAX + 1];
3282 - unsigned int qlen;
3283 + unsigned int qlen, dropped = 0;
3284 int err;
3285
3286 if (!opt)
3287 @@ -232,10 +232,11 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
3288 while (sch->q.qlen > sch->limit) {
3289 struct sk_buff *skb = __skb_dequeue(&sch->q);
3290
3291 + dropped += qdisc_pkt_len(skb);
3292 qdisc_qstats_backlog_dec(sch, skb);
3293 qdisc_drop(skb, sch);
3294 }
3295 - qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
3296 + qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
3297
3298 sch_tree_unlock(sch);
3299 return 0;
3300 diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
3301 index ba6487f2741f..fee1b15506b2 100644
3302 --- a/net/sched/sch_prio.c
3303 +++ b/net/sched/sch_prio.c
3304 @@ -191,7 +191,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
3305 struct Qdisc *child = q->queues[i];
3306 q->queues[i] = &noop_qdisc;
3307 if (child != &noop_qdisc) {
3308 - qdisc_tree_decrease_qlen(child, child->q.qlen);
3309 + qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
3310 qdisc_destroy(child);
3311 }
3312 }
3313 @@ -210,8 +210,9 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
3314 q->queues[i] = child;
3315
3316 if (old != &noop_qdisc) {
3317 - qdisc_tree_decrease_qlen(old,
3318 - old->q.qlen);
3319 + qdisc_tree_reduce_backlog(old,
3320 + old->q.qlen,
3321 + old->qstats.backlog);
3322 qdisc_destroy(old);
3323 }
3324 sch_tree_unlock(sch);
3325 @@ -268,13 +269,7 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
3326 if (new == NULL)
3327 new = &noop_qdisc;
3328
3329 - sch_tree_lock(sch);
3330 - *old = q->queues[band];
3331 - q->queues[band] = new;
3332 - qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
3333 - qdisc_reset(*old);
3334 - sch_tree_unlock(sch);
3335 -
3336 + *old = qdisc_replace(sch, new, &q->queues[band]);
3337 return 0;
3338 }
3339
3340 diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
3341 index 3dc3a6e56052..8d2d8d953432 100644
3342 --- a/net/sched/sch_qfq.c
3343 +++ b/net/sched/sch_qfq.c
3344 @@ -220,9 +220,10 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
3345 static void qfq_purge_queue(struct qfq_class *cl)
3346 {
3347 unsigned int len = cl->qdisc->q.qlen;
3348 + unsigned int backlog = cl->qdisc->qstats.backlog;
3349
3350 qdisc_reset(cl->qdisc);
3351 - qdisc_tree_decrease_qlen(cl->qdisc, len);
3352 + qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
3353 }
3354
3355 static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
3356 @@ -617,11 +618,7 @@ static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
3357 new = &noop_qdisc;
3358 }
3359
3360 - sch_tree_lock(sch);
3361 - qfq_purge_queue(cl);
3362 - *old = cl->qdisc;
3363 - cl->qdisc = new;
3364 - sch_tree_unlock(sch);
3365 + *old = qdisc_replace(sch, new, &cl->qdisc);
3366 return 0;
3367 }
3368
3369 diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
3370 index 6c0534cc7758..8c0508c0e287 100644
3371 --- a/net/sched/sch_red.c
3372 +++ b/net/sched/sch_red.c
3373 @@ -210,7 +210,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
3374 q->flags = ctl->flags;
3375 q->limit = ctl->limit;
3376 if (child) {
3377 - qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
3378 + qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
3379 + q->qdisc->qstats.backlog);
3380 qdisc_destroy(q->qdisc);
3381 q->qdisc = child;
3382 }
3383 @@ -313,12 +314,7 @@ static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
3384 if (new == NULL)
3385 new = &noop_qdisc;
3386
3387 - sch_tree_lock(sch);
3388 - *old = q->qdisc;
3389 - q->qdisc = new;
3390 - qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
3391 - qdisc_reset(*old);
3392 - sch_tree_unlock(sch);
3393 + *old = qdisc_replace(sch, new, &q->qdisc);
3394 return 0;
3395 }
3396
3397 diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
3398 index 5bbb6332ec57..c69611640fa5 100644
3399 --- a/net/sched/sch_sfb.c
3400 +++ b/net/sched/sch_sfb.c
3401 @@ -510,7 +510,8 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
3402
3403 sch_tree_lock(sch);
3404
3405 - qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
3406 + qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
3407 + q->qdisc->qstats.backlog);
3408 qdisc_destroy(q->qdisc);
3409 q->qdisc = child;
3410
3411 @@ -606,12 +607,7 @@ static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
3412 if (new == NULL)
3413 new = &noop_qdisc;
3414
3415 - sch_tree_lock(sch);
3416 - *old = q->qdisc;
3417 - q->qdisc = new;
3418 - qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
3419 - qdisc_reset(*old);
3420 - sch_tree_unlock(sch);
3421 + *old = qdisc_replace(sch, new, &q->qdisc);
3422 return 0;
3423 }
3424
3425 diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
3426 index 3abab534eb5c..498f0a2cb47f 100644
3427 --- a/net/sched/sch_sfq.c
3428 +++ b/net/sched/sch_sfq.c
3429 @@ -346,7 +346,7 @@ static int
3430 sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
3431 {
3432 struct sfq_sched_data *q = qdisc_priv(sch);
3433 - unsigned int hash;
3434 + unsigned int hash, dropped;
3435 sfq_index x, qlen;
3436 struct sfq_slot *slot;
3437 int uninitialized_var(ret);
3438 @@ -461,7 +461,7 @@ enqueue:
3439 return NET_XMIT_SUCCESS;
3440
3441 qlen = slot->qlen;
3442 - sfq_drop(sch);
3443 + dropped = sfq_drop(sch);
3444 /* Return Congestion Notification only if we dropped a packet
3445 * from this flow.
3446 */
3447 @@ -469,7 +469,7 @@ enqueue:
3448 return NET_XMIT_CN;
3449
3450 /* As we dropped a packet, better let upper stack know this */
3451 - qdisc_tree_decrease_qlen(sch, 1);
3452 + qdisc_tree_reduce_backlog(sch, 1, dropped);
3453 return NET_XMIT_SUCCESS;
3454 }
3455
3456 @@ -537,6 +537,7 @@ static void sfq_rehash(struct Qdisc *sch)
3457 struct sfq_slot *slot;
3458 struct sk_buff_head list;
3459 int dropped = 0;
3460 + unsigned int drop_len = 0;
3461
3462 __skb_queue_head_init(&list);
3463
3464 @@ -565,6 +566,7 @@ static void sfq_rehash(struct Qdisc *sch)
3465 if (x >= SFQ_MAX_FLOWS) {
3466 drop:
3467 qdisc_qstats_backlog_dec(sch, skb);
3468 + drop_len += qdisc_pkt_len(skb);
3469 kfree_skb(skb);
3470 dropped++;
3471 continue;
3472 @@ -594,7 +596,7 @@ drop:
3473 }
3474 }
3475 sch->q.qlen -= dropped;
3476 - qdisc_tree_decrease_qlen(sch, dropped);
3477 + qdisc_tree_reduce_backlog(sch, dropped, drop_len);
3478 }
3479
3480 static void sfq_perturbation(unsigned long arg)
3481 @@ -618,7 +620,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
3482 struct sfq_sched_data *q = qdisc_priv(sch);
3483 struct tc_sfq_qopt *ctl = nla_data(opt);
3484 struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
3485 - unsigned int qlen;
3486 + unsigned int qlen, dropped = 0;
3487 struct red_parms *p = NULL;
3488
3489 if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
3490 @@ -667,8 +669,8 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
3491
3492 qlen = sch->q.qlen;
3493 while (sch->q.qlen > q->limit)
3494 - sfq_drop(sch);
3495 - qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
3496 + dropped += sfq_drop(sch);
3497 + qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
3498
3499 del_timer(&q->perturb_timer);
3500 if (q->perturb_period) {
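These sfq hunks only work if sfq_drop(), converted earlier in this patch outside this excerpt, reports how many bytes it freed. The assumed convention, sketched:

/* presumed shape after the conversion (not shown in this excerpt):
 * returns the qdisc_pkt_len() of the packet it dropped, or 0 if
 * nothing could be dropped
 */
static unsigned int sfq_drop(struct Qdisc *sch);

That lets sfq_enqueue() report the exact byte count for the single packet it sheds, and lets sfq_change() accumulate a running total while trimming the queue down to the new limit.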
3501 diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
3502 index a4afde14e865..c2fbde742f37 100644
3503 --- a/net/sched/sch_tbf.c
3504 +++ b/net/sched/sch_tbf.c
3505 @@ -160,6 +160,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
3506 struct tbf_sched_data *q = qdisc_priv(sch);
3507 struct sk_buff *segs, *nskb;
3508 netdev_features_t features = netif_skb_features(skb);
3509 + unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
3510 int ret, nb;
3511
3512 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
3513 @@ -172,6 +173,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
3514 nskb = segs->next;
3515 segs->next = NULL;
3516 qdisc_skb_cb(segs)->pkt_len = segs->len;
3517 + len += segs->len;
3518 ret = qdisc_enqueue(segs, q->qdisc);
3519 if (ret != NET_XMIT_SUCCESS) {
3520 if (net_xmit_drop_count(ret))
3521 @@ -183,7 +185,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
3522 }
3523 sch->q.qlen += nb;
3524 if (nb > 1)
3525 - qdisc_tree_decrease_qlen(sch, 1 - nb);
3526 + qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
3527 consume_skb(skb);
3528 return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
3529 }
3530 @@ -399,7 +401,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
3531
3532 sch_tree_lock(sch);
3533 if (child) {
3534 - qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
3535 + qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
3536 + q->qdisc->qstats.backlog);
3537 qdisc_destroy(q->qdisc);
3538 q->qdisc = child;
3539 }
3540 @@ -502,13 +505,7 @@ static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
3541 if (new == NULL)
3542 new = &noop_qdisc;
3543
3544 - sch_tree_lock(sch);
3545 - *old = q->qdisc;
3546 - q->qdisc = new;
3547 - qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
3548 - qdisc_reset(*old);
3549 - sch_tree_unlock(sch);
3550 -
3551 + *old = qdisc_replace(sch, new, &q->qdisc);
3552 return 0;
3553 }
3554
3555 diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
3556 index 7fd1220fbfa0..9b5bd6d142dc 100644
3557 --- a/net/vmw_vsock/af_vsock.c
3558 +++ b/net/vmw_vsock/af_vsock.c
3559 @@ -1794,27 +1794,8 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3560 else if (sk->sk_shutdown & RCV_SHUTDOWN)
3561 err = 0;
3562
3563 - if (copied > 0) {
3564 - /* We only do these additional bookkeeping/notification steps
3565 - * if we actually copied something out of the queue pair
3566 - * instead of just peeking ahead.
3567 - */
3568 -
3569 - if (!(flags & MSG_PEEK)) {
3570 - /* If the other side has shutdown for sending and there
3571 - * is nothing more to read, then modify the socket
3572 - * state.
3573 - */
3574 - if (vsk->peer_shutdown & SEND_SHUTDOWN) {
3575 - if (vsock_stream_has_data(vsk) <= 0) {
3576 - sk->sk_state = SS_UNCONNECTED;
3577 - sock_set_flag(sk, SOCK_DONE);
3578 - sk->sk_state_change(sk);
3579 - }
3580 - }
3581 - }
3582 + if (copied > 0)
3583 err = copied;
3584 - }
3585
3586 out_wait:
3587 finish_wait(sk_sleep(sk), &wait);
3588 diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
3589 index 7ecd04c21360..997ff7b2509b 100644
3590 --- a/net/x25/x25_facilities.c
3591 +++ b/net/x25/x25_facilities.c
3592 @@ -277,6 +277,7 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
3593
3594 memset(&theirs, 0, sizeof(theirs));
3595 memcpy(new, ours, sizeof(*new));
3596 + memset(dte, 0, sizeof(*dte));
3597
3598 len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
3599 if (len < 0)
3600 diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output_kern.c
3601 index 8d8d1ec429eb..9b96f4fb8cea 100644
3602 --- a/samples/bpf/trace_output_kern.c
3603 +++ b/samples/bpf/trace_output_kern.c
3604 @@ -18,7 +18,6 @@ int bpf_prog1(struct pt_regs *ctx)
3605 u64 cookie;
3606 } data;
3607
3608 - memset(&data, 0, sizeof(data));
3609 data.pid = bpf_get_current_pid_tgid();
3610 data.cookie = 0x12345678;
3611
3612 diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
3613 index 64e0d1d81ca5..9739fce9e032 100644
3614 --- a/sound/pci/hda/hda_sysfs.c
3615 +++ b/sound/pci/hda/hda_sysfs.c
3616 @@ -141,14 +141,6 @@ static int reconfig_codec(struct hda_codec *codec)
3617 err = snd_hda_codec_configure(codec);
3618 if (err < 0)
3619 goto error;
3620 - /* rebuild PCMs */
3621 - err = snd_hda_codec_build_pcms(codec);
3622 - if (err < 0)
3623 - goto error;
3624 - /* rebuild mixers */
3625 - err = snd_hda_codec_build_controls(codec);
3626 - if (err < 0)
3627 - goto error;
3628 err = snd_card_register(codec->card);
3629 error:
3630 snd_hda_power_down(codec);
3631 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3632 index ac4490a96863..4918ffa5ba68 100644
3633 --- a/sound/pci/hda/patch_realtek.c
3634 +++ b/sound/pci/hda/patch_realtek.c
3635 @@ -6426,6 +6426,7 @@ enum {
3636 ALC668_FIXUP_DELL_DISABLE_AAMIX,
3637 ALC668_FIXUP_DELL_XPS13,
3638 ALC662_FIXUP_ASUS_Nx50,
3639 + ALC668_FIXUP_ASUS_Nx51,
3640 };
3641
3642 static const struct hda_fixup alc662_fixups[] = {
3643 @@ -6672,6 +6673,15 @@ static const struct hda_fixup alc662_fixups[] = {
3644 .chained = true,
3645 .chain_id = ALC662_FIXUP_BASS_1A
3646 },
3647 + [ALC668_FIXUP_ASUS_Nx51] = {
3648 + .type = HDA_FIXUP_PINS,
3649 + .v.pins = (const struct hda_pintbl[]) {
3650 + {0x1a, 0x90170151}, /* bass speaker */
3651 + {}
3652 + },
3653 + .chained = true,
3654 + .chain_id = ALC662_FIXUP_BASS_CHMAP,
3655 + },
3656 };
3657
3658 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
3659 @@ -6694,11 +6704,14 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
3660 SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
3661 SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
3662 SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
3663 + SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
3664 SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
3665 SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
3666 SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
3667 SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
3668 SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
3669 + SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
3670 + SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
3671 SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
3672 SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
3673 SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
3674 diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
3675 index 001fb4dc0722..db11ecf0b74d 100644
3676 --- a/sound/usb/quirks.c
3677 +++ b/sound/usb/quirks.c
3678 @@ -1138,8 +1138,11 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
3679 case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
3680 case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
3681 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
3682 + case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
3683 case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
3684 + case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
3685 case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
3686 + case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
3687 case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
3688 return true;
3689 }
3690 diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
3691 index 0144b3d1bb77..88cccea3ca99 100644
3692 --- a/tools/lib/traceevent/parse-filter.c
3693 +++ b/tools/lib/traceevent/parse-filter.c
3694 @@ -1164,11 +1164,11 @@ process_filter(struct event_format *event, struct filter_arg **parg,
3695 current_op = current_exp;
3696
3697 ret = collapse_tree(current_op, parg, error_str);
3698 +	/* collapse_tree() may free current_op; it updates parg accordingly */
3699 + current_op = NULL;
3700 if (ret < 0)
3701 goto fail;
3702
3703 - *parg = current_op;
3704 -
3705 free(token);
3706 return 0;
3707
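The parse-filter hunk closes a use-after-free: collapse_tree() may free current_op and already publishes the surviving node through parg, so the old unconditional *parg = current_op could store a stale pointer. The fix follows the usual ownership rule: once a callee both consumes your pointer and hands back its replacement through an out-parameter, null the local immediately. In miniature, with hypothetical names:

/* callee may free 'node' and stores the survivor in *out */
ret = collapse(node, &out, err);
node = NULL;	/* our reference is gone either way; use only 'out' now */
if (ret < 0)
	goto fail;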