Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.5/0104-4.5.5-all-fixes.patch



Revision 2789
Mon May 23 14:57:37 2016 UTC by niro
File size: 161366 byte(s)
-linux-4.5.5
1 diff --git a/Makefile b/Makefile
2 index d64eade37241..a23df411d393 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 5
8 -SUBLEVEL = 4
9 +SUBLEVEL = 5
10 EXTRAVERSION =
11 NAME = Blurry Fish Butt
12
13 diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
14 index 0827d594b1f0..cd0cd5fd09a3 100644
15 --- a/arch/arm/boot/dts/at91sam9x5.dtsi
16 +++ b/arch/arm/boot/dts/at91sam9x5.dtsi
17 @@ -106,7 +106,7 @@
18
19 pmc: pmc@fffffc00 {
20 compatible = "atmel,at91sam9x5-pmc", "syscon";
21 - reg = <0xfffffc00 0x100>;
22 + reg = <0xfffffc00 0x200>;
23 interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
24 interrupt-controller;
25 #address-cells = <1>;
26 diff --git a/crypto/ahash.c b/crypto/ahash.c
27 index d19b52324cf5..dac1c24e9c3e 100644
28 --- a/crypto/ahash.c
29 +++ b/crypto/ahash.c
30 @@ -69,8 +69,9 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
31 struct scatterlist *sg;
32
33 sg = walk->sg;
34 - walk->pg = sg_page(sg);
35 walk->offset = sg->offset;
36 + walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
37 + walk->offset = offset_in_page(walk->offset);
38 walk->entrylen = sg->length;
39
40 if (walk->entrylen > walk->total)
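The hunk above fixes hash_walk_new_entry() for scatterlist entries whose offset is larger than PAGE_SIZE: whole pages are folded into the page pointer before the walk starts, leaving only the in-page remainder as the offset. A minimal sketch of that normalization (kernel-style C, names taken from the hunk):

    /*
     * sg->offset may exceed PAGE_SIZE when the entry addresses a
     * multi-page buffer, so skip whole pages first and keep only the
     * remainder within the resulting page.
     */
    unsigned int offset = sg->offset;
    struct page *pg = sg_page(sg) + (offset >> PAGE_SHIFT);

    offset = offset_in_page(offset);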
41 diff --git a/crypto/testmgr.c b/crypto/testmgr.c
42 index ae8c57fd8bc7..d4944318ca1f 100644
43 --- a/crypto/testmgr.c
44 +++ b/crypto/testmgr.c
45 @@ -1849,6 +1849,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
46 static int do_test_rsa(struct crypto_akcipher *tfm,
47 struct akcipher_testvec *vecs)
48 {
49 + char *xbuf[XBUFSIZE];
50 struct akcipher_request *req;
51 void *outbuf_enc = NULL;
52 void *outbuf_dec = NULL;
53 @@ -1857,9 +1858,12 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
54 int err = -ENOMEM;
55 struct scatterlist src, dst, src_tab[2];
56
57 + if (testmgr_alloc_buf(xbuf))
58 + return err;
59 +
60 req = akcipher_request_alloc(tfm, GFP_KERNEL);
61 if (!req)
62 - return err;
63 + goto free_xbuf;
64
65 init_completion(&result.completion);
66
67 @@ -1877,9 +1881,14 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
68 if (!outbuf_enc)
69 goto free_req;
70
71 + if (WARN_ON(vecs->m_size > PAGE_SIZE))
72 + goto free_all;
73 +
74 + memcpy(xbuf[0], vecs->m, vecs->m_size);
75 +
76 sg_init_table(src_tab, 2);
77 - sg_set_buf(&src_tab[0], vecs->m, 8);
78 - sg_set_buf(&src_tab[1], vecs->m + 8, vecs->m_size - 8);
79 + sg_set_buf(&src_tab[0], xbuf[0], 8);
80 + sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8);
81 sg_init_one(&dst, outbuf_enc, out_len_max);
82 akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
83 out_len_max);
84 @@ -1898,7 +1907,7 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
85 goto free_all;
86 }
87 /* verify that encrypted message is equal to expected */
88 - if (memcmp(vecs->c, sg_virt(req->dst), vecs->c_size)) {
89 + if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) {
90 pr_err("alg: rsa: encrypt test failed. Invalid output\n");
91 err = -EINVAL;
92 goto free_all;
93 @@ -1913,7 +1922,13 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
94 err = -ENOMEM;
95 goto free_all;
96 }
97 - sg_init_one(&src, vecs->c, vecs->c_size);
98 +
99 + if (WARN_ON(vecs->c_size > PAGE_SIZE))
100 + goto free_all;
101 +
102 + memcpy(xbuf[0], vecs->c, vecs->c_size);
103 +
104 + sg_init_one(&src, xbuf[0], vecs->c_size);
105 sg_init_one(&dst, outbuf_dec, out_len_max);
106 init_completion(&result.completion);
107 akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
108 @@ -1940,6 +1955,8 @@ free_all:
109 kfree(outbuf_enc);
110 free_req:
111 akcipher_request_free(req);
112 +free_xbuf:
113 + testmgr_free_buf(xbuf);
114 return err;
115 }
116
117 diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
118 index 7e58f6560399..4a36e415e938 100644
119 --- a/drivers/base/regmap/regmap-spmi.c
120 +++ b/drivers/base/regmap/regmap-spmi.c
121 @@ -142,7 +142,7 @@ static int regmap_spmi_ext_read(void *context,
122 while (val_size) {
123 len = min_t(size_t, val_size, 8);
124
125 - err = spmi_ext_register_readl(context, addr, val, val_size);
126 + err = spmi_ext_register_readl(context, addr, val, len);
127 if (err)
128 goto err_out;
129
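The one-liner above is a classic chunked-I/O bug: the loop capped each chunk at 8 bytes but still passed the full remaining size to the hardware read, so every iteration past the first read out of bounds. A hedged sketch of the corrected loop shape (the pointer/size bookkeeping is assumed from the surrounding driver code, not shown in the hunk):

    while (val_size) {
            /* SPMI extended register reads move at most 8 bytes */
            len = min_t(size_t, val_size, 8);

            /* pass the chunk length, never the total remaining size */
            err = spmi_ext_register_readl(context, addr, val, len);
            if (err)
                    goto err_out;

            addr += len;
            val += len;
            val_size -= len;
    }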
130 diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
131 index 0e82ce3c383e..976b01e58afb 100644
132 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h
133 +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
134 @@ -236,6 +236,8 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
135 uint32_t vf_mask);
136 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
137 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
138 +int adf_init_pf_wq(void);
139 +void adf_exit_pf_wq(void);
140 #else
141 static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
142 {
143 @@ -253,5 +255,14 @@ static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
144 static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
145 {
146 }
147 +
148 +static inline int adf_init_pf_wq(void)
149 +{
150 + return 0;
151 +}
152 +
153 +static inline void adf_exit_pf_wq(void)
154 +{
155 +}
156 #endif
157 #endif
158 diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
159 index 5c897e6e7994..3c3f948290ca 100644
160 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
161 +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
162 @@ -462,12 +462,17 @@ static int __init adf_register_ctl_device_driver(void)
163 if (adf_init_aer())
164 goto err_aer;
165
166 + if (adf_init_pf_wq())
167 + goto err_pf_wq;
168 +
169 if (qat_crypto_register())
170 goto err_crypto_register;
171
172 return 0;
173
174 err_crypto_register:
175 + adf_exit_pf_wq();
176 +err_pf_wq:
177 adf_exit_aer();
178 err_aer:
179 adf_chr_drv_destroy();
180 @@ -480,6 +485,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
181 {
182 adf_chr_drv_destroy();
183 adf_exit_aer();
184 + adf_exit_pf_wq();
185 qat_crypto_unregister();
186 adf_clean_vf_map(false);
187 mutex_destroy(&adf_ctl_lock);
188 diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
189 index 1117a8b58280..38a0415e767d 100644
190 --- a/drivers/crypto/qat/qat_common/adf_sriov.c
191 +++ b/drivers/crypto/qat/qat_common/adf_sriov.c
192 @@ -119,11 +119,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
193 int i;
194 u32 reg;
195
196 - /* Workqueue for PF2VF responses */
197 - pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
198 - if (!pf2vf_resp_wq)
199 - return -ENOMEM;
200 -
201 for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
202 i++, vf_info++) {
203 /* This ptr will be populated when VFs will be created */
204 @@ -216,11 +211,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
205
206 kfree(accel_dev->pf.vf_info);
207 accel_dev->pf.vf_info = NULL;
208 -
209 - if (pf2vf_resp_wq) {
210 - destroy_workqueue(pf2vf_resp_wq);
211 - pf2vf_resp_wq = NULL;
212 - }
213 }
214 EXPORT_SYMBOL_GPL(adf_disable_sriov);
215
216 @@ -304,3 +294,19 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
217 return numvfs;
218 }
219 EXPORT_SYMBOL_GPL(adf_sriov_configure);
220 +
221 +int __init adf_init_pf_wq(void)
222 +{
223 + /* Workqueue for PF2VF responses */
224 + pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
225 +
226 + return !pf2vf_resp_wq ? -ENOMEM : 0;
227 +}
228 +
229 +void adf_exit_pf_wq(void)
230 +{
231 + if (pf2vf_resp_wq) {
232 + destroy_workqueue(pf2vf_resp_wq);
233 + pf2vf_resp_wq = NULL;
234 + }
235 +}
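Taken together, the three QAT hunks move the PF2VF response workqueue from adf_enable_sriov()/adf_disable_sriov() into module init/exit, so it exists for the driver's whole lifetime and the SR-IOV enable path can no longer fail an allocation halfway through. Note how the registration function above unwinds with a goto ladder in strict reverse order; a generic sketch of that pattern, with setup_a()/teardown_a() and friends as hypothetical placeholders:

    static int __init example_init(void)
    {
            int err;

            err = setup_a();                /* e.g. adf_init_aer() */
            if (err)
                    return err;

            err = setup_b();                /* e.g. adf_init_pf_wq() */
            if (err)
                    goto err_b;

            err = setup_c();                /* e.g. qat_crypto_register() */
            if (err)
                    goto err_c;

            return 0;

    err_c:
            teardown_b();                   /* undo only what succeeded */
    err_b:
            teardown_a();
            return err;
    }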
236 diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
237 index bf731e9f643e..7f85c2c1d681 100644
238 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
239 +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
240 @@ -276,8 +276,8 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
241 }
242 }
243 } else {
244 - for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
245 - for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
246 + for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
247 + for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
248 max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
249 if (max_pix_clock >= pix_clock) {
250 *dp_lanes = lane_num;
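Swapping the loop nesting changes which (lane count, link rate) pair wins: rates are now the outer loop, so the search settles on the lowest link rate that can carry the pixel clock, adding lanes before raising the rate. Worked numbers for a 148,500 kHz pixel clock at 24 bpp, assuming the usual DP rate table of 162000/270000/540000 kHz:

    old order (lanes outer): 1 lane  * 540000 * 8 / 24 = 180000 >= 148500  -> 1 lane  @ 5.4 GHz
    new order (rates outer): 4 lanes * 162000 * 8 / 24 = 216000 >= 148500  -> 4 lanes @ 1.62 GHz

Lower link rates are generally more robust on marginal links, which is the point of the reordering; the identical change is applied to radeon further below.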
251 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
252 index cf39ed3133d6..4d0bc2a8843c 100644
253 --- a/drivers/gpu/drm/i915/i915_debugfs.c
254 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
255 @@ -2860,20 +2860,6 @@ static void intel_dp_info(struct seq_file *m,
256 intel_panel_info(m, &intel_connector->panel);
257 }
258
259 -static void intel_dp_mst_info(struct seq_file *m,
260 - struct intel_connector *intel_connector)
261 -{
262 - struct intel_encoder *intel_encoder = intel_connector->encoder;
263 - struct intel_dp_mst_encoder *intel_mst =
264 - enc_to_mst(&intel_encoder->base);
265 - struct intel_digital_port *intel_dig_port = intel_mst->primary;
266 - struct intel_dp *intel_dp = &intel_dig_port->dp;
267 - bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
268 - intel_connector->port);
269 -
270 - seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
271 -}
272 -
273 static void intel_hdmi_info(struct seq_file *m,
274 struct intel_connector *intel_connector)
275 {
276 @@ -2917,8 +2903,6 @@ static void intel_connector_info(struct seq_file *m,
277 intel_hdmi_info(m, intel_connector);
278 else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
279 intel_lvds_info(m, intel_connector);
280 - else if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
281 - intel_dp_mst_info(m, intel_connector);
282 }
283
284 seq_printf(m, "\tmodes:\n");
285 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
286 index 9b6737c85acb..5672b57e65d5 100644
287 --- a/drivers/gpu/drm/i915/i915_reg.h
288 +++ b/drivers/gpu/drm/i915/i915_reg.h
289 @@ -7412,6 +7412,8 @@ enum skl_disp_power_wells {
290 #define TRANS_CLK_SEL_DISABLED (0x0<<29)
291 #define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29)
292
293 +#define CDCLK_FREQ _MMIO(0x46200)
294 +
295 #define _TRANSA_MSA_MISC 0x60410
296 #define _TRANSB_MSA_MISC 0x61410
297 #define _TRANSC_MSA_MISC 0x62410
298 diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
299 index 30f921421b0c..7d281b40064a 100644
300 --- a/drivers/gpu/drm/i915/intel_audio.c
301 +++ b/drivers/gpu/drm/i915/intel_audio.c
302 @@ -262,8 +262,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
303 tmp |= AUD_CONFIG_N_PROG_ENABLE;
304 tmp &= ~AUD_CONFIG_UPPER_N_MASK;
305 tmp &= ~AUD_CONFIG_LOWER_N_MASK;
306 - if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) ||
307 - intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
308 + if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
309 tmp |= AUD_CONFIG_N_VALUE_INDEX;
310 I915_WRITE(HSW_AUD_CFG(pipe), tmp);
311
312 @@ -476,8 +475,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
313 tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
314 tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
315 tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
316 - if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) ||
317 - intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
318 + if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
319 tmp |= AUD_CONFIG_N_VALUE_INDEX;
320 else
321 tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
322 @@ -515,8 +513,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
323
324 /* ELD Conn_Type */
325 connector->eld[5] &= ~(3 << 2);
326 - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
327 - intel_pipe_has_type(crtc, INTEL_OUTPUT_DP_MST))
328 + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
329 connector->eld[5] |= (1 << 2);
330
331 connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
332 diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
333 index a7b4a524fadd..dbacc2901d47 100644
334 --- a/drivers/gpu/drm/i915/intel_crt.c
335 +++ b/drivers/gpu/drm/i915/intel_crt.c
336 @@ -255,8 +255,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
337 pipe_config->has_pch_encoder = true;
338
339 /* LPT FDI RX only supports 8bpc. */
340 - if (HAS_PCH_LPT(dev))
341 + if (HAS_PCH_LPT(dev)) {
342 + if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
343 + DRM_DEBUG_KMS("LPT only supports 24bpp\n");
344 + return false;
345 + }
346 +
347 pipe_config->pipe_bpp = 24;
348 + }
349
350 /* FDI must always be 2.7 GHz */
351 if (HAS_DDI(dev)) {
352 diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
353 index 33b8e0a2b3fd..40511493914c 100644
354 --- a/drivers/gpu/drm/i915/intel_ddi.c
355 +++ b/drivers/gpu/drm/i915/intel_ddi.c
356 @@ -3165,23 +3165,6 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
357 I915_WRITE(FDI_RX_CTL(PIPE_A), val);
358 }
359
360 -bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
361 - struct intel_crtc *intel_crtc)
362 -{
363 - u32 temp;
364 -
365 - if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
366 - temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
367 -
368 - intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
369 -
370 - if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
371 - return true;
372 - }
373 -
374 - return false;
375 -}
376 -
377 void intel_ddi_get_config(struct intel_encoder *encoder,
378 struct intel_crtc_state *pipe_config)
379 {
380 @@ -3242,8 +3225,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
381 break;
382 }
383
384 - pipe_config->has_audio =
385 - intel_ddi_is_audio_enabled(dev_priv, intel_crtc);
386 + if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
387 + temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
388 + if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
389 + pipe_config->has_audio = true;
390 + }
391
392 if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
393 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
394 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
395 index c023a04c44d0..39b00b9daf2d 100644
396 --- a/drivers/gpu/drm/i915/intel_display.c
397 +++ b/drivers/gpu/drm/i915/intel_display.c
398 @@ -9793,6 +9793,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
399 sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
400 mutex_unlock(&dev_priv->rps.hw_lock);
401
402 + I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
403 +
404 intel_update_cdclk(dev);
405
406 WARN(cdclk != dev_priv->cdclk_freq,
407 diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
408 index db6361b5a6ab..e8e6984b8053 100644
409 --- a/drivers/gpu/drm/i915/intel_dp_mst.c
410 +++ b/drivers/gpu/drm/i915/intel_dp_mst.c
411 @@ -78,8 +78,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
412 return false;
413 }
414
415 - if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, found->port))
416 - pipe_config->has_audio = true;
417 mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
418
419 pipe_config->pbn = mst_pbn;
420 @@ -104,11 +102,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
421 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
422 struct intel_digital_port *intel_dig_port = intel_mst->primary;
423 struct intel_dp *intel_dp = &intel_dig_port->dp;
424 - struct drm_device *dev = encoder->base.dev;
425 - struct drm_i915_private *dev_priv = dev->dev_private;
426 - struct drm_crtc *crtc = encoder->base.crtc;
427 - struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
428 -
429 int ret;
430
431 DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
432 @@ -119,10 +112,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
433 if (ret) {
434 DRM_ERROR("failed to update payload %d\n", ret);
435 }
436 - if (intel_crtc->config->has_audio) {
437 - intel_audio_codec_disable(encoder);
438 - intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
439 - }
440 }
441
442 static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
443 @@ -219,7 +208,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
444 struct intel_dp *intel_dp = &intel_dig_port->dp;
445 struct drm_device *dev = intel_dig_port->base.base.dev;
446 struct drm_i915_private *dev_priv = dev->dev_private;
447 - struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
448 enum port port = intel_dig_port->port;
449 int ret;
450
451 @@ -232,13 +220,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
452 ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
453
454 ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
455 -
456 - if (crtc->config->has_audio) {
457 - DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
458 - pipe_name(crtc->pipe));
459 - intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
460 - intel_audio_codec_enable(encoder);
461 - }
462 }
463
464 static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
465 @@ -264,9 +245,6 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
466
467 pipe_config->has_dp_encoder = true;
468
469 - pipe_config->has_audio =
470 - intel_ddi_is_audio_enabled(dev_priv, crtc);
471 -
472 temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
473 if (temp & TRANS_DDI_PHSYNC)
474 flags |= DRM_MODE_FLAG_PHSYNC;
475 diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
476 index 3ce3bee368fe..1ae61f488987 100644
477 --- a/drivers/gpu/drm/i915/intel_drv.h
478 +++ b/drivers/gpu/drm/i915/intel_drv.h
479 @@ -1013,8 +1013,6 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
480 void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
481 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
482 void intel_ddi_fdi_disable(struct drm_crtc *crtc);
483 -bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
484 - struct intel_crtc *intel_crtc);
485 void intel_ddi_get_config(struct intel_encoder *encoder,
486 struct intel_crtc_state *pipe_config);
487 struct intel_encoder *
488 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
489 index 7e4a9842b9ea..0eae3994e5e3 100644
490 --- a/drivers/gpu/drm/i915/intel_pm.c
491 +++ b/drivers/gpu/drm/i915/intel_pm.c
492 @@ -6565,6 +6565,12 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
493 misccpctl = I915_READ(GEN7_MISCCPCTL);
494 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
495 I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
496 + /*
497 + * Wait at least 100 clocks before re-enabling clock gating. See
498 + * the definition of L3SQCREG1 in BSpec.
499 + */
500 + POSTING_READ(GEN8_L3SQCREG1);
501 + udelay(1);
502 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
503
504 /*
505 diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
506 index 801dd60ac192..7f52142d37d5 100644
507 --- a/drivers/gpu/drm/radeon/atombios_crtc.c
508 +++ b/drivers/gpu/drm/radeon/atombios_crtc.c
509 @@ -1740,6 +1740,7 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
510 static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
511 {
512 struct drm_device *dev = crtc->dev;
513 + struct radeon_device *rdev = dev->dev_private;
514 struct drm_crtc *test_crtc;
515 struct radeon_crtc *test_radeon_crtc;
516
517 @@ -1749,6 +1750,10 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
518 test_radeon_crtc = to_radeon_crtc(test_crtc);
519 if (test_radeon_crtc->encoder &&
520 ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
521 + /* PPLL2 is exclusive to UNIPHYA on DCE61 */
522 + if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
523 + test_radeon_crtc->pll_id == ATOM_PPLL2)
524 + continue;
525 /* for DP use the same PLL for all */
526 if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
527 return test_radeon_crtc->pll_id;
528 @@ -1770,6 +1775,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
529 {
530 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
531 struct drm_device *dev = crtc->dev;
532 + struct radeon_device *rdev = dev->dev_private;
533 struct drm_crtc *test_crtc;
534 struct radeon_crtc *test_radeon_crtc;
535 u32 adjusted_clock, test_adjusted_clock;
536 @@ -1785,6 +1791,10 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
537 test_radeon_crtc = to_radeon_crtc(test_crtc);
538 if (test_radeon_crtc->encoder &&
539 !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
540 + /* PPLL2 is exclusive to UNIPHYA on DCE61 */
541 + if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
542 + test_radeon_crtc->pll_id == ATOM_PPLL2)
543 + continue;
544 /* check if we are already driving this connector with another crtc */
545 if (test_radeon_crtc->connector == radeon_crtc->connector) {
546 /* if we are, return that pll */
547 diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
548 index 6af832545bc5..b5760851195c 100644
549 --- a/drivers/gpu/drm/radeon/atombios_dp.c
550 +++ b/drivers/gpu/drm/radeon/atombios_dp.c
551 @@ -326,8 +326,8 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
552 }
553 }
554 } else {
555 - for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
556 - for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
557 + for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
558 + for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
559 max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
560 if (max_pix_clock >= pix_clock) {
561 *dp_lanes = lane_num;
562 diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
563 index 3b0c229d7dcd..db64e0062689 100644
564 --- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
565 +++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
566 @@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
567
568 tmp &= AUX_HPD_SEL(0x7);
569 tmp |= AUX_HPD_SEL(chan->rec.hpd);
570 - tmp |= AUX_EN | AUX_LS_READ_EN;
571 + tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
572
573 WREG32(AUX_CONTROL + aux_offset[instance], tmp);
574
575 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
576 index ed9cefa1f6f1..eaed31d04468 100644
577 --- a/drivers/infiniband/hw/mlx5/main.c
578 +++ b/drivers/infiniband/hw/mlx5/main.c
579 @@ -654,8 +654,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
580 struct mlx5_ib_dev *dev = to_mdev(ibdev);
581 struct mlx5_core_dev *mdev = dev->mdev;
582 struct mlx5_hca_vport_context *rep;
583 - int max_mtu;
584 - int oper_mtu;
585 + u16 max_mtu;
586 + u16 oper_mtu;
587 int err;
588 u8 ib_link_width_oper;
589 u8 vl_hw_cap;
590 diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
591 index a806ba3818f7..8d6326d7e7be 100644
592 --- a/drivers/input/misc/max8997_haptic.c
593 +++ b/drivers/input/misc/max8997_haptic.c
594 @@ -255,12 +255,14 @@ static int max8997_haptic_probe(struct platform_device *pdev)
595 struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
596 const struct max8997_platform_data *pdata =
597 dev_get_platdata(iodev->dev);
598 - const struct max8997_haptic_platform_data *haptic_pdata =
599 - pdata->haptic_pdata;
600 + const struct max8997_haptic_platform_data *haptic_pdata = NULL;
601 struct max8997_haptic *chip;
602 struct input_dev *input_dev;
603 int error;
604
605 + if (pdata)
606 + haptic_pdata = pdata->haptic_pdata;
607 +
608 if (!haptic_pdata) {
609 dev_err(&pdev->dev, "no haptic platform data\n");
610 return -EINVAL;
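The haptic probe fix is a plain NULL-dereference repair: dev_get_platdata() can return NULL, and the old initializer chased pdata->haptic_pdata before anyone checked. The corrected shape, as a sketch:

    const struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
    const struct max8997_haptic_platform_data *haptic_pdata = NULL;

    /* only dereference pdata once it is known to be non-NULL */
    if (pdata)
            haptic_pdata = pdata->haptic_pdata;

    if (!haptic_pdata) {
            dev_err(&pdev->dev, "no haptic platform data\n");
            return -EINVAL;
    }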
611 diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
612 index 7f366f1b0377..0b1b8c7b6ce5 100644
613 --- a/drivers/media/v4l2-core/videobuf2-v4l2.c
614 +++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
615 @@ -74,11 +74,6 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
616 return 0;
617 }
618
619 -static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
620 -{
621 - return __verify_planes_array(vb, pb);
622 -}
623 -
624 /**
625 * __verify_length() - Verify that the bytesused value for each plane fits in
626 * the plane length and that the data offset doesn't exceed the bytesused value.
627 @@ -442,7 +437,6 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
628 }
629
630 static const struct vb2_buf_ops v4l2_buf_ops = {
631 - .verify_planes_array = __verify_planes_array_core,
632 .fill_user_buffer = __fill_v4l2_buffer,
633 .fill_vb2_buffer = __fill_vb2_buffer,
634 .copy_timestamp = __copy_timestamp,
635 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
636 index 8f76f4558a88..2ff465848b65 100644
637 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
638 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
639 @@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
640
641 err = -EIO;
642
643 - netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
644 + netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
645 netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
646
647 /* Init PHY as early as possible due to power saving issue */
648 diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
649 index 767347b1f631..f50bdbfaae7c 100644
650 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
651 +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
652 @@ -519,6 +519,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
653 nicvf_config_vlan_stripping(nic, nic->netdev->features);
654
655 /* Enable Receive queue */
656 + memset(&rq_cfg, 0, sizeof(struct rq_cfg));
657 rq_cfg.ena = 1;
658 rq_cfg.tcp_ena = 0;
659 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
660 @@ -551,6 +552,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
661 qidx, (u64)(cq->dmem.phys_base));
662
663 /* Enable Completion queue */
664 + memset(&cq_cfg, 0, sizeof(struct cq_cfg));
665 cq_cfg.ena = 1;
666 cq_cfg.reset = 0;
667 cq_cfg.caching = 0;
668 @@ -599,6 +601,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
669 qidx, (u64)(sq->dmem.phys_base));
670
671 /* Enable send queue & set queue size */
672 + memset(&sq_cfg, 0, sizeof(struct sq_cfg));
673 sq_cfg.ena = 1;
674 sq_cfg.reset = 0;
675 sq_cfg.ldwb = 0;
676 @@ -635,6 +638,7 @@ static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
677
678 /* Enable RBDR & set queue size */
679 /* Buffer size should be in multiples of 128 bytes */
680 + memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
681 rbdr_cfg.ena = 1;
682 rbdr_cfg.reset = 0;
683 rbdr_cfg.ldwb = 0;
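All four memset() additions share one cause: the queue-config structs are stack-allocated bitfield structs that get written to the NIC as a raw 64-bit image via *(u64 *)&cfg, so every bit the code does not assign (including reserved ones) would otherwise carry stack garbage into the register. Sketch of the pattern, using the receive-queue case from the hunk:

    struct rq_cfg rq_cfg;

    /* zero the whole image first: reserved bits must not be garbage */
    memset(&rq_cfg, 0, sizeof(struct rq_cfg));
    rq_cfg.ena = 1;
    rq_cfg.tcp_ena = 0;

    /* the struct is punned to the register's raw 64-bit value */
    nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);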
684 diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
685 index 41c81f6ec630..0d6e8c177650 100644
686 --- a/drivers/net/ethernet/freescale/fec_main.c
687 +++ b/drivers/net/ethernet/freescale/fec_main.c
688 @@ -1566,9 +1566,15 @@ fec_enet_rx(struct net_device *ndev, int budget)
689 struct fec_enet_private *fep = netdev_priv(ndev);
690
691 for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
692 - clear_bit(queue_id, &fep->work_rx);
693 - pkt_received += fec_enet_rx_queue(ndev,
694 + int ret;
695 +
696 + ret = fec_enet_rx_queue(ndev,
697 budget - pkt_received, queue_id);
698 +
699 + if (ret < budget - pkt_received)
700 + clear_bit(queue_id, &fep->work_rx);
701 +
702 + pkt_received += ret;
703 }
704 return pkt_received;
705 }
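The FEC receive fix applies a standard NAPI-style rule: a queue's work-pending bit may only be cleared when the poll consumed fewer packets than its budget, which proves the queue was drained; clearing it up front, as the old code did, can lose the indication for packets that arrive after the budget runs out. In sketch form:

    int ret = fec_enet_rx_queue(ndev, budget - pkt_received, queue_id);

    /* ret == remaining budget means the queue may still hold packets,
     * so leave the bit set and let the next poll pick them up */
    if (ret < budget - pkt_received)
            clear_bit(queue_id, &fep->work_rx);

    pkt_received += ret;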
706 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
707 index 41440b2b20a3..03ef9aca21e4 100644
708 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
709 +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
710 @@ -704,7 +704,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
711
712 if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
713 return -1;
714 - hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));
715 + hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
716
717 csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
718 sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
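The csum fix is an endianness repair. nexthdr is a single byte that the IPv6 pseudo-header carries in the low byte of a 16-bit network-order word ({0x00, nexthdr} on the wire), and checksum code folds such words using the host's own integer representation. Worked out for nexthdr = 0x06 (TCP):

    little-endian host: htons(0x06) = 0x0600 == (0x06 << 8)   (shift happened to work)
    big-endian host:    htons(0x06) = 0x0006 != (0x06 << 8)   (shift was wrong)

htons() therefore produces the right accumulator constant on both byte orders, while the old shift was only correct on little-endian machines.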
719 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
720 index e0946ab22010..0debb611da8b 100644
721 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
722 +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
723 @@ -402,7 +402,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
724 u32 packets = 0;
725 u32 bytes = 0;
726 int factor = priv->cqe_factor;
727 - u64 timestamp = 0;
728 int done = 0;
729 int budget = priv->tx_work_limit;
730 u32 last_nr_txbb;
731 @@ -442,9 +441,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
732 new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
733
734 do {
735 + u64 timestamp = 0;
736 +
737 txbbs_skipped += last_nr_txbb;
738 ring_index = (ring_index + last_nr_txbb) & size_mask;
739 - if (ring->tx_info[ring_index].ts_requested)
740 +
741 + if (unlikely(ring->tx_info[ring_index].ts_requested))
742 timestamp = mlx4_en_get_cqe_ts(cqe);
743
744 /* free next descriptor */
745 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
746 index 402994bf7e16..e293a2ec2775 100644
747 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
748 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
749 @@ -1389,24 +1389,50 @@ static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
750 return 0;
751 }
752
753 -static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
754 +static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
755 {
756 - struct mlx5e_priv *priv = netdev_priv(netdev);
757 struct mlx5_core_dev *mdev = priv->mdev;
758 - int hw_mtu;
759 + u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
760 int err;
761
762 - err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
763 + err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
764 if (err)
765 return err;
766
767 - mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
768 + /* Update vport context MTU */
769 + mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
770 + return 0;
771 +}
772 +
773 +static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
774 +{
775 + struct mlx5_core_dev *mdev = priv->mdev;
776 + u16 hw_mtu = 0;
777 + int err;
778 +
779 + err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
780 + if (err || !hw_mtu) /* fallback to port oper mtu */
781 + mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
782 +
783 + *mtu = MLX5E_HW2SW_MTU(hw_mtu);
784 +}
785 +
786 +static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
787 +{
788 + struct mlx5e_priv *priv = netdev_priv(netdev);
789 + u16 mtu;
790 + int err;
791
792 - if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
793 - netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
794 - __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
795 + err = mlx5e_set_mtu(priv, netdev->mtu);
796 + if (err)
797 + return err;
798
799 - netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
800 + mlx5e_query_mtu(priv, &mtu);
801 + if (mtu != netdev->mtu)
802 + netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
803 + __func__, mtu, netdev->mtu);
804 +
805 + netdev->mtu = mtu;
806 return 0;
807 }
808
809 @@ -1906,22 +1932,27 @@ static int mlx5e_set_features(struct net_device *netdev,
810 return err;
811 }
812
813 +#define MXL5_HW_MIN_MTU 64
814 +#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
815 +
816 static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
817 {
818 struct mlx5e_priv *priv = netdev_priv(netdev);
819 struct mlx5_core_dev *mdev = priv->mdev;
820 bool was_opened;
821 - int max_mtu;
822 + u16 max_mtu;
823 + u16 min_mtu;
824 int err = 0;
825
826 mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
827
828 max_mtu = MLX5E_HW2SW_MTU(max_mtu);
829 + min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
830
831 - if (new_mtu > max_mtu) {
832 + if (new_mtu > max_mtu || new_mtu < min_mtu) {
833 netdev_err(netdev,
834 - "%s: Bad MTU (%d) > (%d) Max\n",
835 - __func__, new_mtu, max_mtu);
836 + "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
837 + __func__, new_mtu, min_mtu, max_mtu);
838 return -EINVAL;
839 }
840
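The mlx5 MTU rework splits the old function into a setter and a getter and adopts a set-then-verify pattern: program the requested MTU into both the port and the NIC vport context, read back what the device actually accepted, warn on a mismatch, and adopt the effective value; the new change_mtu path also rejects values below a floor instead of only checking the maximum. The core of the pattern, condensed from the hunks (warning text paraphrased):

    err = mlx5e_set_mtu(priv, netdev->mtu);     /* port + vport context */
    if (err)
            return err;

    mlx5e_query_mtu(priv, &mtu);                /* what the HW accepted */
    if (mtu != netdev->mtu)
            netdev_warn(netdev, "effective MTU %d differs from requested %d\n",
                        mtu, netdev->mtu);

    netdev->mtu = mtu;                          /* adopt the real value */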
841 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
842 index 6f68dba8d7ed..cc901852f1a9 100644
843 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
844 +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
845 @@ -957,33 +957,6 @@ unlock_fg:
846 return rule;
847 }
848
849 -static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft,
850 - u8 match_criteria_enable,
851 - u32 *match_criteria,
852 - u32 *match_value,
853 - u8 action,
854 - u32 flow_tag,
855 - struct mlx5_flow_destination *dest)
856 -{
857 - struct mlx5_flow_rule *rule;
858 - struct mlx5_flow_group *g;
859 -
860 - g = create_autogroup(ft, match_criteria_enable, match_criteria);
861 - if (IS_ERR(g))
862 - return (void *)g;
863 -
864 - rule = add_rule_fg(g, match_value,
865 - action, flow_tag, dest);
866 - if (IS_ERR(rule)) {
867 - /* Remove assumes refcount > 0 and autogroup creates a group
868 - * with a refcount = 0.
869 - */
870 - tree_get_node(&g->node);
871 - tree_remove_node(&g->node);
872 - }
873 - return rule;
874 -}
875 -
876 struct mlx5_flow_rule *
877 mlx5_add_flow_rule(struct mlx5_flow_table *ft,
878 u8 match_criteria_enable,
879 @@ -1008,8 +981,23 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
880 goto unlock;
881 }
882
883 - rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria,
884 - match_value, action, flow_tag, dest);
885 + g = create_autogroup(ft, match_criteria_enable, match_criteria);
886 + if (IS_ERR(g)) {
887 + rule = (void *)g;
888 + goto unlock;
889 + }
890 +
891 + rule = add_rule_fg(g, match_value,
892 + action, flow_tag, dest);
893 + if (IS_ERR(rule)) {
894 + /* Remove assumes refcount > 0 and autogroup creates a group
895 + * with a refcount = 0.
896 + */
897 + unlock_ref_node(&ft->node);
898 + tree_get_node(&g->node);
899 + tree_remove_node(&g->node);
900 + return rule;
901 + }
902 unlock:
903 unlock_ref_node(&ft->node);
904 return rule;
905 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
906 index a87e773e93f3..53a793bc2e3d 100644
907 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
908 +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
909 @@ -246,8 +246,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
910 }
911 EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
912
913 -static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
914 - int *max_mtu, int *oper_mtu, u8 port)
915 +static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
916 + u16 *max_mtu, u16 *oper_mtu, u8 port)
917 {
918 u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
919 u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
920 @@ -267,7 +267,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
921 *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
922 }
923
924 -int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
925 +int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
926 {
927 u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
928 u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
929 @@ -282,14 +282,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
930 }
931 EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
932
933 -void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
934 +void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
935 u8 port)
936 {
937 mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
938 }
939 EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
940
941 -void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
942 +void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
943 u8 port)
944 {
945 mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
946 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
947 index c7398b95aecd..6d5f56e73b5d 100644
948 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
949 +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
950 @@ -196,6 +196,46 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
951 }
952 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
953
954 +int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
955 +{
956 + int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
957 + u32 *out;
958 + int err;
959 +
960 + out = mlx5_vzalloc(outlen);
961 + if (!out)
962 + return -ENOMEM;
963 +
964 + err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
965 + if (!err)
966 + *mtu = MLX5_GET(query_nic_vport_context_out, out,
967 + nic_vport_context.mtu);
968 +
969 + kvfree(out);
970 + return err;
971 +}
972 +EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
973 +
974 +int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
975 +{
976 + int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
977 + void *in;
978 + int err;
979 +
980 + in = mlx5_vzalloc(inlen);
981 + if (!in)
982 + return -ENOMEM;
983 +
984 + MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
985 + MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
986 +
987 + err = mlx5_modify_nic_vport_context(mdev, in, inlen);
988 +
989 + kvfree(in);
990 + return err;
991 +}
992 +EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
993 +
994 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
995 u32 vport,
996 enum mlx5_list_type list_type,
997 diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
998 index bdd83d95ec0a..96a5028621c8 100644
999 --- a/drivers/net/usb/cdc_mbim.c
1000 +++ b/drivers/net/usb/cdc_mbim.c
1001 @@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
1002 { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
1003 .driver_info = (unsigned long)&cdc_mbim_info,
1004 },
1005 - /* Huawei E3372 fails unless NDP comes after the IP packets */
1006 - { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
1007 +
1008 + /* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
1009 + * (12d1:157d), are known to fail unless the NDP is placed
1010 + * after the IP packets. Applying the quirk to all Huawei
1011 + * devices is broader than necessary, but harmless.
1012 + */
1013 + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
1014 .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
1015 },
1016 /* default entry */
1017 diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
1018 index ee69db6ae1c7..e1c0d4e1bb33 100644
1019 --- a/drivers/pinctrl/pinctrl-at91-pio4.c
1020 +++ b/drivers/pinctrl/pinctrl-at91-pio4.c
1021 @@ -722,9 +722,11 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
1022 break;
1023 case PIN_CONFIG_BIAS_PULL_UP:
1024 conf |= ATMEL_PIO_PUEN_MASK;
1025 + conf &= (~ATMEL_PIO_PDEN_MASK);
1026 break;
1027 case PIN_CONFIG_BIAS_PULL_DOWN:
1028 conf |= ATMEL_PIO_PDEN_MASK;
1029 + conf &= (~ATMEL_PIO_PUEN_MASK);
1030 break;
1031 case PIN_CONFIG_DRIVE_OPEN_DRAIN:
1032 if (arg == 0)
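The pinctrl fix handles mutually exclusive register bits: pull-up enable and pull-down enable are independent bits in the same configuration word, so selecting one bias must explicitly clear the other, or a previously applied bias survives and both resistors can end up enabled at once. In sketch form:

    case PIN_CONFIG_BIAS_PULL_UP:
            conf |= ATMEL_PIO_PUEN_MASK;
            conf &= ~ATMEL_PIO_PDEN_MASK;   /* drop any stale pull-down */
            break;
    case PIN_CONFIG_BIAS_PULL_DOWN:
            conf |= ATMEL_PIO_PDEN_MASK;
            conf &= ~ATMEL_PIO_PUEN_MASK;   /* drop any stale pull-up */
            break;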
1033 diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
1034 index f2e1a39ce0f3..5cf4a97e0304 100644
1035 --- a/drivers/regulator/axp20x-regulator.c
1036 +++ b/drivers/regulator/axp20x-regulator.c
1037 @@ -221,10 +221,10 @@ static const struct regulator_desc axp22x_regulators[] = {
1038 AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
1039 AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
1040 AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
1041 - AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 1800, 3300, 100,
1042 + AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 700, 3300, 100,
1043 AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
1044 AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
1045 - AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 1800, 3300, 100,
1046 + AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 700, 3300, 100,
1047 AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
1048 AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
1049 AXP_DESC_FIXED(AXP22X, RTC_LDO, "rtc_ldo", "ips", 3000),
1050 diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
1051 index 3242ffc0cb25..09dce49609c1 100644
1052 --- a/drivers/regulator/s2mps11.c
1053 +++ b/drivers/regulator/s2mps11.c
1054 @@ -306,7 +306,7 @@ static struct regulator_ops s2mps11_buck_ops = {
1055 .enable_mask = S2MPS11_ENABLE_MASK \
1056 }
1057
1058 -#define regulator_desc_s2mps11_buck6_10(num, min, step) { \
1059 +#define regulator_desc_s2mps11_buck67810(num, min, step) { \
1060 .name = "BUCK"#num, \
1061 .id = S2MPS11_BUCK##num, \
1062 .ops = &s2mps11_buck_ops, \
1063 @@ -322,6 +322,22 @@ static struct regulator_ops s2mps11_buck_ops = {
1064 .enable_mask = S2MPS11_ENABLE_MASK \
1065 }
1066
1067 +#define regulator_desc_s2mps11_buck9 { \
1068 + .name = "BUCK9", \
1069 + .id = S2MPS11_BUCK9, \
1070 + .ops = &s2mps11_buck_ops, \
1071 + .type = REGULATOR_VOLTAGE, \
1072 + .owner = THIS_MODULE, \
1073 + .min_uV = MIN_3000_MV, \
1074 + .uV_step = STEP_25_MV, \
1075 + .n_voltages = S2MPS11_BUCK9_N_VOLTAGES, \
1076 + .ramp_delay = S2MPS11_RAMP_DELAY, \
1077 + .vsel_reg = S2MPS11_REG_B9CTRL2, \
1078 + .vsel_mask = S2MPS11_BUCK9_VSEL_MASK, \
1079 + .enable_reg = S2MPS11_REG_B9CTRL1, \
1080 + .enable_mask = S2MPS11_ENABLE_MASK \
1081 +}
1082 +
1083 static const struct regulator_desc s2mps11_regulators[] = {
1084 regulator_desc_s2mps11_ldo(1, STEP_25_MV),
1085 regulator_desc_s2mps11_ldo(2, STEP_50_MV),
1086 @@ -366,11 +382,11 @@ static const struct regulator_desc s2mps11_regulators[] = {
1087 regulator_desc_s2mps11_buck1_4(3),
1088 regulator_desc_s2mps11_buck1_4(4),
1089 regulator_desc_s2mps11_buck5,
1090 - regulator_desc_s2mps11_buck6_10(6, MIN_600_MV, STEP_6_25_MV),
1091 - regulator_desc_s2mps11_buck6_10(7, MIN_600_MV, STEP_6_25_MV),
1092 - regulator_desc_s2mps11_buck6_10(8, MIN_600_MV, STEP_6_25_MV),
1093 - regulator_desc_s2mps11_buck6_10(9, MIN_3000_MV, STEP_25_MV),
1094 - regulator_desc_s2mps11_buck6_10(10, MIN_750_MV, STEP_12_5_MV),
1095 + regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
1096 + regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
1097 + regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
1098 + regulator_desc_s2mps11_buck9,
1099 + regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
1100 };
1101
1102 static struct regulator_ops s2mps14_reg_ops;
1103 diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
1104 index 5d0ec42a9317..634254a52301 100644
1105 --- a/drivers/scsi/qla1280.c
1106 +++ b/drivers/scsi/qla1280.c
1107 @@ -4214,7 +4214,7 @@ static struct scsi_host_template qla1280_driver_template = {
1108 .eh_bus_reset_handler = qla1280_eh_bus_reset,
1109 .eh_host_reset_handler = qla1280_eh_adapter_reset,
1110 .bios_param = qla1280_biosparam,
1111 - .can_queue = 0xfffff,
1112 + .can_queue = MAX_OUTSTANDING_COMMANDS,
1113 .this_id = -1,
1114 .sg_tablesize = SG_ALL,
1115 .use_clustering = ENABLE_CLUSTERING,
1116 diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
1117 index ab9914ad8365..64232ecbb821 100644
1118 --- a/drivers/spi/spi-pxa2xx.c
1119 +++ b/drivers/spi/spi-pxa2xx.c
1120 @@ -111,7 +111,7 @@ static const struct lpss_config lpss_platforms[] = {
1121 .reg_general = -1,
1122 .reg_ssp = 0x20,
1123 .reg_cs_ctrl = 0x24,
1124 - .reg_capabilities = 0xfc,
1125 + .reg_capabilities = -1,
1126 .rx_threshold = 1,
1127 .tx_threshold_lo = 32,
1128 .tx_threshold_hi = 56,
1129 diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
1130 index 64318fcfacf2..5044c6198332 100644
1131 --- a/drivers/spi/spi-ti-qspi.c
1132 +++ b/drivers/spi/spi-ti-qspi.c
1133 @@ -94,6 +94,7 @@ struct ti_qspi {
1134 #define QSPI_FLEN(n) ((n - 1) << 0)
1135 #define QSPI_WLEN_MAX_BITS 128
1136 #define QSPI_WLEN_MAX_BYTES 16
1137 +#define QSPI_WLEN_MASK QSPI_WLEN(QSPI_WLEN_MAX_BITS)
1138
1139 /* STATUS REGISTER */
1140 #define BUSY 0x01
1141 @@ -224,16 +225,16 @@ static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
1142 return -ETIMEDOUT;
1143 }
1144
1145 -static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
1146 +static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
1147 + int count)
1148 {
1149 - int wlen, count, xfer_len;
1150 + int wlen, xfer_len;
1151 unsigned int cmd;
1152 const u8 *txbuf;
1153 u32 data;
1154
1155 txbuf = t->tx_buf;
1156 cmd = qspi->cmd | QSPI_WR_SNGL;
1157 - count = t->len;
1158 wlen = t->bits_per_word >> 3; /* in bytes */
1159 xfer_len = wlen;
1160
1161 @@ -293,9 +294,10 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
1162 return 0;
1163 }
1164
1165 -static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
1166 +static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
1167 + int count)
1168 {
1169 - int wlen, count;
1170 + int wlen;
1171 unsigned int cmd;
1172 u8 *rxbuf;
1173
1174 @@ -312,7 +314,6 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
1175 cmd |= QSPI_RD_SNGL;
1176 break;
1177 }
1178 - count = t->len;
1179 wlen = t->bits_per_word >> 3; /* in bytes */
1180
1181 while (count) {
1182 @@ -343,12 +344,13 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
1183 return 0;
1184 }
1185
1186 -static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
1187 +static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
1188 + int count)
1189 {
1190 int ret;
1191
1192 if (t->tx_buf) {
1193 - ret = qspi_write_msg(qspi, t);
1194 + ret = qspi_write_msg(qspi, t, count);
1195 if (ret) {
1196 dev_dbg(qspi->dev, "Error while writing\n");
1197 return ret;
1198 @@ -356,7 +358,7 @@ static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
1199 }
1200
1201 if (t->rx_buf) {
1202 - ret = qspi_read_msg(qspi, t);
1203 + ret = qspi_read_msg(qspi, t, count);
1204 if (ret) {
1205 dev_dbg(qspi->dev, "Error while reading\n");
1206 return ret;
1207 @@ -373,7 +375,8 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
1208 struct spi_device *spi = m->spi;
1209 struct spi_transfer *t;
1210 int status = 0, ret;
1211 - int frame_length;
1212 + unsigned int frame_len_words, transfer_len_words;
1213 + int wlen;
1214
1215 /* setup device control reg */
1216 qspi->dc = 0;
1217 @@ -385,30 +388,38 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
1218 if (spi->mode & SPI_CS_HIGH)
1219 qspi->dc |= QSPI_CSPOL(spi->chip_select);
1220
1221 - frame_length = (m->frame_length << 3) / spi->bits_per_word;
1222 -
1223 - frame_length = clamp(frame_length, 0, QSPI_FRAME);
1224 + frame_len_words = 0;
1225 + list_for_each_entry(t, &m->transfers, transfer_list)
1226 + frame_len_words += t->len / (t->bits_per_word >> 3);
1227 + frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);
1228
1229 /* setup command reg */
1230 qspi->cmd = 0;
1231 qspi->cmd |= QSPI_EN_CS(spi->chip_select);
1232 - qspi->cmd |= QSPI_FLEN(frame_length);
1233 + qspi->cmd |= QSPI_FLEN(frame_len_words);
1234
1235 ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
1236
1237 mutex_lock(&qspi->list_lock);
1238
1239 list_for_each_entry(t, &m->transfers, transfer_list) {
1240 - qspi->cmd |= QSPI_WLEN(t->bits_per_word);
1241 + qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
1242 + QSPI_WLEN(t->bits_per_word));
1243 +
1244 + wlen = t->bits_per_word >> 3;
1245 + transfer_len_words = min(t->len / wlen, frame_len_words);
1246
1247 - ret = qspi_transfer_msg(qspi, t);
1248 + ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
1249 if (ret) {
1250 dev_dbg(qspi->dev, "transfer message failed\n");
1251 mutex_unlock(&qspi->list_lock);
1252 return -EINVAL;
1253 }
1254
1255 - m->actual_length += t->len;
1256 + m->actual_length += transfer_len_words * wlen;
1257 + frame_len_words -= transfer_len_words;
1258 + if (frame_len_words == 0)
1259 + break;
1260 }
1261
1262 mutex_unlock(&qspi->list_lock);
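The ti-qspi rework fixes two related unit bugs: the controller's frame length field counts words, not bytes, and the word length can change from transfer to transfer, so WLEN must be replaced (masked out and rewritten) rather than OR-ed in, where a stale value could survive. The bookkeeping at the heart of the change:

    /* frame length = total words across all transfers, capped by HW */
    frame_len_words = 0;
    list_for_each_entry(t, &m->transfers, transfer_list)
            frame_len_words += t->len / (t->bits_per_word >> 3);
    frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);

    /* per transfer: replace the word-length field, don't accumulate it */
    qspi->cmd = (qspi->cmd & ~QSPI_WLEN_MASK) | QSPI_WLEN(t->bits_per_word);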
1263 diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
1264 index 86de50c9f7f5..b3d6541b3896 100644
1265 --- a/drivers/staging/wilc1000/wilc_spi.c
1266 +++ b/drivers/staging/wilc1000/wilc_spi.c
1267 @@ -120,8 +120,6 @@ static u8 crc7(u8 crc, const u8 *buffer, u32 len)
1268
1269 #define USE_SPI_DMA 0
1270
1271 -static const struct wilc1000_ops wilc1000_spi_ops;
1272 -
1273 static int wilc_bus_probe(struct spi_device *spi)
1274 {
1275 int ret, gpio;
1276 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
1277 index 769e0ff1b4ce..dea6486a7508 100644
1278 --- a/fs/btrfs/ctree.c
1279 +++ b/fs/btrfs/ctree.c
1280 @@ -19,6 +19,7 @@
1281 #include <linux/sched.h>
1282 #include <linux/slab.h>
1283 #include <linux/rbtree.h>
1284 +#include <linux/vmalloc.h>
1285 #include "ctree.h"
1286 #include "disk-io.h"
1287 #include "transaction.h"
1288 @@ -5361,10 +5362,13 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
1289 goto out;
1290 }
1291
1292 - tmp_buf = kmalloc(left_root->nodesize, GFP_NOFS);
1293 + tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL | __GFP_NOWARN);
1294 if (!tmp_buf) {
1295 - ret = -ENOMEM;
1296 - goto out;
1297 + tmp_buf = vmalloc(left_root->nodesize);
1298 + if (!tmp_buf) {
1299 + ret = -ENOMEM;
1300 + goto out;
1301 + }
1302 }
1303
1304 left_path->search_commit_root = 1;
1305 @@ -5565,7 +5569,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
1306 out:
1307 btrfs_free_path(left_path);
1308 btrfs_free_path(right_path);
1309 - kfree(tmp_buf);
1310 + kvfree(tmp_buf);
1311 return ret;
1312 }
1313
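The btrfs_compare_trees() hunk is the standard large-buffer fallback: nodesize can be as large as 64 KiB, so a physically contiguous kmalloc() may fail under fragmentation. The fix tries kmalloc() with __GFP_NOWARN to suppress the allocation-failure splat, falls back to vmalloc(), and frees with kvfree(), which accepts either kind of pointer. As a sketch (later kernels wrap this whole dance in kvmalloc()):

    tmp_buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
    if (!tmp_buf) {
            tmp_buf = vmalloc(size);        /* virtually contiguous is fine here */
            if (!tmp_buf)
                    return -ENOMEM;
    }
    /* ... use tmp_buf ... */
    kvfree(tmp_buf);                        /* handles both allocation kinds */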
1314 diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
1315 index bfe4a337fb4d..6661ad8b4088 100644
1316 --- a/fs/btrfs/ctree.h
1317 +++ b/fs/btrfs/ctree.h
1318 @@ -2252,7 +2252,7 @@ struct btrfs_ioctl_defrag_range_args {
1319 #define BTRFS_MOUNT_FREE_SPACE_TREE (1 << 26)
1320
1321 #define BTRFS_DEFAULT_COMMIT_INTERVAL (30)
1322 -#define BTRFS_DEFAULT_MAX_INLINE (8192)
1323 +#define BTRFS_DEFAULT_MAX_INLINE (2048)
1324
1325 #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
1326 #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
1327 diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
1328 index cbb7dbfb3fff..218f51a5dbab 100644
1329 --- a/fs/btrfs/dev-replace.c
1330 +++ b/fs/btrfs/dev-replace.c
1331 @@ -394,6 +394,8 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
1332 dev_replace->cursor_right = 0;
1333 dev_replace->is_valid = 1;
1334 dev_replace->item_needs_writeback = 1;
1335 + atomic64_set(&dev_replace->num_write_errors, 0);
1336 + atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
1337 args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
1338 btrfs_dev_replace_unlock(dev_replace);
1339
1340 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
1341 index d8d68af5aef0..ae6e3e36fdf0 100644
1342 --- a/fs/btrfs/disk-io.c
1343 +++ b/fs/btrfs/disk-io.c
1344 @@ -303,7 +303,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
1345 err = map_private_extent_buffer(buf, offset, 32,
1346 &kaddr, &map_start, &map_len);
1347 if (err)
1348 - return 1;
1349 + return err;
1350 cur_len = min(len, map_len - (offset - map_start));
1351 crc = btrfs_csum_data(kaddr + offset - map_start,
1352 crc, cur_len);
1353 @@ -313,7 +313,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
1354 if (csum_size > sizeof(inline_result)) {
1355 result = kzalloc(csum_size, GFP_NOFS);
1356 if (!result)
1357 - return 1;
1358 + return -ENOMEM;
1359 } else {
1360 result = (char *)&inline_result;
1361 }
1362 @@ -334,7 +334,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
1363 val, found, btrfs_header_level(buf));
1364 if (result != (char *)&inline_result)
1365 kfree(result);
1366 - return 1;
1367 + return -EUCLEAN;
1368 }
1369 } else {
1370 write_extent_buffer(buf, result, 0, csum_size);
1371 @@ -513,11 +513,21 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
1372 eb = (struct extent_buffer *)page->private;
1373 if (page != eb->pages[0])
1374 return 0;
1375 +
1376 found_start = btrfs_header_bytenr(eb);
1377 - if (WARN_ON(found_start != start || !PageUptodate(page)))
1378 - return 0;
1379 - csum_tree_block(fs_info, eb, 0);
1380 - return 0;
1381 + /*
1382 + * Please do not consolidate these warnings into a single if.
1383 + * It is useful to know what went wrong.
1384 + */
1385 + if (WARN_ON(found_start != start))
1386 + return -EUCLEAN;
1387 + if (WARN_ON(!PageUptodate(page)))
1388 + return -EUCLEAN;
1389 +
1390 + ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
1391 + btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);
1392 +
1393 + return csum_tree_block(fs_info, eb, 0);
1394 }
1395
1396 static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
1397 @@ -660,10 +670,8 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
1398 eb, found_level);
1399
1400 ret = csum_tree_block(root->fs_info, eb, 1);
1401 - if (ret) {
1402 - ret = -EIO;
1403 + if (ret)
1404 goto err;
1405 - }
1406
1407 /*
1408 * If this is a leaf block and it is corrupt, set the corrupt bit so
1409 diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
1410 index 9a30ca64066b..5d956b869e03 100644
1411 --- a/fs/btrfs/file.c
1412 +++ b/fs/btrfs/file.c
1413 @@ -1996,10 +1996,11 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1414 */
1415 smp_mb();
1416 if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
1417 - (BTRFS_I(inode)->last_trans <=
1418 - root->fs_info->last_trans_committed &&
1419 - (full_sync ||
1420 - !btrfs_have_ordered_extents_in_range(inode, start, len)))) {
1421 + (full_sync && BTRFS_I(inode)->last_trans <=
1422 + root->fs_info->last_trans_committed) ||
1423 + (!btrfs_have_ordered_extents_in_range(inode, start, len) &&
1424 + BTRFS_I(inode)->last_trans
1425 + <= root->fs_info->last_trans_committed)) {
1426 /*
1427 * We'v had everything committed since the last time we were
1428 * modified so clear this flag in case it was set for whatever
1429 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
1430 index d96f5cf38a2d..f407e487c687 100644
1431 --- a/fs/btrfs/inode.c
1432 +++ b/fs/btrfs/inode.c
1433 @@ -4211,11 +4211,20 @@ static int truncate_space_check(struct btrfs_trans_handle *trans,
1434 {
1435 int ret;
1436
1437 + /*
1438 + * This is only used to apply pressure to the enospc system, we don't
1439 + * intend to use this reservation at all.
1440 + */
1441 bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
1442 + bytes_deleted *= root->nodesize;
1443 ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv,
1444 bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
1445 - if (!ret)
1446 + if (!ret) {
1447 + trace_btrfs_space_reservation(root->fs_info, "transaction",
1448 + trans->transid,
1449 + bytes_deleted, 1);
1450 trans->bytes_reserved += bytes_deleted;
1451 + }
1452 return ret;
1453
1454 }
1455 @@ -7414,7 +7423,26 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
1456 cached_state, GFP_NOFS);
1457
1458 if (ordered) {
1459 - btrfs_start_ordered_extent(inode, ordered, 1);
1460 + /*
1461 + * If we are doing a DIO read and the ordered extent we
1462 + * found is for a buffered write, we can not wait for it
1463 + * to complete and retry, because if we do so we can
1464 + * deadlock with concurrent buffered writes on page
1465 + * locks. This happens only if our DIO read covers more
1466 + * than one extent map, if at this point has already
1467 + * created an ordered extent for a previous extent map
1468 + * and locked its range in the inode's io tree, and a
1469 + * concurrent write against that previous extent map's
1470 + * range and this range started (we unlock the ranges
1471 + * in the io tree only when the bios complete and
1472 + * buffered writes always lock pages before attempting
1473 + * to lock range in the io tree).
1474 + */
1475 + if (writing ||
1476 + test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
1477 + btrfs_start_ordered_extent(inode, ordered, 1);
1478 + else
1479 + ret = -ENOTBLK;
1480 btrfs_put_ordered_extent(ordered);
1481 } else {
1482 /*
1483 @@ -7431,9 +7459,11 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
1484 * that page.
1485 */
1486 ret = -ENOTBLK;
1487 - break;
1488 }
1489
1490 + if (ret)
1491 + break;
1492 +
1493 cond_resched();
1494 }
1495
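
The hunk above turns an unconditional wait into a guarded one: a DIO operation may wait for an ordered extent only when it is a write or the ordered extent came from direct IO itself; otherwise it must return -ENOTBLK and fall back. A minimal userspace sketch of that predicate alone - the macro and function names are illustrative stand-ins, not kernel API:

#include <stdbool.h>
#include <stdio.h>

#define ORDERED_DIRECT (1u << 0)  /* stand-in for BTRFS_ORDERED_DIRECT */

/* true: safe to wait for the ordered extent; false: the caller must
 * back off (-ENOTBLK) to avoid the page-lock deadlock described above */
static bool may_wait_for_ordered(bool writing, unsigned int ordered_flags)
{
        return writing || (ordered_flags & ORDERED_DIRECT);
}

int main(void)
{
        printf("%d\n", may_wait_for_ordered(false, 0));              /* 0 */
        printf("%d\n", may_wait_for_ordered(false, ORDERED_DIRECT)); /* 1 */
        printf("%d\n", may_wait_for_ordered(true, 0));               /* 1 */
        return 0;
}

The first call models exactly the deadlock case the comment describes: a DIO read meeting an ordered extent created by a concurrent buffered write.
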
1496 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
1497 index 48aee9846329..e3791f268489 100644
1498 --- a/fs/btrfs/ioctl.c
1499 +++ b/fs/btrfs/ioctl.c
1500 @@ -59,6 +59,7 @@
1501 #include "props.h"
1502 #include "sysfs.h"
1503 #include "qgroup.h"
1504 +#include "tree-log.h"
1505
1506 #ifdef CONFIG_64BIT
1507 /* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
1508 @@ -1656,7 +1657,7 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
1509
1510 src_inode = file_inode(src.file);
1511 if (src_inode->i_sb != file_inode(file)->i_sb) {
1512 - btrfs_info(BTRFS_I(src_inode)->root->fs_info,
1513 + btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
1514 "Snapshot src from another FS");
1515 ret = -EXDEV;
1516 } else if (!inode_owner_or_capable(src_inode)) {
1517 @@ -2097,8 +2098,6 @@ static noinline int search_ioctl(struct inode *inode,
1518 key.offset = (u64)-1;
1519 root = btrfs_read_fs_root_no_name(info, &key);
1520 if (IS_ERR(root)) {
1521 - btrfs_err(info, "could not find root %llu",
1522 - sk->tree_id);
1523 btrfs_free_path(path);
1524 return -ENOENT;
1525 }
1526 @@ -2476,6 +2475,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
1527 trans->block_rsv = &block_rsv;
1528 trans->bytes_reserved = block_rsv.size;
1529
1530 + btrfs_record_snapshot_destroy(trans, dir);
1531 +
1532 ret = btrfs_unlink_subvol(trans, root, dir,
1533 dest->root_key.objectid,
1534 dentry->d_name.name,
1535 @@ -3068,6 +3069,9 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
1536 ret = extent_same_check_offsets(src, loff, &len, olen);
1537 if (ret)
1538 goto out_unlock;
1539 + ret = extent_same_check_offsets(src, dst_loff, &len, olen);
1540 + if (ret)
1541 + goto out_unlock;
1542
1543 /*
1544 * Single inode case wants the same checks, except we
1545 diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
1546 index 5279fdae7142..7173360eea7a 100644
1547 --- a/fs/btrfs/qgroup.c
1548 +++ b/fs/btrfs/qgroup.c
1549 @@ -1842,8 +1842,10 @@ out:
1550 }
1551
1552 /*
1553 - * copy the acounting information between qgroups. This is necessary when a
1554 - * snapshot or a subvolume is created
1555 + * Copy the accounting information between qgroups. This is necessary
1556 + * when a snapshot or a subvolume is created. Throwing an error will
1557 + * cause a transaction abort so we take extra care here to only error
1558 + * when a readonly fs is a reasonable outcome.
1559 */
1560 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1561 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
1562 @@ -1873,15 +1875,15 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1563 2 * inherit->num_excl_copies;
1564 for (i = 0; i < nums; ++i) {
1565 srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
1566 - if (!srcgroup) {
1567 - ret = -EINVAL;
1568 - goto out;
1569 - }
1570
1571 - if ((srcgroup->qgroupid >> 48) <= (objectid >> 48)) {
1572 - ret = -EINVAL;
1573 - goto out;
1574 - }
1575 + /*
1576 + * Zero out invalid groups so we can ignore
1577 + * them later.
1578 + */
1579 + if (!srcgroup ||
1580 + ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
1581 + *i_qgroups = 0ULL;
1582 +
1583 ++i_qgroups;
1584 }
1585 }
1586 @@ -1916,17 +1918,19 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1587 */
1588 if (inherit) {
1589 i_qgroups = (u64 *)(inherit + 1);
1590 - for (i = 0; i < inherit->num_qgroups; ++i) {
1591 + for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
1592 + if (*i_qgroups == 0)
1593 + continue;
1594 ret = add_qgroup_relation_item(trans, quota_root,
1595 objectid, *i_qgroups);
1596 - if (ret)
1597 + if (ret && ret != -EEXIST)
1598 goto out;
1599 ret = add_qgroup_relation_item(trans, quota_root,
1600 *i_qgroups, objectid);
1601 - if (ret)
1602 + if (ret && ret != -EEXIST)
1603 goto out;
1604 - ++i_qgroups;
1605 }
1606 + ret = 0;
1607 }
1608
1609
1610 @@ -1987,17 +1991,22 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1611
1612 i_qgroups = (u64 *)(inherit + 1);
1613 for (i = 0; i < inherit->num_qgroups; ++i) {
1614 - ret = add_relation_rb(quota_root->fs_info, objectid,
1615 - *i_qgroups);
1616 - if (ret)
1617 - goto unlock;
1618 + if (*i_qgroups) {
1619 + ret = add_relation_rb(quota_root->fs_info, objectid,
1620 + *i_qgroups);
1621 + if (ret)
1622 + goto unlock;
1623 + }
1624 ++i_qgroups;
1625 }
1626
1627 - for (i = 0; i < inherit->num_ref_copies; ++i) {
1628 + for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
1629 struct btrfs_qgroup *src;
1630 struct btrfs_qgroup *dst;
1631
1632 + if (!i_qgroups[0] || !i_qgroups[1])
1633 + continue;
1634 +
1635 src = find_qgroup_rb(fs_info, i_qgroups[0]);
1636 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
1637
1638 @@ -2008,12 +2017,14 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1639
1640 dst->rfer = src->rfer - level_size;
1641 dst->rfer_cmpr = src->rfer_cmpr - level_size;
1642 - i_qgroups += 2;
1643 }
1644 - for (i = 0; i < inherit->num_excl_copies; ++i) {
1645 + for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
1646 struct btrfs_qgroup *src;
1647 struct btrfs_qgroup *dst;
1648
1649 + if (!i_qgroups[0] || !i_qgroups[1])
1650 + continue;
1651 +
1652 src = find_qgroup_rb(fs_info, i_qgroups[0]);
1653 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
1654
1655 @@ -2024,7 +2035,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1656
1657 dst->excl = src->excl + level_size;
1658 dst->excl_cmpr = src->excl_cmpr + level_size;
1659 - i_qgroups += 2;
1660 }
1661
1662 unlock:
1663 diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
1664 index 619f92963e27..49b3fb73ffbf 100644
1665 --- a/fs/btrfs/reada.c
1666 +++ b/fs/btrfs/reada.c
1667 @@ -265,7 +265,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
1668 spin_unlock(&fs_info->reada_lock);
1669
1670 if (ret == 1) {
1671 - if (logical >= zone->start && logical < zone->end)
1672 + if (logical >= zone->start && logical <= zone->end)
1673 return zone;
1674 spin_lock(&fs_info->reada_lock);
1675 kref_put(&zone->refcnt, reada_zone_release);
1676 @@ -679,7 +679,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
1677 */
1678 ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
1679 dev->reada_next >> PAGE_CACHE_SHIFT, 1);
1680 - if (ret == 0 || re->logical >= dev->reada_curr_zone->end) {
1681 + if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
1682 ret = reada_pick_zone(dev);
1683 if (!ret) {
1684 spin_unlock(&fs_info->reada_lock);
1685 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
1686 index 2bd0011450df..5c806f0d443d 100644
1687 --- a/fs/btrfs/relocation.c
1688 +++ b/fs/btrfs/relocation.c
1689 @@ -1850,6 +1850,7 @@ again:
1690 eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
1691 if (IS_ERR(eb)) {
1692 ret = PTR_ERR(eb);
1693 + break;
1694 } else if (!extent_buffer_uptodate(eb)) {
1695 ret = -EIO;
1696 free_extent_buffer(eb);
1697 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
1698 index 849a30aa117d..58ae0a2ce65c 100644
1699 --- a/fs/btrfs/tree-log.c
1700 +++ b/fs/btrfs/tree-log.c
1701 @@ -4621,7 +4621,22 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
1702
1703 mutex_lock(&BTRFS_I(inode)->log_mutex);
1704
1705 - btrfs_get_logged_extents(inode, &logged_list, start, end);
1706 + /*
1707 + * Collect ordered extents only if we are logging data. This is to
1708 + * ensure a subsequent request to log this inode in LOG_INODE_ALL mode
1709 + * will process the ordered extents if they still exist at the time,
1710 + * because when we collect them we test and set for the flag
1711 + * BTRFS_ORDERED_LOGGED to prevent multiple log requests to process the
1712 + * same ordered extents. The consequence for the LOG_INODE_ALL log mode
1713 + * not processing the ordered extents is that we end up logging the
1714 + * corresponding file extent items, based on the extent maps in the
1715 + * inode's extent_map_tree's modified_list, without logging the
1716 + * respective checksums (since they may still be only attached to the
1717 + * ordered extents and have not been inserted in the csum tree by
1718 + * btrfs_finish_ordered_io() yet).
1719 + */
1720 + if (inode_only == LOG_INODE_ALL)
1721 + btrfs_get_logged_extents(inode, &logged_list, start, end);
1722
1723 /*
1724 * a brute force approach to making sure we get the most uptodate
1725 @@ -4909,6 +4924,42 @@ out_unlock:
1726 }
1727
1728 /*
1729 + * Check if we must fall back to a transaction commit when logging an inode.
1730 + * This must be called after logging the inode and is used only in the context
1731 + * when fsyncing an inode requires logging some other inode - in which
1732 + * case we can't lock the i_mutex of each other inode we need to log as that
1733 + * can lead to deadlocks with concurrent fsync against other inodes (as we can
1734 + * log inodes up or down in the hierarchy) or rename operations for example. So
1735 + * we take the log_mutex of the inode after we have logged it and then check for
1736 + * its last_unlink_trans value - this is safe because any task setting
1737 + * last_unlink_trans must take the log_mutex and it must do this before it does
1738 + * the actual unlink operation, so if we do this check before a concurrent task
1739 + * sets last_unlink_trans it means we've logged a consistent version/state of
1740 + * all the inode items, otherwise we are not sure and must do a transaction
1741 + * commit (the concurrent task might have only updated last_unlink_trans before
1742 + * we logged the inode or it might have also done the unlink).
1743 + */
1744 +static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
1745 + struct inode *inode)
1746 +{
1747 + struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1748 + bool ret = false;
1749 +
1750 + mutex_lock(&BTRFS_I(inode)->log_mutex);
1751 + if (BTRFS_I(inode)->last_unlink_trans > fs_info->last_trans_committed) {
1752 + /*
1753 + * Make sure any commits to the log are forced to be full
1754 + * commits.
1755 + */
1756 + btrfs_set_log_full_commit(fs_info, trans);
1757 + ret = true;
1758 + }
1759 + mutex_unlock(&BTRFS_I(inode)->log_mutex);
1760 +
1761 + return ret;
1762 +}
1763 +
1764 +/*
1765 * follow the dentry parent pointers up the chain and see if any
1766 * of the directories in it require a full commit before they can
1767 * be logged. Returns zero if nothing special needs to be done or 1 if
1768 @@ -4921,7 +4972,6 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
1769 u64 last_committed)
1770 {
1771 int ret = 0;
1772 - struct btrfs_root *root;
1773 struct dentry *old_parent = NULL;
1774 struct inode *orig_inode = inode;
1775
1776 @@ -4953,14 +5003,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
1777 BTRFS_I(inode)->logged_trans = trans->transid;
1778 smp_mb();
1779
1780 - if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
1781 - root = BTRFS_I(inode)->root;
1782 -
1783 - /*
1784 - * make sure any commits to the log are forced
1785 - * to be full commits
1786 - */
1787 - btrfs_set_log_full_commit(root->fs_info, trans);
1788 + if (btrfs_must_commit_transaction(trans, inode)) {
1789 ret = 1;
1790 break;
1791 }
1792 @@ -5119,6 +5162,9 @@ process_leaf:
1793 btrfs_release_path(path);
1794 ret = btrfs_log_inode(trans, root, di_inode,
1795 log_mode, 0, LLONG_MAX, ctx);
1796 + if (!ret &&
1797 + btrfs_must_commit_transaction(trans, di_inode))
1798 + ret = 1;
1799 iput(di_inode);
1800 if (ret)
1801 goto next_dir_inode;
1802 @@ -5233,6 +5279,9 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
1803
1804 ret = btrfs_log_inode(trans, root, dir_inode,
1805 LOG_INODE_ALL, 0, LLONG_MAX, ctx);
1806 + if (!ret &&
1807 + btrfs_must_commit_transaction(trans, dir_inode))
1808 + ret = 1;
1809 iput(dir_inode);
1810 if (ret)
1811 goto out;
1812 @@ -5584,6 +5633,9 @@ error:
1813 * They revolve around files that were unlinked from the directory, and
1814 * this function updates the parent directory so that a full commit is
1815 * properly done if it is fsync'd later after the unlinks are done.
1816 + *
1817 + * Must be called before the unlink operations (updates to the subvolume tree,
1818 + * inodes, etc) are done.
1819 */
1820 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
1821 struct inode *dir, struct inode *inode,
1822 @@ -5599,8 +5651,11 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
1823 * into the file. When the file is logged we check it and
1824 * don't log the parents if the file is fully on disk.
1825 */
1826 - if (S_ISREG(inode->i_mode))
1827 + if (S_ISREG(inode->i_mode)) {
1828 + mutex_lock(&BTRFS_I(inode)->log_mutex);
1829 BTRFS_I(inode)->last_unlink_trans = trans->transid;
1830 + mutex_unlock(&BTRFS_I(inode)->log_mutex);
1831 + }
1832
1833 /*
1834 * if this directory was already logged any new
1835 @@ -5631,7 +5686,29 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
1836 return;
1837
1838 record:
1839 + mutex_lock(&BTRFS_I(dir)->log_mutex);
1840 + BTRFS_I(dir)->last_unlink_trans = trans->transid;
1841 + mutex_unlock(&BTRFS_I(dir)->log_mutex);
1842 +}
1843 +
1844 +/*
1845 + * Make sure that if someone attempts to fsync the parent directory of a deleted
1846 + * snapshot, it ends up triggering a transaction commit. This is to guarantee
1847 + * that after replaying the log tree of the parent directory's root we will not
1848 + * see the snapshot anymore and at log replay time we will not see any log tree
1849 + * corresponding to the deleted snapshot's root, which could lead to replaying
1850 + * it after replaying the log tree of the parent directory (which would replay
1851 + * the snapshot delete operation).
1852 + *
1853 + * Must be called before the actual snapshot destroy operations (updates to
1854 + * the parent root and the tree of tree roots, etc) are done.
1855 + */
1856 +void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
1857 + struct inode *dir)
1858 +{
1859 + mutex_lock(&BTRFS_I(dir)->log_mutex);
1860 BTRFS_I(dir)->last_unlink_trans = trans->transid;
1861 + mutex_unlock(&BTRFS_I(dir)->log_mutex);
1862 }
1863
1864 /*
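
btrfs_must_commit_transaction() above relies on a simple discipline: every writer of last_unlink_trans holds log_mutex and updates the field before performing the unlink, so a reader holding the same mutex after logging sees either the old value or the fully published one. A hedged userspace model of that check-under-mutex pattern, with pthread primitives standing in for the kernel mutex and all type names illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_inode {
        pthread_mutex_t log_mutex;
        unsigned long long last_unlink_trans;
};

static bool must_commit(struct fake_inode *inode,
                        unsigned long long last_trans_committed)
{
        bool ret;

        pthread_mutex_lock(&inode->log_mutex);
        /* writers update last_unlink_trans under log_mutex, before the
         * unlink itself, so this read observes a published value */
        ret = inode->last_unlink_trans > last_trans_committed;
        pthread_mutex_unlock(&inode->log_mutex);
        return ret;
}

int main(void)
{
        struct fake_inode inode = { PTHREAD_MUTEX_INITIALIZER, 42 };

        printf("%d\n", must_commit(&inode, 41)); /* 1: force a full commit */
        return 0;
}
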
1865 diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
1866 index 6916a781ea02..a9f1b75d080d 100644
1867 --- a/fs/btrfs/tree-log.h
1868 +++ b/fs/btrfs/tree-log.h
1869 @@ -79,6 +79,8 @@ int btrfs_pin_log_trans(struct btrfs_root *root);
1870 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
1871 struct inode *dir, struct inode *inode,
1872 int for_rename);
1873 +void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
1874 + struct inode *dir);
1875 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
1876 struct inode *inode, struct inode *old_dir,
1877 struct dentry *parent);
1878 diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
1879 index 5384ceb35b1c..98b3eb7d8eaf 100644
1880 --- a/fs/isofs/rock.c
1881 +++ b/fs/isofs/rock.c
1882 @@ -203,6 +203,8 @@ int get_rock_ridge_filename(struct iso_directory_record *de,
1883 int retnamlen = 0;
1884 int truncate = 0;
1885 int ret = 0;
1886 + char *p;
1887 + int len;
1888
1889 if (!ISOFS_SB(inode->i_sb)->s_rock)
1890 return 0;
1891 @@ -267,12 +269,17 @@ repeat:
1892 rr->u.NM.flags);
1893 break;
1894 }
1895 - if ((strlen(retname) + rr->len - 5) >= 254) {
1896 + len = rr->len - 5;
1897 + if (retnamlen + len >= 254) {
1898 truncate = 1;
1899 break;
1900 }
1901 - strncat(retname, rr->u.NM.name, rr->len - 5);
1902 - retnamlen += rr->len - 5;
1903 + p = memchr(rr->u.NM.name, '\0', len);
1904 + if (unlikely(p))
1905 + len = p - rr->u.NM.name;
1906 + memcpy(retname + retnamlen, rr->u.NM.name, len);
1907 + retnamlen += len;
1908 + retname[retnamlen] = '\0';
1909 break;
1910 case SIG('R', 'E'):
1911 kfree(rs.buffer);
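
The isofs hunk replaces strncat() - which trusts the source to be NUL-terminated - with an explicit bounded copy: clamp to the declared record length, stop early at any embedded NUL, and terminate by hand. A standalone sketch of the same discipline (buffer size and helper name are illustrative):

#include <stdio.h>
#include <string.h>

#define RR_NAME_MAX 254 /* mirrors the 254-byte limit in the hunk */

/* Returns the new length, or -1 when the caller should truncate. */
static int append_nm(char *retname, int retnamlen, const char *nm, int len)
{
        const char *p = memchr(nm, '\0', len);

        if (p)                  /* never trust nm to be NUL-free */
                len = (int)(p - nm);
        if (retnamlen + len >= RR_NAME_MAX)
                return -1;
        memcpy(retname + retnamlen, nm, len);
        retname[retnamlen + len] = '\0';
        return retnamlen + len;
}

int main(void)
{
        char name[RR_NAME_MAX] = "";

        printf("%d %s\n", append_nm(name, 0, "foo\0bar", 7), name); /* 3 foo */
        return 0;
}

The embedded NUL in the test input is the hostile case: with strncat() the length bookkeeping and the actual string contents silently diverge.
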
1912 diff --git a/fs/namei.c b/fs/namei.c
1913 index 9c590e0f66e9..7824bfb89ada 100644
1914 --- a/fs/namei.c
1915 +++ b/fs/namei.c
1916 @@ -2968,22 +2968,10 @@ no_open:
1917 dentry = lookup_real(dir, dentry, nd->flags);
1918 if (IS_ERR(dentry))
1919 return PTR_ERR(dentry);
1920 -
1921 - if (create_error) {
1922 - int open_flag = op->open_flag;
1923 -
1924 - error = create_error;
1925 - if ((open_flag & O_EXCL)) {
1926 - if (!dentry->d_inode)
1927 - goto out;
1928 - } else if (!dentry->d_inode) {
1929 - goto out;
1930 - } else if ((open_flag & O_TRUNC) &&
1931 - d_is_reg(dentry)) {
1932 - goto out;
1933 - }
1934 - /* will fail later, go on to get the right error */
1935 - }
1936 + }
1937 + if (create_error && !dentry->d_inode) {
1938 + error = create_error;
1939 + goto out;
1940 }
1941 looked_up:
1942 path->dentry = dentry;
1943 @@ -4258,7 +4246,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1944 bool new_is_dir = false;
1945 unsigned max_links = new_dir->i_sb->s_max_links;
1946
1947 - if (source == target)
1948 + /*
1949 + * Check source == target.
1950 + * On overlayfs we need to look at the underlying inodes.
1951 + */
1952 + if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0))
1953 return 0;
1954
1955 error = may_delete(old_dir, old_dentry, is_dir);
1956 diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
1957 index 0cdf497c91ef..2162434728c0 100644
1958 --- a/fs/ocfs2/acl.c
1959 +++ b/fs/ocfs2/acl.c
1960 @@ -322,3 +322,90 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
1961 brelse(di_bh);
1962 return acl;
1963 }
1964 +
1965 +int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
1966 +{
1967 + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1968 + struct posix_acl *acl;
1969 + int ret;
1970 +
1971 + if (S_ISLNK(inode->i_mode))
1972 + return -EOPNOTSUPP;
1973 +
1974 + if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
1975 + return 0;
1976 +
1977 + acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
1978 + if (IS_ERR(acl) || !acl)
1979 + return PTR_ERR(acl);
1980 + ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
1981 + if (ret)
1982 + return ret;
1983 + ret = ocfs2_set_acl(NULL, inode, NULL, ACL_TYPE_ACCESS,
1984 + acl, NULL, NULL);
1985 + posix_acl_release(acl);
1986 + return ret;
1987 +}
1988 +
1989 +/*
1990 + * Initialize the ACLs of a new inode. If parent directory has default ACL,
1991 + * then clone to new inode. Called from ocfs2_mknod.
1992 + */
1993 +int ocfs2_init_acl(handle_t *handle,
1994 + struct inode *inode,
1995 + struct inode *dir,
1996 + struct buffer_head *di_bh,
1997 + struct buffer_head *dir_bh,
1998 + struct ocfs2_alloc_context *meta_ac,
1999 + struct ocfs2_alloc_context *data_ac)
2000 +{
2001 + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2002 + struct posix_acl *acl = NULL;
2003 + int ret = 0, ret2;
2004 + umode_t mode;
2005 +
2006 + if (!S_ISLNK(inode->i_mode)) {
2007 + if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
2008 + acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
2009 + dir_bh);
2010 + if (IS_ERR(acl))
2011 + return PTR_ERR(acl);
2012 + }
2013 + if (!acl) {
2014 + mode = inode->i_mode & ~current_umask();
2015 + ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
2016 + if (ret) {
2017 + mlog_errno(ret);
2018 + goto cleanup;
2019 + }
2020 + }
2021 + }
2022 + if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
2023 + if (S_ISDIR(inode->i_mode)) {
2024 + ret = ocfs2_set_acl(handle, inode, di_bh,
2025 + ACL_TYPE_DEFAULT, acl,
2026 + meta_ac, data_ac);
2027 + if (ret)
2028 + goto cleanup;
2029 + }
2030 + mode = inode->i_mode;
2031 + ret = __posix_acl_create(&acl, GFP_NOFS, &mode);
2032 + if (ret < 0)
2033 + return ret;
2034 +
2035 + ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
2036 + if (ret2) {
2037 + mlog_errno(ret2);
2038 + ret = ret2;
2039 + goto cleanup;
2040 + }
2041 + if (ret > 0) {
2042 + ret = ocfs2_set_acl(handle, inode,
2043 + di_bh, ACL_TYPE_ACCESS,
2044 + acl, meta_ac, data_ac);
2045 + }
2046 + }
2047 +cleanup:
2048 + posix_acl_release(acl);
2049 + return ret;
2050 +}
2051 diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h
2052 index 3fce68d08625..2783a75b3999 100644
2053 --- a/fs/ocfs2/acl.h
2054 +++ b/fs/ocfs2/acl.h
2055 @@ -35,5 +35,10 @@ int ocfs2_set_acl(handle_t *handle,
2056 struct posix_acl *acl,
2057 struct ocfs2_alloc_context *meta_ac,
2058 struct ocfs2_alloc_context *data_ac);
2059 +extern int ocfs2_acl_chmod(struct inode *, struct buffer_head *);
2060 +extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *,
2061 + struct buffer_head *, struct buffer_head *,
2062 + struct ocfs2_alloc_context *,
2063 + struct ocfs2_alloc_context *);
2064
2065 #endif /* OCFS2_ACL_H */
2066 diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
2067 index 7cb38fdca229..3d60fda1cb09 100644
2068 --- a/fs/ocfs2/file.c
2069 +++ b/fs/ocfs2/file.c
2070 @@ -1268,20 +1268,20 @@ bail_unlock_rw:
2071 if (size_change)
2072 ocfs2_rw_unlock(inode, 1);
2073 bail:
2074 - brelse(bh);
2075
2076 /* Release quota pointers in case we acquired them */
2077 for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
2078 dqput(transfer_to[qtype]);
2079
2080 if (!status && attr->ia_valid & ATTR_MODE) {
2081 - status = posix_acl_chmod(inode, inode->i_mode);
2082 + status = ocfs2_acl_chmod(inode, bh);
2083 if (status < 0)
2084 mlog_errno(status);
2085 }
2086 if (inode_locked)
2087 ocfs2_inode_unlock(inode, 1);
2088
2089 + brelse(bh);
2090 return status;
2091 }
2092
2093 diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
2094 index 6b3e87189a64..a8f1225e6d9b 100644
2095 --- a/fs/ocfs2/namei.c
2096 +++ b/fs/ocfs2/namei.c
2097 @@ -259,7 +259,6 @@ static int ocfs2_mknod(struct inode *dir,
2098 struct ocfs2_dir_lookup_result lookup = { NULL, };
2099 sigset_t oldset;
2100 int did_block_signals = 0;
2101 - struct posix_acl *default_acl = NULL, *acl = NULL;
2102 struct ocfs2_dentry_lock *dl = NULL;
2103
2104 trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
2105 @@ -367,12 +366,6 @@ static int ocfs2_mknod(struct inode *dir,
2106 goto leave;
2107 }
2108
2109 - status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
2110 - if (status) {
2111 - mlog_errno(status);
2112 - goto leave;
2113 - }
2114 -
2115 handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
2116 S_ISDIR(mode),
2117 xattr_credits));
2118 @@ -421,16 +414,8 @@ static int ocfs2_mknod(struct inode *dir,
2119 inc_nlink(dir);
2120 }
2121
2122 - if (default_acl) {
2123 - status = ocfs2_set_acl(handle, inode, new_fe_bh,
2124 - ACL_TYPE_DEFAULT, default_acl,
2125 - meta_ac, data_ac);
2126 - }
2127 - if (!status && acl) {
2128 - status = ocfs2_set_acl(handle, inode, new_fe_bh,
2129 - ACL_TYPE_ACCESS, acl,
2130 - meta_ac, data_ac);
2131 - }
2132 + status = ocfs2_init_acl(handle, inode, dir, new_fe_bh, parent_fe_bh,
2133 + meta_ac, data_ac);
2134
2135 if (status < 0) {
2136 mlog_errno(status);
2137 @@ -472,10 +457,6 @@ static int ocfs2_mknod(struct inode *dir,
2138 d_instantiate(dentry, inode);
2139 status = 0;
2140 leave:
2141 - if (default_acl)
2142 - posix_acl_release(default_acl);
2143 - if (acl)
2144 - posix_acl_release(acl);
2145 if (status < 0 && did_quota_inode)
2146 dquot_free_inode(inode);
2147 if (handle)
2148 diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
2149 index 3eff031aaf26..9919964c5b3b 100644
2150 --- a/fs/ocfs2/refcounttree.c
2151 +++ b/fs/ocfs2/refcounttree.c
2152 @@ -4248,20 +4248,12 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
2153 struct inode *inode = d_inode(old_dentry);
2154 struct buffer_head *old_bh = NULL;
2155 struct inode *new_orphan_inode = NULL;
2156 - struct posix_acl *default_acl, *acl;
2157 - umode_t mode;
2158
2159 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
2160 return -EOPNOTSUPP;
2161
2162 - mode = inode->i_mode;
2163 - error = posix_acl_create(dir, &mode, &default_acl, &acl);
2164 - if (error) {
2165 - mlog_errno(error);
2166 - return error;
2167 - }
2168
2169 - error = ocfs2_create_inode_in_orphan(dir, mode,
2170 + error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
2171 &new_orphan_inode);
2172 if (error) {
2173 mlog_errno(error);
2174 @@ -4300,16 +4292,11 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
2175 /* If the security isn't preserved, we need to re-initialize them. */
2176 if (!preserve) {
2177 error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
2178 - &new_dentry->d_name,
2179 - default_acl, acl);
2180 + &new_dentry->d_name);
2181 if (error)
2182 mlog_errno(error);
2183 }
2184 out:
2185 - if (default_acl)
2186 - posix_acl_release(default_acl);
2187 - if (acl)
2188 - posix_acl_release(acl);
2189 if (!error) {
2190 error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
2191 new_dentry);
2192 diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
2193 index 7d3d979f57d9..f19b7381a998 100644
2194 --- a/fs/ocfs2/xattr.c
2195 +++ b/fs/ocfs2/xattr.c
2196 @@ -7216,12 +7216,10 @@ out:
2197 */
2198 int ocfs2_init_security_and_acl(struct inode *dir,
2199 struct inode *inode,
2200 - const struct qstr *qstr,
2201 - struct posix_acl *default_acl,
2202 - struct posix_acl *acl)
2203 + const struct qstr *qstr)
2204 {
2205 - struct buffer_head *dir_bh = NULL;
2206 int ret = 0;
2207 + struct buffer_head *dir_bh = NULL;
2208
2209 ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
2210 if (ret) {
2211 @@ -7234,11 +7232,9 @@ int ocfs2_init_security_and_acl(struct inode *dir,
2212 mlog_errno(ret);
2213 goto leave;
2214 }
2215 -
2216 - if (!ret && default_acl)
2217 - ret = ocfs2_iop_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
2218 - if (!ret && acl)
2219 - ret = ocfs2_iop_set_acl(inode, acl, ACL_TYPE_ACCESS);
2220 + ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
2221 + if (ret)
2222 + mlog_errno(ret);
2223
2224 ocfs2_inode_unlock(dir, 0);
2225 brelse(dir_bh);
2226 diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
2227 index f10d5b93c366..1633cc15ea1f 100644
2228 --- a/fs/ocfs2/xattr.h
2229 +++ b/fs/ocfs2/xattr.h
2230 @@ -94,7 +94,5 @@ int ocfs2_reflink_xattrs(struct inode *old_inode,
2231 bool preserve_security);
2232 int ocfs2_init_security_and_acl(struct inode *dir,
2233 struct inode *inode,
2234 - const struct qstr *qstr,
2235 - struct posix_acl *default_acl,
2236 - struct posix_acl *acl);
2237 + const struct qstr *qstr);
2238 #endif /* OCFS2_XATTR_H */
2239 diff --git a/fs/open.c b/fs/open.c
2240 index 17cb6b1dab75..081d3d6df74b 100644
2241 --- a/fs/open.c
2242 +++ b/fs/open.c
2243 @@ -840,16 +840,12 @@ EXPORT_SYMBOL(file_path);
2244 int vfs_open(const struct path *path, struct file *file,
2245 const struct cred *cred)
2246 {
2247 - struct dentry *dentry = path->dentry;
2248 - struct inode *inode = dentry->d_inode;
2249 + struct inode *inode = vfs_select_inode(path->dentry, file->f_flags);
2250
2251 - file->f_path = *path;
2252 - if (dentry->d_flags & DCACHE_OP_SELECT_INODE) {
2253 - inode = dentry->d_op->d_select_inode(dentry, file->f_flags);
2254 - if (IS_ERR(inode))
2255 - return PTR_ERR(inode);
2256 - }
2257 + if (IS_ERR(inode))
2258 + return PTR_ERR(inode);
2259
2260 + file->f_path = *path;
2261 return do_dentry_open(file, inode, NULL, cred);
2262 }
2263
2264 diff --git a/include/linux/bpf.h b/include/linux/bpf.h
2265 index 83d1926c61e4..67bc2da5d233 100644
2266 --- a/include/linux/bpf.h
2267 +++ b/include/linux/bpf.h
2268 @@ -165,12 +165,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
2269 void bpf_register_map_type(struct bpf_map_type_list *tl);
2270
2271 struct bpf_prog *bpf_prog_get(u32 ufd);
2272 +struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
2273 void bpf_prog_put(struct bpf_prog *prog);
2274 void bpf_prog_put_rcu(struct bpf_prog *prog);
2275
2276 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
2277 struct bpf_map *__bpf_map_get(struct fd f);
2278 -void bpf_map_inc(struct bpf_map *map, bool uref);
2279 +struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
2280 void bpf_map_put_with_uref(struct bpf_map *map);
2281 void bpf_map_put(struct bpf_map *map);
2282
2283 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
2284 index 03dda7ba73ac..96c1a2da92d7 100644
2285 --- a/include/linux/dcache.h
2286 +++ b/include/linux/dcache.h
2287 @@ -592,4 +592,16 @@ static inline struct dentry *d_real(struct dentry *dentry)
2288 return dentry;
2289 }
2290
2291 +static inline struct inode *vfs_select_inode(struct dentry *dentry,
2292 + unsigned open_flags)
2293 +{
2294 + struct inode *inode = d_inode(dentry);
2295 +
2296 + if (inode && unlikely(dentry->d_flags & DCACHE_OP_SELECT_INODE))
2297 + inode = dentry->d_op->d_select_inode(dentry, open_flags);
2298 +
2299 + return inode;
2300 +}
2301 +
2302 +
2303 #endif /* __LINUX_DCACHE_H */
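
vfs_select_inode() centralizes a dispatch that vfs_open() used to open-code: take the dentry's own inode unless the filesystem installed a d_select_inode hook (overlayfs does) to substitute an underlying one. A userspace sketch of the shape of that dispatch, with all struct and function names illustrative:

#include <stdio.h>

struct fake_inode { int id; };

struct fake_dentry {
        struct fake_inode *inode;
        /* non-NULL plays the role of DCACHE_OP_SELECT_INODE plus
         * d_op->d_select_inode in the helper above */
        struct fake_inode *(*select_inode)(struct fake_dentry *, unsigned int);
};

static struct fake_inode *pick_inode(struct fake_dentry *d, unsigned int flags)
{
        struct fake_inode *inode = d->inode;

        if (inode && d->select_inode)
                inode = d->select_inode(d, flags); /* may be ERR_PTR upstream */
        return inode;
}

static struct fake_inode lower = { 2 };

static struct fake_inode *pick_lower(struct fake_dentry *d, unsigned int flags)
{
        (void)d; (void)flags;
        return &lower;
}

int main(void)
{
        struct fake_inode upper = { 1 };
        struct fake_dentry plain = { &upper, NULL };
        struct fake_dentry overlay = { &upper, pick_lower };

        printf("%d %d\n", pick_inode(&plain, 0)->id,
               pick_inode(&overlay, 0)->id); /* 1 2 */
        return 0;
}

Having one helper also lets the vfs_rename() hunk above compare the selected inodes, so a rename over overlayfs recognizes source == target on the underlying layer.
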
2304 diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
2305 index b288965e8101..2c14eeca46f0 100644
2306 --- a/include/linux/mfd/samsung/s2mps11.h
2307 +++ b/include/linux/mfd/samsung/s2mps11.h
2308 @@ -173,10 +173,12 @@ enum s2mps11_regulators {
2309
2310 #define S2MPS11_LDO_VSEL_MASK 0x3F
2311 #define S2MPS11_BUCK_VSEL_MASK 0xFF
2312 +#define S2MPS11_BUCK9_VSEL_MASK 0x1F
2313 #define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT)
2314 #define S2MPS11_ENABLE_SHIFT 0x06
2315 #define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1)
2316 #define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
2317 +#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1)
2318 #define S2MPS11_RAMP_DELAY 25000 /* uV/us */
2319
2320 #define S2MPS11_CTRL1_PWRHOLD_MASK BIT(4)
2321 diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
2322 index 1e3006dcf35d..46dd88e7071b 100644
2323 --- a/include/linux/mlx5/driver.h
2324 +++ b/include/linux/mlx5/driver.h
2325 @@ -813,9 +813,9 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
2326 int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
2327 enum mlx5_port_status *status);
2328
2329 -int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
2330 -void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
2331 -void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
2332 +int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
2333 +void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
2334 +void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
2335 u8 port);
2336
2337 int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
2338 diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
2339 index 123771003e68..a3f3c71124d3 100644
2340 --- a/include/linux/mlx5/vport.h
2341 +++ b/include/linux/mlx5/vport.h
2342 @@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
2343 u16 vport, u8 *addr);
2344 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
2345 u16 vport, u8 *addr);
2346 +int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
2347 +int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
2348 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
2349 u64 *system_image_guid);
2350 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
2351 diff --git a/include/linux/mm.h b/include/linux/mm.h
2352 index a6c240e885c0..e4e59f9d82f1 100644
2353 --- a/include/linux/mm.h
2354 +++ b/include/linux/mm.h
2355 @@ -456,11 +456,20 @@ static inline int page_mapcount(struct page *page)
2356
2357 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2358 int total_mapcount(struct page *page);
2359 +int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
2360 #else
2361 static inline int total_mapcount(struct page *page)
2362 {
2363 return page_mapcount(page);
2364 }
2365 +static inline int page_trans_huge_mapcount(struct page *page,
2366 + int *total_mapcount)
2367 +{
2368 + int mapcount = page_mapcount(page);
2369 + if (total_mapcount)
2370 + *total_mapcount = mapcount;
2371 + return mapcount;
2372 +}
2373 #endif
2374
2375 static inline int page_count(struct page *page)
2376 diff --git a/include/linux/net.h b/include/linux/net.h
2377 index 0b4ac7da583a..25ef630f1bd6 100644
2378 --- a/include/linux/net.h
2379 +++ b/include/linux/net.h
2380 @@ -245,7 +245,15 @@ do { \
2381 net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
2382 #define net_info_ratelimited(fmt, ...) \
2383 net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
2384 -#if defined(DEBUG)
2385 +#if defined(CONFIG_DYNAMIC_DEBUG)
2386 +#define net_dbg_ratelimited(fmt, ...) \
2387 +do { \
2388 + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
2389 + if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
2390 + net_ratelimit()) \
2391 + __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
2392 +} while (0)
2393 +#elif defined(DEBUG)
2394 #define net_dbg_ratelimited(fmt, ...) \
2395 net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
2396 #else
2397 diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
2398 index 1c33dd7da4a7..4ae95f7e8597 100644
2399 --- a/include/linux/rculist_nulls.h
2400 +++ b/include/linux/rculist_nulls.h
2401 @@ -98,6 +98,45 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
2402 if (!is_a_nulls(first))
2403 first->pprev = &n->next;
2404 }
2405 +
2406 +/**
2407 + * hlist_nulls_add_tail_rcu
2408 + * @n: the element to add to the hash list.
2409 + * @h: the list to add to.
2410 + *
2411 + * Description:
2412 + * Adds the specified element to the end of the specified hlist_nulls,
2413 + * while permitting racing traversals. NOTE: tail insertion requires
2414 + * list traversal.
2415 + *
2416 + * The caller must take whatever precautions are necessary
2417 + * (such as holding appropriate locks) to avoid racing
2418 + * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
2419 + * or hlist_nulls_del_rcu(), running on this same list.
2420 + * However, it is perfectly legal to run concurrently with
2421 + * the _rcu list-traversal primitives, such as
2422 + * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
2423 + * problems on Alpha CPUs. Regardless of the type of CPU, the
2424 + * list-traversal primitive must be guarded by rcu_read_lock().
2425 + */
2426 +static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
2427 + struct hlist_nulls_head *h)
2428 +{
2429 + struct hlist_nulls_node *i, *last = NULL;
2430 +
2431 + for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
2432 + i = hlist_nulls_next_rcu(i))
2433 + last = i;
2434 +
2435 + if (last) {
2436 + n->next = last->next;
2437 + n->pprev = &last->next;
2438 + rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
2439 + } else {
2440 + hlist_nulls_add_head_rcu(n, h);
2441 + }
2442 +}
2443 +
2444 /**
2445 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
2446 * @tpos: the type * to use as a loop cursor.
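
As the new kernel-doc warns, tail insertion into an hlist_nulls costs a traversal: the list is singly linked with no tail pointer, so the last node must be found by walking. A plain-pointer sketch of the same control flow - ordinary assignment stands in for rcu_assign_pointer(), and a NULL next plays the role of the nulls marker:

#include <stdio.h>

struct node { struct node *next; struct node **pprev; };

static void add_tail(struct node *n, struct node **head)
{
        struct node *i, *last = NULL;

        for (i = *head; i; i = i->next) /* is_a_nulls() becomes NULL here */
                last = i;

        if (last) {
                n->next = last->next;
                n->pprev = &last->next;
                last->next = n;         /* rcu_assign_pointer() upstream */
        } else {                        /* empty list: head insertion */
                n->next = *head;
                n->pprev = head;
                *head = n;
        }
}

int main(void)
{
        struct node a = { NULL, NULL }, b = { NULL, NULL }, *head = NULL;

        add_tail(&a, &head);
        add_tail(&b, &head);
        printf("%d\n", head == &a && a.next == &b); /* 1 */
        return 0;
}

The sock.h hunk further down uses this for IPv6 SO_REUSEPORT sockets, trading O(n) insertion for a stable lookup order.
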
2447 diff --git a/include/linux/swap.h b/include/linux/swap.h
2448 index 5fa4aa4ddd05..b974a2106dd7 100644
2449 --- a/include/linux/swap.h
2450 +++ b/include/linux/swap.h
2451 @@ -418,7 +418,7 @@ extern sector_t swapdev_block(int, pgoff_t);
2452 extern int page_swapcount(struct page *);
2453 extern int swp_swapcount(swp_entry_t entry);
2454 extern struct swap_info_struct *page_swap_info(struct page *);
2455 -extern int reuse_swap_page(struct page *);
2456 +extern bool reuse_swap_page(struct page *, int *);
2457 extern int try_to_free_swap(struct page *);
2458 struct backing_dev_info;
2459
2460 @@ -513,8 +513,8 @@ static inline int swp_swapcount(swp_entry_t entry)
2461 return 0;
2462 }
2463
2464 -#define reuse_swap_page(page) \
2465 - (!PageTransCompound(page) && page_mapcount(page) == 1)
2466 +#define reuse_swap_page(page, total_mapcount) \
2467 + (page_trans_huge_mapcount(page, total_mapcount) == 1)
2468
2469 static inline int try_to_free_swap(struct page *page)
2470 {
2471 diff --git a/include/net/codel.h b/include/net/codel.h
2472 index 267e70210061..d168aca115cc 100644
2473 --- a/include/net/codel.h
2474 +++ b/include/net/codel.h
2475 @@ -162,12 +162,14 @@ struct codel_vars {
2476 * struct codel_stats - contains codel shared variables and stats
2477 * @maxpacket: largest packet we've seen so far
2478 * @drop_count: temp count of dropped packets in dequeue()
2479 + * @drop_len: bytes of dropped packets in dequeue()
2480 * ecn_mark: number of packets we ECN marked instead of dropping
2481 * ce_mark: number of packets CE marked because sojourn time was above ce_threshold
2482 */
2483 struct codel_stats {
2484 u32 maxpacket;
2485 u32 drop_count;
2486 + u32 drop_len;
2487 u32 ecn_mark;
2488 u32 ce_mark;
2489 };
2490 @@ -308,6 +310,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
2491 vars->rec_inv_sqrt);
2492 goto end;
2493 }
2494 + stats->drop_len += qdisc_pkt_len(skb);
2495 qdisc_drop(skb, sch);
2496 stats->drop_count++;
2497 skb = dequeue_func(vars, sch);
2498 @@ -330,6 +333,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
2499 if (params->ecn && INET_ECN_set_ce(skb)) {
2500 stats->ecn_mark++;
2501 } else {
2502 + stats->drop_len += qdisc_pkt_len(skb);
2503 qdisc_drop(skb, sch);
2504 stats->drop_count++;
2505
2506 diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
2507 index 636a362a0e03..e5bba897d206 100644
2508 --- a/include/net/sch_generic.h
2509 +++ b/include/net/sch_generic.h
2510 @@ -396,7 +396,8 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
2511 struct Qdisc *qdisc);
2512 void qdisc_reset(struct Qdisc *qdisc);
2513 void qdisc_destroy(struct Qdisc *qdisc);
2514 -void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
2515 +void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
2516 + unsigned int len);
2517 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
2518 const struct Qdisc_ops *ops);
2519 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
2520 @@ -707,6 +708,23 @@ static inline void qdisc_reset_queue(struct Qdisc *sch)
2521 sch->qstats.backlog = 0;
2522 }
2523
2524 +static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
2525 + struct Qdisc **pold)
2526 +{
2527 + struct Qdisc *old;
2528 +
2529 + sch_tree_lock(sch);
2530 + old = *pold;
2531 + *pold = new;
2532 + if (old != NULL) {
2533 + qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
2534 + qdisc_reset(old);
2535 + }
2536 + sch_tree_unlock(sch);
2537 +
2538 + return old;
2539 +}
2540 +
2541 static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
2542 struct sk_buff_head *list)
2543 {
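
qdisc_replace() bundles steps that every classful qdisc used to repeat: swap the child under the tree lock, tell the ancestry how many packets and bytes vanished (the interface now carries a byte count, matching the drop_len counter added to codel above), and reset the old child. A loose userspace model of the swap-and-account step - a pthread mutex stands in for sch_tree_lock() and only one level of the accounting walk is shown:

#include <pthread.h>
#include <stdio.h>

struct fake_qdisc {
        unsigned int qlen;    /* packets queued */
        unsigned int backlog; /* bytes queued */
};

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

/* one level of qdisc_tree_reduce_backlog(); upstream walks all parents */
static void reduce_backlog(struct fake_qdisc *parent,
                           unsigned int n, unsigned int len)
{
        parent->qlen -= n;
        parent->backlog -= len;
}

static struct fake_qdisc *replace(struct fake_qdisc *parent,
                                  struct fake_qdisc *new,
                                  struct fake_qdisc **pold)
{
        struct fake_qdisc *old;

        pthread_mutex_lock(&tree_lock);
        old = *pold;
        *pold = new;
        if (old) /* the parent must forget the old child's packets and bytes */
                reduce_backlog(parent, old->qlen, old->backlog);
        pthread_mutex_unlock(&tree_lock);
        return old;
}

int main(void)
{
        struct fake_qdisc parent = { 10, 1500 }, child = { 4, 600 };
        struct fake_qdisc fresh = { 0, 0 }, *slot = &child;

        replace(&parent, &fresh, &slot);
        printf("%u %u\n", parent.qlen, parent.backlog); /* 6 900 */
        return 0;
}
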
2544 diff --git a/include/net/sock.h b/include/net/sock.h
2545 index f5ea148853e2..3c688ca3456d 100644
2546 --- a/include/net/sock.h
2547 +++ b/include/net/sock.h
2548 @@ -630,7 +630,11 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
2549
2550 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
2551 {
2552 - hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
2553 + if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
2554 + sk->sk_family == AF_INET6)
2555 + hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
2556 + else
2557 + hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
2558 }
2559
2560 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
2561 diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
2562 index 9cf2394f0bcf..752f5dc040a5 100644
2563 --- a/include/uapi/linux/if.h
2564 +++ b/include/uapi/linux/if.h
2565 @@ -19,14 +19,20 @@
2566 #ifndef _LINUX_IF_H
2567 #define _LINUX_IF_H
2568
2569 +#include <linux/libc-compat.h> /* for compatibility with glibc */
2570 #include <linux/types.h> /* for "__kernel_caddr_t" et al */
2571 #include <linux/socket.h> /* for "struct sockaddr" et al */
2572 #include <linux/compiler.h> /* for "__user" et al */
2573
2574 +#if __UAPI_DEF_IF_IFNAMSIZ
2575 #define IFNAMSIZ 16
2576 +#endif /* __UAPI_DEF_IF_IFNAMSIZ */
2577 #define IFALIASZ 256
2578 #include <linux/hdlc/ioctl.h>
2579
2580 +/* For glibc compatibility. An empty enum does not compile. */
2581 +#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && \
2582 + __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0
2583 /**
2584 * enum net_device_flags - &struct net_device flags
2585 *
2586 @@ -68,6 +74,8 @@
2587 * @IFF_ECHO: echo sent packets. Volatile.
2588 */
2589 enum net_device_flags {
2590 +/* for compatibility with glibc net/if.h */
2591 +#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
2592 IFF_UP = 1<<0, /* sysfs */
2593 IFF_BROADCAST = 1<<1, /* volatile */
2594 IFF_DEBUG = 1<<2, /* sysfs */
2595 @@ -84,11 +92,17 @@ enum net_device_flags {
2596 IFF_PORTSEL = 1<<13, /* sysfs */
2597 IFF_AUTOMEDIA = 1<<14, /* sysfs */
2598 IFF_DYNAMIC = 1<<15, /* sysfs */
2599 +#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
2600 +#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
2601 IFF_LOWER_UP = 1<<16, /* volatile */
2602 IFF_DORMANT = 1<<17, /* volatile */
2603 IFF_ECHO = 1<<18, /* volatile */
2604 +#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
2605 };
2606 +#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */
2607
2608 +/* for compatibility with glibc net/if.h */
2609 +#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
2610 #define IFF_UP IFF_UP
2611 #define IFF_BROADCAST IFF_BROADCAST
2612 #define IFF_DEBUG IFF_DEBUG
2613 @@ -105,9 +119,13 @@ enum net_device_flags {
2614 #define IFF_PORTSEL IFF_PORTSEL
2615 #define IFF_AUTOMEDIA IFF_AUTOMEDIA
2616 #define IFF_DYNAMIC IFF_DYNAMIC
2617 +#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
2618 +
2619 +#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
2620 #define IFF_LOWER_UP IFF_LOWER_UP
2621 #define IFF_DORMANT IFF_DORMANT
2622 #define IFF_ECHO IFF_ECHO
2623 +#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
2624
2625 #define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\
2626 IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
2627 @@ -166,6 +184,8 @@ enum {
2628 * being very small might be worth keeping for clean configuration.
2629 */
2630
2631 +/* for compatibility with glibc net/if.h */
2632 +#if __UAPI_DEF_IF_IFMAP
2633 struct ifmap {
2634 unsigned long mem_start;
2635 unsigned long mem_end;
2636 @@ -175,6 +195,7 @@ struct ifmap {
2637 unsigned char port;
2638 /* 3 bytes spare */
2639 };
2640 +#endif /* __UAPI_DEF_IF_IFMAP */
2641
2642 struct if_settings {
2643 unsigned int type; /* Type of physical device or protocol */
2644 @@ -200,6 +221,8 @@ struct if_settings {
2645 * remainder may be interface specific.
2646 */
2647
2648 +/* for compatibility with glibc net/if.h */
2649 +#if __UAPI_DEF_IF_IFREQ
2650 struct ifreq {
2651 #define IFHWADDRLEN 6
2652 union
2653 @@ -223,6 +246,7 @@ struct ifreq {
2654 struct if_settings ifru_settings;
2655 } ifr_ifru;
2656 };
2657 +#endif /* __UAPI_DEF_IF_IFREQ */
2658
2659 #define ifr_name ifr_ifrn.ifrn_name /* interface name */
2660 #define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */
2661 @@ -249,6 +273,8 @@ struct ifreq {
2662 * must know all networks accessible).
2663 */
2664
2665 +/* for compatibility with glibc net/if.h */
2666 +#if __UAPI_DEF_IF_IFCONF
2667 struct ifconf {
2668 int ifc_len; /* size of buffer */
2669 union {
2670 @@ -256,6 +282,8 @@ struct ifconf {
2671 struct ifreq __user *ifcu_req;
2672 } ifc_ifcu;
2673 };
2674 +#endif /* __UAPI_DEF_IF_IFCONF */
2675 +
2676 #define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */
2677 #define ifc_req ifc_ifcu.ifcu_req /* array of structures */
2678
2679 diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
2680 index 7d024ceb075d..d5e38c73377c 100644
2681 --- a/include/uapi/linux/libc-compat.h
2682 +++ b/include/uapi/linux/libc-compat.h
2683 @@ -51,6 +51,40 @@
2684 /* We have included glibc headers... */
2685 #if defined(__GLIBC__)
2686
2687 +/* Coordinate with glibc net/if.h header. */
2688 +#if defined(_NET_IF_H)
2689 +
2690 +/* GLIBC headers included first so don't define anything
2691 + * that would already be defined. */
2692 +
2693 +#define __UAPI_DEF_IF_IFCONF 0
2694 +#define __UAPI_DEF_IF_IFMAP 0
2695 +#define __UAPI_DEF_IF_IFNAMSIZ 0
2696 +#define __UAPI_DEF_IF_IFREQ 0
2697 +/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
2698 +#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0
2699 +/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
2700 +#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
2701 +#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
2702 +#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
2703 +
2704 +#else /* _NET_IF_H */
2705 +
2706 +/* Linux headers included first, and we must define everything
2707 + * we need. The expectation is that glibc will check the
2708 + * __UAPI_DEF_* defines and adjust appropriately. */
2709 +
2710 +#define __UAPI_DEF_IF_IFCONF 1
2711 +#define __UAPI_DEF_IF_IFMAP 1
2712 +#define __UAPI_DEF_IF_IFNAMSIZ 1
2713 +#define __UAPI_DEF_IF_IFREQ 1
2714 +/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
2715 +#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
2716 +/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
2717 +#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
2718 +
2719 +#endif /* _NET_IF_H */
2720 +
2721 /* Coordinate with glibc netinet/in.h header. */
2722 #if defined(_NETINET_IN_H)
2723
2724 @@ -117,6 +151,16 @@
2725 * that we need. */
2726 #else /* !defined(__GLIBC__) */
2727
2728 +/* Definitions for if.h */
2729 +#define __UAPI_DEF_IF_IFCONF 1
2730 +#define __UAPI_DEF_IF_IFMAP 1
2731 +#define __UAPI_DEF_IF_IFNAMSIZ 1
2732 +#define __UAPI_DEF_IF_IFREQ 1
2733 +/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
2734 +#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
2735 +/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
2736 +#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
2737 +
2738 /* Definitions for in.h */
2739 #define __UAPI_DEF_IN_ADDR 1
2740 #define __UAPI_DEF_IN_IPPROTO 1
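
The guard scheme in libc-compat.h is symmetric: whichever of glibc's net/if.h or the kernel's linux/if.h is seen first supplies struct ifreq, struct ifconf, struct ifmap, IFNAMSIZ and the IFF_* flags, and the __UAPI_DEF_IF_* macros make the other header stand down. A hedged example that should compile, in either include order, on a glibc system whose headers carry these guards:

/* glibc first: _NET_IF_H is set, so linux/if.h skips the duplicates */
#include <net/if.h>
#include <linux/if.h>

int main(void)
{
        struct ifreq req = { { "lo" } }; /* one definition, not two */

        (void)req;
        return IFF_UP == 1 ? 0 : 1;
}
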
2741 diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
2742 index f2ece3c174a5..8f94ca1860cf 100644
2743 --- a/kernel/bpf/inode.c
2744 +++ b/kernel/bpf/inode.c
2745 @@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
2746 {
2747 switch (type) {
2748 case BPF_TYPE_PROG:
2749 - atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
2750 + raw = bpf_prog_inc(raw);
2751 break;
2752 case BPF_TYPE_MAP:
2753 - bpf_map_inc(raw, true);
2754 + raw = bpf_map_inc(raw, true);
2755 break;
2756 default:
2757 WARN_ON_ONCE(1);
2758 @@ -297,7 +297,8 @@ static void *bpf_obj_do_get(const struct filename *pathname,
2759 goto out;
2760
2761 raw = bpf_any_get(inode->i_private, *type);
2762 - touch_atime(&path);
2763 + if (!IS_ERR(raw))
2764 + touch_atime(&path);
2765
2766 path_put(&path);
2767 return raw;
2768 diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
2769 index 637397059f76..aa5f39772ac4 100644
2770 --- a/kernel/bpf/syscall.c
2771 +++ b/kernel/bpf/syscall.c
2772 @@ -201,11 +201,18 @@ struct bpf_map *__bpf_map_get(struct fd f)
2773 return f.file->private_data;
2774 }
2775
2776 -void bpf_map_inc(struct bpf_map *map, bool uref)
2777 +/* prog's and map's refcnt limit */
2778 +#define BPF_MAX_REFCNT 32768
2779 +
2780 +struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
2781 {
2782 - atomic_inc(&map->refcnt);
2783 + if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
2784 + atomic_dec(&map->refcnt);
2785 + return ERR_PTR(-EBUSY);
2786 + }
2787 if (uref)
2788 atomic_inc(&map->usercnt);
2789 + return map;
2790 }
2791
2792 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
2793 @@ -217,7 +224,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
2794 if (IS_ERR(map))
2795 return map;
2796
2797 - bpf_map_inc(map, true);
2798 + map = bpf_map_inc(map, true);
2799 fdput(f);
2800
2801 return map;
2802 @@ -600,6 +607,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
2803 return f.file->private_data;
2804 }
2805
2806 +struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
2807 +{
2808 + if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
2809 + atomic_dec(&prog->aux->refcnt);
2810 + return ERR_PTR(-EBUSY);
2811 + }
2812 + return prog;
2813 +}
2814 +
2815 /* called by sockets/tracing/seccomp before attaching program to an event
2816 * pairs with bpf_prog_put()
2817 */
2818 @@ -612,7 +628,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
2819 if (IS_ERR(prog))
2820 return prog;
2821
2822 - atomic_inc(&prog->aux->refcnt);
2823 + prog = bpf_prog_inc(prog);
2824 fdput(f);
2825
2826 return prog;
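
bpf_map_inc() and bpf_prog_inc() now saturate instead of wrapping: take the reference optimistically with an inc-and-return, and roll it back if the cap was crossed, reporting -EBUSY. A userspace sketch of that idiom with C11 atomics (the cap value mirrors BPF_MAX_REFCNT; everything else is illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define MAX_REFCNT 32768 /* mirrors BPF_MAX_REFCNT */

static atomic_int refcnt = 1;

/* 0 on success; -1 (think -EBUSY) if the cap would be exceeded */
static int ref_get(void)
{
        /* fetch_add returns the old value, so +1 matches atomic_inc_return() */
        if (atomic_fetch_add(&refcnt, 1) + 1 > MAX_REFCNT) {
                atomic_fetch_sub(&refcnt, 1);
                return -1;
        }
        return 0;
}

int main(void)
{
        printf("%d\n", ref_get()); /* 0: the count is now 2 */
        return 0;
}

The rollback briefly lets the counter exceed the cap by the number of racing callers, which is harmless here: the point is to keep a 32-bit count far away from overflow, not to enforce an exact ceiling.
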
2827 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
2828 index 2e7f7ab739e4..2cbfba78d3db 100644
2829 --- a/kernel/bpf/verifier.c
2830 +++ b/kernel/bpf/verifier.c
2831 @@ -239,15 +239,6 @@ static const char * const reg_type_str[] = {
2832 [CONST_IMM] = "imm",
2833 };
2834
2835 -static const struct {
2836 - int map_type;
2837 - int func_id;
2838 -} func_limit[] = {
2839 - {BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
2840 - {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
2841 - {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
2842 -};
2843 -
2844 static void print_verifier_state(struct verifier_env *env)
2845 {
2846 enum bpf_reg_type t;
2847 @@ -898,24 +889,44 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
2848
2849 static int check_map_func_compatibility(struct bpf_map *map, int func_id)
2850 {
2851 - bool bool_map, bool_func;
2852 - int i;
2853 -
2854 if (!map)
2855 return 0;
2856
2857 - for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
2858 - bool_map = (map->map_type == func_limit[i].map_type);
2859 - bool_func = (func_id == func_limit[i].func_id);
2860 - /* only when map & func pair match it can continue.
2861 - * don't allow any other map type to be passed into
2862 - * the special func;
2863 - */
2864 - if (bool_func && bool_map != bool_func)
2865 - return -EINVAL;
2866 + /* We need a two way check, first is from map perspective ... */
2867 + switch (map->map_type) {
2868 + case BPF_MAP_TYPE_PROG_ARRAY:
2869 + if (func_id != BPF_FUNC_tail_call)
2870 + goto error;
2871 + break;
2872 + case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
2873 + if (func_id != BPF_FUNC_perf_event_read &&
2874 + func_id != BPF_FUNC_perf_event_output)
2875 + goto error;
2876 + break;
2877 + default:
2878 + break;
2879 + }
2880 +
2881 + /* ... and second from the function itself. */
2882 + switch (func_id) {
2883 + case BPF_FUNC_tail_call:
2884 + if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
2885 + goto error;
2886 + break;
2887 + case BPF_FUNC_perf_event_read:
2888 + case BPF_FUNC_perf_event_output:
2889 + if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
2890 + goto error;
2891 + break;
2892 + default:
2893 + break;
2894 }
2895
2896 return 0;
2897 +error:
2898 + verbose("cannot pass map_type %d into func %d\n",
2899 + map->map_type, func_id);
2900 + return -EINVAL;
2901 }
2902
2903 static int check_call(struct verifier_env *env, int func_id)
2904 @@ -1348,6 +1359,7 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
2905 }
2906
2907 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
2908 + BPF_SIZE(insn->code) == BPF_DW ||
2909 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
2910 verbose("BPF_LD_ABS uses reserved fields\n");
2911 return -EINVAL;
2912 @@ -2003,7 +2015,6 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
2913 if (IS_ERR(map)) {
2914 verbose("fd %d is not pointing to valid bpf_map\n",
2915 insn->imm);
2916 - fdput(f);
2917 return PTR_ERR(map);
2918 }
2919
2920 @@ -2023,15 +2034,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
2921 return -E2BIG;
2922 }
2923
2924 - /* remember this map */
2925 - env->used_maps[env->used_map_cnt++] = map;
2926 -
2927 /* hold the map. If the program is rejected by verifier,
2928 * the map will be released by release_maps() or it
2929 * will be used by the valid program until it's unloaded
2930 * and all maps are released in free_bpf_prog_info()
2931 */
2932 - bpf_map_inc(map, false);
2933 + map = bpf_map_inc(map, false);
2934 + if (IS_ERR(map)) {
2935 + fdput(f);
2936 + return PTR_ERR(map);
2937 + }
2938 + env->used_maps[env->used_map_cnt++] = map;
2939 +
2940 fdput(f);
2941 next_insn:
2942 insn++;
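
The verifier rewrite replaces the func_limit[] table with a two-way check: each special map type pins the set of helpers allowed on it, and each special helper pins the map type it accepts, so adding a new pair means updating both switches. A compact model with illustrative enums:

#include <stdio.h>

enum map_type { MAP_PLAIN, MAP_PROG_ARRAY, MAP_PERF_EVENT_ARRAY };
enum func_id  { FUNC_OTHER, FUNC_TAIL_CALL, FUNC_PERF_EVENT_READ,
                FUNC_PERF_EVENT_OUTPUT };

static int check_compat(enum map_type map, enum func_id func)
{
        switch (map) {                  /* map -> allowed helpers */
        case MAP_PROG_ARRAY:
                if (func != FUNC_TAIL_CALL)
                        return -1;
                break;
        case MAP_PERF_EVENT_ARRAY:
                if (func != FUNC_PERF_EVENT_READ &&
                    func != FUNC_PERF_EVENT_OUTPUT)
                        return -1;
                break;
        default:
                break;
        }
        switch (func) {                 /* helper -> required map */
        case FUNC_TAIL_CALL:
                if (map != MAP_PROG_ARRAY)
                        return -1;
                break;
        case FUNC_PERF_EVENT_READ:
        case FUNC_PERF_EVENT_OUTPUT:
                if (map != MAP_PERF_EVENT_ARRAY)
                        return -1;
                break;
        default:
                break;
        }
        return 0;
}

int main(void)
{
        /* the old one-way table missed this direction */
        printf("%d\n", check_compat(MAP_PLAIN, FUNC_TAIL_CALL)); /* -1 */
        return 0;
}
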
2943 diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
2944 index 1faad2cfdb9e..287201a5d12f 100644
2945 --- a/kernel/events/ring_buffer.c
2946 +++ b/kernel/events/ring_buffer.c
2947 @@ -347,6 +347,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
2948 bool truncated)
2949 {
2950 struct ring_buffer *rb = handle->rb;
2951 + bool wakeup = truncated;
2952 unsigned long aux_head;
2953 u64 flags = 0;
2954
2955 @@ -375,9 +376,16 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
2956 aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
2957
2958 if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
2959 - perf_output_wakeup(handle);
2960 + wakeup = true;
2961 local_add(rb->aux_watermark, &rb->aux_wakeup);
2962 }
2963 +
2964 + if (wakeup) {
2965 + if (truncated)
2966 + handle->event->pending_disable = 1;
2967 + perf_output_wakeup(handle);
2968 + }
2969 +
2970 handle->event = NULL;
2971
2972 local_set(&rb->aux_nest, 0);
2973 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
2974 index 9e82d0450fad..f48c80e4ba75 100644
2975 --- a/kernel/workqueue.c
2976 +++ b/kernel/workqueue.c
2977 @@ -4556,6 +4556,17 @@ static void rebind_workers(struct worker_pool *pool)
2978 pool->attrs->cpumask) < 0);
2979
2980 spin_lock_irq(&pool->lock);
2981 +
2982 + /*
2983 + * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
2984 + * w/o preceding DOWN_PREPARE. Work around it. CPU hotplug is
2985 + * being reworked and this can go away in time.
2986 + */
2987 + if (!(pool->flags & POOL_DISASSOCIATED)) {
2988 + spin_unlock_irq(&pool->lock);
2989 + return;
2990 + }
2991 +
2992 pool->flags &= ~POOL_DISASSOCIATED;
2993
2994 for_each_pool_worker(worker, pool) {
2995 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
2996 index a7db0a2db1ab..a82a87b3f9c6 100644
2997 --- a/mm/huge_memory.c
2998 +++ b/mm/huge_memory.c
2999 @@ -1257,15 +1257,9 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
3000 VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
3001 /*
3002 * We can only reuse the page if nobody else maps the huge page or it's
3003 - * part. We can do it by checking page_mapcount() on each sub-page, but
3004 - * it's expensive.
3005 - * The cheaper way is to check page_count() to be equal 1: every
3006 - * mapcount takes page reference reference, so this way we can
3007 - * guarantee, that the PMD is the only mapping.
3008 - * This can give false negative if somebody pinned the page, but that's
3009 - * fine.
3010 + * part.
3011 */
3012 - if (page_mapcount(page) == 1 && page_count(page) == 1) {
3013 + if (page_trans_huge_mapcount(page, NULL) == 1) {
3014 pmd_t entry;
3015 entry = pmd_mkyoung(orig_pmd);
3016 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
3017 @@ -2038,7 +2032,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
3018 if (pte_write(pteval)) {
3019 writable = true;
3020 } else {
3021 - if (PageSwapCache(page) && !reuse_swap_page(page)) {
3022 + if (PageSwapCache(page) &&
3023 + !reuse_swap_page(page, NULL)) {
3024 unlock_page(page);
3025 result = SCAN_SWAP_CACHE_PAGE;
3026 goto out;
3027 @@ -3338,6 +3333,64 @@ int total_mapcount(struct page *page)
3028 }
3029
3030 /*
3031 + * This calculates accurately how many mappings a transparent hugepage
3032 + * has (unlike page_mapcount() which isn't fully accurate). This full
3033 + * accuracy is primarily needed to know if copy-on-write faults can
3034 + * reuse the page and change the mapping to read-write instead of
3035 + * copying it. At the same time this returns the total_mapcount too.
3036 + *
3037 + * The function returns the highest mapcount any one of the subpages
3038 + * has. If the return value is one, even if different processes are
3039 + * mapping different subpages of the transparent hugepage, they can
3040 + * all reuse it, because each process is reusing a different subpage.
3041 + *
3042 + * The total_mapcount is instead counting all virtual mappings of the
3043 + * subpages. If the total_mapcount is equal to "one", it tells the
3044 + * caller all mappings belong to the same "mm" and in turn the
3045 + * anon_vma of the transparent hugepage can become the vma->anon_vma
3046 + * local one as no other process may be mapping any of the subpages.
3047 + *
3048 + * It would be more accurate to replace page_mapcount() with
3049 + * page_trans_huge_mapcount(), however we only use
3050 + * page_trans_huge_mapcount() in the copy-on-write faults where we
3051 + * need full accuracy to avoid breaking page pinning, because
3052 + * page_trans_huge_mapcount() is slower than page_mapcount().
3053 + */
3054 +int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
3055 +{
3056 + int i, ret, _total_mapcount, mapcount;
3057 +
3058 + /* hugetlbfs shouldn't call it */
3059 + VM_BUG_ON_PAGE(PageHuge(page), page);
3060 +
3061 + if (likely(!PageTransCompound(page))) {
3062 + mapcount = atomic_read(&page->_mapcount) + 1;
3063 + if (total_mapcount)
3064 + *total_mapcount = mapcount;
3065 + return mapcount;
3066 + }
3067 +
3068 + page = compound_head(page);
3069 +
3070 + _total_mapcount = ret = 0;
3071 + for (i = 0; i < HPAGE_PMD_NR; i++) {
3072 + mapcount = atomic_read(&page[i]._mapcount) + 1;
3073 + ret = max(ret, mapcount);
3074 + _total_mapcount += mapcount;
3075 + }
3076 + if (PageDoubleMap(page)) {
3077 + ret -= 1;
3078 + _total_mapcount -= HPAGE_PMD_NR;
3079 + }
3080 + mapcount = compound_mapcount(page);
3081 + ret += mapcount;
3082 + _total_mapcount += mapcount;
3083 + if (total_mapcount)
3084 + *total_mapcount = _total_mapcount;
3085 + return ret;
3086 +}
3087 +
3088 +/*
3089 * This function splits huge page into normal pages. @page can point to any
3090 * subpage of huge page to split. Split doesn't change the position of @page.
3091 *
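The page_trans_huge_mapcount() return convention above is easier to see with numbers. Below is a hypothetical, self-contained userspace model of the same arithmetic (names are invented; HPAGE_PMD_NR is pinned to 8 for the demo, and true mapcounts are used instead of the kernel's off-by-one _mapcount storage):

#include <stdio.h>

#define HPAGE_PMD_NR 8

/* Model: per-subpage PTE mapcounts, one compound (PMD) mapcount, and a
 * double_map flag meaning every subpage count is inflated by one because
 * the THP is mapped both via PMD and via PTEs.
 */
static int trans_huge_mapcount(const int *sub, int compound,
                               int double_map, int *total)
{
        int i, ret = 0, _total = 0;

        for (i = 0; i < HPAGE_PMD_NR; i++) {
                if (sub[i] > ret)
                        ret = sub[i];
                _total += sub[i];
        }
        if (double_map) {
                ret -= 1;
                _total -= HPAGE_PMD_NR;
        }
        ret += compound;
        _total += compound;
        if (total)
                *total = _total;
        return ret;
}

int main(void)
{
        int only_pmd[HPAGE_PMD_NR] = { 0 };           /* sole PMD mapping */
        int pmd_plus_pte[HPAGE_PMD_NR] = { [3] = 1 }; /* plus one PTE map */
        int ret, total;

        ret = trans_huge_mapcount(only_pmd, 1, 0, &total);
        printf("PMD only:              ret=%d total=%d\n", ret, total);
        ret = trans_huge_mapcount(pmd_plus_pte, 1, 0, &total);
        printf("PMD + one subpage PTE: ret=%d total=%d\n", ret, total);
        return 0;
}

The first case (ret == 1) is the one the do_huge_pmd_wp_page() hunk above and the do_wp_page() hunk below care about: a highest mapcount of one means no subpage has a second mapper, so the faulting process may reuse the page in place; the second case (ret == 2) forces a copy.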
3092 diff --git a/mm/memory.c b/mm/memory.c
3093 index 3345dcf862cf..c1aa0e4b4096 100644
3094 --- a/mm/memory.c
3095 +++ b/mm/memory.c
3096 @@ -2357,6 +2357,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
3097 * not dirty accountable.
3098 */
3099 if (PageAnon(old_page) && !PageKsm(old_page)) {
3100 + int total_mapcount;
3101 if (!trylock_page(old_page)) {
3102 page_cache_get(old_page);
3103 pte_unmap_unlock(page_table, ptl);
3104 @@ -2371,13 +2372,18 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
3105 }
3106 page_cache_release(old_page);
3107 }
3108 - if (reuse_swap_page(old_page)) {
3109 - /*
3110 - * The page is all ours. Move it to our anon_vma so
3111 - * the rmap code will not search our parent or siblings.
3112 - * Protected against the rmap code by the page lock.
3113 - */
3114 - page_move_anon_rmap(old_page, vma, address);
3115 + if (reuse_swap_page(old_page, &total_mapcount)) {
3116 + if (total_mapcount == 1) {
3117 + /*
3118 + * The page is all ours. Move it to
3119 + * our anon_vma so the rmap code will
3120 + * not search our parent or siblings.
3121 + * Protected against the rmap code by
3122 + * the page lock.
3123 + */
3124 + page_move_anon_rmap(compound_head(old_page),
3125 + vma, address);
3126 + }
3127 unlock_page(old_page);
3128 return wp_page_reuse(mm, vma, address, page_table, ptl,
3129 orig_pte, old_page, 0, 0);
3130 @@ -2602,7 +2608,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
3131 inc_mm_counter_fast(mm, MM_ANONPAGES);
3132 dec_mm_counter_fast(mm, MM_SWAPENTS);
3133 pte = mk_pte(page, vma->vm_page_prot);
3134 - if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
3135 + if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
3136 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
3137 flags &= ~FAULT_FLAG_WRITE;
3138 ret |= VM_FAULT_WRITE;
3139 diff --git a/mm/swapfile.c b/mm/swapfile.c
3140 index d2c37365e2d6..954fd8f72b79 100644
3141 --- a/mm/swapfile.c
3142 +++ b/mm/swapfile.c
3143 @@ -916,18 +916,19 @@ out:
3144 * to it. And as a side-effect, free up its swap: because the old content
3145 * on disk will never be read, and seeking back there to write new content
3146 * later would only waste time away from clustering.
3147 + *
3148 + * NOTE: total_mapcount should not be relied upon by the caller if
3149 + * reuse_swap_page() returns false, but it may be always overwritten
3150 + * (see the other implementation for CONFIG_SWAP=n).
3151 */
3152 -int reuse_swap_page(struct page *page)
3153 +bool reuse_swap_page(struct page *page, int *total_mapcount)
3154 {
3155 int count;
3156
3157 VM_BUG_ON_PAGE(!PageLocked(page), page);
3158 if (unlikely(PageKsm(page)))
3159 - return 0;
3160 - /* The page is part of THP and cannot be reused */
3161 - if (PageTransCompound(page))
3162 - return 0;
3163 - count = page_mapcount(page);
3164 + return false;
3165 + count = page_trans_huge_mapcount(page, total_mapcount);
3166 if (count <= 1 && PageSwapCache(page)) {
3167 count += page_swapcount(page);
3168 if (count == 1 && !PageWriteback(page)) {
3169 diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
3170 index 2d7c4c11fc63..336149ffd07d 100644
3171 --- a/mm/zsmalloc.c
3172 +++ b/mm/zsmalloc.c
3173 @@ -1732,10 +1732,13 @@ static struct page *isolate_source_page(struct size_class *class)
3174 static unsigned long zs_can_compact(struct size_class *class)
3175 {
3176 unsigned long obj_wasted;
3177 + unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
3178 + unsigned long obj_used = zs_stat_get(class, OBJ_USED);
3179
3180 - obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) -
3181 - zs_stat_get(class, OBJ_USED);
3182 + if (obj_allocated <= obj_used)
3183 + return 0;
3184
3185 + obj_wasted = obj_allocated - obj_used;
3186 obj_wasted /= get_maxobj_per_zspage(class->size,
3187 class->pages_per_zspage);
3188
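The zs_can_compact() hunk guards a plain unsigned-underflow hazard: the two stats are read without synchronization, so OBJ_USED can momentarily exceed OBJ_ALLOCATED, and the old unconditional subtraction would wrap to an enormous bogus "wasted" count. A minimal standalone illustration of the wrap and the guard:

#include <stdio.h>

int main(void)
{
        unsigned long obj_allocated = 100;
        unsigned long obj_used = 101;   /* racy reader ran slightly ahead */

        /* Old behaviour: unsigned subtraction wraps to a huge value. */
        printf("unguarded: %lu\n", obj_allocated - obj_used);

        /* Patched behaviour: treat the transient inversion as nothing
         * to compact instead of wrapping.
         */
        unsigned long obj_wasted = obj_allocated <= obj_used ?
                                   0 : obj_allocated - obj_used;
        printf("guarded:   %lu\n", obj_wasted);
        return 0;
}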
3189 diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
3190 index 263b4de4de57..60a3dbfca8a1 100644
3191 --- a/net/bridge/br_ioctl.c
3192 +++ b/net/bridge/br_ioctl.c
3193 @@ -21,18 +21,19 @@
3194 #include <asm/uaccess.h>
3195 #include "br_private.h"
3196
3197 -/* called with RTNL */
3198 static int get_bridge_ifindices(struct net *net, int *indices, int num)
3199 {
3200 struct net_device *dev;
3201 int i = 0;
3202
3203 - for_each_netdev(net, dev) {
3204 + rcu_read_lock();
3205 + for_each_netdev_rcu(net, dev) {
3206 if (i >= num)
3207 break;
3208 if (dev->priv_flags & IFF_EBRIDGE)
3209 indices[i++] = dev->ifindex;
3210 }
3211 + rcu_read_unlock();
3212
3213 return i;
3214 }
3215 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
3216 index 03661d97463c..ea9893743a0f 100644
3217 --- a/net/bridge/br_multicast.c
3218 +++ b/net/bridge/br_multicast.c
3219 @@ -1270,6 +1270,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
3220 struct br_ip saddr;
3221 unsigned long max_delay;
3222 unsigned long now = jiffies;
3223 + unsigned int offset = skb_transport_offset(skb);
3224 __be32 group;
3225 int err = 0;
3226
3227 @@ -1280,14 +1281,14 @@ static int br_ip4_multicast_query(struct net_bridge *br,
3228
3229 group = ih->group;
3230
3231 - if (skb->len == sizeof(*ih)) {
3232 + if (skb->len == offset + sizeof(*ih)) {
3233 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
3234
3235 if (!max_delay) {
3236 max_delay = 10 * HZ;
3237 group = 0;
3238 }
3239 - } else if (skb->len >= sizeof(*ih3)) {
3240 + } else if (skb->len >= offset + sizeof(*ih3)) {
3241 ih3 = igmpv3_query_hdr(skb);
3242 if (ih3->nsrcs)
3243 goto out;
3244 @@ -1348,6 +1349,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
3245 struct br_ip saddr;
3246 unsigned long max_delay;
3247 unsigned long now = jiffies;
3248 + unsigned int offset = skb_transport_offset(skb);
3249 const struct in6_addr *group = NULL;
3250 bool is_general_query;
3251 int err = 0;
3252 @@ -1357,8 +1359,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
3253 (port && port->state == BR_STATE_DISABLED))
3254 goto out;
3255
3256 - if (skb->len == sizeof(*mld)) {
3257 - if (!pskb_may_pull(skb, sizeof(*mld))) {
3258 + if (skb->len == offset + sizeof(*mld)) {
3259 + if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
3260 err = -EINVAL;
3261 goto out;
3262 }
3263 @@ -1367,7 +1369,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
3264 if (max_delay)
3265 group = &mld->mld_mca;
3266 } else {
3267 - if (!pskb_may_pull(skb, sizeof(*mld2q))) {
3268 + if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
3269 err = -EINVAL;
3270 goto out;
3271 }
3272 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
3273 index 215e6137f6ff..482c3717a45e 100644
3274 --- a/net/core/rtnetlink.c
3275 +++ b/net/core/rtnetlink.c
3276 @@ -1176,14 +1176,16 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
3277
3278 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
3279 {
3280 - struct rtnl_link_ifmap map = {
3281 - .mem_start = dev->mem_start,
3282 - .mem_end = dev->mem_end,
3283 - .base_addr = dev->base_addr,
3284 - .irq = dev->irq,
3285 - .dma = dev->dma,
3286 - .port = dev->if_port,
3287 - };
3288 + struct rtnl_link_ifmap map;
3289 +
3290 + memset(&map, 0, sizeof(map));
3291 + map.mem_start = dev->mem_start;
3292 + map.mem_end = dev->mem_end;
3293 + map.base_addr = dev->base_addr;
3294 + map.irq = dev->irq;
3295 + map.dma = dev->dma;
3296 + map.port = dev->if_port;
3297 +
3298 if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
3299 return -EMSGSIZE;
3300
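The rtnl_fill_link_ifmap() rewrite looks cosmetic but is an infoleak fix: a designated initializer only guarantees the named members, while compiler-inserted padding inside struct rtnl_link_ifmap can stay undefined and would then be copied out to userspace by nla_put(); memset() provably zeroes the padding too. The same class of fix recurs below in llc_cmsg_rcv(), packet_mc_add() and x25_negotiate_facilities(). A small userspace sketch of the difference, using an invented struct laid out so the compiler must pad:

#include <stdio.h>
#include <string.h>

struct map_like {
        unsigned long long mem_start;
        unsigned char irq;      /* typically 7 bytes of tail padding on LP64 */
};

static void hexdump(const char *tag, const void *p, size_t n)
{
        const unsigned char *b = p;

        printf("%s:", tag);
        for (size_t i = 0; i < n; i++)
                printf(" %02x", b[i]);
        printf("\n");
}

int main(void)
{
        /* Padding bytes of 'a' are unspecified; they may or may not be 0. */
        struct map_like a = { .mem_start = 1, .irq = 9 };
        struct map_like b;

        memset(&b, 0, sizeof(b));       /* padding zeroed by construction */
        b.mem_start = 1;
        b.irq = 9;

        hexdump("initializer", &a, sizeof(a));
        hexdump("memset     ", &b, sizeof(b));
        return 0;
}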
3301 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
3302 index 8616d1147c93..9835d9a8a7a4 100644
3303 --- a/net/core/skbuff.c
3304 +++ b/net/core/skbuff.c
3305 @@ -4427,15 +4427,16 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
3306 __skb_push(skb, offset);
3307 err = __vlan_insert_tag(skb, skb->vlan_proto,
3308 skb_vlan_tag_get(skb));
3309 - if (err)
3310 + if (err) {
3311 + __skb_pull(skb, offset);
3312 return err;
3313 + }
3314 +
3315 skb->protocol = skb->vlan_proto;
3316 skb->mac_len += VLAN_HLEN;
3317 - __skb_pull(skb, offset);
3318
3319 - if (skb->ip_summed == CHECKSUM_COMPLETE)
3320 - skb->csum = csum_add(skb->csum, csum_partial(skb->data
3321 - + (2 * ETH_ALEN), VLAN_HLEN, 0));
3322 + skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
3323 + __skb_pull(skb, offset);
3324 }
3325 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
3326 return 0;
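The skb_vlan_push() change is about error-path symmetry: the function temporarily pushes the mac header back into view with __skb_push(), so on __vlan_insert_tag() failure it must __skb_pull() the same offset before returning, leaving the skb exactly as the caller handed it in; the final pull is also deferred until after the checksum fold so skb->data still covers the pushed bytes. The restore-on-failure rule, reduced to a hypothetical cursor model:

#include <stdio.h>

struct buf { const char *data; size_t off; };

static void push(struct buf *b, size_t n) { b->off -= n; }
static void pull(struct buf *b, size_t n) { b->off += n; }

static int insert_tag(struct buf *b) { (void)b; return -1; /* simulated failure */ }

static int vlan_push_like(struct buf *b, size_t offset)
{
        push(b, offset);                /* expose the mac header */
        if (insert_tag(b) != 0) {
                pull(b, offset);        /* undo: restore caller's view */
                return -1;
        }
        /* ... fold checksum over the exposed bytes, then ... */
        pull(b, offset);
        return 0;
}

int main(void)
{
        struct buf b = { "frame", 14 };
        size_t before = b.off;

        vlan_push_like(&b, 14);
        printf("offset restored after failure: %s\n",
               b.off == before ? "yes" : "no");
        return 0;
}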
3327 diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
3328 index 607a14f20d88..b1dc096d22f8 100644
3329 --- a/net/decnet/dn_route.c
3330 +++ b/net/decnet/dn_route.c
3331 @@ -1034,10 +1034,13 @@ source_ok:
3332 if (!fld.daddr) {
3333 fld.daddr = fld.saddr;
3334
3335 - err = -EADDRNOTAVAIL;
3336 if (dev_out)
3337 dev_put(dev_out);
3338 + err = -EINVAL;
3339 dev_out = init_net.loopback_dev;
3340 + if (!dev_out->dn_ptr)
3341 + goto out;
3342 + err = -EADDRNOTAVAIL;
3343 dev_hold(dev_out);
3344 if (!fld.daddr) {
3345 fld.daddr =
3346 @@ -1110,6 +1113,8 @@ source_ok:
3347 if (dev_out == NULL)
3348 goto out;
3349 dn_db = rcu_dereference_raw(dev_out->dn_ptr);
3350 + if (!dn_db)
3351 + goto e_inval;
3352 /* Possible improvement - check all devices for local addr */
3353 if (dn_dev_islocal(dev_out, fld.daddr)) {
3354 dev_put(dev_out);
3355 @@ -1151,6 +1156,8 @@ select_source:
3356 dev_put(dev_out);
3357 dev_out = init_net.loopback_dev;
3358 dev_hold(dev_out);
3359 + if (!dev_out->dn_ptr)
3360 + goto e_inval;
3361 fld.flowidn_oif = dev_out->ifindex;
3362 if (res.fi)
3363 dn_fib_info_put(res.fi);
3364 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
3365 index 8a9246deccfe..63566ec54794 100644
3366 --- a/net/ipv4/fib_frontend.c
3367 +++ b/net/ipv4/fib_frontend.c
3368 @@ -904,7 +904,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
3369 if (ifa->ifa_flags & IFA_F_SECONDARY) {
3370 prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
3371 if (!prim) {
3372 - pr_warn("%s: bug: prim == NULL\n", __func__);
3373 + /* if the device has been deleted, we don't perform
3374 + * address promotion
3375 + */
3376 + if (!in_dev->dead)
3377 + pr_warn("%s: bug: prim == NULL\n", __func__);
3378 return;
3379 }
3380 if (iprim && iprim != prim) {
3381 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
3382 index d97268e8ff10..2b68418c7198 100644
3383 --- a/net/ipv4/fib_semantics.c
3384 +++ b/net/ipv4/fib_semantics.c
3385 @@ -975,6 +975,8 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
3386 val = 65535 - 40;
3387 if (type == RTAX_MTU && val > 65535 - 15)
3388 val = 65535 - 15;
3389 + if (type == RTAX_HOPLIMIT && val > 255)
3390 + val = 255;
3391 if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
3392 return -EINVAL;
3393 fi->fib_metrics[type - 1] = val;
3394 diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
3395 index 41ba68de46d8..d0c1e7766214 100644
3396 --- a/net/ipv4/ip_gre.c
3397 +++ b/net/ipv4/ip_gre.c
3398 @@ -179,6 +179,7 @@ static __be16 tnl_flags_to_gre_flags(__be16 tflags)
3399 return flags;
3400 }
3401
3402 +/* Fills in tpi and returns header length to be pulled. */
3403 static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
3404 bool *csum_err)
3405 {
3406 @@ -238,7 +239,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
3407 return -EINVAL;
3408 }
3409 }
3410 - return iptunnel_pull_header(skb, hdr_len, tpi->proto);
3411 + return hdr_len;
3412 }
3413
3414 static void ipgre_err(struct sk_buff *skb, u32 info,
3415 @@ -341,7 +342,7 @@ static void gre_err(struct sk_buff *skb, u32 info)
3416 struct tnl_ptk_info tpi;
3417 bool csum_err = false;
3418
3419 - if (parse_gre_header(skb, &tpi, &csum_err)) {
3420 + if (parse_gre_header(skb, &tpi, &csum_err) < 0) {
3421 if (!csum_err) /* ignore csum errors. */
3422 return;
3423 }
3424 @@ -419,6 +420,7 @@ static int gre_rcv(struct sk_buff *skb)
3425 {
3426 struct tnl_ptk_info tpi;
3427 bool csum_err = false;
3428 + int hdr_len;
3429
3430 #ifdef CONFIG_NET_IPGRE_BROADCAST
3431 if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
3432 @@ -428,7 +430,10 @@ static int gre_rcv(struct sk_buff *skb)
3433 }
3434 #endif
3435
3436 - if (parse_gre_header(skb, &tpi, &csum_err) < 0)
3437 + hdr_len = parse_gre_header(skb, &tpi, &csum_err);
3438 + if (hdr_len < 0)
3439 + goto drop;
3440 + if (iptunnel_pull_header(skb, hdr_len, tpi.proto) < 0)
3441 goto drop;
3442
3443 if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
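The GRE rework separates parsing from consuming: parse_gre_header() now only validates the header and reports its length, gre_err() can call it without mangling the skb it merely inspects, and gre_rcv(), the one path that really consumes the packet, does the iptunnel_pull_header() itself. The shape of that refactor as a standalone toy (the names and the string "header" format are invented):

#include <stdio.h>
#include <string.h>

/* Validate a header and report its length; never consume input. */
static int parse_header(const char *buf, size_t len)
{
        if (len < 4 || memcmp(buf, "GRE:", 4) != 0)
                return -1;              /* malformed */
        return 4;                       /* bytes the caller may pull */
}

/* Receive path: parse, then pull exactly once. */
static void rcv(const char *buf, size_t len)
{
        int hdr_len = parse_header(buf, len);

        if (hdr_len < 0) {
                puts("drop");
                return;
        }
        printf("payload: %.*s\n", (int)(len - hdr_len), buf + hdr_len);
}

/* Error path: parse only; the buffer is left untouched. */
static void err(const char *buf, size_t len)
{
        if (parse_header(buf, len) < 0)
                puts("ignore: bad header");
        else
                printf("report error, packet intact: %.*s\n",
                       (int)len, buf);
}

int main(void)
{
        rcv("GRE:hello", 9);
        err("GRE:hello", 9);
        rcv("bogus", 5);
        return 0;
}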
3444 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3445 index 02c62299d717..b050cf980a57 100644
3446 --- a/net/ipv4/route.c
3447 +++ b/net/ipv4/route.c
3448 @@ -2045,6 +2045,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
3449 */
3450 if (fi && res->prefixlen < 4)
3451 fi = NULL;
3452 + } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
3453 + (orig_oif != dev_out->ifindex)) {
3454 + /* For local routes that require a particular output interface
3455 + * we do not want to cache the result. Caching the result
3456 + * causes incorrect behaviour when there are multiple source
3457 + * addresses on the interface, the end result being that if the
3458 + * intended recipient is waiting on that interface for the
3459 + * packet he won't receive it because it will be delivered on
3460 + * the loopback interface and the IP_PKTINFO ipi_ifindex will
3461 + * be set to the loopback interface as well.
3462 + */
3463 + fi = NULL;
3464 }
3465
3466 fnhe = NULL;
3467 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
3468 index fda379cd600d..b6876f2f4cf2 100644
3469 --- a/net/ipv4/tcp_output.c
3470 +++ b/net/ipv4/tcp_output.c
3471 @@ -2625,8 +2625,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
3472 */
3473 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
3474 skb_headroom(skb) >= 0xFFFF)) {
3475 - struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
3476 - GFP_ATOMIC);
3477 + struct sk_buff *nskb;
3478 +
3479 + skb_mstamp_get(&skb->skb_mstamp);
3480 + nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
3481 err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
3482 -ENOBUFS;
3483 } else {
3484 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
3485 index eb8933bc0e6e..56218ff388c7 100644
3486 --- a/net/ipv4/udp.c
3487 +++ b/net/ipv4/udp.c
3488 @@ -339,8 +339,13 @@ found:
3489
3490 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
3491 spin_lock(&hslot2->lock);
3492 - hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
3493 - &hslot2->head);
3494 + if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
3495 + sk->sk_family == AF_INET6)
3496 + hlist_nulls_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
3497 + &hslot2->head);
3498 + else
3499 + hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
3500 + &hslot2->head);
3501 hslot2->count++;
3502 spin_unlock(&hslot2->lock);
3503 }
3504 diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
3505 index 2ae3c4fd8aab..41f18de5dcc2 100644
3506 --- a/net/ipv6/ila/ila_lwt.c
3507 +++ b/net/ipv6/ila/ila_lwt.c
3508 @@ -120,8 +120,7 @@ nla_put_failure:
3509
3510 static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
3511 {
3512 - /* No encapsulation overhead */
3513 - return 0;
3514 + return nla_total_size(sizeof(u64)); /* ILA_ATTR_LOCATOR */
3515 }
3516
3517 static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
3518 diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
3519 index 18f3498a6c80..e2ea31175ef9 100644
3520 --- a/net/ipv6/reassembly.c
3521 +++ b/net/ipv6/reassembly.c
3522 @@ -496,10 +496,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
3523 IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
3524
3525 /* Yes, and fold redundant checksum back. 8) */
3526 - if (head->ip_summed == CHECKSUM_COMPLETE)
3527 - head->csum = csum_partial(skb_network_header(head),
3528 - skb_network_header_len(head),
3529 - head->csum);
3530 + skb_postpush_rcsum(head, skb_network_header(head),
3531 + skb_network_header_len(head));
3532
3533 rcu_read_lock();
3534 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
3535 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3536 index ed446639219c..18e29e2f8877 100644
3537 --- a/net/ipv6/route.c
3538 +++ b/net/ipv6/route.c
3539 @@ -1737,6 +1737,8 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
3540 } else {
3541 val = nla_get_u32(nla);
3542 }
3543 + if (type == RTAX_HOPLIMIT && val > 255)
3544 + val = 255;
3545 if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
3546 goto err;
3547
3548 diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
3549 index 8dab4e569571..bb8edb9ef506 100644
3550 --- a/net/llc/af_llc.c
3551 +++ b/net/llc/af_llc.c
3552 @@ -626,6 +626,7 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
3553 if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
3554 struct llc_pktinfo info;
3555
3556 + memset(&info, 0, sizeof(info));
3557 info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
3558 llc_pdu_decode_dsap(skb, &info.lpi_sap);
3559 llc_pdu_decode_da(skb, info.lpi_mac);
3560 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
3561 index f60b4fdeeb8c..6b94f0bc11b8 100644
3562 --- a/net/netfilter/nf_conntrack_core.c
3563 +++ b/net/netfilter/nf_conntrack_core.c
3564 @@ -1780,6 +1780,7 @@ void nf_conntrack_init_end(void)
3565
3566 int nf_conntrack_init_net(struct net *net)
3567 {
3568 + static atomic64_t unique_id;
3569 int ret = -ENOMEM;
3570 int cpu;
3571
3572 @@ -1802,7 +1803,8 @@ int nf_conntrack_init_net(struct net *net)
3573 if (!net->ct.stat)
3574 goto err_pcpu_lists;
3575
3576 - net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
3577 + net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%llu",
3578 + (u64)atomic64_inc_return(&unique_id));
3579 if (!net->ct.slabname)
3580 goto err_slabname;
3581
3582 diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
3583 index 2d59df521915..879185fe183f 100644
3584 --- a/net/openvswitch/actions.c
3585 +++ b/net/openvswitch/actions.c
3586 @@ -158,9 +158,7 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
3587 new_mpls_lse = (__be32 *)skb_mpls_header(skb);
3588 *new_mpls_lse = mpls->mpls_lse;
3589
3590 - if (skb->ip_summed == CHECKSUM_COMPLETE)
3591 - skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
3592 - MPLS_HLEN, 0));
3593 + skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);
3594
3595 hdr = eth_hdr(skb);
3596 hdr->h_proto = mpls->mpls_ethertype;
3597 @@ -280,7 +278,7 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
3598 ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
3599 mask->eth_dst);
3600
3601 - ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
3602 + skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
3603
3604 ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
3605 ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
3606 @@ -463,7 +461,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
3607 mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
3608
3609 if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
3610 - set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
3611 + set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
3612 true);
3613 memcpy(&flow_key->ipv6.addr.src, masked,
3614 sizeof(flow_key->ipv6.addr.src));
3615 @@ -485,7 +483,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
3616 NULL, &flags)
3617 != NEXTHDR_ROUTING);
3618
3619 - set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
3620 + set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
3621 recalc_csum);
3622 memcpy(&flow_key->ipv6.addr.dst, masked,
3623 sizeof(flow_key->ipv6.addr.dst));
3624 @@ -639,7 +637,7 @@ static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *sk
3625 /* Reconstruct the MAC header. */
3626 skb_push(skb, data->l2_len);
3627 memcpy(skb->data, &data->l2_data, data->l2_len);
3628 - ovs_skb_postpush_rcsum(skb, skb->data, data->l2_len);
3629 + skb_postpush_rcsum(skb, skb->data, data->l2_len);
3630 skb_reset_mac_header(skb);
3631
3632 ovs_vport_send(vport, skb);
3633 diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
3634 index ee6ff8ffc12d..9e5b9fc805fb 100644
3635 --- a/net/openvswitch/conntrack.c
3636 +++ b/net/openvswitch/conntrack.c
3637 @@ -320,6 +320,7 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
3638 } else if (key->eth.type == htons(ETH_P_IPV6)) {
3639 enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
3640
3641 + skb_orphan(skb);
3642 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
3643 err = nf_ct_frag6_gather(net, skb, user);
3644 if (err)
3645 diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
3646 index 6a6adf314363..4e3972344aa6 100644
3647 --- a/net/openvswitch/vport-netdev.c
3648 +++ b/net/openvswitch/vport-netdev.c
3649 @@ -58,7 +58,7 @@ static void netdev_port_receive(struct sk_buff *skb)
3650 return;
3651
3652 skb_push(skb, ETH_HLEN);
3653 - ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
3654 + skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
3655 ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
3656 return;
3657 error:
3658 diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
3659 index c10899cb9040..f01f28a567ad 100644
3660 --- a/net/openvswitch/vport.h
3661 +++ b/net/openvswitch/vport.h
3662 @@ -185,13 +185,6 @@ static inline struct vport *vport_from_priv(void *priv)
3663 int ovs_vport_receive(struct vport *, struct sk_buff *,
3664 const struct ip_tunnel_info *);
3665
3666 -static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
3667 - const void *start, unsigned int len)
3668 -{
3669 - if (skb->ip_summed == CHECKSUM_COMPLETE)
3670 - skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
3671 -}
3672 -
3673 static inline const char *ovs_vport_name(struct vport *vport)
3674 {
3675 return vport->dev->name;
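The three ovs_skb_postpush_rcsum() call sites above and the helper's removal here reflect a consolidation: the identical CHECKSUM_COMPLETE bookkeeping moved into a core skb_postpush_rcsum(), also used by the skbuff.c and reassembly hunks earlier in this patch. Its job, modelled with a toy byte-sum standing in for the kernel's ones'-complement csum (same bookkeeping shape, different arithmetic):

#include <stdio.h>
#include <string.h>

struct pkt { unsigned char data[64]; size_t len; unsigned int csum; };

static unsigned int partial(const unsigned char *p, size_t n)
{
        unsigned int s = 0;

        while (n--)
                s += *p++;
        return s;
}

/* Inferred shape of skb_postpush_rcsum(): after bytes are pushed in
 * front of checksummed data, fold them into the running csum.
 */
static void postpush_rcsum(struct pkt *pkt, const unsigned char *start,
                           size_t len)
{
        pkt->csum += partial(start, len);
}

int main(void)
{
        struct pkt p = { .len = 0, .csum = 0 };

        memcpy(p.data + 14, "payload", 7);      /* payload, with headroom */
        p.len = 7;
        p.csum = partial(p.data + 14, 7);

        memcpy(p.data, "ETHHDR........", 14);   /* "push" a 14-byte header */
        p.len += 14;
        postpush_rcsum(&p, p.data, 14);

        printf("csum=%u recomputed=%u\n", p.csum, partial(p.data, p.len));
        return 0;
}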
3676 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3677 index da1ae0e13cb5..9cc7b512b472 100644
3678 --- a/net/packet/af_packet.c
3679 +++ b/net/packet/af_packet.c
3680 @@ -3436,6 +3436,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3681 i->ifindex = mreq->mr_ifindex;
3682 i->alen = mreq->mr_alen;
3683 memcpy(i->addr, mreq->mr_address, i->alen);
3684 + memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3685 i->count = 1;
3686 i->next = po->mclist;
3687 po->mclist = i;
3688 diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
3689 index af1acf009866..95b560f0b253 100644
3690 --- a/net/sched/sch_api.c
3691 +++ b/net/sched/sch_api.c
3692 @@ -744,14 +744,15 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
3693 return 0;
3694 }
3695
3696 -void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
3697 +void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
3698 + unsigned int len)
3699 {
3700 const struct Qdisc_class_ops *cops;
3701 unsigned long cl;
3702 u32 parentid;
3703 int drops;
3704
3705 - if (n == 0)
3706 + if (n == 0 && len == 0)
3707 return;
3708 drops = max_t(int, n, 0);
3709 rcu_read_lock();
3710 @@ -774,11 +775,12 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
3711 cops->put(sch, cl);
3712 }
3713 sch->q.qlen -= n;
3714 + sch->qstats.backlog -= len;
3715 __qdisc_qstats_drop(sch, drops);
3716 }
3717 rcu_read_unlock();
3718 }
3719 -EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
3720 +EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
3721
3722 static void notify_and_destroy(struct net *net, struct sk_buff *skb,
3723 struct nlmsghdr *n, u32 clid,
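Everything from sch_cbq.c through sch_tbf.c below is one mechanical conversion: qdisc_tree_decrease_qlen(sch, n) becomes qdisc_tree_reduce_backlog(sch, n, len), so ancestors decrement their byte backlog together with their packet count instead of letting qstats.backlog drift after drops and resets; the open-coded graft sequences likewise collapse into a qdisc_replace() helper that bundles the same snapshot/reset/reduce steps under the tree lock. The recurring caller pattern, as a runnable toy with invented names:

#include <stdio.h>

struct q { unsigned int qlen, backlog; };

/* Toy stand-in for qdisc_tree_reduce_backlog(): propagate a removal of
 * n packets and len bytes to an ancestor's counters.
 */
static void tree_reduce_backlog(struct q *parent, unsigned int n,
                                unsigned int len)
{
        parent->qlen -= n;
        parent->backlog -= len;
}

int main(void)
{
        struct q parent = { .qlen = 10, .backlog = 15000 };
        struct q child  = { .qlen = 4,  .backlog = 6000 };

        /* The pattern from cbq_delete(), htb_delete(), drr_purge_queue()
         * and friends: snapshot both counters before the reset, then
         * report both to the ancestors.
         */
        unsigned int qlen = child.qlen;
        unsigned int backlog = child.backlog;

        child.qlen = 0;                 /* qdisc_reset(child) */
        child.backlog = 0;
        tree_reduce_backlog(&parent, qlen, backlog);

        printf("parent: qlen=%u backlog=%u\n", parent.qlen, parent.backlog);
        return 0;
}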
3724 diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
3725 index c538d9e4a8f6..baafddf229ce 100644
3726 --- a/net/sched/sch_cbq.c
3727 +++ b/net/sched/sch_cbq.c
3728 @@ -1624,13 +1624,8 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
3729 new->reshape_fail = cbq_reshape_fail;
3730 #endif
3731 }
3732 - sch_tree_lock(sch);
3733 - *old = cl->q;
3734 - cl->q = new;
3735 - qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
3736 - qdisc_reset(*old);
3737 - sch_tree_unlock(sch);
3738
3739 + *old = qdisc_replace(sch, new, &cl->q);
3740 return 0;
3741 }
3742
3743 @@ -1914,7 +1909,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
3744 {
3745 struct cbq_sched_data *q = qdisc_priv(sch);
3746 struct cbq_class *cl = (struct cbq_class *)arg;
3747 - unsigned int qlen;
3748 + unsigned int qlen, backlog;
3749
3750 if (cl->filters || cl->children || cl == &q->link)
3751 return -EBUSY;
3752 @@ -1922,8 +1917,9 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
3753 sch_tree_lock(sch);
3754
3755 qlen = cl->q->q.qlen;
3756 + backlog = cl->q->qstats.backlog;
3757 qdisc_reset(cl->q);
3758 - qdisc_tree_decrease_qlen(cl->q, qlen);
3759 + qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
3760
3761 if (cl->next_alive)
3762 cbq_deactivate_class(cl);
3763 diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
3764 index 5ffb8b8337c7..0a08c860eee4 100644
3765 --- a/net/sched/sch_choke.c
3766 +++ b/net/sched/sch_choke.c
3767 @@ -128,8 +128,8 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
3768 choke_zap_tail_holes(q);
3769
3770 qdisc_qstats_backlog_dec(sch, skb);
3771 + qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
3772 qdisc_drop(skb, sch);
3773 - qdisc_tree_decrease_qlen(sch, 1);
3774 --sch->q.qlen;
3775 }
3776
3777 @@ -456,6 +456,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
3778 old = q->tab;
3779 if (old) {
3780 unsigned int oqlen = sch->q.qlen, tail = 0;
3781 + unsigned dropped = 0;
3782
3783 while (q->head != q->tail) {
3784 struct sk_buff *skb = q->tab[q->head];
3785 @@ -467,11 +468,12 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
3786 ntab[tail++] = skb;
3787 continue;
3788 }
3789 + dropped += qdisc_pkt_len(skb);
3790 qdisc_qstats_backlog_dec(sch, skb);
3791 --sch->q.qlen;
3792 qdisc_drop(skb, sch);
3793 }
3794 - qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
3795 + qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
3796 q->head = 0;
3797 q->tail = tail;
3798 }
3799 diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
3800 index 535007d5f0b5..9b7e2980ee5c 100644
3801 --- a/net/sched/sch_codel.c
3802 +++ b/net/sched/sch_codel.c
3803 @@ -79,12 +79,13 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
3804
3805 skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
3806
3807 - /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
3808 + /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
3809 * or HTB crashes. Defer it for next round.
3810 */
3811 if (q->stats.drop_count && sch->q.qlen) {
3812 - qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
3813 + qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
3814 q->stats.drop_count = 0;
3815 + q->stats.drop_len = 0;
3816 }
3817 if (skb)
3818 qdisc_bstats_update(sch, skb);
3819 @@ -116,7 +117,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
3820 {
3821 struct codel_sched_data *q = qdisc_priv(sch);
3822 struct nlattr *tb[TCA_CODEL_MAX + 1];
3823 - unsigned int qlen;
3824 + unsigned int qlen, dropped = 0;
3825 int err;
3826
3827 if (!opt)
3828 @@ -156,10 +157,11 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
3829 while (sch->q.qlen > sch->limit) {
3830 struct sk_buff *skb = __skb_dequeue(&sch->q);
3831
3832 + dropped += qdisc_pkt_len(skb);
3833 qdisc_qstats_backlog_dec(sch, skb);
3834 qdisc_drop(skb, sch);
3835 }
3836 - qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
3837 + qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
3838
3839 sch_tree_unlock(sch);
3840 return 0;
3841 diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
3842 index a1cd778240cd..a63e879e8975 100644
3843 --- a/net/sched/sch_drr.c
3844 +++ b/net/sched/sch_drr.c
3845 @@ -53,9 +53,10 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
3846 static void drr_purge_queue(struct drr_class *cl)
3847 {
3848 unsigned int len = cl->qdisc->q.qlen;
3849 + unsigned int backlog = cl->qdisc->qstats.backlog;
3850
3851 qdisc_reset(cl->qdisc);
3852 - qdisc_tree_decrease_qlen(cl->qdisc, len);
3853 + qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
3854 }
3855
3856 static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
3857 @@ -226,11 +227,7 @@ static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
3858 new = &noop_qdisc;
3859 }
3860
3861 - sch_tree_lock(sch);
3862 - drr_purge_queue(cl);
3863 - *old = cl->qdisc;
3864 - cl->qdisc = new;
3865 - sch_tree_unlock(sch);
3866 + *old = qdisc_replace(sch, new, &cl->qdisc);
3867 return 0;
3868 }
3869
3870 diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
3871 index f357f34d02d2..d0dff0cd8186 100644
3872 --- a/net/sched/sch_dsmark.c
3873 +++ b/net/sched/sch_dsmark.c
3874 @@ -73,13 +73,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
3875 new = &noop_qdisc;
3876 }
3877
3878 - sch_tree_lock(sch);
3879 - *old = p->q;
3880 - p->q = new;
3881 - qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
3882 - qdisc_reset(*old);
3883 - sch_tree_unlock(sch);
3884 -
3885 + *old = qdisc_replace(sch, new, &p->q);
3886 return 0;
3887 }
3888
3889 @@ -264,6 +258,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
3890 return err;
3891 }
3892
3893 + qdisc_qstats_backlog_inc(sch, skb);
3894 sch->q.qlen++;
3895
3896 return NET_XMIT_SUCCESS;
3897 @@ -286,6 +281,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
3898 return NULL;
3899
3900 qdisc_bstats_update(sch, skb);
3901 + qdisc_qstats_backlog_dec(sch, skb);
3902 sch->q.qlen--;
3903
3904 index = skb->tc_index & (p->indices - 1);
3905 @@ -401,6 +397,7 @@ static void dsmark_reset(struct Qdisc *sch)
3906
3907 pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
3908 qdisc_reset(p->q);
3909 + sch->qstats.backlog = 0;
3910 sch->q.qlen = 0;
3911 }
3912
3913 diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
3914 index 109b2322778f..3c6a47d66a04 100644
3915 --- a/net/sched/sch_fq.c
3916 +++ b/net/sched/sch_fq.c
3917 @@ -662,6 +662,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
3918 struct fq_sched_data *q = qdisc_priv(sch);
3919 struct nlattr *tb[TCA_FQ_MAX + 1];
3920 int err, drop_count = 0;
3921 + unsigned drop_len = 0;
3922 u32 fq_log;
3923
3924 if (!opt)
3925 @@ -736,10 +737,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
3926
3927 if (!skb)
3928 break;
3929 + drop_len += qdisc_pkt_len(skb);
3930 kfree_skb(skb);
3931 drop_count++;
3932 }
3933 - qdisc_tree_decrease_qlen(sch, drop_count);
3934 + qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
3935
3936 sch_tree_unlock(sch);
3937 return err;
3938 diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
3939 index 4c834e93dafb..d3fc8f9dd3d4 100644
3940 --- a/net/sched/sch_fq_codel.c
3941 +++ b/net/sched/sch_fq_codel.c
3942 @@ -175,7 +175,7 @@ static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
3943 static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
3944 {
3945 struct fq_codel_sched_data *q = qdisc_priv(sch);
3946 - unsigned int idx;
3947 + unsigned int idx, prev_backlog;
3948 struct fq_codel_flow *flow;
3949 int uninitialized_var(ret);
3950
3951 @@ -203,6 +203,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
3952 if (++sch->q.qlen <= sch->limit)
3953 return NET_XMIT_SUCCESS;
3954
3955 + prev_backlog = sch->qstats.backlog;
3956 q->drop_overlimit++;
3957 /* Return Congestion Notification only if we dropped a packet
3958 * from this flow.
3959 @@ -211,7 +212,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
3960 return NET_XMIT_CN;
3961
3962 /* As we dropped a packet, better let upper stack know this */
3963 - qdisc_tree_decrease_qlen(sch, 1);
3964 + qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
3965 return NET_XMIT_SUCCESS;
3966 }
3967
3968 @@ -241,6 +242,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
3969 struct fq_codel_flow *flow;
3970 struct list_head *head;
3971 u32 prev_drop_count, prev_ecn_mark;
3972 + unsigned int prev_backlog;
3973
3974 begin:
3975 head = &q->new_flows;
3976 @@ -259,6 +261,7 @@ begin:
3977
3978 prev_drop_count = q->cstats.drop_count;
3979 prev_ecn_mark = q->cstats.ecn_mark;
3980 + prev_backlog = sch->qstats.backlog;
3981
3982 skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
3983 dequeue);
3984 @@ -276,12 +279,14 @@ begin:
3985 }
3986 qdisc_bstats_update(sch, skb);
3987 flow->deficit -= qdisc_pkt_len(skb);
3988 - /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
3989 + /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
3990 * or HTB crashes. Defer it for next round.
3991 */
3992 if (q->cstats.drop_count && sch->q.qlen) {
3993 - qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
3994 + qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
3995 + q->cstats.drop_len);
3996 q->cstats.drop_count = 0;
3997 + q->cstats.drop_len = 0;
3998 }
3999 return skb;
4000 }
4001 @@ -372,11 +377,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
4002 while (sch->q.qlen > sch->limit) {
4003 struct sk_buff *skb = fq_codel_dequeue(sch);
4004
4005 + q->cstats.drop_len += qdisc_pkt_len(skb);
4006 kfree_skb(skb);
4007 q->cstats.drop_count++;
4008 }
4009 - qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
4010 + qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
4011 q->cstats.drop_count = 0;
4012 + q->cstats.drop_len = 0;
4013
4014 sch_tree_unlock(sch);
4015 return 0;
4016 diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
4017 index 16bc83b2842a..aa4725038f94 100644
4018 --- a/net/sched/sch_generic.c
4019 +++ b/net/sched/sch_generic.c
4020 @@ -159,12 +159,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
4021 if (validate)
4022 skb = validate_xmit_skb_list(skb, dev);
4023
4024 - if (skb) {
4025 + if (likely(skb)) {
4026 HARD_TX_LOCK(dev, txq, smp_processor_id());
4027 if (!netif_xmit_frozen_or_stopped(txq))
4028 skb = dev_hard_start_xmit(skb, dev, txq, &ret);
4029
4030 HARD_TX_UNLOCK(dev, txq);
4031 + } else {
4032 + spin_lock(root_lock);
4033 + return qdisc_qlen(q);
4034 }
4035 spin_lock(root_lock);
4036
4037 diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
4038 index b7ebe2c87586..d783d7cc3348 100644
4039 --- a/net/sched/sch_hfsc.c
4040 +++ b/net/sched/sch_hfsc.c
4041 @@ -895,9 +895,10 @@ static void
4042 hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
4043 {
4044 unsigned int len = cl->qdisc->q.qlen;
4045 + unsigned int backlog = cl->qdisc->qstats.backlog;
4046
4047 qdisc_reset(cl->qdisc);
4048 - qdisc_tree_decrease_qlen(cl->qdisc, len);
4049 + qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
4050 }
4051
4052 static void
4053 @@ -1215,11 +1216,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
4054 new = &noop_qdisc;
4055 }
4056
4057 - sch_tree_lock(sch);
4058 - hfsc_purge_queue(sch, cl);
4059 - *old = cl->qdisc;
4060 - cl->qdisc = new;
4061 - sch_tree_unlock(sch);
4062 + *old = qdisc_replace(sch, new, &cl->qdisc);
4063 return 0;
4064 }
4065
4066 diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
4067 index 86b04e31e60b..13d6f83ec491 100644
4068 --- a/net/sched/sch_hhf.c
4069 +++ b/net/sched/sch_hhf.c
4070 @@ -382,6 +382,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
4071 struct hhf_sched_data *q = qdisc_priv(sch);
4072 enum wdrr_bucket_idx idx;
4073 struct wdrr_bucket *bucket;
4074 + unsigned int prev_backlog;
4075
4076 idx = hhf_classify(skb, sch);
4077
4078 @@ -409,6 +410,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
4079 if (++sch->q.qlen <= sch->limit)
4080 return NET_XMIT_SUCCESS;
4081
4082 + prev_backlog = sch->qstats.backlog;
4083 q->drop_overlimit++;
4084 /* Return Congestion Notification only if we dropped a packet from this
4085 * bucket.
4086 @@ -417,7 +419,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
4087 return NET_XMIT_CN;
4088
4089 /* As we dropped a packet, better let upper stack know this. */
4090 - qdisc_tree_decrease_qlen(sch, 1);
4091 + qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
4092 return NET_XMIT_SUCCESS;
4093 }
4094
4095 @@ -527,7 +529,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
4096 {
4097 struct hhf_sched_data *q = qdisc_priv(sch);
4098 struct nlattr *tb[TCA_HHF_MAX + 1];
4099 - unsigned int qlen;
4100 + unsigned int qlen, prev_backlog;
4101 int err;
4102 u64 non_hh_quantum;
4103 u32 new_quantum = q->quantum;
4104 @@ -577,12 +579,14 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
4105 }
4106
4107 qlen = sch->q.qlen;
4108 + prev_backlog = sch->qstats.backlog;
4109 while (sch->q.qlen > sch->limit) {
4110 struct sk_buff *skb = hhf_dequeue(sch);
4111
4112 kfree_skb(skb);
4113 }
4114 - qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
4115 + qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
4116 + prev_backlog - sch->qstats.backlog);
4117
4118 sch_tree_unlock(sch);
4119 return 0;
4120 diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
4121 index 15ccd7f8fb2a..87b02ed3d5f2 100644
4122 --- a/net/sched/sch_htb.c
4123 +++ b/net/sched/sch_htb.c
4124 @@ -600,6 +600,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
4125 htb_activate(q, cl);
4126 }
4127
4128 + qdisc_qstats_backlog_inc(sch, skb);
4129 sch->q.qlen++;
4130 return NET_XMIT_SUCCESS;
4131 }
4132 @@ -889,6 +890,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
4133 ok:
4134 qdisc_bstats_update(sch, skb);
4135 qdisc_unthrottled(sch);
4136 + qdisc_qstats_backlog_dec(sch, skb);
4137 sch->q.qlen--;
4138 return skb;
4139 }
4140 @@ -955,6 +957,7 @@ static unsigned int htb_drop(struct Qdisc *sch)
4141 unsigned int len;
4142 if (cl->un.leaf.q->ops->drop &&
4143 (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
4144 + sch->qstats.backlog -= len;
4145 sch->q.qlen--;
4146 if (!cl->un.leaf.q->q.qlen)
4147 htb_deactivate(q, cl);
4148 @@ -984,12 +987,12 @@ static void htb_reset(struct Qdisc *sch)
4149 }
4150 cl->prio_activity = 0;
4151 cl->cmode = HTB_CAN_SEND;
4152 -
4153 }
4154 }
4155 qdisc_watchdog_cancel(&q->watchdog);
4156 __skb_queue_purge(&q->direct_queue);
4157 sch->q.qlen = 0;
4158 + sch->qstats.backlog = 0;
4159 memset(q->hlevel, 0, sizeof(q->hlevel));
4160 memset(q->row_mask, 0, sizeof(q->row_mask));
4161 for (i = 0; i < TC_HTB_NUMPRIO; i++)
4162 @@ -1163,14 +1166,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
4163 cl->common.classid)) == NULL)
4164 return -ENOBUFS;
4165
4166 - sch_tree_lock(sch);
4167 - *old = cl->un.leaf.q;
4168 - cl->un.leaf.q = new;
4169 - if (*old != NULL) {
4170 - qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
4171 - qdisc_reset(*old);
4172 - }
4173 - sch_tree_unlock(sch);
4174 + *old = qdisc_replace(sch, new, &cl->un.leaf.q);
4175 return 0;
4176 }
4177
4178 @@ -1272,7 +1268,6 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
4179 {
4180 struct htb_sched *q = qdisc_priv(sch);
4181 struct htb_class *cl = (struct htb_class *)arg;
4182 - unsigned int qlen;
4183 struct Qdisc *new_q = NULL;
4184 int last_child = 0;
4185
4186 @@ -1292,9 +1287,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
4187 sch_tree_lock(sch);
4188
4189 if (!cl->level) {
4190 - qlen = cl->un.leaf.q->q.qlen;
4191 + unsigned int qlen = cl->un.leaf.q->q.qlen;
4192 + unsigned int backlog = cl->un.leaf.q->qstats.backlog;
4193 +
4194 qdisc_reset(cl->un.leaf.q);
4195 - qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
4196 + qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
4197 }
4198
4199 /* delete from hash and active; remainder in destroy_class */
4200 @@ -1428,10 +1425,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
4201 sch_tree_lock(sch);
4202 if (parent && !parent->level) {
4203 unsigned int qlen = parent->un.leaf.q->q.qlen;
4204 + unsigned int backlog = parent->un.leaf.q->qstats.backlog;
4205
4206 /* turn parent into inner node */
4207 qdisc_reset(parent->un.leaf.q);
4208 - qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
4209 + qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
4210 qdisc_destroy(parent->un.leaf.q);
4211 if (parent->prio_activity)
4212 htb_deactivate(q, parent);
4213 diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
4214 index 4e904ca0af9d..bcdd54bb101c 100644
4215 --- a/net/sched/sch_multiq.c
4216 +++ b/net/sched/sch_multiq.c
4217 @@ -218,7 +218,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
4218 if (q->queues[i] != &noop_qdisc) {
4219 struct Qdisc *child = q->queues[i];
4220 q->queues[i] = &noop_qdisc;
4221 - qdisc_tree_decrease_qlen(child, child->q.qlen);
4222 + qdisc_tree_reduce_backlog(child, child->q.qlen,
4223 + child->qstats.backlog);
4224 qdisc_destroy(child);
4225 }
4226 }
4227 @@ -238,8 +239,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
4228 q->queues[i] = child;
4229
4230 if (old != &noop_qdisc) {
4231 - qdisc_tree_decrease_qlen(old,
4232 - old->q.qlen);
4233 + qdisc_tree_reduce_backlog(old,
4234 + old->q.qlen,
4235 + old->qstats.backlog);
4236 qdisc_destroy(old);
4237 }
4238 sch_tree_unlock(sch);
4239 @@ -303,13 +305,7 @@ static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
4240 if (new == NULL)
4241 new = &noop_qdisc;
4242
4243 - sch_tree_lock(sch);
4244 - *old = q->queues[band];
4245 - q->queues[band] = new;
4246 - qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
4247 - qdisc_reset(*old);
4248 - sch_tree_unlock(sch);
4249 -
4250 + *old = qdisc_replace(sch, new, &q->queues[band]);
4251 return 0;
4252 }
4253
4254 diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
4255 index 5abd1d9de989..4befe97a9034 100644
4256 --- a/net/sched/sch_netem.c
4257 +++ b/net/sched/sch_netem.c
4258 @@ -395,6 +395,25 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
4259 sch->q.qlen++;
4260 }
4261
4262 +/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
4263 + * when we statistically choose to corrupt one, we instead segment it, returning
4264 + * the first packet to be corrupted, and re-enqueue the remaining frames
4265 + */
4266 +static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
4267 +{
4268 + struct sk_buff *segs;
4269 + netdev_features_t features = netif_skb_features(skb);
4270 +
4271 + segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
4272 +
4273 + if (IS_ERR_OR_NULL(segs)) {
4274 + qdisc_reshape_fail(skb, sch);
4275 + return NULL;
4276 + }
4277 + consume_skb(skb);
4278 + return segs;
4279 +}
4280 +
4281 /*
4282 * Insert one skb into qdisc.
4283 * Note: parent depends on return value to account for queue length.
4284 @@ -407,7 +426,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
4285 /* We don't fill cb now as skb_unshare() may invalidate it */
4286 struct netem_skb_cb *cb;
4287 struct sk_buff *skb2;
4288 + struct sk_buff *segs = NULL;
4289 + unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
4290 + int nb = 0;
4291 int count = 1;
4292 + int rc = NET_XMIT_SUCCESS;
4293
4294 /* Random duplication */
4295 if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
4296 @@ -453,10 +476,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
4297 * do it now in software before we mangle it.
4298 */
4299 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
4300 + if (skb_is_gso(skb)) {
4301 + segs = netem_segment(skb, sch);
4302 + if (!segs)
4303 + return NET_XMIT_DROP;
4304 + } else {
4305 + segs = skb;
4306 + }
4307 +
4308 + skb = segs;
4309 + segs = segs->next;
4310 +
4311 if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
4312 (skb->ip_summed == CHECKSUM_PARTIAL &&
4313 - skb_checksum_help(skb)))
4314 - return qdisc_drop(skb, sch);
4315 + skb_checksum_help(skb))) {
4316 + rc = qdisc_drop(skb, sch);
4317 + goto finish_segs;
4318 + }
4319
4320 skb->data[prandom_u32() % skb_headlen(skb)] ^=
4321 1<<(prandom_u32() % 8);
4322 @@ -516,6 +552,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
4323 sch->qstats.requeues++;
4324 }
4325
4326 +finish_segs:
4327 + if (segs) {
4328 + while (segs) {
4329 + skb2 = segs->next;
4330 + segs->next = NULL;
4331 + qdisc_skb_cb(segs)->pkt_len = segs->len;
4332 + last_len = segs->len;
4333 + rc = qdisc_enqueue(segs, sch);
4334 + if (rc != NET_XMIT_SUCCESS) {
4335 + if (net_xmit_drop_count(rc))
4336 + qdisc_qstats_drop(sch);
4337 + } else {
4338 + nb++;
4339 + len += last_len;
4340 + }
4341 + segs = skb2;
4342 + }
4343 + sch->q.qlen += nb;
4344 + if (nb > 1)
4345 + qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
4346 + }
4347 return NET_XMIT_SUCCESS;
4348 }
4349
4350 @@ -598,7 +655,8 @@ deliver:
4351 if (unlikely(err != NET_XMIT_SUCCESS)) {
4352 if (net_xmit_drop_count(err)) {
4353 qdisc_qstats_drop(sch);
4354 - qdisc_tree_decrease_qlen(sch, 1);
4355 + qdisc_tree_reduce_backlog(sch, 1,
4356 + qdisc_pkt_len(skb));
4357 }
4358 }
4359 goto tfifo_dequeue;
4360 @@ -1037,15 +1095,7 @@ static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
4361 {
4362 struct netem_sched_data *q = qdisc_priv(sch);
4363
4364 - sch_tree_lock(sch);
4365 - *old = q->qdisc;
4366 - q->qdisc = new;
4367 - if (*old) {
4368 - qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
4369 - qdisc_reset(*old);
4370 - }
4371 - sch_tree_unlock(sch);
4372 -
4373 + *old = qdisc_replace(sch, new, &q->qdisc);
4374 return 0;
4375 }
4376
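netem's corruption path above and tbf_segment() further down share the same GSO idiom: software-segment the oversized skb, enqueue each segment, then correct the parent's view, which was charged one packet of prev_len bytes at enqueue time but now holds nb packets totalling len bytes. That is why the call is qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len): a negative packet delta is an increase. The correction arithmetic as a toy (segment sizes invented):

#include <stdio.h>

struct q { int qlen; int backlog; };

static void tree_reduce_backlog(struct q *sch, int n, int len)
{
        sch->qlen -= n;
        sch->backlog -= len;
}

int main(void)
{
        /* Parent's view at enqueue time: one GSO skb of 4500 bytes. */
        struct q view = { .qlen = 1, .backlog = 4500 };
        int prev_len = 4500;

        /* After skb_gso_segment(): three 1500-byte segments enqueued. */
        int nb = 3, len = 3 * 1500;

        if (nb > 1)     /* nothing to fix up if it stayed one packet */
                tree_reduce_backlog(&view, 1 - nb, prev_len - len);

        printf("corrected view: qlen=%d backlog=%d\n",
               view.qlen, view.backlog);
        return 0;
}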
4377 diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
4378 index b783a446d884..71ae3b9629f9 100644
4379 --- a/net/sched/sch_pie.c
4380 +++ b/net/sched/sch_pie.c
4381 @@ -183,7 +183,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
4382 {
4383 struct pie_sched_data *q = qdisc_priv(sch);
4384 struct nlattr *tb[TCA_PIE_MAX + 1];
4385 - unsigned int qlen;
4386 + unsigned int qlen, dropped = 0;
4387 int err;
4388
4389 if (!opt)
4390 @@ -232,10 +232,11 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
4391 while (sch->q.qlen > sch->limit) {
4392 struct sk_buff *skb = __skb_dequeue(&sch->q);
4393
4394 + dropped += qdisc_pkt_len(skb);
4395 qdisc_qstats_backlog_dec(sch, skb);
4396 qdisc_drop(skb, sch);
4397 }
4398 - qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
4399 + qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
4400
4401 sch_tree_unlock(sch);
4402 return 0;
4403 diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
4404 index ba6487f2741f..fee1b15506b2 100644
4405 --- a/net/sched/sch_prio.c
4406 +++ b/net/sched/sch_prio.c
4407 @@ -191,7 +191,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
4408 struct Qdisc *child = q->queues[i];
4409 q->queues[i] = &noop_qdisc;
4410 if (child != &noop_qdisc) {
4411 - qdisc_tree_decrease_qlen(child, child->q.qlen);
4412 + qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
4413 qdisc_destroy(child);
4414 }
4415 }
4416 @@ -210,8 +210,9 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
4417 q->queues[i] = child;
4418
4419 if (old != &noop_qdisc) {
4420 - qdisc_tree_decrease_qlen(old,
4421 - old->q.qlen);
4422 + qdisc_tree_reduce_backlog(old,
4423 + old->q.qlen,
4424 + old->qstats.backlog);
4425 qdisc_destroy(old);
4426 }
4427 sch_tree_unlock(sch);
4428 @@ -268,13 +269,7 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
4429 if (new == NULL)
4430 new = &noop_qdisc;
4431
4432 - sch_tree_lock(sch);
4433 - *old = q->queues[band];
4434 - q->queues[band] = new;
4435 - qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
4436 - qdisc_reset(*old);
4437 - sch_tree_unlock(sch);
4438 -
4439 + *old = qdisc_replace(sch, new, &q->queues[band]);
4440 return 0;
4441 }
4442
4443 diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
4444 index 3dc3a6e56052..8d2d8d953432 100644
4445 --- a/net/sched/sch_qfq.c
4446 +++ b/net/sched/sch_qfq.c
4447 @@ -220,9 +220,10 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
4448 static void qfq_purge_queue(struct qfq_class *cl)
4449 {
4450 unsigned int len = cl->qdisc->q.qlen;
4451 + unsigned int backlog = cl->qdisc->qstats.backlog;
4452
4453 qdisc_reset(cl->qdisc);
4454 - qdisc_tree_decrease_qlen(cl->qdisc, len);
4455 + qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
4456 }
4457
4458 static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
4459 @@ -617,11 +618,7 @@ static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
4460 new = &noop_qdisc;
4461 }
4462
4463 - sch_tree_lock(sch);
4464 - qfq_purge_queue(cl);
4465 - *old = cl->qdisc;
4466 - cl->qdisc = new;
4467 - sch_tree_unlock(sch);
4468 + *old = qdisc_replace(sch, new, &cl->qdisc);
4469 return 0;
4470 }
4471
4472 diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
4473 index 6c0534cc7758..8c0508c0e287 100644
4474 --- a/net/sched/sch_red.c
4475 +++ b/net/sched/sch_red.c
4476 @@ -210,7 +210,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
4477 q->flags = ctl->flags;
4478 q->limit = ctl->limit;
4479 if (child) {
4480 - qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
4481 + qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
4482 + q->qdisc->qstats.backlog);
4483 qdisc_destroy(q->qdisc);
4484 q->qdisc = child;
4485 }
4486 @@ -313,12 +314,7 @@ static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
4487 if (new == NULL)
4488 new = &noop_qdisc;
4489
4490 - sch_tree_lock(sch);
4491 - *old = q->qdisc;
4492 - q->qdisc = new;
4493 - qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
4494 - qdisc_reset(*old);
4495 - sch_tree_unlock(sch);
4496 + *old = qdisc_replace(sch, new, &q->qdisc);
4497 return 0;
4498 }
4499
4500 diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
4501 index 5bbb6332ec57..c69611640fa5 100644
4502 --- a/net/sched/sch_sfb.c
4503 +++ b/net/sched/sch_sfb.c
4504 @@ -510,7 +510,8 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
4505
4506 sch_tree_lock(sch);
4507
4508 - qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
4509 + qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
4510 + q->qdisc->qstats.backlog);
4511 qdisc_destroy(q->qdisc);
4512 q->qdisc = child;
4513
4514 @@ -606,12 +607,7 @@ static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
4515 if (new == NULL)
4516 new = &noop_qdisc;
4517
4518 - sch_tree_lock(sch);
4519 - *old = q->qdisc;
4520 - q->qdisc = new;
4521 - qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
4522 - qdisc_reset(*old);
4523 - sch_tree_unlock(sch);
4524 + *old = qdisc_replace(sch, new, &q->qdisc);
4525 return 0;
4526 }
4527
4528 diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
4529 index 3abab534eb5c..498f0a2cb47f 100644
4530 --- a/net/sched/sch_sfq.c
4531 +++ b/net/sched/sch_sfq.c
4532 @@ -346,7 +346,7 @@ static int
4533 sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
4534 {
4535 struct sfq_sched_data *q = qdisc_priv(sch);
4536 - unsigned int hash;
4537 + unsigned int hash, dropped;
4538 sfq_index x, qlen;
4539 struct sfq_slot *slot;
4540 int uninitialized_var(ret);
4541 @@ -461,7 +461,7 @@ enqueue:
4542 return NET_XMIT_SUCCESS;
4543
4544 qlen = slot->qlen;
4545 - sfq_drop(sch);
4546 + dropped = sfq_drop(sch);
4547 /* Return Congestion Notification only if we dropped a packet
4548 * from this flow.
4549 */
4550 @@ -469,7 +469,7 @@ enqueue:
4551 return NET_XMIT_CN;
4552
4553 /* As we dropped a packet, better let upper stack know this */
4554 - qdisc_tree_decrease_qlen(sch, 1);
4555 + qdisc_tree_reduce_backlog(sch, 1, dropped);
4556 return NET_XMIT_SUCCESS;
4557 }
4558
4559 @@ -537,6 +537,7 @@ static void sfq_rehash(struct Qdisc *sch)
4560 struct sfq_slot *slot;
4561 struct sk_buff_head list;
4562 int dropped = 0;
4563 + unsigned int drop_len = 0;
4564
4565 __skb_queue_head_init(&list);
4566
4567 @@ -565,6 +566,7 @@ static void sfq_rehash(struct Qdisc *sch)
4568 if (x >= SFQ_MAX_FLOWS) {
4569 drop:
4570 qdisc_qstats_backlog_dec(sch, skb);
4571 + drop_len += qdisc_pkt_len(skb);
4572 kfree_skb(skb);
4573 dropped++;
4574 continue;
4575 @@ -594,7 +596,7 @@ drop:
4576 }
4577 }
4578 sch->q.qlen -= dropped;
4579 - qdisc_tree_decrease_qlen(sch, dropped);
4580 + qdisc_tree_reduce_backlog(sch, dropped, drop_len);
4581 }
4582
4583 static void sfq_perturbation(unsigned long arg)
4584 @@ -618,7 +620,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
4585 struct sfq_sched_data *q = qdisc_priv(sch);
4586 struct tc_sfq_qopt *ctl = nla_data(opt);
4587 struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
4588 - unsigned int qlen;
4589 + unsigned int qlen, dropped = 0;
4590 struct red_parms *p = NULL;
4591
4592 if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
4593 @@ -667,8 +669,8 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
4594
4595 qlen = sch->q.qlen;
4596 while (sch->q.qlen > q->limit)
4597 - sfq_drop(sch);
4598 - qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
4599 + dropped += sfq_drop(sch);
4600 + qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
4601
4602 del_timer(&q->perturb_timer);
4603 if (q->perturb_period) {
4604 diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
4605 index a4afde14e865..c2fbde742f37 100644
4606 --- a/net/sched/sch_tbf.c
4607 +++ b/net/sched/sch_tbf.c
4608 @@ -160,6 +160,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
4609 struct tbf_sched_data *q = qdisc_priv(sch);
4610 struct sk_buff *segs, *nskb;
4611 netdev_features_t features = netif_skb_features(skb);
4612 + unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
4613 int ret, nb;
4614
4615 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
4616 @@ -172,6 +173,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
4617 nskb = segs->next;
4618 segs->next = NULL;
4619 qdisc_skb_cb(segs)->pkt_len = segs->len;
4620 + len += segs->len;
4621 ret = qdisc_enqueue(segs, q->qdisc);
4622 if (ret != NET_XMIT_SUCCESS) {
4623 if (net_xmit_drop_count(ret))
4624 @@ -183,7 +185,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
4625 }
4626 sch->q.qlen += nb;
4627 if (nb > 1)
4628 - qdisc_tree_decrease_qlen(sch, 1 - nb);
4629 + qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
4630 consume_skb(skb);
4631 return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
4632 }
4633 @@ -399,7 +401,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
4634
4635 sch_tree_lock(sch);
4636 if (child) {
4637 - qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
4638 + qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
4639 + q->qdisc->qstats.backlog);
4640 qdisc_destroy(q->qdisc);
4641 q->qdisc = child;
4642 }
4643 @@ -502,13 +505,7 @@ static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
4644 if (new == NULL)
4645 new = &noop_qdisc;
4646
4647 - sch_tree_lock(sch);
4648 - *old = q->qdisc;
4649 - q->qdisc = new;
4650 - qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
4651 - qdisc_reset(*old);
4652 - sch_tree_unlock(sch);
4653 -
4654 + *old = qdisc_replace(sch, new, &q->qdisc);
4655 return 0;
4656 }
4657
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index bbe65dcb9738..c93e67beaea7 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1789,27 +1789,8 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	else if (sk->sk_shutdown & RCV_SHUTDOWN)
 		err = 0;
 
-	if (copied > 0) {
-		/* We only do these additional bookkeeping/notification steps
-		 * if we actually copied something out of the queue pair
-		 * instead of just peeking ahead.
-		 */
-
-		if (!(flags & MSG_PEEK)) {
-			/* If the other side has shutdown for sending and there
-			 * is nothing more to read, then modify the socket
-			 * state.
-			 */
-			if (vsk->peer_shutdown & SEND_SHUTDOWN) {
-				if (vsock_stream_has_data(vsk) <= 0) {
-					sk->sk_state = SS_UNCONNECTED;
-					sock_set_flag(sk, SOCK_DONE);
-					sk->sk_state_change(sk);
-				}
-			}
-		}
+	if (copied > 0)
 		err = copied;
-	}
 
 out:
 	release_sock(sk);
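In x25_negotiate_facilities(), the DTE facilities struct is now zeroed before parsing; fields the parser leaves untouched previously held uninitialized kernel stack data, which could leak to user space.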
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 7ecd04c21360..997ff7b2509b 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -277,6 +277,7 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
 
 	memset(&theirs, 0, sizeof(theirs));
 	memcpy(new, ours, sizeof(*new));
+	memset(dte, 0, sizeof(*dte));
 
 	len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
 	if (len < 0)
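The trace_output_kern.c hunk drops the memset() of the output struct, which appears redundant here since both fields are assigned explicitly just below.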
diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output_kern.c
index 8d8d1ec429eb..9b96f4fb8cea 100644
--- a/samples/bpf/trace_output_kern.c
+++ b/samples/bpf/trace_output_kern.c
@@ -18,7 +18,6 @@ int bpf_prog1(struct pt_regs *ctx)
 		u64 cookie;
 	} data;
 
-	memset(&data, 0, sizeof(data));
 	data.pid = bpf_get_current_pid_tgid();
 	data.cookie = 0x12345678;
 
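reconfig_codec() in hda_sysfs.c stops rebuilding PCMs and mixers explicitly; that work happens again when snd_card_register() re-registers the card, so the duplicate rebuild calls (apparently the cause of the broken reconfig path) are dropped.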
diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
index 64e0d1d81ca5..9739fce9e032 100644
--- a/sound/pci/hda/hda_sysfs.c
+++ b/sound/pci/hda/hda_sysfs.c
@@ -141,14 +141,6 @@ static int reconfig_codec(struct hda_codec *codec)
 	err = snd_hda_codec_configure(codec);
 	if (err < 0)
 		goto error;
-	/* rebuild PCMs */
-	err = snd_hda_codec_build_pcms(codec);
-	if (err < 0)
-		goto error;
-	/* rebuild mixers */
-	err = snd_hda_codec_build_controls(codec);
-	if (err < 0)
-		goto error;
 	err = snd_card_register(codec->card);
 error:
 	snd_hda_power_down(codec);
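patch_realtek.c adds an ALC668_FIXUP_ASUS_Nx51 fixup that declares pin 0x1a as a bass speaker (chained to the bass channel-map fixup) and applies it to the ASUS N551 and N751; a headset-mode quirk entry is also added for the Asus UX501VW.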
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ac4490a96863..4918ffa5ba68 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6426,6 +6426,7 @@ enum {
 	ALC668_FIXUP_DELL_DISABLE_AAMIX,
 	ALC668_FIXUP_DELL_XPS13,
 	ALC662_FIXUP_ASUS_Nx50,
+	ALC668_FIXUP_ASUS_Nx51,
 };
 
 static const struct hda_fixup alc662_fixups[] = {
@@ -6672,6 +6673,15 @@ static const struct hda_fixup alc662_fixups[] = {
 		.chained = true,
 		.chain_id = ALC662_FIXUP_BASS_1A
 	},
+	[ALC668_FIXUP_ASUS_Nx51] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{0x1a, 0x90170151}, /* bass speaker */
+			{}
+		},
+		.chained = true,
+		.chain_id = ALC662_FIXUP_BASS_CHMAP,
+	},
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6694,11 +6704,14 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+	SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
 	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
 	SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
 	SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
 	SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
 	SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
+	SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
+	SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
 	SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
 	SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
 	SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
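sound/usb/quirks.c adds three more Phoenix Audio devices to snd_usb_get_sample_rate_quirk(), the list of devices whose current sample rate cannot be read back.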
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 001fb4dc0722..db11ecf0b74d 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1138,8 +1138,11 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
 	case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
 	case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
 	case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+	case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
 	case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+	case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
 	case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
+	case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
 	case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
 		return true;
 	}
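The parse-filter.c hunk fixes a use-after-free in process_filter(): collapse_tree() may free current_op and already stores the result through parg, so the stale pointer is cleared instead of being written to *parg afterwards.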
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index 0144b3d1bb77..88cccea3ca99 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -1164,11 +1164,11 @@ process_filter(struct event_format *event, struct filter_arg **parg,
 		current_op = current_exp;
 
 	ret = collapse_tree(current_op, parg, error_str);
+	/* collapse_tree() may free current_op, and updates parg accordingly */
+	current_op = NULL;
 	if (ret < 0)
 		goto fail;
 
-	*parg = current_op;
-
 	free(token);
 	return 0;
 
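Finally, setup_overhead() now leaves the sort keys untouched in diff mode, so perf diff no longer gets an implicit "overhead" key prepended.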
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index ec722346e6ff..16892a7ca27e 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -2272,6 +2272,9 @@ static char *prefix_if_not_in(const char *pre, char *str)
 
 static char *setup_overhead(char *keys)
 {
+	if (sort__mode == SORT_MODE__DIFF)
+		return keys;
+
 	keys = prefix_if_not_in("overhead", keys);
 
 	if (symbol_conf.cumulate_callchain)