Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.1/0111-4.1.12-all-fixes.patch

Parent Directory | Revision Log


Revision 2748 - (hide annotations) (download)
Mon Jan 11 12:00:45 2016 UTC (8 years, 4 months ago) by niro
File size: 50366 byte(s)
-linux-4.1 patches up to 4.1.15
1 niro 2748 diff --git a/Makefile b/Makefile
2     index c7d877b1c248..2320f1911404 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 1
8     -SUBLEVEL = 11
9     +SUBLEVEL = 12
10     EXTRAVERSION =
11     NAME = Series 4800
12    
13     diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
14     index 81151663ef38..3258174e6152 100644
15     --- a/arch/arm64/Makefile
16     +++ b/arch/arm64/Makefile
17     @@ -31,7 +31,7 @@ endif
18     CHECKFLAGS += -D__aarch64__
19    
20     ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
21     -CFLAGS_MODULE += -mcmodel=large
22     +KBUILD_CFLAGS_MODULE += -mcmodel=large
23     endif
24    
25     # Default value
26     diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
27     index 56283f8a675c..cf7319422768 100644
28     --- a/arch/arm64/include/asm/pgtable.h
29     +++ b/arch/arm64/include/asm/pgtable.h
30     @@ -80,7 +80,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
31     #define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
32     #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
33    
34     -#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
35     +#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
36     #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
37     #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
38     #define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
39     @@ -460,7 +460,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
40     static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
41     {
42     const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
43     - PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
44     + PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
45     pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
46     return pte;
47     }
48     diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
49     index df81caab7383..f1e0e5522e3a 100644
50     --- a/arch/powerpc/kvm/book3s_hv.c
51     +++ b/arch/powerpc/kvm/book3s_hv.c
52     @@ -2178,7 +2178,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
53     vc->runner = vcpu;
54     if (n_ceded == vc->n_runnable) {
55     kvmppc_vcore_blocked(vc);
56     - } else if (should_resched()) {
57     + } else if (need_resched()) {
58     vc->vcore_state = VCORE_PREEMPT;
59     /* Let something else run */
60     cond_resched_lock(&vc->lock);
61     diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
62     index 2e48eb8813ff..c90930de76ba 100644
63     --- a/arch/sparc/crypto/aes_glue.c
64     +++ b/arch/sparc/crypto/aes_glue.c
65     @@ -433,6 +433,7 @@ static struct crypto_alg algs[] = { {
66     .blkcipher = {
67     .min_keysize = AES_MIN_KEY_SIZE,
68     .max_keysize = AES_MAX_KEY_SIZE,
69     + .ivsize = AES_BLOCK_SIZE,
70     .setkey = aes_set_key,
71     .encrypt = cbc_encrypt,
72     .decrypt = cbc_decrypt,
73     @@ -452,6 +453,7 @@ static struct crypto_alg algs[] = { {
74     .blkcipher = {
75     .min_keysize = AES_MIN_KEY_SIZE,
76     .max_keysize = AES_MAX_KEY_SIZE,
77     + .ivsize = AES_BLOCK_SIZE,
78     .setkey = aes_set_key,
79     .encrypt = ctr_crypt,
80     .decrypt = ctr_crypt,
81     diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
82     index 6bf2479a12fb..561a84d93cf6 100644
83     --- a/arch/sparc/crypto/camellia_glue.c
84     +++ b/arch/sparc/crypto/camellia_glue.c
85     @@ -274,6 +274,7 @@ static struct crypto_alg algs[] = { {
86     .blkcipher = {
87     .min_keysize = CAMELLIA_MIN_KEY_SIZE,
88     .max_keysize = CAMELLIA_MAX_KEY_SIZE,
89     + .ivsize = CAMELLIA_BLOCK_SIZE,
90     .setkey = camellia_set_key,
91     .encrypt = cbc_encrypt,
92     .decrypt = cbc_decrypt,
93     diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
94     index dd6a34fa6e19..61af794aa2d3 100644
95     --- a/arch/sparc/crypto/des_glue.c
96     +++ b/arch/sparc/crypto/des_glue.c
97     @@ -429,6 +429,7 @@ static struct crypto_alg algs[] = { {
98     .blkcipher = {
99     .min_keysize = DES_KEY_SIZE,
100     .max_keysize = DES_KEY_SIZE,
101     + .ivsize = DES_BLOCK_SIZE,
102     .setkey = des_set_key,
103     .encrypt = cbc_encrypt,
104     .decrypt = cbc_decrypt,
105     @@ -485,6 +486,7 @@ static struct crypto_alg algs[] = { {
106     .blkcipher = {
107     .min_keysize = DES3_EDE_KEY_SIZE,
108     .max_keysize = DES3_EDE_KEY_SIZE,
109     + .ivsize = DES3_EDE_BLOCK_SIZE,
110     .setkey = des3_ede_set_key,
111     .encrypt = cbc3_encrypt,
112     .decrypt = cbc3_decrypt,
113     diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
114     index 8f3271842533..67b6cd00a44f 100644
115     --- a/arch/x86/include/asm/preempt.h
116     +++ b/arch/x86/include/asm/preempt.h
117     @@ -90,9 +90,9 @@ static __always_inline bool __preempt_count_dec_and_test(void)
118     /*
119     * Returns true when we need to resched and can (barring IRQ state).
120     */
121     -static __always_inline bool should_resched(void)
122     +static __always_inline bool should_resched(int preempt_offset)
123     {
124     - return unlikely(!raw_cpu_read_4(__preempt_count));
125     + return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
126     }
127    
128     #ifdef CONFIG_PREEMPT
129     diff --git a/crypto/ahash.c b/crypto/ahash.c
130     index 8acb886032ae..9c1dc8d6106a 100644
131     --- a/crypto/ahash.c
132     +++ b/crypto/ahash.c
133     @@ -544,7 +544,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
134     struct crypto_alg *base = &alg->halg.base;
135    
136     if (alg->halg.digestsize > PAGE_SIZE / 8 ||
137     - alg->halg.statesize > PAGE_SIZE / 8)
138     + alg->halg.statesize > PAGE_SIZE / 8 ||
139     + alg->halg.statesize == 0)
140     return -EINVAL;
141    
142     base->cra_type = &crypto_ahash_type;
143     diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
144     index 010ce0b1f517..fe8f1e4b4c7c 100644
145     --- a/drivers/block/rbd.c
146     +++ b/drivers/block/rbd.c
147     @@ -5174,7 +5174,6 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
148     out_err:
149     if (parent) {
150     rbd_dev_unparent(rbd_dev);
151     - kfree(rbd_dev->header_name);
152     rbd_dev_destroy(parent);
153     } else {
154     rbd_put_client(rbdc);
155     diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
156     index 7f467fdc9107..2a2eb96caeda 100644
157     --- a/drivers/gpu/drm/drm_dp_mst_topology.c
158     +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
159     @@ -2766,12 +2766,13 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
160     if (msgs[num - 1].flags & I2C_M_RD)
161     reading = true;
162    
163     - if (!reading) {
164     + if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
165     DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
166     ret = -EIO;
167     goto out;
168     }
169    
170     + memset(&msg, 0, sizeof(msg));
171     msg.req_type = DP_REMOTE_I2C_READ;
172     msg.u.i2c_read.num_transactions = num - 1;
173     msg.u.i2c_read.port_number = port->port_num;
174     diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
175     index eb7e61078a5b..92586b0af3ab 100644
176     --- a/drivers/gpu/drm/drm_sysfs.c
177     +++ b/drivers/gpu/drm/drm_sysfs.c
178     @@ -235,18 +235,12 @@ static ssize_t dpms_show(struct device *device,
179     char *buf)
180     {
181     struct drm_connector *connector = to_drm_connector(device);
182     - struct drm_device *dev = connector->dev;
183     - uint64_t dpms_status;
184     - int ret;
185     + int dpms;
186    
187     - ret = drm_object_property_get_value(&connector->base,
188     - dev->mode_config.dpms_property,
189     - &dpms_status);
190     - if (ret)
191     - return 0;
192     + dpms = READ_ONCE(connector->dpms);
193    
194     return snprintf(buf, PAGE_SIZE, "%s\n",
195     - drm_get_dpms_name((int)dpms_status));
196     + drm_get_dpms_name(dpms));
197     }
198    
199     static ssize_t enabled_show(struct device *device,
200     diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
201     index 6751553abe4a..567791b27d6d 100644
202     --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
203     +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
204     @@ -178,8 +178,30 @@ nouveau_fbcon_sync(struct fb_info *info)
205     return 0;
206     }
207    
208     +static int
209     +nouveau_fbcon_open(struct fb_info *info, int user)
210     +{
211     + struct nouveau_fbdev *fbcon = info->par;
212     + struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
213     + int ret = pm_runtime_get_sync(drm->dev->dev);
214     + if (ret < 0 && ret != -EACCES)
215     + return ret;
216     + return 0;
217     +}
218     +
219     +static int
220     +nouveau_fbcon_release(struct fb_info *info, int user)
221     +{
222     + struct nouveau_fbdev *fbcon = info->par;
223     + struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
224     + pm_runtime_put(drm->dev->dev);
225     + return 0;
226     +}
227     +
228     static struct fb_ops nouveau_fbcon_ops = {
229     .owner = THIS_MODULE,
230     + .fb_open = nouveau_fbcon_open,
231     + .fb_release = nouveau_fbcon_release,
232     .fb_check_var = drm_fb_helper_check_var,
233     .fb_set_par = drm_fb_helper_set_par,
234     .fb_fillrect = nouveau_fbcon_fillrect,
235     @@ -195,6 +217,8 @@ static struct fb_ops nouveau_fbcon_ops = {
236    
237     static struct fb_ops nouveau_fbcon_sw_ops = {
238     .owner = THIS_MODULE,
239     + .fb_open = nouveau_fbcon_open,
240     + .fb_release = nouveau_fbcon_release,
241     .fb_check_var = drm_fb_helper_check_var,
242     .fb_set_par = drm_fb_helper_set_par,
243     .fb_fillrect = cfb_fillrect,
244     diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
245     index d2e9e9efc159..6743174acdbc 100644
246     --- a/drivers/gpu/drm/radeon/radeon_display.c
247     +++ b/drivers/gpu/drm/radeon/radeon_display.c
248     @@ -1633,18 +1633,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
249     radeon_fbdev_init(rdev);
250     drm_kms_helper_poll_init(rdev->ddev);
251    
252     - if (rdev->pm.dpm_enabled) {
253     - /* do dpm late init */
254     - ret = radeon_pm_late_init(rdev);
255     - if (ret) {
256     - rdev->pm.dpm_enabled = false;
257     - DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
258     - }
259     - /* set the dpm state for PX since there won't be
260     - * a modeset to call this.
261     - */
262     - radeon_pm_compute_clocks(rdev);
263     - }
264     + /* do pm late init */
265     + ret = radeon_pm_late_init(rdev);
266    
267     return 0;
268     }
269     diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
270     index 257b10be5cda..42986130cc63 100644
271     --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
272     +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
273     @@ -283,6 +283,7 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
274     radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master);
275    
276     drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
277     + drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
278     drm_mode_connector_set_path_property(connector, pathprop);
279     drm_reinit_primary_mode_group(dev);
280    
281     diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
282     index c1ba83a8dd8c..948c33105801 100644
283     --- a/drivers/gpu/drm/radeon/radeon_pm.c
284     +++ b/drivers/gpu/drm/radeon/radeon_pm.c
285     @@ -1331,14 +1331,6 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
286     INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
287    
288     if (rdev->pm.num_power_states > 1) {
289     - /* where's the best place to put these? */
290     - ret = device_create_file(rdev->dev, &dev_attr_power_profile);
291     - if (ret)
292     - DRM_ERROR("failed to create device file for power profile\n");
293     - ret = device_create_file(rdev->dev, &dev_attr_power_method);
294     - if (ret)
295     - DRM_ERROR("failed to create device file for power method\n");
296     -
297     if (radeon_debugfs_pm_init(rdev)) {
298     DRM_ERROR("Failed to register debugfs file for PM!\n");
299     }
300     @@ -1396,20 +1388,6 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
301     goto dpm_failed;
302     rdev->pm.dpm_enabled = true;
303    
304     - ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
305     - if (ret)
306     - DRM_ERROR("failed to create device file for dpm state\n");
307     - ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
308     - if (ret)
309     - DRM_ERROR("failed to create device file for dpm state\n");
310     - /* XXX: these are noops for dpm but are here for backwards compat */
311     - ret = device_create_file(rdev->dev, &dev_attr_power_profile);
312     - if (ret)
313     - DRM_ERROR("failed to create device file for power profile\n");
314     - ret = device_create_file(rdev->dev, &dev_attr_power_method);
315     - if (ret)
316     - DRM_ERROR("failed to create device file for power method\n");
317     -
318     if (radeon_debugfs_pm_init(rdev)) {
319     DRM_ERROR("Failed to register debugfs file for dpm!\n");
320     }
321     @@ -1550,9 +1528,44 @@ int radeon_pm_late_init(struct radeon_device *rdev)
322     int ret = 0;
323    
324     if (rdev->pm.pm_method == PM_METHOD_DPM) {
325     - mutex_lock(&rdev->pm.mutex);
326     - ret = radeon_dpm_late_enable(rdev);
327     - mutex_unlock(&rdev->pm.mutex);
328     + if (rdev->pm.dpm_enabled) {
329     + ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
330     + if (ret)
331     + DRM_ERROR("failed to create device file for dpm state\n");
332     + ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
333     + if (ret)
334     + DRM_ERROR("failed to create device file for dpm state\n");
335     + /* XXX: these are noops for dpm but are here for backwards compat */
336     + ret = device_create_file(rdev->dev, &dev_attr_power_profile);
337     + if (ret)
338     + DRM_ERROR("failed to create device file for power profile\n");
339     + ret = device_create_file(rdev->dev, &dev_attr_power_method);
340     + if (ret)
341     + DRM_ERROR("failed to create device file for power method\n");
342     +
343     + mutex_lock(&rdev->pm.mutex);
344     + ret = radeon_dpm_late_enable(rdev);
345     + mutex_unlock(&rdev->pm.mutex);
346     + if (ret) {
347     + rdev->pm.dpm_enabled = false;
348     + DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
349     + } else {
350     + /* set the dpm state for PX since there won't be
351     + * a modeset to call this.
352     + */
353     + radeon_pm_compute_clocks(rdev);
354     + }
355     + }
356     + } else {
357     + if (rdev->pm.num_power_states > 1) {
358     + /* where's the best place to put these? */
359     + ret = device_create_file(rdev->dev, &dev_attr_power_profile);
360     + if (ret)
361     + DRM_ERROR("failed to create device file for power profile\n");
362     + ret = device_create_file(rdev->dev, &dev_attr_power_method);
363     + if (ret)
364     + DRM_ERROR("failed to create device file for power method\n");
365     + }
366     }
367     return ret;
368     }
369     diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
370     index 0a80e4aabaed..3f7d4876937e 100644
371     --- a/drivers/i2c/busses/i2c-designware-platdrv.c
372     +++ b/drivers/i2c/busses/i2c-designware-platdrv.c
373     @@ -24,6 +24,7 @@
374     #include <linux/kernel.h>
375     #include <linux/module.h>
376     #include <linux/delay.h>
377     +#include <linux/dmi.h>
378     #include <linux/i2c.h>
379     #include <linux/clk.h>
380     #include <linux/clk-provider.h>
381     @@ -51,6 +52,22 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
382     }
383    
384     #ifdef CONFIG_ACPI
385     +/*
386     + * The HCNT/LCNT information coming from ACPI should be the most accurate
387     + * for given platform. However, some systems get it wrong. On such systems
388     + * we get better results by calculating those based on the input clock.
389     + */
390     +static const struct dmi_system_id dw_i2c_no_acpi_params[] = {
391     + {
392     + .ident = "Dell Inspiron 7348",
393     + .matches = {
394     + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
395     + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7348"),
396     + },
397     + },
398     + { }
399     +};
400     +
401     static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
402     u16 *hcnt, u16 *lcnt, u32 *sda_hold)
403     {
404     @@ -58,6 +75,9 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
405     acpi_handle handle = ACPI_HANDLE(&pdev->dev);
406     union acpi_object *obj;
407    
408     + if (dmi_check_system(dw_i2c_no_acpi_params))
409     + return;
410     +
411     if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf)))
412     return;
413    
414     @@ -253,12 +273,6 @@ static int dw_i2c_probe(struct platform_device *pdev)
415     adap->dev.parent = &pdev->dev;
416     adap->dev.of_node = pdev->dev.of_node;
417    
418     - r = i2c_add_numbered_adapter(adap);
419     - if (r) {
420     - dev_err(&pdev->dev, "failure adding adapter\n");
421     - return r;
422     - }
423     -
424     if (dev->pm_runtime_disabled) {
425     pm_runtime_forbid(&pdev->dev);
426     } else {
427     @@ -268,6 +282,13 @@ static int dw_i2c_probe(struct platform_device *pdev)
428     pm_runtime_enable(&pdev->dev);
429     }
430    
431     + r = i2c_add_numbered_adapter(adap);
432     + if (r) {
433     + dev_err(&pdev->dev, "failure adding adapter\n");
434     + pm_runtime_disable(&pdev->dev);
435     + return r;
436     + }
437     +
438     return 0;
439     }
440    
441     diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
442     index 5a84bea5b845..d9d022cdfff0 100644
443     --- a/drivers/i2c/busses/i2c-rcar.c
444     +++ b/drivers/i2c/busses/i2c-rcar.c
445     @@ -688,15 +688,16 @@ static int rcar_i2c_probe(struct platform_device *pdev)
446     return ret;
447     }
448    
449     + pm_runtime_enable(dev);
450     + platform_set_drvdata(pdev, priv);
451     +
452     ret = i2c_add_numbered_adapter(adap);
453     if (ret < 0) {
454     dev_err(dev, "reg adap failed: %d\n", ret);
455     + pm_runtime_disable(dev);
456     return ret;
457     }
458    
459     - pm_runtime_enable(dev);
460     - platform_set_drvdata(pdev, priv);
461     -
462     dev_info(dev, "probed\n");
463    
464     return 0;
465     diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
466     index 297e9c9ac943..424794271703 100644
467     --- a/drivers/i2c/busses/i2c-s3c2410.c
468     +++ b/drivers/i2c/busses/i2c-s3c2410.c
469     @@ -1243,17 +1243,19 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
470     i2c->adap.nr = i2c->pdata->bus_num;
471     i2c->adap.dev.of_node = pdev->dev.of_node;
472    
473     + platform_set_drvdata(pdev, i2c);
474     +
475     + pm_runtime_enable(&pdev->dev);
476     +
477     ret = i2c_add_numbered_adapter(&i2c->adap);
478     if (ret < 0) {
479     dev_err(&pdev->dev, "failed to add bus to i2c core\n");
480     + pm_runtime_disable(&pdev->dev);
481     s3c24xx_i2c_deregister_cpufreq(i2c);
482     clk_unprepare(i2c->clk);
483     return ret;
484     }
485    
486     - platform_set_drvdata(pdev, i2c);
487     -
488     - pm_runtime_enable(&pdev->dev);
489     pm_runtime_enable(&i2c->adap.dev);
490    
491     dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev));
492     diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
493     index e22e6c892b8a..7073b22d4cb4 100644
494     --- a/drivers/md/dm-thin.c
495     +++ b/drivers/md/dm-thin.c
496     @@ -2959,7 +2959,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
497     metadata_low_callback,
498     pool);
499     if (r)
500     - goto out_free_pt;
501     + goto out_flags_changed;
502    
503     pt->callbacks.congested_fn = pool_is_congested;
504     dm_table_add_target_callbacks(ti->table, &pt->callbacks);
505     diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
506     index a354ac677ec7..1074a0d68680 100644
507     --- a/drivers/mfd/max77843.c
508     +++ b/drivers/mfd/max77843.c
509     @@ -79,7 +79,7 @@ static int max77843_chg_init(struct max77843 *max77843)
510     if (!max77843->i2c_chg) {
511     dev_err(&max77843->i2c->dev,
512     "Cannot allocate I2C device for Charger\n");
513     - return PTR_ERR(max77843->i2c_chg);
514     + return -ENODEV;
515     }
516     i2c_set_clientdata(max77843->i2c_chg, max77843);
517    
518     diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
519     index 28df37420da9..ac02c675c59c 100644
520     --- a/drivers/net/ethernet/ibm/emac/core.h
521     +++ b/drivers/net/ethernet/ibm/emac/core.h
522     @@ -460,8 +460,8 @@ struct emac_ethtool_regs_subhdr {
523     u32 index;
524     };
525    
526     -#define EMAC_ETHTOOL_REGS_VER 0
527     -#define EMAC4_ETHTOOL_REGS_VER 1
528     -#define EMAC4SYNC_ETHTOOL_REGS_VER 2
529     +#define EMAC_ETHTOOL_REGS_VER 3
530     +#define EMAC4_ETHTOOL_REGS_VER 4
531     +#define EMAC4SYNC_ETHTOOL_REGS_VER 5
532    
533     #endif /* __IBM_NEWEMAC_CORE_H */
534     diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
535     index b62a5e3a1c65..db2c3cdf2c40 100644
536     --- a/drivers/net/ppp/pppoe.c
537     +++ b/drivers/net/ppp/pppoe.c
538     @@ -313,7 +313,6 @@ static void pppoe_flush_dev(struct net_device *dev)
539     if (po->pppoe_dev == dev &&
540     sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
541     pppox_unbind_sock(sk);
542     - sk->sk_state = PPPOX_ZOMBIE;
543     sk->sk_state_change(sk);
544     po->pppoe_dev = NULL;
545     dev_put(dev);
546     diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c
547     index faf635654312..293ed4381cc0 100644
548     --- a/drivers/pinctrl/freescale/pinctrl-imx25.c
549     +++ b/drivers/pinctrl/freescale/pinctrl-imx25.c
550     @@ -26,7 +26,8 @@
551     #include "pinctrl-imx.h"
552    
553     enum imx25_pads {
554     - MX25_PAD_RESERVE0 = 1,
555     + MX25_PAD_RESERVE0 = 0,
556     + MX25_PAD_RESERVE1 = 1,
557     MX25_PAD_A10 = 2,
558     MX25_PAD_A13 = 3,
559     MX25_PAD_A14 = 4,
560     @@ -169,6 +170,7 @@ enum imx25_pads {
561     /* Pad names for the pinmux subsystem */
562     static const struct pinctrl_pin_desc imx25_pinctrl_pads[] = {
563     IMX_PINCTRL_PIN(MX25_PAD_RESERVE0),
564     + IMX_PINCTRL_PIN(MX25_PAD_RESERVE1),
565     IMX_PINCTRL_PIN(MX25_PAD_A10),
566     IMX_PINCTRL_PIN(MX25_PAD_A13),
567     IMX_PINCTRL_PIN(MX25_PAD_A14),
568     diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
569     index a1800c150839..08cb419eb4e6 100644
570     --- a/drivers/xen/preempt.c
571     +++ b/drivers/xen/preempt.c
572     @@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
573     asmlinkage __visible void xen_maybe_preempt_hcall(void)
574     {
575     if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
576     - && should_resched())) {
577     + && need_resched())) {
578     /*
579     * Clear flag as we may be rescheduled on a different
580     * cpu.
581     diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
582     index 614aaa1969bd..723470850b94 100644
583     --- a/fs/btrfs/backref.c
584     +++ b/fs/btrfs/backref.c
585     @@ -1786,7 +1786,6 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
586     int found = 0;
587     struct extent_buffer *eb;
588     struct btrfs_inode_extref *extref;
589     - struct extent_buffer *leaf;
590     u32 item_size;
591     u32 cur_offset;
592     unsigned long ptr;
593     @@ -1814,9 +1813,8 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
594     btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
595     btrfs_release_path(path);
596    
597     - leaf = path->nodes[0];
598     - item_size = btrfs_item_size_nr(leaf, slot);
599     - ptr = btrfs_item_ptr_offset(leaf, slot);
600     + item_size = btrfs_item_size_nr(eb, slot);
601     + ptr = btrfs_item_ptr_offset(eb, slot);
602     cur_offset = 0;
603    
604     while (cur_offset < item_size) {
605     @@ -1830,7 +1828,7 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
606     if (ret)
607     break;
608    
609     - cur_offset += btrfs_inode_extref_name_len(leaf, extref);
610     + cur_offset += btrfs_inode_extref_name_len(eb, extref);
611     cur_offset += sizeof(*extref);
612     }
613     btrfs_tree_read_unlock_blocking(eb);
614     diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
615     index 37d456a9a3b8..af3dd3c55ef1 100644
616     --- a/fs/btrfs/ioctl.c
617     +++ b/fs/btrfs/ioctl.c
618     @@ -4492,6 +4492,11 @@ locked:
619     bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
620     }
621    
622     + if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
623     + ret = -EINVAL;
624     + goto out_bargs;
625     + }
626     +
627     do_balance:
628     /*
629     * Ownership of bctl and mutually_exclusive_operation_running
630     diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
631     index ebc31331a837..e1cc5b45069a 100644
632     --- a/fs/btrfs/volumes.h
633     +++ b/fs/btrfs/volumes.h
634     @@ -372,6 +372,14 @@ struct map_lookup {
635     #define BTRFS_BALANCE_ARGS_VRANGE (1ULL << 4)
636     #define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5)
637    
638     +#define BTRFS_BALANCE_ARGS_MASK \
639     + (BTRFS_BALANCE_ARGS_PROFILES | \
640     + BTRFS_BALANCE_ARGS_USAGE | \
641     + BTRFS_BALANCE_ARGS_DEVID | \
642     + BTRFS_BALANCE_ARGS_DRANGE | \
643     + BTRFS_BALANCE_ARGS_VRANGE | \
644     + BTRFS_BALANCE_ARGS_LIMIT)
645     +
646     /*
647     * Profile changing flags. When SOFT is set we won't relocate chunk if
648     * it already has the target profile (even though it may be
649     diff --git a/fs/locks.c b/fs/locks.c
650     index 653faabb07f4..d3d558ba4da7 100644
651     --- a/fs/locks.c
652     +++ b/fs/locks.c
653     @@ -862,12 +862,11 @@ static int posix_locks_deadlock(struct file_lock *caller_fl,
654     * whether or not a lock was successfully freed by testing the return
655     * value for -ENOENT.
656     */
657     -static int flock_lock_file(struct file *filp, struct file_lock *request)
658     +static int flock_lock_inode(struct inode *inode, struct file_lock *request)
659     {
660     struct file_lock *new_fl = NULL;
661     struct file_lock *fl;
662     struct file_lock_context *ctx;
663     - struct inode *inode = file_inode(filp);
664     int error = 0;
665     bool found = false;
666     LIST_HEAD(dispose);
667     @@ -890,7 +889,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
668     goto find_conflict;
669    
670     list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
671     - if (filp != fl->fl_file)
672     + if (request->fl_file != fl->fl_file)
673     continue;
674     if (request->fl_type == fl->fl_type)
675     goto out;
676     @@ -1164,20 +1163,19 @@ int posix_lock_file(struct file *filp, struct file_lock *fl,
677     EXPORT_SYMBOL(posix_lock_file);
678    
679     /**
680     - * posix_lock_file_wait - Apply a POSIX-style lock to a file
681     - * @filp: The file to apply the lock to
682     + * posix_lock_inode_wait - Apply a POSIX-style lock to a file
683     + * @inode: inode of file to which lock request should be applied
684     * @fl: The lock to be applied
685     *
686     - * Add a POSIX style lock to a file.
687     - * We merge adjacent & overlapping locks whenever possible.
688     - * POSIX locks are sorted by owner task, then by starting address
689     + * Variant of posix_lock_file_wait that does not take a filp, and so can be
690     + * used after the filp has already been torn down.
691     */
692     -int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
693     +int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
694     {
695     int error;
696     might_sleep ();
697     for (;;) {
698     - error = posix_lock_file(filp, fl, NULL);
699     + error = __posix_lock_file(inode, fl, NULL);
700     if (error != FILE_LOCK_DEFERRED)
701     break;
702     error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
703     @@ -1189,7 +1187,7 @@ int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
704     }
705     return error;
706     }
707     -EXPORT_SYMBOL(posix_lock_file_wait);
708     +EXPORT_SYMBOL(posix_lock_inode_wait);
709    
710     /**
711     * locks_mandatory_locked - Check for an active lock
712     @@ -1851,18 +1849,18 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
713     }
714    
715     /**
716     - * flock_lock_file_wait - Apply a FLOCK-style lock to a file
717     - * @filp: The file to apply the lock to
718     + * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
719     + * @inode: inode of the file to apply to
720     * @fl: The lock to be applied
721     *
722     - * Add a FLOCK style lock to a file.
723     + * Apply a FLOCK style lock request to an inode.
724     */
725     -int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
726     +int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
727     {
728     int error;
729     might_sleep();
730     for (;;) {
731     - error = flock_lock_file(filp, fl);
732     + error = flock_lock_inode(inode, fl);
733     if (error != FILE_LOCK_DEFERRED)
734     break;
735     error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
736     @@ -1874,8 +1872,7 @@ int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
737     }
738     return error;
739     }
740     -
741     -EXPORT_SYMBOL(flock_lock_file_wait);
742     +EXPORT_SYMBOL(flock_lock_inode_wait);
743    
744     /**
745     * sys_flock: - flock() system call.
746     @@ -2401,7 +2398,8 @@ locks_remove_flock(struct file *filp)
747     .fl_type = F_UNLCK,
748     .fl_end = OFFSET_MAX,
749     };
750     - struct file_lock_context *flctx = file_inode(filp)->i_flctx;
751     + struct inode *inode = file_inode(filp);
752     + struct file_lock_context *flctx = inode->i_flctx;
753    
754     if (list_empty(&flctx->flc_flock))
755     return;
756     @@ -2409,7 +2407,7 @@ locks_remove_flock(struct file *filp)
757     if (filp->f_op->flock)
758     filp->f_op->flock(filp, F_SETLKW, &fl);
759     else
760     - flock_lock_file(filp, &fl);
761     + flock_lock_inode(inode, &fl);
762    
763     if (fl.fl_ops && fl.fl_ops->fl_release_private)
764     fl.fl_ops->fl_release_private(&fl);
765     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
766     index c245874d7e9d..8f393fcc313b 100644
767     --- a/fs/nfs/nfs4proc.c
768     +++ b/fs/nfs/nfs4proc.c
769     @@ -5367,15 +5367,15 @@ static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *
770     return err;
771     }
772    
773     -static int do_vfs_lock(struct file *file, struct file_lock *fl)
774     +static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
775     {
776     int res = 0;
777     switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
778     case FL_POSIX:
779     - res = posix_lock_file_wait(file, fl);
780     + res = posix_lock_inode_wait(inode, fl);
781     break;
782     case FL_FLOCK:
783     - res = flock_lock_file_wait(file, fl);
784     + res = flock_lock_inode_wait(inode, fl);
785     break;
786     default:
787     BUG();
788     @@ -5435,7 +5435,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
789     switch (task->tk_status) {
790     case 0:
791     renew_lease(calldata->server, calldata->timestamp);
792     - do_vfs_lock(calldata->fl.fl_file, &calldata->fl);
793     + do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
794     if (nfs4_update_lock_stateid(calldata->lsp,
795     &calldata->res.stateid))
796     break;
797     @@ -5543,7 +5543,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
798     mutex_lock(&sp->so_delegreturn_mutex);
799     /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
800     down_read(&nfsi->rwsem);
801     - if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
802     + if (do_vfs_lock(inode, request) == -ENOENT) {
803     up_read(&nfsi->rwsem);
804     mutex_unlock(&sp->so_delegreturn_mutex);
805     goto out;
806     @@ -5684,7 +5684,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
807     data->timestamp);
808     if (data->arg.new_lock) {
809     data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
810     - if (do_vfs_lock(data->fl.fl_file, &data->fl) < 0) {
811     + if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
812     rpc_restart_call_prepare(task);
813     break;
814     }
815     @@ -5926,7 +5926,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
816     if (status != 0)
817     goto out;
818     request->fl_flags |= FL_ACCESS;
819     - status = do_vfs_lock(request->fl_file, request);
820     + status = do_vfs_lock(state->inode, request);
821     if (status < 0)
822     goto out;
823     down_read(&nfsi->rwsem);
824     @@ -5934,7 +5934,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
825     /* Yes: cache locks! */
826     /* ...but avoid races with delegation recall... */
827     request->fl_flags = fl_flags & ~FL_SLEEP;
828     - status = do_vfs_lock(request->fl_file, request);
829     + status = do_vfs_lock(state->inode, request);
830     up_read(&nfsi->rwsem);
831     goto out;
832     }
833     diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
834     index cdefaa331a07..c29d9421bd5e 100644
835     --- a/fs/nfsd/blocklayout.c
836     +++ b/fs/nfsd/blocklayout.c
837     @@ -56,14 +56,6 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
838     u32 device_generation = 0;
839     int error;
840    
841     - /*
842     - * We do not attempt to support I/O smaller than the fs block size,
843     - * or not aligned to it.
844     - */
845     - if (args->lg_minlength < block_size) {
846     - dprintk("pnfsd: I/O too small\n");
847     - goto out_layoutunavailable;
848     - }
849     if (seg->offset & (block_size - 1)) {
850     dprintk("pnfsd: I/O misaligned\n");
851     goto out_layoutunavailable;
852     diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
853     index eb6f9e6c3075..b6a53e8e526a 100644
854     --- a/include/asm-generic/preempt.h
855     +++ b/include/asm-generic/preempt.h
856     @@ -71,9 +71,10 @@ static __always_inline bool __preempt_count_dec_and_test(void)
857     /*
858     * Returns true when we need to resched and can (barring IRQ state).
859     */
860     -static __always_inline bool should_resched(void)
861     +static __always_inline bool should_resched(int preempt_offset)
862     {
863     - return unlikely(!preempt_count() && tif_need_resched());
864     + return unlikely(preempt_count() == preempt_offset &&
865     + tif_need_resched());
866     }
867    
868     #ifdef CONFIG_PREEMPT
869     diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
870     index 86d0b25ed054..a89f505c856b 100644
871     --- a/include/drm/drm_dp_mst_helper.h
872     +++ b/include/drm/drm_dp_mst_helper.h
873     @@ -253,6 +253,7 @@ struct drm_dp_remote_dpcd_write {
874     u8 *bytes;
875     };
876    
877     +#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
878     struct drm_dp_remote_i2c_read {
879     u8 num_transactions;
880     u8 port_number;
881     @@ -262,7 +263,7 @@ struct drm_dp_remote_i2c_read {
882     u8 *bytes;
883     u8 no_stop_bit;
884     u8 i2c_transaction_delay;
885     - } transactions[4];
886     + } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
887     u8 read_i2c_device_id;
888     u8 num_bytes_read;
889     };
890     diff --git a/include/linux/fs.h b/include/linux/fs.h
891     index f93192333b37..fdc369fa69e8 100644
892     --- a/include/linux/fs.h
893     +++ b/include/linux/fs.h
894     @@ -1036,12 +1036,12 @@ extern void locks_remove_file(struct file *);
895     extern void locks_release_private(struct file_lock *);
896     extern void posix_test_lock(struct file *, struct file_lock *);
897     extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
898     -extern int posix_lock_file_wait(struct file *, struct file_lock *);
899     +extern int posix_lock_inode_wait(struct inode *, struct file_lock *);
900     extern int posix_unblock_lock(struct file_lock *);
901     extern int vfs_test_lock(struct file *, struct file_lock *);
902     extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
903     extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
904     -extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
905     +extern int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl);
906     extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
907     extern void lease_get_mtime(struct inode *, struct timespec *time);
908     extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
909     @@ -1127,7 +1127,8 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
910     return -ENOLCK;
911     }
912    
913     -static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
914     +static inline int posix_lock_inode_wait(struct inode *inode,
915     + struct file_lock *fl)
916     {
917     return -ENOLCK;
918     }
919     @@ -1153,8 +1154,8 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
920     return 0;
921     }
922    
923     -static inline int flock_lock_file_wait(struct file *filp,
924     - struct file_lock *request)
925     +static inline int flock_lock_inode_wait(struct inode *inode,
926     + struct file_lock *request)
927     {
928     return -ENOLCK;
929     }
930     @@ -1192,6 +1193,20 @@ static inline void show_fd_locks(struct seq_file *f,
931     struct file *filp, struct files_struct *files) {}
932     #endif /* !CONFIG_FILE_LOCKING */
933    
934     +static inline struct inode *file_inode(const struct file *f)
935     +{
936     + return f->f_inode;
937     +}
938     +
939     +static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
940     +{
941     + return posix_lock_inode_wait(file_inode(filp), fl);
942     +}
943     +
944     +static inline int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
945     +{
946     + return flock_lock_inode_wait(file_inode(filp), fl);
947     +}
948    
949     struct fasync_struct {
950     spinlock_t fa_lock;
951     @@ -1991,11 +2006,6 @@ extern void ihold(struct inode * inode);
952     extern void iput(struct inode *);
953     extern int generic_update_time(struct inode *, struct timespec *, int);
954    
955     -static inline struct inode *file_inode(const struct file *f)
956     -{
957     - return f->f_inode;
958     -}
959     -
960     /* /sys/fs */
961     extern struct kobject *fs_kobj;
962    
963     diff --git a/include/linux/preempt.h b/include/linux/preempt.h
964     index de83b4eb1642..8cd6725c5758 100644
965     --- a/include/linux/preempt.h
966     +++ b/include/linux/preempt.h
967     @@ -20,7 +20,8 @@
968     #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
969     extern void preempt_count_add(int val);
970     extern void preempt_count_sub(int val);
971     -#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
972     +#define preempt_count_dec_and_test() \
973     + ({ preempt_count_sub(1); should_resched(0); })
974     #else
975     #define preempt_count_add(val) __preempt_count_add(val)
976     #define preempt_count_sub(val) __preempt_count_sub(val)
977     @@ -59,7 +60,7 @@ do { \
978    
979     #define preempt_check_resched() \
980     do { \
981     - if (should_resched()) \
982     + if (should_resched(0)) \
983     __preempt_schedule(); \
984     } while (0)
985    
986     diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
987     index dbeec4d4a3be..5cb25f17331a 100644
988     --- a/include/linux/preempt_mask.h
989     +++ b/include/linux/preempt_mask.h
990     @@ -71,13 +71,21 @@
991     */
992     #define in_nmi() (preempt_count() & NMI_MASK)
993    
994     +/*
995     + * The preempt_count offset after preempt_disable();
996     + */
997     #if defined(CONFIG_PREEMPT_COUNT)
998     -# define PREEMPT_CHECK_OFFSET 1
999     +# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
1000     #else
1001     -# define PREEMPT_CHECK_OFFSET 0
1002     +# define PREEMPT_DISABLE_OFFSET 0
1003     #endif
1004    
1005     /*
1006     + * The preempt_count offset after spin_lock()
1007     + */
1008     +#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
1009     +
1010     +/*
1011     * The preempt_count offset needed for things like:
1012     *
1013     * spin_lock_bh()
1014     @@ -90,7 +98,7 @@
1015     *
1016     * Work as expected.
1017     */
1018     -#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET)
1019     +#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
1020    
1021     /*
1022     * Are we running in atomic context? WARNING: this macro cannot
1023     @@ -106,7 +114,7 @@
1024     * (used by the scheduler, *after* releasing the kernel lock)
1025     */
1026     #define in_atomic_preempt_off() \
1027     - ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
1028     + ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET)
1029    
1030     #ifdef CONFIG_PREEMPT_COUNT
1031     # define preemptible() (preempt_count() == 0 && !irqs_disabled())
1032     diff --git a/include/linux/sched.h b/include/linux/sched.h
1033     index 26a2e6122734..61f4f2d5c882 100644
1034     --- a/include/linux/sched.h
1035     +++ b/include/linux/sched.h
1036     @@ -2834,12 +2834,6 @@ extern int _cond_resched(void);
1037    
1038     extern int __cond_resched_lock(spinlock_t *lock);
1039    
1040     -#ifdef CONFIG_PREEMPT_COUNT
1041     -#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
1042     -#else
1043     -#define PREEMPT_LOCK_OFFSET 0
1044     -#endif
1045     -
1046     #define cond_resched_lock(lock) ({ \
1047     ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
1048     __cond_resched_lock(lock); \
1049     diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
1050     index eb1c55b8255a..4307e20a4a4a 100644
1051     --- a/include/linux/skbuff.h
1052     +++ b/include/linux/skbuff.h
1053     @@ -2588,6 +2588,9 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
1054     {
1055     if (skb->ip_summed == CHECKSUM_COMPLETE)
1056     skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
1057     + else if (skb->ip_summed == CHECKSUM_PARTIAL &&
1058     + skb_checksum_start_offset(skb) < 0)
1059     + skb->ip_summed = CHECKSUM_NONE;
1060     }
1061    
1062     unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
1063     diff --git a/include/net/af_unix.h b/include/net/af_unix.h
1064     index a175ba4a7adb..dfe4ddfbb43c 100644
1065     --- a/include/net/af_unix.h
1066     +++ b/include/net/af_unix.h
1067     @@ -64,7 +64,11 @@ struct unix_sock {
1068     #define UNIX_GC_MAYBE_CYCLE 1
1069     struct socket_wq peer_wq;
1070     };
1071     -#define unix_sk(__sk) ((struct unix_sock *)__sk)
1072     +
1073     +static inline struct unix_sock *unix_sk(struct sock *sk)
1074     +{
1075     + return (struct unix_sock *)sk;
1076     +}
1077    
1078     #define peer_wait peer_wq.wait
1079    
1080     diff --git a/include/net/sock.h b/include/net/sock.h
1081     index 3a4898ec8c67..ed01a012f8d5 100644
1082     --- a/include/net/sock.h
1083     +++ b/include/net/sock.h
1084     @@ -826,6 +826,14 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
1085     if (sk_rcvqueues_full(sk, limit))
1086     return -ENOBUFS;
1087    
1088     + /*
1089     + * If the skb was allocated from pfmemalloc reserves, only
1090     + * allow SOCK_MEMALLOC sockets to use it as this socket is
1091     + * helping free memory
1092     + */
1093     + if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
1094     + return -ENOMEM;
1095     +
1096     __sk_add_backlog(sk, skb);
1097     sk->sk_backlog.len += skb->truesize;
1098     return 0;
1099     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
1100     index 8476206a1e19..4d870eb6086b 100644
1101     --- a/kernel/sched/core.c
1102     +++ b/kernel/sched/core.c
1103     @@ -4232,7 +4232,7 @@ SYSCALL_DEFINE0(sched_yield)
1104    
1105     int __sched _cond_resched(void)
1106     {
1107     - if (should_resched()) {
1108     + if (should_resched(0)) {
1109     preempt_schedule_common();
1110     return 1;
1111     }
1112     @@ -4250,7 +4250,7 @@ EXPORT_SYMBOL(_cond_resched);
1113     */
1114     int __cond_resched_lock(spinlock_t *lock)
1115     {
1116     - int resched = should_resched();
1117     + int resched = should_resched(PREEMPT_LOCK_OFFSET);
1118     int ret = 0;
1119    
1120     lockdep_assert_held(lock);
1121     @@ -4272,7 +4272,7 @@ int __sched __cond_resched_softirq(void)
1122     {
1123     BUG_ON(!in_softirq());
1124    
1125     - if (should_resched()) {
1126     + if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
1127     local_bh_enable();
1128     preempt_schedule_common();
1129     local_bh_disable();
1130     diff --git a/kernel/workqueue.c b/kernel/workqueue.c
1131     index 586ad91300b0..5c01664c26e2 100644
1132     --- a/kernel/workqueue.c
1133     +++ b/kernel/workqueue.c
1134     @@ -1451,13 +1451,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1135     timer_stats_timer_set_start_info(&dwork->timer);
1136    
1137     dwork->wq = wq;
1138     + /* timer isn't guaranteed to run in this cpu, record earlier */
1139     + if (cpu == WORK_CPU_UNBOUND)
1140     + cpu = raw_smp_processor_id();
1141     dwork->cpu = cpu;
1142     timer->expires = jiffies + delay;
1143    
1144     - if (unlikely(cpu != WORK_CPU_UNBOUND))
1145     - add_timer_on(timer, cpu);
1146     - else
1147     - add_timer(timer);
1148     + add_timer_on(timer, cpu);
1149     }
1150    
1151     /**
1152     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
1153     index a04225d372ba..68dea90334cb 100644
1154     --- a/mm/memcontrol.c
1155     +++ b/mm/memcontrol.c
1156     @@ -3677,6 +3677,7 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
1157     ret = page_counter_memparse(args, "-1", &threshold);
1158     if (ret)
1159     return ret;
1160     + threshold <<= PAGE_SHIFT;
1161    
1162     mutex_lock(&memcg->thresholds_lock);
1163    
1164     diff --git a/net/core/ethtool.c b/net/core/ethtool.c
1165     index 1d00b8922902..4a6824767f3d 100644
1166     --- a/net/core/ethtool.c
1167     +++ b/net/core/ethtool.c
1168     @@ -1273,7 +1273,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
1169    
1170     gstrings.len = ret;
1171    
1172     - data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
1173     + data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
1174     if (!data)
1175     return -ENOMEM;
1176    
1177     diff --git a/net/core/filter.c b/net/core/filter.c
1178     index bf831a85c315..0fa2613b5e35 100644
1179     --- a/net/core/filter.c
1180     +++ b/net/core/filter.c
1181     @@ -1526,9 +1526,13 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
1182     goto out;
1183    
1184     /* We're copying the filter that has been originally attached,
1185     - * so no conversion/decode needed anymore.
1186     + * so no conversion/decode needed anymore. eBPF programs that
1187     + * have no original program cannot be dumped through this.
1188     */
1189     + ret = -EACCES;
1190     fprog = filter->prog->orig_prog;
1191     + if (!fprog)
1192     + goto out;
1193    
1194     ret = fprog->len;
1195     if (!len)
1196     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
1197     index a2e4e47b2839..075d2e78c87e 100644
1198     --- a/net/core/skbuff.c
1199     +++ b/net/core/skbuff.c
1200     @@ -2976,11 +2976,12 @@ EXPORT_SYMBOL(skb_append_datato_frags);
1201     */
1202     unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
1203     {
1204     + unsigned char *data = skb->data;
1205     +
1206     BUG_ON(len > skb->len);
1207     - skb->len -= len;
1208     - BUG_ON(skb->len < skb->data_len);
1209     - skb_postpull_rcsum(skb, skb->data, len);
1210     - return skb->data += len;
1211     + __skb_pull(skb, len);
1212     + skb_postpull_rcsum(skb, data, len);
1213     + return skb->data;
1214     }
1215     EXPORT_SYMBOL_GPL(skb_pull_rcsum);
1216    
1217     diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
1218     index e664706b350c..4d2bc8c6694f 100644
1219     --- a/net/ipv4/inet_connection_sock.c
1220     +++ b/net/ipv4/inet_connection_sock.c
1221     @@ -568,21 +568,22 @@ EXPORT_SYMBOL(inet_rtx_syn_ack);
1222     static bool reqsk_queue_unlink(struct request_sock_queue *queue,
1223     struct request_sock *req)
1224     {
1225     - struct listen_sock *lopt = queue->listen_opt;
1226     struct request_sock **prev;
1227     + struct listen_sock *lopt;
1228     bool found = false;
1229    
1230     spin_lock(&queue->syn_wait_lock);
1231     -
1232     - for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
1233     - prev = &(*prev)->dl_next) {
1234     - if (*prev == req) {
1235     - *prev = req->dl_next;
1236     - found = true;
1237     - break;
1238     + lopt = queue->listen_opt;
1239     + if (lopt) {
1240     + for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
1241     + prev = &(*prev)->dl_next) {
1242     + if (*prev == req) {
1243     + *prev = req->dl_next;
1244     + found = true;
1245     + break;
1246     + }
1247     }
1248     }
1249     -
1250     spin_unlock(&queue->syn_wait_lock);
1251     if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
1252     reqsk_put(req);
1253     @@ -676,20 +677,20 @@ void reqsk_queue_hash_req(struct request_sock_queue *queue,
1254     req->num_timeout = 0;
1255     req->sk = NULL;
1256    
1257     + setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
1258     + mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
1259     + req->rsk_hash = hash;
1260     +
1261     /* before letting lookups find us, make sure all req fields
1262     * are committed to memory and refcnt initialized.
1263     */
1264     smp_wmb();
1265     atomic_set(&req->rsk_refcnt, 2);
1266     - setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
1267     - req->rsk_hash = hash;
1268    
1269     spin_lock(&queue->syn_wait_lock);
1270     req->dl_next = lopt->syn_table[hash];
1271     lopt->syn_table[hash] = req;
1272     spin_unlock(&queue->syn_wait_lock);
1273     -
1274     - mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
1275     }
1276     EXPORT_SYMBOL(reqsk_queue_hash_req);
1277    
1278     diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
1279     index a29a504492af..e3db498f0233 100644
1280     --- a/net/l2tp/l2tp_core.c
1281     +++ b/net/l2tp/l2tp_core.c
1282     @@ -1319,7 +1319,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
1283     tunnel = container_of(work, struct l2tp_tunnel, del_work);
1284     sk = l2tp_tunnel_sock_lookup(tunnel);
1285     if (!sk)
1286     - return;
1287     + goto out;
1288    
1289     sock = sk->sk_socket;
1290    
1291     @@ -1340,6 +1340,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
1292     }
1293    
1294     l2tp_tunnel_sock_put(sk);
1295     +out:
1296     + l2tp_tunnel_dec_refcount(tunnel);
1297     }
1298    
1299     /* Create a socket for the tunnel, if one isn't set up by
1300     @@ -1639,8 +1641,13 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1301     */
1302     int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1303     {
1304     + l2tp_tunnel_inc_refcount(tunnel);
1305     l2tp_tunnel_closeall(tunnel);
1306     - return (false == queue_work(l2tp_wq, &tunnel->del_work));
1307     + if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
1308     + l2tp_tunnel_dec_refcount(tunnel);
1309     + return 1;
1310     + }
1311     + return 0;
1312     }
1313     EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1314    
1315     diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
1316     index 980121e75d2e..d139c43ac6e5 100644
1317     --- a/net/netlink/af_netlink.c
1318     +++ b/net/netlink/af_netlink.c
1319     @@ -2683,6 +2683,7 @@ static int netlink_dump(struct sock *sk)
1320     struct sk_buff *skb = NULL;
1321     struct nlmsghdr *nlh;
1322     int len, err = -ENOBUFS;
1323     + int alloc_min_size;
1324     int alloc_size;
1325    
1326     mutex_lock(nlk->cb_mutex);
1327     @@ -2691,9 +2692,6 @@ static int netlink_dump(struct sock *sk)
1328     goto errout_skb;
1329     }
1330    
1331     - cb = &nlk->cb;
1332     - alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
1333     -
1334     if (!netlink_rx_is_mmaped(sk) &&
1335     atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
1336     goto errout_skb;
1337     @@ -2703,23 +2701,35 @@ static int netlink_dump(struct sock *sk)
1338     * to reduce number of system calls on dump operations, if user
1339     * ever provided a big enough buffer.
1340     */
1341     - if (alloc_size < nlk->max_recvmsg_len) {
1342     - skb = netlink_alloc_skb(sk,
1343     - nlk->max_recvmsg_len,
1344     - nlk->portid,
1345     + cb = &nlk->cb;
1346     + alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
1347     +
1348     + if (alloc_min_size < nlk->max_recvmsg_len) {
1349     + alloc_size = nlk->max_recvmsg_len;
1350     + skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
1351     GFP_KERNEL |
1352     __GFP_NOWARN |
1353     __GFP_NORETRY);
1354     - /* available room should be exact amount to avoid MSG_TRUNC */
1355     - if (skb)
1356     - skb_reserve(skb, skb_tailroom(skb) -
1357     - nlk->max_recvmsg_len);
1358     }
1359     - if (!skb)
1360     + if (!skb) {
1361     + alloc_size = alloc_min_size;
1362     skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
1363     GFP_KERNEL);
1364     + }
1365     if (!skb)
1366     goto errout_skb;
1367     +
1368     + /* Trim skb to allocated size. User is expected to provide buffer as
1369     + * large as max(min_dump_alloc, 16KiB (mac_recvmsg_len capped at
1370     + * netlink_recvmsg())). dump will pack as many smaller messages as
1371     + * could fit within the allocated skb. skb is typically allocated
1372     + * with larger space than required (could be as much as near 2x the
1373     + * requested size with align to next power of 2 approach). Allowing
1374     + * dump to use the excess space makes it difficult for a user to have a
1375     + * reasonable static buffer based on the expected largest dump of a
1376     + * single netdev. The outcome is MSG_TRUNC error.
1377     + */
1378     + skb_reserve(skb, skb_tailroom(skb) - alloc_size);
1379     netlink_skb_set_owner_r(skb, sk);
1380    
1381     len = cb->dump(skb, cb);
1382     diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
1383     index aa349514e4cb..eed562295c78 100644
1384     --- a/net/openvswitch/flow_table.c
1385     +++ b/net/openvswitch/flow_table.c
1386     @@ -92,7 +92,8 @@ struct sw_flow *ovs_flow_alloc(void)
1387    
1388     /* Initialize the default stat node. */
1389     stats = kmem_cache_alloc_node(flow_stats_cache,
1390     - GFP_KERNEL | __GFP_ZERO, 0);
1391     + GFP_KERNEL | __GFP_ZERO,
1392     + node_online(0) ? 0 : NUMA_NO_NODE);
1393     if (!stats)
1394     goto err;
1395    
1396     diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
1397     index 3f63ceac8e01..844dd85426dc 100644
1398     --- a/net/sched/act_mirred.c
1399     +++ b/net/sched/act_mirred.c
1400     @@ -166,6 +166,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
1401    
1402     skb2->skb_iif = skb->dev->ifindex;
1403     skb2->dev = dev;
1404     + skb_sender_cpu_clear(skb2);
1405     err = dev_queue_xmit(skb2);
1406    
1407     out:
1408     diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
1409     index f9f13a32ddb8..2873b8d65608 100644
1410     --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
1411     +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
1412     @@ -146,7 +146,8 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
1413     ctxt->read_hdr = head;
1414     pages_needed =
1415     min_t(int, pages_needed, rdma_read_max_sge(xprt, pages_needed));
1416     - read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);
1417     + read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
1418     + rs_length);
1419    
1420     for (pno = 0; pno < pages_needed; pno++) {
1421     int len = min_t(int, rs_length, PAGE_SIZE - pg_off);
1422     @@ -245,7 +246,8 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
1423     ctxt->direction = DMA_FROM_DEVICE;
1424     ctxt->frmr = frmr;
1425     pages_needed = min_t(int, pages_needed, xprt->sc_frmr_pg_list_len);
1426     - read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);
1427     + read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
1428     + rs_length);
1429    
1430     frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]);
1431     frmr->direction = DMA_FROM_DEVICE;
1432     diff --git a/net/tipc/msg.h b/net/tipc/msg.h
1433     index e1d3595e2ee9..4cbb0fbad046 100644
1434     --- a/net/tipc/msg.h
1435     +++ b/net/tipc/msg.h
1436     @@ -353,7 +353,7 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
1437     static inline u32 msg_importance(struct tipc_msg *m)
1438     {
1439     if (unlikely(msg_user(m) == MSG_FRAGMENTER))
1440     - return msg_bits(m, 5, 13, 0x7);
1441     + return msg_bits(m, 9, 0, 0x7);
1442     if (likely(msg_isdata(m) && !msg_errcode(m)))
1443     return msg_user(m);
1444     return TIPC_SYSTEM_IMPORTANCE;
1445     @@ -362,7 +362,7 @@ static inline u32 msg_importance(struct tipc_msg *m)
1446     static inline void msg_set_importance(struct tipc_msg *m, u32 i)
1447     {
1448     if (unlikely(msg_user(m) == MSG_FRAGMENTER))
1449     - msg_set_bits(m, 5, 13, 0x7, i);
1450     + msg_set_bits(m, 9, 0, 0x7, i);
1451     else if (likely(i < TIPC_SYSTEM_IMPORTANCE))
1452     msg_set_user(m, i);
1453     else
1454     diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
1455     index 06430598cf51..76e66695621c 100644
1456     --- a/net/unix/af_unix.c
1457     +++ b/net/unix/af_unix.c
1458     @@ -1938,6 +1938,11 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
1459     goto out;
1460     }
1461    
1462     + if (flags & MSG_PEEK)
1463     + skip = sk_peek_offset(sk, flags);
1464     + else
1465     + skip = 0;
1466     +
1467     do {
1468     int chunk;
1469     struct sk_buff *skb, *last;
1470     @@ -1984,7 +1989,6 @@ again:
1471     break;
1472     }
1473    
1474     - skip = sk_peek_offset(sk, flags);
1475     while (skip >= unix_skb_len(skb)) {
1476     skip -= unix_skb_len(skb);
1477     last = skb;
1478     @@ -2048,6 +2052,16 @@ again:
1479    
1480     sk_peek_offset_fwd(sk, chunk);
1481    
1482     + if (UNIXCB(skb).fp)
1483     + break;
1484     +
1485     + skip = 0;
1486     + last = skb;
1487     + unix_state_lock(sk);
1488     + skb = skb_peek_next(skb, &sk->sk_receive_queue);
1489     + if (skb)
1490     + goto again;
1491     + unix_state_unlock(sk);
1492     break;
1493     }
1494     } while (size);