Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.8/0103-4.8.4-all-fixes.patch



Revision 2843
Tue Nov 22 13:19:27 2016 UTC by niro
File size: 73868 byte(s)
-linux-4.8.4
1 niro 2843 diff --git a/MAINTAINERS b/MAINTAINERS
2     index f593300e310b..babaf8261941 100644
3     --- a/MAINTAINERS
4     +++ b/MAINTAINERS
5     @@ -12951,11 +12951,10 @@ F: arch/x86/xen/*swiotlb*
6     F: drivers/xen/*swiotlb*
7    
8     XFS FILESYSTEM
9     -P: Silicon Graphics Inc
10     M: Dave Chinner <david@fromorbit.com>
11     -M: xfs@oss.sgi.com
12     -L: xfs@oss.sgi.com
13     -W: http://oss.sgi.com/projects/xfs
14     +M: linux-xfs@vger.kernel.org
15     +L: linux-xfs@vger.kernel.org
16     +W: http://xfs.org/
17     T: git git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs.git
18     S: Supported
19     F: Documentation/filesystems/xfs.txt
20     diff --git a/Makefile b/Makefile
21     index 42eb45c86a42..82a36ab540a4 100644
22     --- a/Makefile
23     +++ b/Makefile
24     @@ -1,6 +1,6 @@
25     VERSION = 4
26     PATCHLEVEL = 8
27     -SUBLEVEL = 3
28     +SUBLEVEL = 4
29     EXTRAVERSION =
30     NAME = Psychotic Stoned Sheep
31    
32     diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
33     index d1ec7f6b31e0..e880dfa3fcd3 100644
34     --- a/arch/arc/include/asm/irqflags-arcv2.h
35     +++ b/arch/arc/include/asm/irqflags-arcv2.h
36     @@ -112,7 +112,7 @@ static inline long arch_local_save_flags(void)
37     */
38     temp = (1 << 5) |
39     ((!!(temp & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) |
40     - (temp & CLRI_STATUS_E_MASK);
41     + ((temp >> 1) & CLRI_STATUS_E_MASK);
42     return temp;
43     }
44    
45     diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
46     index 6c24faf48b16..62b59409a5d9 100644
47     --- a/arch/arc/kernel/intc-arcv2.c
48     +++ b/arch/arc/kernel/intc-arcv2.c
49     @@ -74,7 +74,7 @@ void arc_init_IRQ(void)
50     tmp = read_aux_reg(0xa);
51     tmp |= STATUS_AD_MASK | (irq_prio << 1);
52     tmp &= ~STATUS_IE_MASK;
53     - asm volatile("flag %0 \n"::"r"(tmp));
54     + asm volatile("kflag %0 \n"::"r"(tmp));
55     }
56    
57     static void arcv2_irq_mask(struct irq_data *data)
58     diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
59     index cc2f6dbd4303..5e24d880306c 100644
60     --- a/block/cfq-iosched.c
61     +++ b/block/cfq-iosched.c
62     @@ -3042,7 +3042,6 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
63     if (ktime_get_ns() < rq->fifo_time)
64     rq = NULL;
65    
66     - cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
67     return rq;
68     }
69    
70     @@ -3420,6 +3419,9 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
71     {
72     unsigned int max_dispatch;
73    
74     + if (cfq_cfqq_must_dispatch(cfqq))
75     + return true;
76     +
77     /*
78     * Drain async requests before we start sync IO
79     */
80     @@ -3511,15 +3513,20 @@ static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
81    
82     BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
83    
84     + rq = cfq_check_fifo(cfqq);
85     + if (rq)
86     + cfq_mark_cfqq_must_dispatch(cfqq);
87     +
88     if (!cfq_may_dispatch(cfqd, cfqq))
89     return false;
90    
91     /*
92     * follow expired path, else get first next available
93     */
94     - rq = cfq_check_fifo(cfqq);
95     if (!rq)
96     rq = cfqq->next_rq;
97     + else
98     + cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
99    
100     /*
101     * insert request into driver dispatch list
102     @@ -3989,7 +3996,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
103     * if the new request is sync, but the currently running queue is
104     * not, let the sync request have priority.
105     */
106     - if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
107     + if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
108     return true;
109    
110     /*
111     diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
112     index 08b3ac68952b..f83de99d7d71 100644
113     --- a/crypto/async_tx/async_pq.c
114     +++ b/crypto/async_tx/async_pq.c
115     @@ -368,8 +368,6 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
116    
117     dma_set_unmap(tx, unmap);
118     async_tx_submit(chan, tx, submit);
119     -
120     - return tx;
121     } else {
122     struct page *p_src = P(blocks, disks);
123     struct page *q_src = Q(blocks, disks);
124     @@ -424,9 +422,11 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
125     submit->cb_param = cb_param_orig;
126     submit->flags = flags_orig;
127     async_tx_sync_epilog(submit);
128     -
129     - return NULL;
130     + tx = NULL;
131     }
132     + dmaengine_unmap_put(unmap);
133     +
134     + return tx;
135     }
136     EXPORT_SYMBOL_GPL(async_syndrome_val);
137    
138     diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
139     index bac70995e064..12ad3e3a84e3 100644
140     --- a/crypto/ghash-generic.c
141     +++ b/crypto/ghash-generic.c
142     @@ -14,24 +14,13 @@
143    
144     #include <crypto/algapi.h>
145     #include <crypto/gf128mul.h>
146     +#include <crypto/ghash.h>
147     #include <crypto/internal/hash.h>
148     #include <linux/crypto.h>
149     #include <linux/init.h>
150     #include <linux/kernel.h>
151     #include <linux/module.h>
152    
153     -#define GHASH_BLOCK_SIZE 16
154     -#define GHASH_DIGEST_SIZE 16
155     -
156     -struct ghash_ctx {
157     - struct gf128mul_4k *gf128;
158     -};
159     -
160     -struct ghash_desc_ctx {
161     - u8 buffer[GHASH_BLOCK_SIZE];
162     - u32 bytes;
163     -};
164     -
165     static int ghash_init(struct shash_desc *desc)
166     {
167     struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
168     diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
169     index e1d5ea6d5e40..2accf784534e 100644
170     --- a/drivers/acpi/nfit/core.c
171     +++ b/drivers/acpi/nfit/core.c
172     @@ -2689,6 +2689,9 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
173    
174     dev_dbg(dev, "%s: event: %d\n", __func__, event);
175    
176     + if (event != NFIT_NOTIFY_UPDATE)
177     + return;
178     +
179     device_lock(dev);
180     if (!dev->driver) {
181     /* dev->driver may be null if we're being removed */
182     diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
183     index e894ded24d99..51d23f130d86 100644
184     --- a/drivers/acpi/nfit/nfit.h
185     +++ b/drivers/acpi/nfit/nfit.h
186     @@ -78,6 +78,10 @@ enum {
187     NFIT_ARS_TIMEOUT = 90,
188     };
189    
190     +enum nfit_root_notifiers {
191     + NFIT_NOTIFY_UPDATE = 0x80,
192     +};
193     +
194     struct nfit_spa {
195     struct list_head list;
196     struct nd_region *nd_region;
197     diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
198     index d799662f19eb..261420ddfe66 100644
199     --- a/drivers/base/dma-mapping.c
200     +++ b/drivers/base/dma-mapping.c
201     @@ -334,7 +334,7 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
202     return;
203     }
204    
205     - unmap_kernel_range((unsigned long)cpu_addr, size);
206     + unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
207     vunmap(cpu_addr);
208     }
209     #endif
210     diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
211     index 7fa42d6b2b92..f2303da7fda7 100644
212     --- a/drivers/clk/mvebu/cp110-system-controller.c
213     +++ b/drivers/clk/mvebu/cp110-system-controller.c
214     @@ -81,13 +81,6 @@ enum {
215     #define CP110_GATE_EIP150 25
216     #define CP110_GATE_EIP197 26
217    
218     -static struct clk *cp110_clks[CP110_CLK_NUM];
219     -
220     -static struct clk_onecell_data cp110_clk_data = {
221     - .clks = cp110_clks,
222     - .clk_num = CP110_CLK_NUM,
223     -};
224     -
225     struct cp110_gate_clk {
226     struct clk_hw hw;
227     struct regmap *regmap;
228     @@ -142,6 +135,8 @@ static struct clk *cp110_register_gate(const char *name,
229     if (!gate)
230     return ERR_PTR(-ENOMEM);
231    
232     + memset(&init, 0, sizeof(init));
233     +
234     init.name = name;
235     init.ops = &cp110_gate_ops;
236     init.parent_names = &parent_name;
237     @@ -194,7 +189,8 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev)
238     struct regmap *regmap;
239     struct device_node *np = pdev->dev.of_node;
240     const char *ppv2_name, *apll_name, *core_name, *eip_name, *nand_name;
241     - struct clk *clk;
242     + struct clk_onecell_data *cp110_clk_data;
243     + struct clk *clk, **cp110_clks;
244     u32 nand_clk_ctrl;
245     int i, ret;
246    
247     @@ -207,6 +203,20 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev)
248     if (ret)
249     return ret;
250    
251     + cp110_clks = devm_kcalloc(&pdev->dev, sizeof(struct clk *),
252     + CP110_CLK_NUM, GFP_KERNEL);
253     + if (!cp110_clks)
254     + return -ENOMEM;
255     +
256     + cp110_clk_data = devm_kzalloc(&pdev->dev,
257     + sizeof(*cp110_clk_data),
258     + GFP_KERNEL);
259     + if (!cp110_clk_data)
260     + return -ENOMEM;
261     +
262     + cp110_clk_data->clks = cp110_clks;
263     + cp110_clk_data->clk_num = CP110_CLK_NUM;
264     +
265     /* Register the APLL which is the root of the clk tree */
266     of_property_read_string_index(np, "core-clock-output-names",
267     CP110_CORE_APLL, &apll_name);
268     @@ -334,10 +344,12 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev)
269     cp110_clks[CP110_MAX_CORE_CLOCKS + i] = clk;
270     }
271    
272     - ret = of_clk_add_provider(np, cp110_of_clk_get, &cp110_clk_data);
273     + ret = of_clk_add_provider(np, cp110_of_clk_get, cp110_clk_data);
274     if (ret)
275     goto fail_clk_add;
276    
277     + platform_set_drvdata(pdev, cp110_clks);
278     +
279     return 0;
280    
281     fail_clk_add:
282     @@ -364,6 +376,7 @@ fail0:
283    
284     static int cp110_syscon_clk_remove(struct platform_device *pdev)
285     {
286     + struct clk **cp110_clks = platform_get_drvdata(pdev);
287     int i;
288    
289     of_clk_del_provider(pdev->dev.of_node);
290     diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
291     index 6c999cb01b80..27a94a119009 100644
292     --- a/drivers/crypto/vmx/ghash.c
293     +++ b/drivers/crypto/vmx/ghash.c
294     @@ -26,16 +26,13 @@
295     #include <linux/hardirq.h>
296     #include <asm/switch_to.h>
297     #include <crypto/aes.h>
298     +#include <crypto/ghash.h>
299     #include <crypto/scatterwalk.h>
300     #include <crypto/internal/hash.h>
301     #include <crypto/b128ops.h>
302    
303     #define IN_INTERRUPT in_interrupt()
304    
305     -#define GHASH_BLOCK_SIZE (16)
306     -#define GHASH_DIGEST_SIZE (16)
307     -#define GHASH_KEY_LEN (16)
308     -
309     void gcm_init_p8(u128 htable[16], const u64 Xi[2]);
310     void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]);
311     void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
312     @@ -55,16 +52,11 @@ struct p8_ghash_desc_ctx {
313    
314     static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
315     {
316     - const char *alg;
317     + const char *alg = "ghash-generic";
318     struct crypto_shash *fallback;
319     struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
320     struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
321    
322     - if (!(alg = crypto_tfm_alg_name(tfm))) {
323     - printk(KERN_ERR "Failed to get algorithm name.\n");
324     - return -ENOENT;
325     - }
326     -
327     fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
328     if (IS_ERR(fallback)) {
329     printk(KERN_ERR
330     @@ -78,10 +70,18 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
331     crypto_shash_set_flags(fallback,
332     crypto_shash_get_flags((struct crypto_shash
333     *) tfm));
334     - ctx->fallback = fallback;
335    
336     - shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx)
337     - + crypto_shash_descsize(fallback);
338     + /* Check if the descsize defined in the algorithm is still enough. */
339     + if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx)
340     + + crypto_shash_descsize(fallback)) {
341     + printk(KERN_ERR
342     + "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n",
343     + alg,
344     + shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx),
345     + crypto_shash_descsize(fallback));
346     + return -EINVAL;
347     + }
348     + ctx->fallback = fallback;
349    
350     return 0;
351     }
352     @@ -113,7 +113,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
353     {
354     struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm));
355    
356     - if (keylen != GHASH_KEY_LEN)
357     + if (keylen != GHASH_BLOCK_SIZE)
358     return -EINVAL;
359    
360     preempt_disable();
361     @@ -211,7 +211,8 @@ struct shash_alg p8_ghash_alg = {
362     .update = p8_ghash_update,
363     .final = p8_ghash_final,
364     .setkey = p8_ghash_setkey,
365     - .descsize = sizeof(struct p8_ghash_desc_ctx),
366     + .descsize = sizeof(struct p8_ghash_desc_ctx)
367     + + sizeof(struct ghash_desc_ctx),
368     .base = {
369     .cra_name = "ghash",
370     .cra_driver_name = "p8_ghash",
371     diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
372     index 7f0e93f87a55..88a39165edd5 100644
373     --- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
374     +++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
375     @@ -27,6 +27,16 @@
376    
377     #include "virtgpu_drv.h"
378    
379     +int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master)
380     +{
381     + struct pci_dev *pdev = dev->pdev;
382     +
383     + if (pdev) {
384     + return drm_pci_set_busid(dev, master);
385     + }
386     + return 0;
387     +}
388     +
389     static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
390     {
391     struct apertures_struct *ap;
392     diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
393     index c13f70cfc461..5820b7020ae5 100644
394     --- a/drivers/gpu/drm/virtio/virtgpu_drv.c
395     +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
396     @@ -117,6 +117,7 @@ static const struct file_operations virtio_gpu_driver_fops = {
397    
398     static struct drm_driver driver = {
399     .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
400     + .set_busid = drm_virtio_set_busid,
401     .load = virtio_gpu_driver_load,
402     .unload = virtio_gpu_driver_unload,
403     .open = virtio_gpu_driver_open,
404     diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
405     index b18ef3111f0c..acf556a35cb2 100644
406     --- a/drivers/gpu/drm/virtio/virtgpu_drv.h
407     +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
408     @@ -49,6 +49,7 @@
409     #define DRIVER_PATCHLEVEL 1
410    
411     /* virtgpu_drm_bus.c */
412     +int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master);
413     int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
414    
415     struct virtio_gpu_object {
416     diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
417     index 5da190e6011b..bcf76c33726b 100644
418     --- a/drivers/infiniband/hw/hfi1/rc.c
419     +++ b/drivers/infiniband/hw/hfi1/rc.c
420     @@ -932,8 +932,10 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
421     return;
422    
423     queue_ack:
424     - this_cpu_inc(*ibp->rvp.rc_qacks);
425     spin_lock_irqsave(&qp->s_lock, flags);
426     + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
427     + goto unlock;
428     + this_cpu_inc(*ibp->rvp.rc_qacks);
429     qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
430     qp->s_nak_state = qp->r_nak_state;
431     qp->s_ack_psn = qp->r_ack_psn;
432     @@ -942,6 +944,7 @@ queue_ack:
433    
434     /* Schedule the send tasklet. */
435     hfi1_schedule_send(qp);
436     +unlock:
437     spin_unlock_irqrestore(&qp->s_lock, flags);
438     }
439    
440     diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
441     index a039a5df6f21..fd9271bc1a11 100644
442     --- a/drivers/misc/mei/amthif.c
443     +++ b/drivers/misc/mei/amthif.c
444     @@ -67,8 +67,12 @@ int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
445     struct mei_cl *cl = &dev->iamthif_cl;
446     int ret;
447    
448     - if (mei_cl_is_connected(cl))
449     - return 0;
450     + mutex_lock(&dev->device_lock);
451     +
452     + if (mei_cl_is_connected(cl)) {
453     + ret = 0;
454     + goto out;
455     + }
456    
457     dev->iamthif_state = MEI_IAMTHIF_IDLE;
458    
459     @@ -77,11 +81,13 @@ int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
460     ret = mei_cl_link(cl);
461     if (ret < 0) {
462     dev_err(dev->dev, "amthif: failed cl_link %d\n", ret);
463     - return ret;
464     + goto out;
465     }
466    
467     ret = mei_cl_connect(cl, me_cl, NULL);
468    
469     +out:
470     + mutex_unlock(&dev->device_lock);
471     return ret;
472     }
473    
474     diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
475     index 1f33fea9299f..e094df3cf2d5 100644
476     --- a/drivers/misc/mei/bus.c
477     +++ b/drivers/misc/mei/bus.c
478     @@ -983,12 +983,10 @@ void mei_cl_bus_rescan_work(struct work_struct *work)
479     container_of(work, struct mei_device, bus_rescan_work);
480     struct mei_me_client *me_cl;
481    
482     - mutex_lock(&bus->device_lock);
483     me_cl = mei_me_cl_by_uuid(bus, &mei_amthif_guid);
484     if (me_cl)
485     mei_amthif_host_init(bus, me_cl);
486     mei_me_cl_put(me_cl);
487     - mutex_unlock(&bus->device_lock);
488    
489     mei_cl_bus_rescan(bus);
490     }
491     diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
492     index d0b3a1bb82ca..dad15b6c66dd 100644
493     --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
494     +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
495     @@ -11360,6 +11360,12 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
496    
497     dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
498    
499     + if (!pf) {
500     + dev_info(&pdev->dev,
501     + "Cannot recover - error happened during device probe\n");
502     + return PCI_ERS_RESULT_DISCONNECT;
503     + }
504     +
505     /* shutdown all operations */
506     if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
507     rtnl_lock();
508     diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
509     index 6808db433283..ec3a64e5d2bb 100644
510     --- a/drivers/net/wireless/ath/carl9170/debug.c
511     +++ b/drivers/net/wireless/ath/carl9170/debug.c
512     @@ -75,7 +75,8 @@ static ssize_t carl9170_debugfs_read(struct file *file, char __user *userbuf,
513    
514     if (!ar)
515     return -ENODEV;
516     - dfops = container_of(file->f_op, struct carl9170_debugfs_fops, fops);
517     + dfops = container_of(debugfs_real_fops(file),
518     + struct carl9170_debugfs_fops, fops);
519    
520     if (!dfops->read)
521     return -ENOSYS;
522     @@ -127,7 +128,8 @@ static ssize_t carl9170_debugfs_write(struct file *file,
523    
524     if (!ar)
525     return -ENODEV;
526     - dfops = container_of(file->f_op, struct carl9170_debugfs_fops, fops);
527     + dfops = container_of(debugfs_real_fops(file),
528     + struct carl9170_debugfs_fops, fops);
529    
530     if (!dfops->write)
531     return -ENOSYS;
532     diff --git a/drivers/net/wireless/broadcom/b43/debugfs.c b/drivers/net/wireless/broadcom/b43/debugfs.c
533     index b4bcd94aff6c..77046384dd80 100644
534     --- a/drivers/net/wireless/broadcom/b43/debugfs.c
535     +++ b/drivers/net/wireless/broadcom/b43/debugfs.c
536     @@ -524,7 +524,8 @@ static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf,
537     goto out_unlock;
538     }
539    
540     - dfops = container_of(file->f_op, struct b43_debugfs_fops, fops);
541     + dfops = container_of(debugfs_real_fops(file),
542     + struct b43_debugfs_fops, fops);
543     if (!dfops->read) {
544     err = -ENOSYS;
545     goto out_unlock;
546     @@ -585,7 +586,8 @@ static ssize_t b43_debugfs_write(struct file *file,
547     goto out_unlock;
548     }
549    
550     - dfops = container_of(file->f_op, struct b43_debugfs_fops, fops);
551     + dfops = container_of(debugfs_real_fops(file),
552     + struct b43_debugfs_fops, fops);
553     if (!dfops->write) {
554     err = -ENOSYS;
555     goto out_unlock;
556     diff --git a/drivers/net/wireless/broadcom/b43legacy/debugfs.c b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
557     index 090910ea259e..82ef56ed7ca1 100644
558     --- a/drivers/net/wireless/broadcom/b43legacy/debugfs.c
559     +++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
560     @@ -221,7 +221,8 @@ static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf,
561     goto out_unlock;
562     }
563    
564     - dfops = container_of(file->f_op, struct b43legacy_debugfs_fops, fops);
565     + dfops = container_of(debugfs_real_fops(file),
566     + struct b43legacy_debugfs_fops, fops);
567     if (!dfops->read) {
568     err = -ENOSYS;
569     goto out_unlock;
570     @@ -287,7 +288,8 @@ static ssize_t b43legacy_debugfs_write(struct file *file,
571     goto out_unlock;
572     }
573    
574     - dfops = container_of(file->f_op, struct b43legacy_debugfs_fops, fops);
575     + dfops = container_of(debugfs_real_fops(file),
576     + struct b43legacy_debugfs_fops, fops);
577     if (!dfops->write) {
578     err = -ENOSYS;
579     goto out_unlock;
580     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
581     index b8aec5e5ef93..abaf003a5b39 100644
582     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
583     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
584     @@ -2533,7 +2533,7 @@ static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si)
585     WL_BSS_INFO_MAX);
586     if (err) {
587     brcmf_err("Failed to get bss info (%d)\n", err);
588     - return;
589     + goto out_kfree;
590     }
591     si->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
592     si->bss_param.beacon_interval = le16_to_cpu(buf->bss_le.beacon_period);
593     @@ -2545,6 +2545,9 @@ static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si)
594     si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
595     if (capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
596     si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
597     +
598     +out_kfree:
599     + kfree(buf);
600     }
601    
602     static s32
603     @@ -3884,11 +3887,11 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
604     if (!check_vif_up(ifp->vif))
605     return -EIO;
606    
607     - brcmf_dbg(CONN, "del_pmksa - PMK bssid = %pM\n", &pmksa->bssid);
608     + brcmf_dbg(CONN, "del_pmksa - PMK bssid = %pM\n", pmksa->bssid);
609    
610     npmk = le32_to_cpu(cfg->pmk_list.npmk);
611     for (i = 0; i < npmk; i++)
612     - if (!memcmp(&pmksa->bssid, &pmk[i].bssid, ETH_ALEN))
613     + if (!memcmp(pmksa->bssid, pmk[i].bssid, ETH_ALEN))
614     break;
615    
616     if ((npmk > 0) && (i < npmk)) {
617     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
618     index 7e269f9aa607..63664442e687 100644
619     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
620     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
621     @@ -234,13 +234,20 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u16 flowid,
622    
623     void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid)
624     {
625     + struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
626     struct brcmf_flowring_ring *ring;
627     + struct brcmf_if *ifp;
628     u16 hash_idx;
629     + u8 ifidx;
630     struct sk_buff *skb;
631    
632     ring = flow->rings[flowid];
633     if (!ring)
634     return;
635     +
636     + ifidx = brcmf_flowring_ifidx_get(flow, flowid);
637     + ifp = brcmf_get_ifp(bus_if->drvr, ifidx);
638     +
639     brcmf_flowring_block(flow, flowid, false);
640     hash_idx = ring->hash_id;
641     flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
642     @@ -249,7 +256,7 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid)
643    
644     skb = skb_dequeue(&ring->skblist);
645     while (skb) {
646     - brcmu_pkt_buf_free_skb(skb);
647     + brcmf_txfinalize(ifp, skb, false);
648     skb = skb_dequeue(&ring->skblist);
649     }
650    
651     diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
652     index 7640498964a5..3d53d636b17b 100644
653     --- a/drivers/scsi/arcmsr/arcmsr_hba.c
654     +++ b/drivers/scsi/arcmsr/arcmsr_hba.c
655     @@ -2388,15 +2388,23 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
656     }
657     case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
658     unsigned char *ver_addr;
659     - int32_t user_len, cnt2end;
660     + uint32_t user_len;
661     + int32_t cnt2end;
662     uint8_t *pQbuffer, *ptmpuserbuffer;
663     +
664     + user_len = pcmdmessagefld->cmdmessage.Length;
665     + if (user_len > ARCMSR_API_DATA_BUFLEN) {
666     + retvalue = ARCMSR_MESSAGE_FAIL;
667     + goto message_out;
668     + }
669     +
670     ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
671     if (!ver_addr) {
672     retvalue = ARCMSR_MESSAGE_FAIL;
673     goto message_out;
674     }
675     ptmpuserbuffer = ver_addr;
676     - user_len = pcmdmessagefld->cmdmessage.Length;
677     +
678     memcpy(ptmpuserbuffer,
679     pcmdmessagefld->messagedatabuffer, user_len);
680     spin_lock_irqsave(&acb->wqbuffer_lock, flags);
681     diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
682     index ab67ec4b6bd6..79c9860a165f 100644
683     --- a/drivers/scsi/ibmvscsi/ibmvfc.c
684     +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
685     @@ -717,7 +717,6 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
686     spin_lock_irqsave(vhost->host->host_lock, flags);
687     vhost->state = IBMVFC_NO_CRQ;
688     vhost->logged_in = 0;
689     - ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
690    
691     /* Clean out the queue */
692     memset(crq->msgs, 0, PAGE_SIZE);
693     diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
694     index e19969614203..b022f5a01e63 100644
695     --- a/drivers/tty/serial/8250/8250_dw.c
696     +++ b/drivers/tty/serial/8250/8250_dw.c
697     @@ -462,7 +462,7 @@ static int dw8250_probe(struct platform_device *pdev)
698     }
699    
700     data->pclk = devm_clk_get(&pdev->dev, "apb_pclk");
701     - if (IS_ERR(data->clk) && PTR_ERR(data->clk) == -EPROBE_DEFER) {
702     + if (IS_ERR(data->pclk) && PTR_ERR(data->pclk) == -EPROBE_DEFER) {
703     err = -EPROBE_DEFER;
704     goto err_clk;
705     }
706     diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
707     index bdfa659b9606..858a54633664 100644
708     --- a/drivers/tty/serial/8250/8250_port.c
709     +++ b/drivers/tty/serial/8250/8250_port.c
710     @@ -1414,12 +1414,8 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
711     if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
712     serial8250_clear_fifos(p);
713    
714     - serial8250_rpm_get(p);
715     -
716     p->ier |= UART_IER_RLSI | UART_IER_RDI;
717     serial_port_out(&p->port, UART_IER, p->ier);
718     -
719     - serial8250_rpm_put(p);
720     }
721     }
722    
723     @@ -1429,6 +1425,7 @@ static void serial8250_em485_handle_stop_tx(unsigned long arg)
724     struct uart_8250_em485 *em485 = p->em485;
725     unsigned long flags;
726    
727     + serial8250_rpm_get(p);
728     spin_lock_irqsave(&p->port.lock, flags);
729     if (em485 &&
730     em485->active_timer == &em485->stop_tx_timer) {
731     @@ -1436,6 +1433,7 @@ static void serial8250_em485_handle_stop_tx(unsigned long arg)
732     em485->active_timer = NULL;
733     }
734     spin_unlock_irqrestore(&p->port.lock, flags);
735     + serial8250_rpm_put(p);
736     }
737    
738     static void __stop_tx_rs485(struct uart_8250_port *p)
739     @@ -1475,7 +1473,7 @@ static inline void __stop_tx(struct uart_8250_port *p)
740     unsigned char lsr = serial_in(p, UART_LSR);
741     /*
742     * To provide required timeing and allow FIFO transfer,
743     - * __stop_tx_rs485 must be called only when both FIFO and
744     + * __stop_tx_rs485() must be called only when both FIFO and
745     * shift register are empty. It is for device driver to enable
746     * interrupt on TEMT.
747     */
748     @@ -1484,9 +1482,10 @@ static inline void __stop_tx(struct uart_8250_port *p)
749    
750     del_timer(&em485->start_tx_timer);
751     em485->active_timer = NULL;
752     +
753     + __stop_tx_rs485(p);
754     }
755     __do_stop_tx(p);
756     - __stop_tx_rs485(p);
757     }
758    
759     static void serial8250_stop_tx(struct uart_port *port)
760     diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
761     index 2eaa18ddef61..8bbde52db376 100644
762     --- a/drivers/tty/serial/atmel_serial.c
763     +++ b/drivers/tty/serial/atmel_serial.c
764     @@ -1929,6 +1929,9 @@ static void atmel_shutdown(struct uart_port *port)
765     {
766     struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
767    
768     + /* Disable modem control lines interrupts */
769     + atmel_disable_ms(port);
770     +
771     /* Disable interrupts at device level */
772     atmel_uart_writel(port, ATMEL_US_IDR, -1);
773    
774     @@ -1979,8 +1982,6 @@ static void atmel_shutdown(struct uart_port *port)
775     */
776     free_irq(port->irq, port);
777    
778     - atmel_port->ms_irq_enabled = false;
779     -
780     atmel_flush_buffer(port);
781     }
782    
783     diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
784     index 0df2b1c091ae..615c0279a1a6 100644
785     --- a/drivers/tty/serial/imx.c
786     +++ b/drivers/tty/serial/imx.c
787     @@ -740,12 +740,13 @@ static unsigned int imx_get_hwmctrl(struct imx_port *sport)
788     {
789     unsigned int tmp = TIOCM_DSR;
790     unsigned usr1 = readl(sport->port.membase + USR1);
791     + unsigned usr2 = readl(sport->port.membase + USR2);
792    
793     if (usr1 & USR1_RTSS)
794     tmp |= TIOCM_CTS;
795    
796     /* in DCE mode DCDIN is always 0 */
797     - if (!(usr1 & USR2_DCDIN))
798     + if (!(usr2 & USR2_DCDIN))
799     tmp |= TIOCM_CAR;
800    
801     if (sport->dte_mode)
802     diff --git a/fs/attr.c b/fs/attr.c
803     index 42bb42bb3c72..3c42cab06b5d 100644
804     --- a/fs/attr.c
805     +++ b/fs/attr.c
806     @@ -202,6 +202,21 @@ int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **de
807     return -EPERM;
808     }
809    
810     + /*
811     + * If utimes(2) and friends are called with times == NULL (or both
812     + * times are UTIME_NOW), then we need to check for write permission
813     + */
814     + if (ia_valid & ATTR_TOUCH) {
815     + if (IS_IMMUTABLE(inode))
816     + return -EPERM;
817     +
818     + if (!inode_owner_or_capable(inode)) {
819     + error = inode_permission(inode, MAY_WRITE);
820     + if (error)
821     + return error;
822     + }
823     + }
824     +
825     if ((ia_valid & ATTR_MODE)) {
826     umode_t amode = attr->ia_mode;
827     /* Flag setting protected by i_mutex */
828     diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
829     index 431fd7ee3488..e44271dfceb6 100644
830     --- a/fs/autofs4/waitq.c
831     +++ b/fs/autofs4/waitq.c
832     @@ -431,8 +431,8 @@ int autofs4_wait(struct autofs_sb_info *sbi,
833     memcpy(&wq->name, &qstr, sizeof(struct qstr));
834     wq->dev = autofs4_get_dev(sbi);
835     wq->ino = autofs4_get_ino(sbi);
836     - wq->uid = current_uid();
837     - wq->gid = current_gid();
838     + wq->uid = current_real_cred()->uid;
839     + wq->gid = current_real_cred()->gid;
840     wq->pid = pid;
841     wq->tgid = tgid;
842     wq->status = -EINTR; /* Status return if interrupted */
843     diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
844     index 029db6e1105c..60a850ee8c78 100644
845     --- a/fs/btrfs/compression.c
846     +++ b/fs/btrfs/compression.c
847     @@ -698,7 +698,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
848    
849     ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
850     if (ret) {
851     - bio->bi_error = ret;
852     + comp_bio->bi_error = ret;
853     bio_endio(comp_bio);
854     }
855    
856     @@ -728,7 +728,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
857    
858     ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
859     if (ret) {
860     - bio->bi_error = ret;
861     + comp_bio->bi_error = ret;
862     bio_endio(comp_bio);
863     }
864    
865     diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
866     index 33fe03551105..791e47ce9d27 100644
867     --- a/fs/btrfs/ctree.h
868     +++ b/fs/btrfs/ctree.h
869     @@ -251,7 +251,8 @@ struct btrfs_super_block {
870     #define BTRFS_FEATURE_COMPAT_SAFE_CLEAR 0ULL
871    
872     #define BTRFS_FEATURE_COMPAT_RO_SUPP \
873     - (BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE)
874     + (BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE | \
875     + BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID)
876    
877     #define BTRFS_FEATURE_COMPAT_RO_SAFE_SET 0ULL
878     #define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR 0ULL
879     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
880     index 54bc8c7c6bcd..3dede6d53bad 100644
881     --- a/fs/btrfs/disk-io.c
882     +++ b/fs/btrfs/disk-io.c
883     @@ -2566,6 +2566,7 @@ int open_ctree(struct super_block *sb,
884     int num_backups_tried = 0;
885     int backup_index = 0;
886     int max_active;
887     + int clear_free_space_tree = 0;
888    
889     tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
890     chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
891     @@ -3129,6 +3130,26 @@ retry_root_backup:
892     if (sb->s_flags & MS_RDONLY)
893     return 0;
894    
895     + if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
896     + btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
897     + clear_free_space_tree = 1;
898     + } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
899     + !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
900     + btrfs_warn(fs_info, "free space tree is invalid");
901     + clear_free_space_tree = 1;
902     + }
903     +
904     + if (clear_free_space_tree) {
905     + btrfs_info(fs_info, "clearing free space tree");
906     + ret = btrfs_clear_free_space_tree(fs_info);
907     + if (ret) {
908     + btrfs_warn(fs_info,
909     + "failed to clear free space tree: %d", ret);
910     + close_ctree(tree_root);
911     + return ret;
912     + }
913     + }
914     +
915     if (btrfs_test_opt(tree_root->fs_info, FREE_SPACE_TREE) &&
916     !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
917     btrfs_info(fs_info, "creating free space tree");
918     @@ -3166,18 +3187,6 @@ retry_root_backup:
919    
920     btrfs_qgroup_rescan_resume(fs_info);
921    
922     - if (btrfs_test_opt(tree_root->fs_info, CLEAR_CACHE) &&
923     - btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
924     - btrfs_info(fs_info, "clearing free space tree");
925     - ret = btrfs_clear_free_space_tree(fs_info);
926     - if (ret) {
927     - btrfs_warn(fs_info,
928     - "failed to clear free space tree: %d", ret);
929     - close_ctree(tree_root);
930     - return ret;
931     - }
932     - }
933     -
934     if (!fs_info->uuid_root) {
935     btrfs_info(fs_info, "creating UUID tree");
936     ret = btrfs_create_uuid_tree(fs_info);
937     diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
938     index 44fe66b53c8b..c3ec30dea9a5 100644
939     --- a/fs/btrfs/extent_io.c
940     +++ b/fs/btrfs/extent_io.c
941     @@ -5524,17 +5524,45 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
942     }
943     }
944    
945     -/*
946     - * The extent buffer bitmap operations are done with byte granularity because
947     - * bitmap items are not guaranteed to be aligned to a word and therefore a
948     - * single word in a bitmap may straddle two pages in the extent buffer.
949     - */
950     -#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
951     -#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
952     -#define BITMAP_FIRST_BYTE_MASK(start) \
953     - ((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
954     -#define BITMAP_LAST_BYTE_MASK(nbits) \
955     - (BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
956     +void le_bitmap_set(u8 *map, unsigned int start, int len)
957     +{
958     + u8 *p = map + BIT_BYTE(start);
959     + const unsigned int size = start + len;
960     + int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE);
961     + u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(start);
962     +
963     + while (len - bits_to_set >= 0) {
964     + *p |= mask_to_set;
965     + len -= bits_to_set;
966     + bits_to_set = BITS_PER_BYTE;
967     + mask_to_set = ~(u8)0;
968     + p++;
969     + }
970     + if (len) {
971     + mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
972     + *p |= mask_to_set;
973     + }
974     +}
975     +
976     +void le_bitmap_clear(u8 *map, unsigned int start, int len)
977     +{
978     + u8 *p = map + BIT_BYTE(start);
979     + const unsigned int size = start + len;
980     + int bits_to_clear = BITS_PER_BYTE - (start % BITS_PER_BYTE);
981     + u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(start);
982     +
983     + while (len - bits_to_clear >= 0) {
984     + *p &= ~mask_to_clear;
985     + len -= bits_to_clear;
986     + bits_to_clear = BITS_PER_BYTE;
987     + mask_to_clear = ~(u8)0;
988     + p++;
989     + }
990     + if (len) {
991     + mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
992     + *p &= ~mask_to_clear;
993     + }
994     +}
995    
996     /*
997     * eb_bitmap_offset() - calculate the page and offset of the byte containing the
998     @@ -5578,7 +5606,7 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb,
999     int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
1000     unsigned long nr)
1001     {
1002     - char *kaddr;
1003     + u8 *kaddr;
1004     struct page *page;
1005     unsigned long i;
1006     size_t offset;
1007     @@ -5600,13 +5628,13 @@ int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
1008     void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
1009     unsigned long pos, unsigned long len)
1010     {
1011     - char *kaddr;
1012     + u8 *kaddr;
1013     struct page *page;
1014     unsigned long i;
1015     size_t offset;
1016     const unsigned int size = pos + len;
1017     int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
1018     - unsigned int mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
1019     + u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
1020    
1021     eb_bitmap_offset(eb, start, pos, &i, &offset);
1022     page = eb->pages[i];
1023     @@ -5617,7 +5645,7 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
1024     kaddr[offset] |= mask_to_set;
1025     len -= bits_to_set;
1026     bits_to_set = BITS_PER_BYTE;
1027     - mask_to_set = ~0U;
1028     + mask_to_set = ~(u8)0;
1029     if (++offset >= PAGE_SIZE && len > 0) {
1030     offset = 0;
1031     page = eb->pages[++i];
1032     @@ -5642,13 +5670,13 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
1033     void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
1034     unsigned long pos, unsigned long len)
1035     {
1036     - char *kaddr;
1037     + u8 *kaddr;
1038     struct page *page;
1039     unsigned long i;
1040     size_t offset;
1041     const unsigned int size = pos + len;
1042     int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
1043     - unsigned int mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
1044     + u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
1045    
1046     eb_bitmap_offset(eb, start, pos, &i, &offset);
1047     page = eb->pages[i];
1048     @@ -5659,7 +5687,7 @@ void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
1049     kaddr[offset] &= ~mask_to_clear;
1050     len -= bits_to_clear;
1051     bits_to_clear = BITS_PER_BYTE;
1052     - mask_to_clear = ~0U;
1053     + mask_to_clear = ~(u8)0;
1054     if (++offset >= PAGE_SIZE && len > 0) {
1055     offset = 0;
1056     page = eb->pages[++i];
1057     diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
1058     index 28cd88fccc7e..1cf4e4226fc8 100644
1059     --- a/fs/btrfs/extent_io.h
1060     +++ b/fs/btrfs/extent_io.h
1061     @@ -59,6 +59,28 @@
1062     */
1063     #define EXTENT_PAGE_PRIVATE 1
1064    
1065     +/*
1066     + * The extent buffer bitmap operations are done with byte granularity instead of
1067     + * word granularity for two reasons:
1068     + * 1. The bitmaps must be little-endian on disk.
1069     + * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
1070     + * single word in a bitmap may straddle two pages in the extent buffer.
1071     + */
1072     +#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
1073     +#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
1074     +#define BITMAP_FIRST_BYTE_MASK(start) \
1075     + ((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
1076     +#define BITMAP_LAST_BYTE_MASK(nbits) \
1077     + (BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
1078     +
1079     +static inline int le_test_bit(int nr, const u8 *addr)
1080     +{
1081     + return 1U & (addr[BIT_BYTE(nr)] >> (nr & (BITS_PER_BYTE-1)));
1082     +}
1083     +
1084     +extern void le_bitmap_set(u8 *map, unsigned int start, int len);
1085     +extern void le_bitmap_clear(u8 *map, unsigned int start, int len);
1086     +
1087     struct extent_state;
1088     struct btrfs_root;
1089     struct btrfs_io_bio;
1090     diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
1091     index 87e7e3d3e676..ea605ffd0e03 100644
1092     --- a/fs/btrfs/free-space-tree.c
1093     +++ b/fs/btrfs/free-space-tree.c
1094     @@ -151,7 +151,7 @@ static inline u32 free_space_bitmap_size(u64 size, u32 sectorsize)
1095     return DIV_ROUND_UP((u32)div_u64(size, sectorsize), BITS_PER_BYTE);
1096     }
1097    
1098     -static unsigned long *alloc_bitmap(u32 bitmap_size)
1099     +static u8 *alloc_bitmap(u32 bitmap_size)
1100     {
1101     void *mem;
1102    
1103     @@ -180,8 +180,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
1104     struct btrfs_free_space_info *info;
1105     struct btrfs_key key, found_key;
1106     struct extent_buffer *leaf;
1107     - unsigned long *bitmap;
1108     - char *bitmap_cursor;
1109     + u8 *bitmap, *bitmap_cursor;
1110     u64 start, end;
1111     u64 bitmap_range, i;
1112     u32 bitmap_size, flags, expected_extent_count;
1113     @@ -231,7 +230,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
1114     block_group->sectorsize);
1115     last = div_u64(found_key.objectid + found_key.offset - start,
1116     block_group->sectorsize);
1117     - bitmap_set(bitmap, first, last - first);
1118     + le_bitmap_set(bitmap, first, last - first);
1119    
1120     extent_count++;
1121     nr++;
1122     @@ -269,7 +268,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
1123     goto out;
1124     }
1125    
1126     - bitmap_cursor = (char *)bitmap;
1127     + bitmap_cursor = bitmap;
1128     bitmap_range = block_group->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
1129     i = start;
1130     while (i < end) {
1131     @@ -318,7 +317,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
1132     struct btrfs_free_space_info *info;
1133     struct btrfs_key key, found_key;
1134     struct extent_buffer *leaf;
1135     - unsigned long *bitmap;
1136     + u8 *bitmap;
1137     u64 start, end;
1138     /* Initialize to silence GCC. */
1139     u64 extent_start = 0;
1140     @@ -362,7 +361,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
1141     break;
1142     } else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
1143     unsigned long ptr;
1144     - char *bitmap_cursor;
1145     + u8 *bitmap_cursor;
1146     u32 bitmap_pos, data_size;
1147    
1148     ASSERT(found_key.objectid >= start);
1149     @@ -372,7 +371,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
1150     bitmap_pos = div_u64(found_key.objectid - start,
1151     block_group->sectorsize *
1152     BITS_PER_BYTE);
1153     - bitmap_cursor = ((char *)bitmap) + bitmap_pos;
1154     + bitmap_cursor = bitmap + bitmap_pos;
1155     data_size = free_space_bitmap_size(found_key.offset,
1156     block_group->sectorsize);
1157    
1158     @@ -409,7 +408,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
1159     offset = start;
1160     bitnr = 0;
1161     while (offset < end) {
1162     - bit = !!test_bit(bitnr, bitmap);
1163     + bit = !!le_test_bit(bitnr, bitmap);
1164     if (prev_bit == 0 && bit == 1) {
1165     extent_start = offset;
1166     } else if (prev_bit == 1 && bit == 0) {
1167     @@ -1183,6 +1182,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
1168     }
1169    
1170     btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
1171     + btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
1172     fs_info->creating_free_space_tree = 0;
1173    
1174     ret = btrfs_commit_transaction(trans, tree_root);
1175     @@ -1251,6 +1251,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
1176     return PTR_ERR(trans);
1177    
1178     btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE);
1179     + btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
1180     fs_info->free_space_root = NULL;
1181    
1182     ret = clear_free_space_tree(trans, free_space_root);
1183     diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
1184     index ce5f345d70f5..e7f16a77a22a 100644
1185     --- a/fs/cachefiles/interface.c
1186     +++ b/fs/cachefiles/interface.c
1187     @@ -253,6 +253,8 @@ static void cachefiles_drop_object(struct fscache_object *_object)
1188     struct cachefiles_object *object;
1189     struct cachefiles_cache *cache;
1190     const struct cred *saved_cred;
1191     + struct inode *inode;
1192     + blkcnt_t i_blocks = 0;
1193    
1194     ASSERT(_object);
1195    
1196     @@ -279,6 +281,10 @@ static void cachefiles_drop_object(struct fscache_object *_object)
1197     _object != cache->cache.fsdef
1198     ) {
1199     _debug("- retire object OBJ%x", object->fscache.debug_id);
1200     + inode = d_backing_inode(object->dentry);
1201     + if (inode)
1202     + i_blocks = inode->i_blocks;
1203     +
1204     cachefiles_begin_secure(cache, &saved_cred);
1205     cachefiles_delete_object(cache, object);
1206     cachefiles_end_secure(cache, saved_cred);
1207     @@ -292,7 +298,7 @@ static void cachefiles_drop_object(struct fscache_object *_object)
1208    
1209     /* note that the object is now inactive */
1210     if (test_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags))
1211     - cachefiles_mark_object_inactive(cache, object);
1212     + cachefiles_mark_object_inactive(cache, object, i_blocks);
1213    
1214     dput(object->dentry);
1215     object->dentry = NULL;
1216     diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
1217     index 2fcde1a34b7c..cd1effee8a49 100644
1218     --- a/fs/cachefiles/internal.h
1219     +++ b/fs/cachefiles/internal.h
1220     @@ -160,7 +160,8 @@ extern char *cachefiles_cook_key(const u8 *raw, int keylen, uint8_t type);
1221     * namei.c
1222     */
1223     extern void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
1224     - struct cachefiles_object *object);
1225     + struct cachefiles_object *object,
1226     + blkcnt_t i_blocks);
1227     extern int cachefiles_delete_object(struct cachefiles_cache *cache,
1228     struct cachefiles_object *object);
1229     extern int cachefiles_walk_to_object(struct cachefiles_object *parent,
1230     diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
1231     index 3f7c2cd41f8f..c6ee4b5fb7e6 100644
1232     --- a/fs/cachefiles/namei.c
1233     +++ b/fs/cachefiles/namei.c
1234     @@ -261,10 +261,9 @@ requeue:
1235     * Mark an object as being inactive.
1236     */
1237     void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
1238     - struct cachefiles_object *object)
1239     + struct cachefiles_object *object,
1240     + blkcnt_t i_blocks)
1241     {
1242     - blkcnt_t i_blocks = d_backing_inode(object->dentry)->i_blocks;
1243     -
1244     write_lock(&cache->active_lock);
1245     rb_erase(&object->active_node, &cache->active_nodes);
1246     clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
1247     @@ -707,7 +706,8 @@ mark_active_timed_out:
1248    
1249     check_error:
1250     _debug("check error %d", ret);
1251     - cachefiles_mark_object_inactive(cache, object);
1252     + cachefiles_mark_object_inactive(
1253     + cache, object, d_backing_inode(object->dentry)->i_blocks);
1254     release_dentry:
1255     dput(object->dentry);
1256     object->dentry = NULL;
1257     diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
1258     index 592059f88e04..309f4e9b2419 100644
1259     --- a/fs/debugfs/file.c
1260     +++ b/fs/debugfs/file.c
1261     @@ -97,9 +97,6 @@ EXPORT_SYMBOL_GPL(debugfs_use_file_finish);
1262    
1263     #define F_DENTRY(filp) ((filp)->f_path.dentry)
1264    
1265     -#define REAL_FOPS_DEREF(dentry) \
1266     - ((const struct file_operations *)(dentry)->d_fsdata)
1267     -
1268     static int open_proxy_open(struct inode *inode, struct file *filp)
1269     {
1270     const struct dentry *dentry = F_DENTRY(filp);
1271     @@ -112,7 +109,7 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
1272     goto out;
1273     }
1274    
1275     - real_fops = REAL_FOPS_DEREF(dentry);
1276     + real_fops = debugfs_real_fops(filp);
1277     real_fops = fops_get(real_fops);
1278     if (!real_fops) {
1279     /* Huh? Module did not clean up after itself at exit? */
1280     @@ -143,7 +140,7 @@ static ret_type full_proxy_ ## name(proto) \
1281     { \
1282     const struct dentry *dentry = F_DENTRY(filp); \
1283     const struct file_operations *real_fops = \
1284     - REAL_FOPS_DEREF(dentry); \
1285     + debugfs_real_fops(filp); \
1286     int srcu_idx; \
1287     ret_type r; \
1288     \
1289     @@ -176,7 +173,7 @@ static unsigned int full_proxy_poll(struct file *filp,
1290     struct poll_table_struct *wait)
1291     {
1292     const struct dentry *dentry = F_DENTRY(filp);
1293     - const struct file_operations *real_fops = REAL_FOPS_DEREF(dentry);
1294     + const struct file_operations *real_fops = debugfs_real_fops(filp);
1295     int srcu_idx;
1296     unsigned int r = 0;
1297    
1298     @@ -193,7 +190,7 @@ static unsigned int full_proxy_poll(struct file *filp,
1299     static int full_proxy_release(struct inode *inode, struct file *filp)
1300     {
1301     const struct dentry *dentry = F_DENTRY(filp);
1302     - const struct file_operations *real_fops = REAL_FOPS_DEREF(dentry);
1303     + const struct file_operations *real_fops = debugfs_real_fops(filp);
1304     const struct file_operations *proxy_fops = filp->f_op;
1305     int r = 0;
1306    
1307     @@ -241,7 +238,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
1308     goto out;
1309     }
1310    
1311     - real_fops = REAL_FOPS_DEREF(dentry);
1312     + real_fops = debugfs_real_fops(filp);
1313     real_fops = fops_get(real_fops);
1314     if (!real_fops) {
1315     /* Huh? Module did not cleanup after itself at exit? */
1316     diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
1317     index 963016c8f3d1..609998de533e 100644
1318     --- a/fs/dlm/lowcomms.c
1319     +++ b/fs/dlm/lowcomms.c
1320     @@ -1656,16 +1656,12 @@ void dlm_lowcomms_stop(void)
1321     mutex_lock(&connections_lock);
1322     dlm_allow_conn = 0;
1323     foreach_conn(stop_conn);
1324     + clean_writequeues();
1325     + foreach_conn(free_conn);
1326     mutex_unlock(&connections_lock);
1327    
1328     work_stop();
1329    
1330     - mutex_lock(&connections_lock);
1331     - clean_writequeues();
1332     -
1333     - foreach_conn(free_conn);
1334     -
1335     - mutex_unlock(&connections_lock);
1336     kmem_cache_destroy(con_cache);
1337     }
1338    
1339     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
1340     index d7ccb7f51dfc..7f69347bd5a5 100644
1341     --- a/fs/ext4/extents.c
1342     +++ b/fs/ext4/extents.c
1343     @@ -5734,6 +5734,9 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
1344     up_write(&EXT4_I(inode)->i_data_sem);
1345     goto out_stop;
1346     }
1347     + } else {
1348     + ext4_ext_drop_refs(path);
1349     + kfree(path);
1350     }
1351    
1352     ret = ext4_es_remove_extent(inode, offset_lblk,
1353     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1354     index c6ea25a190f8..f4cdc647ecfc 100644
1355     --- a/fs/ext4/inode.c
1356     +++ b/fs/ext4/inode.c
1357     @@ -647,11 +647,19 @@ found:
1358     /*
1359     * We have to zeroout blocks before inserting them into extent
1360     * status tree. Otherwise someone could look them up there and
1361     - * use them before they are really zeroed.
1362     + * use them before they are really zeroed. We also have to
1363     + * unmap metadata before zeroing as otherwise writeback can
1364     + * overwrite zeros with stale data from block device.
1365     */
1366     if (flags & EXT4_GET_BLOCKS_ZERO &&
1367     map->m_flags & EXT4_MAP_MAPPED &&
1368     map->m_flags & EXT4_MAP_NEW) {
1369     + ext4_lblk_t i;
1370     +
1371     + for (i = 0; i < map->m_len; i++) {
1372     + unmap_underlying_metadata(inode->i_sb->s_bdev,
1373     + map->m_pblk + i);
1374     + }
1375     ret = ext4_issue_zeroout(inode, map->m_lblk,
1376     map->m_pblk, map->m_len);
1377     if (ret) {
1378     @@ -1649,6 +1657,8 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1379     BUG_ON(!PageLocked(page));
1380     BUG_ON(PageWriteback(page));
1381     if (invalidate) {
1382     + if (page_mapped(page))
1383     + clear_page_dirty_for_io(page);
1384     block_invalidatepage(page, 0, PAGE_SIZE);
1385     ClearPageUptodate(page);
1386     }
1387     @@ -3890,7 +3900,7 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
1388     }
1389    
1390     /*
1391     - * ext4_punch_hole: punches a hole in a file by releaseing the blocks
1392     + * ext4_punch_hole: punches a hole in a file by releasing the blocks
1393     * associated with the given offset and length
1394     *
1395     * @inode: File inode
1396     @@ -3919,7 +3929,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
1397     * Write out all dirty pages to avoid race conditions
1398     * Then release them.
1399     */
1400     - if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
1401     + if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
1402     ret = filemap_write_and_wait_range(mapping, offset,
1403     offset + length - 1);
1404     if (ret)
1405     @@ -4814,14 +4824,14 @@ static int ext4_do_update_inode(handle_t *handle,
1406     * Fix up interoperability with old kernels. Otherwise, old inodes get
1407     * re-used with the upper 16 bits of the uid/gid intact
1408     */
1409     - if (!ei->i_dtime) {
1410     + if (ei->i_dtime && list_empty(&ei->i_orphan)) {
1411     + raw_inode->i_uid_high = 0;
1412     + raw_inode->i_gid_high = 0;
1413     + } else {
1414     raw_inode->i_uid_high =
1415     cpu_to_le16(high_16_bits(i_uid));
1416     raw_inode->i_gid_high =
1417     cpu_to_le16(high_16_bits(i_gid));
1418     - } else {
1419     - raw_inode->i_uid_high = 0;
1420     - raw_inode->i_gid_high = 0;
1421     }
1422     } else {
1423     raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
1424     diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
1425     index a920c5d29fac..6fc14def0c70 100644
1426     --- a/fs/ext4/move_extent.c
1427     +++ b/fs/ext4/move_extent.c
1428     @@ -598,6 +598,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
1429     return -EOPNOTSUPP;
1430     }
1431    
1432     + if (ext4_encrypted_inode(orig_inode) ||
1433     + ext4_encrypted_inode(donor_inode)) {
1434     + ext4_msg(orig_inode->i_sb, KERN_ERR,
1435     + "Online defrag not supported for encrypted files");
1436     + return -EOPNOTSUPP;
1437     + }
1438     +
1439     /* Protect orig and donor inodes against a truncate */
1440     lock_two_nondirectories(orig_inode, donor_inode);
1441    
1442     diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
1443     index 34c0142caf6a..7e2f8c3c11ce 100644
1444     --- a/fs/ext4/namei.c
1445     +++ b/fs/ext4/namei.c
1446     @@ -2044,33 +2044,31 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
1447     frame->entries = entries;
1448     frame->at = entries;
1449     frame->bh = bh;
1450     - bh = bh2;
1451    
1452     retval = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
1453     if (retval)
1454     goto out_frames;
1455     - retval = ext4_handle_dirty_dirent_node(handle, dir, bh);
1456     + retval = ext4_handle_dirty_dirent_node(handle, dir, bh2);
1457     if (retval)
1458     goto out_frames;
1459    
1460     - de = do_split(handle,dir, &bh, frame, &fname->hinfo);
1461     + de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
1462     if (IS_ERR(de)) {
1463     retval = PTR_ERR(de);
1464     goto out_frames;
1465     }
1466     - dx_release(frames);
1467    
1468     - retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh);
1469     - brelse(bh);
1470     - return retval;
1471     + retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh2);
1472     out_frames:
1473     /*
1474     * Even if the block split failed, we have to properly write
1475     * out all the changes we did so far. Otherwise we can end up
1476     * with corrupted filesystem.
1477     */
1478     - ext4_mark_inode_dirty(handle, dir);
1479     + if (retval)
1480     + ext4_mark_inode_dirty(handle, dir);
1481     dx_release(frames);
1482     + brelse(bh2);
1483     return retval;
1484     }
1485    
1486     diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
1487     index 4d83d9e05f2e..04a7850a0d45 100644
1488     --- a/fs/ext4/symlink.c
1489     +++ b/fs/ext4/symlink.c
1490     @@ -65,13 +65,12 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
1491     res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
1492     if (res)
1493     goto errout;
1494     + paddr = pstr.name;
1495    
1496     res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
1497     if (res < 0)
1498     goto errout;
1499    
1500     - paddr = pstr.name;
1501     -
1502     /* Null-terminate the name */
1503     if (res <= pstr.len)
1504     paddr[res] = '\0';
1505     diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
1506     index c47b7780ce37..4ff9251e9d3a 100644
1507     --- a/fs/fuse/dir.c
1508     +++ b/fs/fuse/dir.c
1509     @@ -1702,14 +1702,46 @@ error:
1510     static int fuse_setattr(struct dentry *entry, struct iattr *attr)
1511     {
1512     struct inode *inode = d_inode(entry);
1513     + struct file *file = (attr->ia_valid & ATTR_FILE) ? attr->ia_file : NULL;
1514     + int ret;
1515    
1516     if (!fuse_allow_current_process(get_fuse_conn(inode)))
1517     return -EACCES;
1518    
1519     - if (attr->ia_valid & ATTR_FILE)
1520     - return fuse_do_setattr(inode, attr, attr->ia_file);
1521     - else
1522     - return fuse_do_setattr(inode, attr, NULL);
1523     + if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) {
1524     + int kill;
1525     +
1526     + attr->ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID |
1527     + ATTR_MODE);
1528     + /*
1529     + * ia_mode calculation may have used stale i_mode. Refresh and
1530     + * recalculate.
1531     + */
1532     + ret = fuse_do_getattr(inode, NULL, file);
1533     + if (ret)
1534     + return ret;
1535     +
1536     + attr->ia_mode = inode->i_mode;
1537     + kill = should_remove_suid(entry);
1538     + if (kill & ATTR_KILL_SUID) {
1539     + attr->ia_valid |= ATTR_MODE;
1540     + attr->ia_mode &= ~S_ISUID;
1541     + }
1542     + if (kill & ATTR_KILL_SGID) {
1543     + attr->ia_valid |= ATTR_MODE;
1544     + attr->ia_mode &= ~S_ISGID;
1545     + }
1546     + }
1547     + if (!attr->ia_valid)
1548     + return 0;
1549     +
1550     + ret = fuse_do_setattr(inode, attr, file);
1551     + if (!ret) {
1552     + /* Directory mode changed, may need to revalidate access */
1553     + if (d_is_dir(entry) && (attr->ia_valid & ATTR_MODE))
1554     + fuse_invalidate_entry_cache(entry);
1555     + }
1556     + return ret;
1557     }
1558    
1559     static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
1560     @@ -1801,6 +1833,23 @@ static ssize_t fuse_getxattr(struct dentry *entry, struct inode *inode,
1561     return ret;
1562     }
1563    
1564     +static int fuse_verify_xattr_list(char *list, size_t size)
1565     +{
1566     + size_t origsize = size;
1567     +
1568     + while (size) {
1569     + size_t thislen = strnlen(list, size);
1570     +
1571     + if (!thislen || thislen == size)
1572     + return -EIO;
1573     +
1574     + size -= thislen + 1;
1575     + list += thislen + 1;
1576     + }
1577     +
1578     + return origsize;
1579     +}
1580     +
1581     static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
1582     {
1583     struct inode *inode = d_inode(entry);
1584     @@ -1836,6 +1885,8 @@ static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
1585     ret = fuse_simple_request(fc, &args);
1586     if (!ret && !size)
1587     ret = outarg.size;
1588     + if (ret > 0 && size)
1589     + ret = fuse_verify_xattr_list(list, ret);
1590     if (ret == -ENOSYS) {
1591     fc->no_listxattr = 1;
1592     ret = -EOPNOTSUPP;
1593     diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
1594     index b5bc3e249163..3d8246a9faa4 100644
1595     --- a/fs/jbd2/transaction.c
1596     +++ b/fs/jbd2/transaction.c
1597     @@ -159,6 +159,7 @@ static void wait_transaction_locked(journal_t *journal)
1598     read_unlock(&journal->j_state_lock);
1599     if (need_to_start)
1600     jbd2_log_start_commit(journal, tid);
1601     + jbd2_might_wait_for_commit(journal);
1602     schedule();
1603     finish_wait(&journal->j_wait_transaction_locked, &wait);
1604     }
1605     @@ -182,8 +183,6 @@ static int add_transaction_credits(journal_t *journal, int blocks,
1606     int needed;
1607     int total = blocks + rsv_blocks;
1608    
1609     - jbd2_might_wait_for_commit(journal);
1610     -
1611     /*
1612     * If the current transaction is locked down for commit, wait
1613     * for the lock to be released.
1614     @@ -214,6 +213,7 @@ static int add_transaction_credits(journal_t *journal, int blocks,
1615     if (atomic_read(&journal->j_reserved_credits) + total >
1616     journal->j_max_transaction_buffers) {
1617     read_unlock(&journal->j_state_lock);
1618     + jbd2_might_wait_for_commit(journal);
1619     wait_event(journal->j_wait_reserved,
1620     atomic_read(&journal->j_reserved_credits) + total <=
1621     journal->j_max_transaction_buffers);
1622     @@ -238,6 +238,7 @@ static int add_transaction_credits(journal_t *journal, int blocks,
1623     if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) {
1624     atomic_sub(total, &t->t_outstanding_credits);
1625     read_unlock(&journal->j_state_lock);
1626     + jbd2_might_wait_for_commit(journal);
1627     write_lock(&journal->j_state_lock);
1628     if (jbd2_log_space_left(journal) < jbd2_space_needed(journal))
1629     __jbd2_log_wait_for_space(journal);
1630     @@ -255,6 +256,7 @@ static int add_transaction_credits(journal_t *journal, int blocks,
1631     sub_reserved_credits(journal, rsv_blocks);
1632     atomic_sub(total, &t->t_outstanding_credits);
1633     read_unlock(&journal->j_state_lock);
1634     + jbd2_might_wait_for_commit(journal);
1635     wait_event(journal->j_wait_reserved,
1636     atomic_read(&journal->j_reserved_credits) + rsv_blocks
1637     <= journal->j_max_transaction_buffers / 2);
1638     diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
1639     index 7a4a85a6821e..74d5ddd26296 100644
1640     --- a/fs/reiserfs/super.c
1641     +++ b/fs/reiserfs/super.c
1642     @@ -190,7 +190,15 @@ static int remove_save_link_only(struct super_block *s,
1643     static int reiserfs_quota_on_mount(struct super_block *, int);
1644     #endif
1645    
1646     -/* look for uncompleted unlinks and truncates and complete them */
1647     +/*
1648     + * Look for uncompleted unlinks and truncates and complete them
1649     + *
1650     + * Called with the superblock write lock held. If quotas are enabled, we have
1651     + * to release/retake that lock around dquot_quota_on_mount(); otherwise it
1652     + * proceeds to schedule_on_each_cpu() in invalidate_bdev() and deadlocks
1653     + * waiting for the per-cpu worklets to complete flush_async_commits(), which
1654     + * in turn wait for the superblock write lock.
1655     + */
1656     static int finish_unfinished(struct super_block *s)
1657     {
1658     INITIALIZE_PATH(path);
1659     @@ -237,7 +245,9 @@ static int finish_unfinished(struct super_block *s)
1660     quota_enabled[i] = 0;
1661     continue;
1662     }
1663     + reiserfs_write_unlock(s);
1664     ret = reiserfs_quota_on_mount(s, i);
1665     + reiserfs_write_lock(s);
1666     if (ret < 0)
1667     reiserfs_warning(s, "reiserfs-2500",
1668     "cannot turn on journaled "
1669     diff --git a/fs/utimes.c b/fs/utimes.c
1670     index 794f5f5b1fb5..ba54b9e648c9 100644
1671     --- a/fs/utimes.c
1672     +++ b/fs/utimes.c
1673     @@ -87,21 +87,7 @@ static int utimes_common(struct path *path, struct timespec *times)
1674     */
1675     newattrs.ia_valid |= ATTR_TIMES_SET;
1676     } else {
1677     - /*
1678     - * If times is NULL (or both times are UTIME_NOW),
1679     - * then we need to check permissions, because
1680     - * inode_change_ok() won't do it.
1681     - */
1682     - error = -EPERM;
1683     - if (IS_IMMUTABLE(inode))
1684     - goto mnt_drop_write_and_out;
1685     -
1686     - error = -EACCES;
1687     - if (!inode_owner_or_capable(inode)) {
1688     - error = inode_permission(inode, MAY_WRITE);
1689     - if (error)
1690     - goto mnt_drop_write_and_out;
1691     - }
1692     + newattrs.ia_valid |= ATTR_TOUCH;
1693     }
1694     retry_deleg:
1695     inode_lock(inode);
1696     @@ -113,7 +99,6 @@ retry_deleg:
1697     goto retry_deleg;
1698     }
1699    
1700     -mnt_drop_write_and_out:
1701     mnt_drop_write(path->mnt);
1702     out:
1703     return error;
1704     diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h
1705     new file mode 100644
1706     index 000000000000..2a61c9bbab8f
1707     --- /dev/null
1708     +++ b/include/crypto/ghash.h
1709     @@ -0,0 +1,23 @@
1710     +/*
1711     + * Common values for GHASH algorithms
1712     + */
1713     +
1714     +#ifndef __CRYPTO_GHASH_H__
1715     +#define __CRYPTO_GHASH_H__
1716     +
1717     +#include <linux/types.h>
1718     +#include <crypto/gf128mul.h>
1719     +
1720     +#define GHASH_BLOCK_SIZE 16
1721     +#define GHASH_DIGEST_SIZE 16
1722     +
1723     +struct ghash_ctx {
1724     + struct gf128mul_4k *gf128;
1725     +};
1726     +
1727     +struct ghash_desc_ctx {
1728     + u8 buffer[GHASH_BLOCK_SIZE];
1729     + u32 bytes;
1730     +};
1731     +
1732     +#endif
1733     diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
1734     index 1438e2322d5c..4d3f0d1aec73 100644
1735     --- a/include/linux/debugfs.h
1736     +++ b/include/linux/debugfs.h
1737     @@ -45,6 +45,23 @@ extern struct dentry *arch_debugfs_dir;
1738    
1739     extern struct srcu_struct debugfs_srcu;
1740    
1741     +/**
1742     + * debugfs_real_fops - getter for the real file operation
1743     + * @filp: a pointer to a struct file
1744     + *
1745     + * Must only be called under the protection established by
1746     + * debugfs_use_file_start().
1747     + */
1748     +static inline const struct file_operations *debugfs_real_fops(struct file *filp)
1749     + __must_hold(&debugfs_srcu)
1750     +{
1751     + /*
1752     + * Neither the pointer to the struct file_operations, nor its
1753     + * contents ever change -- srcu_dereference() is not needed here.
1754     + */
1755     + return filp->f_path.dentry->d_fsdata;
1756     +}
1757     +
1758     #if defined(CONFIG_DEBUG_FS)
1759    
1760     struct dentry *debugfs_create_file(const char *name, umode_t mode,
1761     diff --git a/include/linux/fs.h b/include/linux/fs.h
1762     index 901e25d495cc..7c391366fb43 100644
1763     --- a/include/linux/fs.h
1764     +++ b/include/linux/fs.h
1765     @@ -224,6 +224,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
1766     #define ATTR_KILL_PRIV (1 << 14)
1767     #define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */
1768     #define ATTR_TIMES_SET (1 << 16)
1769     +#define ATTR_TOUCH (1 << 17)
1770    
1771     /*
1772     * Whiteout is represented by a char device. The following constants define the
1773     diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
1774     index 4c45105dece3..52b97db93830 100644
1775     --- a/include/linux/radix-tree.h
1776     +++ b/include/linux/radix-tree.h
1777     @@ -280,9 +280,9 @@ bool __radix_tree_delete_node(struct radix_tree_root *root,
1778     struct radix_tree_node *node);
1779     void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
1780     void *radix_tree_delete(struct radix_tree_root *, unsigned long);
1781     -struct radix_tree_node *radix_tree_replace_clear_tags(
1782     - struct radix_tree_root *root,
1783     - unsigned long index, void *entry);
1784     +void radix_tree_clear_tags(struct radix_tree_root *root,
1785     + struct radix_tree_node *node,
1786     + void **slot);
1787     unsigned int radix_tree_gang_lookup(struct radix_tree_root *root,
1788     void **results, unsigned long first_index,
1789     unsigned int max_items);
1790     diff --git a/include/linux/sem.h b/include/linux/sem.h
1791     index 976ce3a19f1b..d0efd6e6c20a 100644
1792     --- a/include/linux/sem.h
1793     +++ b/include/linux/sem.h
1794     @@ -21,6 +21,7 @@ struct sem_array {
1795     struct list_head list_id; /* undo requests on this array */
1796     int sem_nsems; /* no. of semaphores in array */
1797     int complex_count; /* pending complex operations */
1798     + bool complex_mode; /* no parallel simple ops */
1799     };
1800    
1801     #ifdef CONFIG_SYSVIPC
1802     diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
1803     index ac5eacd3055b..db4c253f8011 100644
1804     --- a/include/uapi/linux/btrfs.h
1805     +++ b/include/uapi/linux/btrfs.h
1806     @@ -239,7 +239,17 @@ struct btrfs_ioctl_fs_info_args {
1807     * Used by:
1808     * struct btrfs_ioctl_feature_flags
1809     */
1810     -#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE (1ULL << 0)
1811     +#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE (1ULL << 0)
1812     +/*
1813     + * Older kernels (< 4.9) on big-endian systems produced broken free space tree
1814     + * bitmaps, and btrfs-progs also used to corrupt the free space tree (versions
1815     + * < 4.7.3). If this bit is clear, then the free space tree cannot be trusted.
1816     + * btrfs-progs can also intentionally clear this bit to ask the kernel to
1817     + * rebuild the free space tree, however this might not work on older kernels
1818     + * that do not know about this bit. If not sure, clear the cache manually on
1819     + * first mount when booting older kernel versions.
1820     + */
1821     +#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID (1ULL << 1)
1822    
1823     #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0)
1824     #define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1)
1825     diff --git a/ipc/sem.c b/ipc/sem.c
1826     index 7c9d4f7683c0..5e318c5f749d 100644
1827     --- a/ipc/sem.c
1828     +++ b/ipc/sem.c
1829     @@ -162,14 +162,21 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
1830    
1831     /*
1832     * Locking:
1833     + * a) global sem_lock() for read/write
1834     * sem_undo.id_next,
1835     * sem_array.complex_count,
1836     - * sem_array.pending{_alter,_cont},
1837     - * sem_array.sem_undo: global sem_lock() for read/write
1838     - * sem_undo.proc_next: only "current" is allowed to read/write that field.
1839     + * sem_array.complex_mode
1840     + * sem_array.pending{_alter,_const},
1841     + * sem_array.sem_undo
1842     *
1843     + * b) global or semaphore sem_lock() for read/write:
1844     * sem_array.sem_base[i].pending_{const,alter}:
1845     - * global or semaphore sem_lock() for read/write
1846     + * sem_array.complex_mode (for read)
1847     + *
1848     + * c) special:
1849     + * sem_undo_list.list_proc:
1850     + * * undo_list->lock for write
1851     + * * rcu for read
1852     */
1853    
1854     #define sc_semmsl sem_ctls[0]
1855     @@ -260,30 +267,61 @@ static void sem_rcu_free(struct rcu_head *head)
1856     }
1857    
1858     /*
1859     - * Wait until all currently ongoing simple ops have completed.
1860     + * Enter the mode suitable for non-simple operations:
1861     * Caller must own sem_perm.lock.
1862     - * New simple ops cannot start, because simple ops first check
1863     - * that sem_perm.lock is free.
1864     - * that a) sem_perm.lock is free and b) complex_count is 0.
1865     */
1866     -static void sem_wait_array(struct sem_array *sma)
1867     +static void complexmode_enter(struct sem_array *sma)
1868     {
1869     int i;
1870     struct sem *sem;
1871    
1872     - if (sma->complex_count) {
1873     - /* The thread that increased sma->complex_count waited on
1874     - * all sem->lock locks. Thus we don't need to wait again.
1875     - */
1876     + if (sma->complex_mode) {
1877     + /* We are already in complex_mode. Nothing to do */
1878     return;
1879     }
1880    
1881     + /* We need a full barrier after setting complex_mode:
1882     + * The write to complex_mode must be visible
1883     + * before we read the first sem->lock spinlock state.
1884     + */
1885     + smp_store_mb(sma->complex_mode, true);
1886     +
1887     for (i = 0; i < sma->sem_nsems; i++) {
1888     sem = sma->sem_base + i;
1889     spin_unlock_wait(&sem->lock);
1890     }
1891     + /*
1892     + * spin_unlock_wait() is not a memory barrier, it is only a
1893     + * control barrier. The code must pair with spin_unlock(&sem->lock),
1894     + * thus just the control barrier is insufficient.
1895     + *
1896     + * smp_rmb() is sufficient, as writes cannot pass the control barrier.
1897     + */
1898     + smp_rmb();
1899     +}
1900     +
1901     +/*
1902     + * Try to leave the mode that disallows simple operations:
1903     + * Caller must own sem_perm.lock.
1904     + */
1905     +static void complexmode_tryleave(struct sem_array *sma)
1906     +{
1907     + if (sma->complex_count) {
1908     + /* Complex ops are sleeping.
1909     + * We must stay in complex mode
1910     + */
1911     + return;
1912     + }
1913     + /*
1914     + * Immediately after setting complex_mode to false,
1915     + * a simple op can start. Thus: all memory writes
1916     + * performed by the current operation must be visible
1917     + * before we set complex_mode to false.
1918     + */
1919     + smp_store_release(&sma->complex_mode, false);
1920     }
1921    
1922     +#define SEM_GLOBAL_LOCK (-1)
1923     /*
1924     * If the request contains only one semaphore operation, and there are
1925     * no complex transactions pending, lock only the semaphore involved.
1926     @@ -300,56 +338,42 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
1927     /* Complex operation - acquire a full lock */
1928     ipc_lock_object(&sma->sem_perm);
1929    
1930     - /* And wait until all simple ops that are processed
1931     - * right now have dropped their locks.
1932     - */
1933     - sem_wait_array(sma);
1934     - return -1;
1935     + /* Prevent parallel simple ops */
1936     + complexmode_enter(sma);
1937     + return SEM_GLOBAL_LOCK;
1938     }
1939    
1940     /*
1941     * Only one semaphore affected - try to optimize locking.
1942     - * The rules are:
1943     - * - optimized locking is possible if no complex operation
1944     - * is either enqueued or processed right now.
1945     - * - The test for enqueued complex ops is simple:
1946     - * sma->complex_count != 0
1947     - * - Testing for complex ops that are processed right now is
1948     - * a bit more difficult. Complex ops acquire the full lock
1949     - * and first wait that the running simple ops have completed.
1950     - * (see above)
1951     - * Thus: If we own a simple lock and the global lock is free
1952     - * and complex_count is now 0, then it will stay 0 and
1953     - * thus just locking sem->lock is sufficient.
1954     + * Optimized locking is possible if no complex operation
1955     + * is either enqueued or processed right now.
1956     + *
1957     + * Both facts are tracked by complex_mode.
1958     */
1959     sem = sma->sem_base + sops->sem_num;
1960    
1961     - if (sma->complex_count == 0) {
1962     + /*
1963     + * Initial check for complex_mode. Just an optimization,
1964     + * no locking, no memory barrier.
1965     + */
1966     + if (!sma->complex_mode) {
1967     /*
1968     * It appears that no complex operation is around.
1969     * Acquire the per-semaphore lock.
1970     */
1971     spin_lock(&sem->lock);
1972    
1973     - /* Then check that the global lock is free */
1974     - if (!spin_is_locked(&sma->sem_perm.lock)) {
1975     - /*
1976     - * We need a memory barrier with acquire semantics,
1977     - * otherwise we can race with another thread that does:
1978     - * complex_count++;
1979     - * spin_unlock(sem_perm.lock);
1980     - */
1981     - smp_acquire__after_ctrl_dep();
1982     + /*
1983     + * See 51d7d5205d33
1984     + * ("powerpc: Add smp_mb() to arch_spin_is_locked()"):
1985     + * A full barrier is required: the write of sem->lock
1986     + * must be visible before the read is executed
1987     + */
1988     + smp_mb();
1989    
1990     - /*
1991     - * Now repeat the test of complex_count:
1992     - * It can't change anymore until we drop sem->lock.
1993     - * Thus: if is now 0, then it will stay 0.
1994     - */
1995     - if (sma->complex_count == 0) {
1996     - /* fast path successful! */
1997     - return sops->sem_num;
1998     - }
1999     + if (!smp_load_acquire(&sma->complex_mode)) {
2000     + /* fast path successful! */
2001     + return sops->sem_num;
2002     }
2003     spin_unlock(&sem->lock);
2004     }
2005     @@ -369,15 +393,16 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
2006     /* Not a false alarm, thus complete the sequence for a
2007     * full lock.
2008     */
2009     - sem_wait_array(sma);
2010     - return -1;
2011     + complexmode_enter(sma);
2012     + return SEM_GLOBAL_LOCK;
2013     }
2014     }
2015    
2016     static inline void sem_unlock(struct sem_array *sma, int locknum)
2017     {
2018     - if (locknum == -1) {
2019     + if (locknum == SEM_GLOBAL_LOCK) {
2020     unmerge_queues(sma);
2021     + complexmode_tryleave(sma);
2022     ipc_unlock_object(&sma->sem_perm);
2023     } else {
2024     struct sem *sem = sma->sem_base + locknum;
2025     @@ -529,6 +554,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
2026     }
2027    
2028     sma->complex_count = 0;
2029     + sma->complex_mode = true; /* dropped by sem_unlock below */
2030     INIT_LIST_HEAD(&sma->pending_alter);
2031     INIT_LIST_HEAD(&sma->pending_const);
2032     INIT_LIST_HEAD(&sma->list_id);
2033     @@ -2184,10 +2210,10 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2034     /*
2035     * The proc interface isn't aware of sem_lock(), it calls
2036     * ipc_lock_object() directly (in sysvipc_find_ipc).
2037     - * In order to stay compatible with sem_lock(), we must wait until
2038     - * all simple semop() calls have left their critical regions.
2039     + * In order to stay compatible with sem_lock(), we must
2040     + * enter / leave complex_mode.
2041     */
2042     - sem_wait_array(sma);
2043     + complexmode_enter(sma);
2044    
2045     sem_otime = get_semotime(sma);
2046    
2047     @@ -2204,6 +2230,8 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2048     sem_otime,
2049     sma->sem_ctime);
2050    
2051     + complexmode_tryleave(sma);
2052     +
2053     return 0;
2054     }
2055     #endif
2056     diff --git a/lib/radix-tree.c b/lib/radix-tree.c
2057     index 91f0727e3cad..8e6d552c40dd 100644
2058     --- a/lib/radix-tree.c
2059     +++ b/lib/radix-tree.c
2060     @@ -1583,15 +1583,10 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
2061     }
2062     EXPORT_SYMBOL(radix_tree_delete);
2063    
2064     -struct radix_tree_node *radix_tree_replace_clear_tags(
2065     - struct radix_tree_root *root,
2066     - unsigned long index, void *entry)
2067     +void radix_tree_clear_tags(struct radix_tree_root *root,
2068     + struct radix_tree_node *node,
2069     + void **slot)
2070     {
2071     - struct radix_tree_node *node;
2072     - void **slot;
2073     -
2074     - __radix_tree_lookup(root, index, &node, &slot);
2075     -
2076     if (node) {
2077     unsigned int tag, offset = get_slot_offset(node, slot);
2078     for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
2079     @@ -1600,9 +1595,6 @@ struct radix_tree_node *radix_tree_replace_clear_tags(
2080     /* Clear root node tags */
2081     root->gfp_mask &= __GFP_BITS_MASK;
2082     }
2083     -
2084     - radix_tree_replace_slot(slot, entry);
2085     - return node;
2086     }
2087    
2088     /**
2089     diff --git a/mm/filemap.c b/mm/filemap.c
2090     index 2d0986a64f1f..ced9ef6c06b0 100644
2091     --- a/mm/filemap.c
2092     +++ b/mm/filemap.c
2093     @@ -169,33 +169,35 @@ static int page_cache_tree_insert(struct address_space *mapping,
2094     static void page_cache_tree_delete(struct address_space *mapping,
2095     struct page *page, void *shadow)
2096     {
2097     - struct radix_tree_node *node;
2098     int i, nr = PageHuge(page) ? 1 : hpage_nr_pages(page);
2099    
2100     VM_BUG_ON_PAGE(!PageLocked(page), page);
2101     VM_BUG_ON_PAGE(PageTail(page), page);
2102     VM_BUG_ON_PAGE(nr != 1 && shadow, page);
2103    
2104     - if (shadow) {
2105     - mapping->nrexceptional += nr;
2106     - /*
2107     - * Make sure the nrexceptional update is committed before
2108     - * the nrpages update so that final truncate racing
2109     - * with reclaim does not see both counters 0 at the
2110     - * same time and miss a shadow entry.
2111     - */
2112     - smp_wmb();
2113     - }
2114     - mapping->nrpages -= nr;
2115     -
2116     for (i = 0; i < nr; i++) {
2117     - node = radix_tree_replace_clear_tags(&mapping->page_tree,
2118     - page->index + i, shadow);
2119     + struct radix_tree_node *node;
2120     + void **slot;
2121     +
2122     + __radix_tree_lookup(&mapping->page_tree, page->index + i,
2123     + &node, &slot);
2124     +
2125     + radix_tree_clear_tags(&mapping->page_tree, node, slot);
2126     +
2127     if (!node) {
2128     VM_BUG_ON_PAGE(nr != 1, page);
2129     - return;
2130     + /*
2131     + * We need a node to properly account shadow
2132     + * entries. Don't plant any without. XXX
2133     + */
2134     + shadow = NULL;
2135     }
2136    
2137     + radix_tree_replace_slot(slot, shadow);
2138     +
2139     + if (!node)
2140     + break;
2141     +
2142     workingset_node_pages_dec(node);
2143     if (shadow)
2144     workingset_node_shadows_inc(node);
2145     @@ -219,6 +221,18 @@ static void page_cache_tree_delete(struct address_space *mapping,
2146     &node->private_list);
2147     }
2148     }
2149     +
2150     + if (shadow) {
2151     + mapping->nrexceptional += nr;
2152     + /*
2153     + * Make sure the nrexceptional update is committed before
2154     + * the nrpages update so that final truncate racing
2155     + * with reclaim does not see both counters 0 at the
2156     + * same time and miss a shadow entry.
2157     + */
2158     + smp_wmb();
2159     + }
2160     + mapping->nrpages -= nr;
2161     }
2162    
2163     /*
2164     @@ -619,7 +633,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
2165     __delete_from_page_cache(old, NULL);
2166     error = page_cache_tree_insert(mapping, new, NULL);
2167     BUG_ON(error);
2168     - mapping->nrpages++;
2169    
2170     /*
2171     * hugetlb pages do not participate in page cache accounting.
2172     @@ -1674,6 +1687,10 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
2173     unsigned int prev_offset;
2174     int error = 0;
2175    
2176     + if (unlikely(*ppos >= inode->i_sb->s_maxbytes))
2177     + return -EINVAL;
2178     + iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
2179     +
2180     index = *ppos >> PAGE_SHIFT;
2181     prev_index = ra->prev_pos >> PAGE_SHIFT;
2182     prev_offset = ra->prev_pos & (PAGE_SIZE-1);
2183     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2184     index 87e11d8ad536..603bdd01ec2c 100644
2185     --- a/mm/hugetlb.c
2186     +++ b/mm/hugetlb.c
2187     @@ -1443,13 +1443,14 @@ static void dissolve_free_huge_page(struct page *page)
2188     {
2189     spin_lock(&hugetlb_lock);
2190     if (PageHuge(page) && !page_count(page)) {
2191     - struct hstate *h = page_hstate(page);
2192     - int nid = page_to_nid(page);
2193     - list_del(&page->lru);
2194     + struct page *head = compound_head(page);
2195     + struct hstate *h = page_hstate(head);
2196     + int nid = page_to_nid(head);
2197     + list_del(&head->lru);
2198     h->free_huge_pages--;
2199     h->free_huge_pages_node[nid]--;
2200     h->max_huge_pages--;
2201     - update_and_free_page(h, page);
2202     + update_and_free_page(h, head);
2203     }
2204     spin_unlock(&hugetlb_lock);
2205     }
2206     @@ -1457,7 +1458,8 @@ static void dissolve_free_huge_page(struct page *page)
2207     /*
2208     * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2209     * make specified memory blocks removable from the system.
2210     - * Note that start_pfn should aligned with (minimum) hugepage size.
2211     + * Note that this will dissolve a free gigantic hugepage completely, if any
2212     + * part of it lies within the given range.
2213     */
2214     void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
2215     {
2216     @@ -1466,7 +1468,6 @@ void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
2217     if (!hugepages_supported())
2218     return;
2219    
2220     - VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
2221     for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
2222     dissolve_free_huge_page(pfn_to_page(pfn));
2223     }
2224     diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
2225     index 2e59a85e360b..ff566376da40 100644
2226     --- a/sound/soc/codecs/nau8825.c
2227     +++ b/sound/soc/codecs/nau8825.c
2228     @@ -1907,7 +1907,7 @@ static int nau8825_calc_fll_param(unsigned int fll_in, unsigned int fs,
2229     /* Calculate the FLL 10-bit integer input and the FLL 16-bit fractional
2230     * input based on FDCO, FREF and FLL ratio.
2231     */
2232     - fvco = div_u64(fvco << 16, fref * fll_param->ratio);
2233     + fvco = div_u64(fvco_max << 16, fref * fll_param->ratio);
2234     fll_param->fll_int = (fvco >> 16) & 0x3FF;
2235     fll_param->fll_frac = fvco & 0xFFFF;
2236     return 0;
2237     diff --git a/sound/soc/intel/atom/sst/sst_pvt.c b/sound/soc/intel/atom/sst/sst_pvt.c
2238     index adb32fefd693..b1e6b8f34a6a 100644
2239     --- a/sound/soc/intel/atom/sst/sst_pvt.c
2240     +++ b/sound/soc/intel/atom/sst/sst_pvt.c
2241     @@ -279,17 +279,15 @@ int sst_prepare_and_post_msg(struct intel_sst_drv *sst,
2242    
2243     if (response) {
2244     ret = sst_wait_timeout(sst, block);
2245     - if (ret < 0) {
2246     + if (ret < 0)
2247     goto out;
2248     - } else if(block->data) {
2249     - if (!data)
2250     - goto out;
2251     - *data = kzalloc(block->size, GFP_KERNEL);
2252     - if (!(*data)) {
2253     +
2254     + if (data && block->data) {
2255     + *data = kmemdup(block->data, block->size, GFP_KERNEL);
2256     + if (!*data) {
2257     ret = -ENOMEM;
2258     goto out;
2259     - } else
2260     - memcpy(data, (void *) block->data, block->size);
2261     + }
2262     }
2263     }
2264     out: