Magellan Linux

Contents of /trunk/kernel26-alx/patches-3.10/0147-3.10.48-all-fixes.patch



Revision 2672
Tue Jul 21 16:46:35 2015 UTC by niro
File size: 51406 byte(s)
-3.10.84-alx-r1
1 diff --git a/Makefile b/Makefile
2 index 6a3b46d1863c..f7e5680740f9 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 10
8 -SUBLEVEL = 47
9 +SUBLEVEL = 48
10 EXTRAVERSION =
11 NAME = TOSSUG Baby Fish
12
13 diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
14 index f82cf878d6af..94c2f6d17dae 100644
15 --- a/arch/arm/mach-omap2/mux.c
16 +++ b/arch/arm/mach-omap2/mux.c
17 @@ -183,8 +183,10 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
18 m0_entry = mux->muxnames[0];
19
20 /* First check for full name in mode0.muxmode format */
21 - if (mode0_len && strncmp(muxname, m0_entry, mode0_len))
22 - continue;
23 + if (mode0_len)
24 + if (strncmp(muxname, m0_entry, mode0_len) ||
25 + (strlen(m0_entry) != mode0_len))
26 + continue;
27
28 /* Then check for muxmode only */
29 for (i = 0; i < OMAP_MUX_NR_MODES; i++) {
30 diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
31 index 6ad781b21c08..7cd589ebca2a 100644
32 --- a/arch/arm64/kernel/entry.S
33 +++ b/arch/arm64/kernel/entry.S
34 @@ -275,7 +275,6 @@ el1_sp_pc:
35 * Stack or PC alignment exception handling
36 */
37 mrs x0, far_el1
38 - mov x1, x25
39 mov x2, sp
40 b do_sp_pc_abort
41 el1_undef:
42 diff --git a/arch/unicore32/mm/alignment.c b/arch/unicore32/mm/alignment.c
43 index de7dc5fdd58b..24e836023e6c 100644
44 --- a/arch/unicore32/mm/alignment.c
45 +++ b/arch/unicore32/mm/alignment.c
46 @@ -21,6 +21,7 @@
47 #include <linux/sched.h>
48 #include <linux/uaccess.h>
49
50 +#include <asm/pgtable.h>
51 #include <asm/tlbflush.h>
52 #include <asm/unaligned.h>
53
54 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
55 index 3741c653767c..8b320722de7a 100644
56 --- a/arch/x86/include/asm/kvm_host.h
57 +++ b/arch/x86/include/asm/kvm_host.h
58 @@ -92,7 +92,7 @@
59 #define KVM_REFILL_PAGES 25
60 #define KVM_MAX_CPUID_ENTRIES 80
61 #define KVM_NR_FIXED_MTRR_REGION 88
62 -#define KVM_NR_VAR_MTRR 8
63 +#define KVM_NR_VAR_MTRR 10
64
65 #define ASYNC_PF_PER_VCPU 64
66
67 @@ -445,7 +445,7 @@ struct kvm_vcpu_arch {
68 bool nmi_injected; /* Trying to inject an NMI this entry */
69
70 struct mtrr_state_type mtrr_state;
71 - u32 pat;
72 + u64 pat;
73
74 int switch_db_regs;
75 unsigned long db[KVM_NR_DB_REGS];
76 diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
77 index 1e89a3dd3d51..07caf44d5755 100644
78 --- a/drivers/block/rbd.c
79 +++ b/drivers/block/rbd.c
80 @@ -1385,6 +1385,14 @@ static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
81 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
82 }
83
84 +static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
85 +{
86 + struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
87 +
88 + return obj_request->img_offset <
89 + round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
90 +}
91 +
92 static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
93 {
94 dout("%s: obj %p (was %d)\n", __func__, obj_request,
95 @@ -1401,6 +1409,13 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
96 kref_put(&obj_request->kref, rbd_obj_request_destroy);
97 }
98
99 +static void rbd_img_request_get(struct rbd_img_request *img_request)
100 +{
101 + dout("%s: img %p (was %d)\n", __func__, img_request,
102 + atomic_read(&img_request->kref.refcount));
103 + kref_get(&img_request->kref);
104 +}
105 +
106 static bool img_request_child_test(struct rbd_img_request *img_request);
107 static void rbd_parent_request_destroy(struct kref *kref);
108 static void rbd_img_request_destroy(struct kref *kref);
109 @@ -2154,6 +2169,7 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
110 img_request->next_completion = which;
111 out:
112 spin_unlock_irq(&img_request->completion_lock);
113 + rbd_img_request_put(img_request);
114
115 if (!more)
116 rbd_img_request_complete(img_request);
117 @@ -2250,6 +2266,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
118 goto out_partial;
119 obj_request->osd_req = osd_req;
120 obj_request->callback = rbd_img_obj_callback;
121 + rbd_img_request_get(img_request);
122
123 osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
124 0, 0);
125 @@ -2673,7 +2690,7 @@ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
126 */
127 if (!img_request_write_test(img_request) ||
128 !img_request_layered_test(img_request) ||
129 - rbd_dev->parent_overlap <= obj_request->img_offset ||
130 + !obj_request_overlaps_parent(obj_request) ||
131 ((known = obj_request_known_test(obj_request)) &&
132 obj_request_exists_test(obj_request))) {
133
134 diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
135 index a56d0199e334..971dd8795b68 100644
136 --- a/drivers/gpu/drm/radeon/atombios_crtc.c
137 +++ b/drivers/gpu/drm/radeon/atombios_crtc.c
138 @@ -839,14 +839,16 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
139 args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
140 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
141 args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
142 - switch (bpc) {
143 - case 8:
144 - default:
145 - args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
146 - break;
147 - case 10:
148 - args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
149 - break;
150 + if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
151 + switch (bpc) {
152 + case 8:
153 + default:
154 + args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
155 + break;
156 + case 10:
157 + args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
158 + break;
159 + }
160 }
161 args.v5.ucTransmitterID = encoder_id;
162 args.v5.ucEncoderMode = encoder_mode;
163 @@ -861,20 +863,22 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
164 args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
165 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
166 args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
167 - switch (bpc) {
168 - case 8:
169 - default:
170 - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
171 - break;
172 - case 10:
173 - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
174 - break;
175 - case 12:
176 - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
177 - break;
178 - case 16:
179 - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
180 - break;
181 + if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
182 + switch (bpc) {
183 + case 8:
184 + default:
185 + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
186 + break;
187 + case 10:
188 + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
189 + break;
190 + case 12:
191 + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
192 + break;
193 + case 16:
194 + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
195 + break;
196 + }
197 }
198 args.v6.ucTransmitterID = encoder_id;
199 args.v6.ucEncoderMode = encoder_mode;
200 diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
201 index 16023986d301..4c05f2b015cf 100644
202 --- a/drivers/gpu/drm/radeon/atombios_dp.c
203 +++ b/drivers/gpu/drm/radeon/atombios_dp.c
204 @@ -384,6 +384,19 @@ static int dp_get_max_dp_pix_clock(int link_rate,
205
206 /***** radeon specific DP functions *****/
207
208 +static int radeon_dp_get_max_link_rate(struct drm_connector *connector,
209 + u8 dpcd[DP_DPCD_SIZE])
210 +{
211 + int max_link_rate;
212 +
213 + if (radeon_connector_is_dp12_capable(connector))
214 + max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
215 + else
216 + max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
217 +
218 + return max_link_rate;
219 +}
220 +
221 /* First get the min lane# when low rate is used according to pixel clock
222 * (prefer low rate), second check max lane# supported by DP panel,
223 * if the max lane# < low rate lane# then use max lane# instead.
224 @@ -393,7 +406,7 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
225 int pix_clock)
226 {
227 int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
228 - int max_link_rate = drm_dp_max_link_rate(dpcd);
229 + int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
230 int max_lane_num = drm_dp_max_lane_count(dpcd);
231 int lane_num;
232 int max_dp_pix_clock;
233 @@ -431,7 +444,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
234 return 540000;
235 }
236
237 - return drm_dp_max_link_rate(dpcd);
238 + return radeon_dp_get_max_link_rate(connector, dpcd);
239 }
240
241 static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
242 diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
243 index 1f7f3ce875c8..5802d7486354 100644
244 --- a/drivers/gpu/drm/radeon/atombios_encoders.c
245 +++ b/drivers/gpu/drm/radeon/atombios_encoders.c
246 @@ -1877,8 +1877,11 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
247 args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
248 else
249 args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
250 - } else
251 + } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
252 + args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
253 + } else {
254 args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
255 + }
256 switch (radeon_encoder->encoder_id) {
257 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
258 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
259 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
260 index 5a87c9fc78d3..fc604fc75797 100644
261 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
262 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
263 @@ -1345,7 +1345,7 @@ bool radeon_connector_is_dp12_capable(struct drm_connector *connector)
264 struct radeon_device *rdev = dev->dev_private;
265
266 if (ASIC_IS_DCE5(rdev) &&
267 - (rdev->clock.dp_extclk >= 53900) &&
268 + (rdev->clock.default_dispclk >= 53900) &&
269 radeon_connector_encoder_is_hbr2(connector)) {
270 return true;
271 }
272 diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
273 index fe36f1d9496d..60af3cda587b 100644
274 --- a/drivers/gpu/drm/radeon/radeon_cs.c
275 +++ b/drivers/gpu/drm/radeon/radeon_cs.c
276 @@ -96,6 +96,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
277 uint32_t domain = r->write_domain ?
278 r->write_domain : r->read_domains;
279
280 + if (domain & RADEON_GEM_DOMAIN_CPU) {
281 + DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
282 + "for command submission\n");
283 + return -EINVAL;
284 + }
285 +
286 p->relocs[i].lobj.domain = domain;
287 if (domain == RADEON_GEM_DOMAIN_VRAM)
288 domain |= RADEON_GEM_DOMAIN_GTT;
289 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
290 index 021b5227e783..1b0f34bd3a03 100644
291 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
292 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
293 @@ -179,7 +179,6 @@ static int vmw_fb_set_par(struct fb_info *info)
294 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
295 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
296 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
297 - vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length);
298 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
299 }
300
301 diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
302 index 4958b2f89dce..371c1ee233b7 100644
303 --- a/drivers/hwmon/ina2xx.c
304 +++ b/drivers/hwmon/ina2xx.c
305 @@ -147,7 +147,8 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
306
307 switch (reg) {
308 case INA2XX_SHUNT_VOLTAGE:
309 - val = DIV_ROUND_CLOSEST(data->regs[reg],
310 + /* signed register */
311 + val = DIV_ROUND_CLOSEST((s16)data->regs[reg],
312 data->config->shunt_div);
313 break;
314 case INA2XX_BUS_VOLTAGE:
315 @@ -159,8 +160,8 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
316 val = data->regs[reg] * data->config->power_lsb;
317 break;
318 case INA2XX_CURRENT:
319 - /* LSB=1mA (selected). Is in mA */
320 - val = data->regs[reg];
321 + /* signed register, LSB=1mA (selected), in mA */
322 + val = (s16)data->regs[reg];
323 break;
324 default:
325 /* programmer goofed */
326 diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
327 index 0cf5f8e06cfc..1e8e94d4db7d 100644
328 --- a/drivers/iio/inkern.c
329 +++ b/drivers/iio/inkern.c
330 @@ -183,7 +183,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
331 else if (name && index >= 0) {
332 pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
333 np->full_name, name ? name : "", index);
334 - return chan;
335 + return NULL;
336 }
337
338 /*
339 @@ -193,8 +193,9 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
340 */
341 np = np->parent;
342 if (np && !of_get_property(np, "io-channel-ranges", NULL))
343 - break;
344 + return NULL;
345 }
346 +
347 return chan;
348 }
349
350 @@ -317,6 +318,7 @@ struct iio_channel *iio_channel_get(struct device *dev,
351 if (channel != NULL)
352 return channel;
353 }
354 +
355 return iio_channel_get_sys(name, channel_name);
356 }
357 EXPORT_SYMBOL_GPL(iio_channel_get);
358 diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
359 index 8527743b5cef..391b9cea73ed 100644
360 --- a/drivers/irqchip/spear-shirq.c
361 +++ b/drivers/irqchip/spear-shirq.c
362 @@ -125,7 +125,7 @@ static struct spear_shirq spear320_shirq_ras2 = {
363 };
364
365 static struct spear_shirq spear320_shirq_ras3 = {
366 - .irq_nr = 3,
367 + .irq_nr = 7,
368 .irq_bit_off = 0,
369 .invalid_irq = 1,
370 .regs = {
371 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
372 index 7d0ac0a6e724..86a2a5e3b26b 100644
373 --- a/drivers/md/dm-thin.c
374 +++ b/drivers/md/dm-thin.c
375 @@ -2647,7 +2647,8 @@ static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
376 */
377 if (pt->adjusted_pf.discard_passdown) {
378 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
379 - limits->discard_granularity = data_limits->discard_granularity;
380 + limits->discard_granularity = max(data_limits->discard_granularity,
381 + pool->sectors_per_block << SECTOR_SHIFT);
382 } else
383 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
384 }
385 diff --git a/drivers/md/md.c b/drivers/md/md.c
386 index 963fa59be9b3..aaf77b07bb72 100644
387 --- a/drivers/md/md.c
388 +++ b/drivers/md/md.c
389 @@ -7447,6 +7447,19 @@ void md_do_sync(struct md_thread *thread)
390 rdev->recovery_offset < j)
391 j = rdev->recovery_offset;
392 rcu_read_unlock();
393 +
394 + /* If there is a bitmap, we need to make sure all
395 + * writes that started before we added a spare
396 + * complete before we start doing a recovery.
397 + * Otherwise the write might complete and (via
398 + * bitmap_endwrite) set a bit in the bitmap after the
399 + * recovery has checked that bit and skipped that
400 + * region.
401 + */
402 + if (mddev->bitmap) {
403 + mddev->pers->quiesce(mddev, 1);
404 + mddev->pers->quiesce(mddev, 0);
405 + }
406 }
407
408 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
409 diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
410 index ad13f4240c49..7ffb5cba30a9 100644
411 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c
412 +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
413 @@ -247,6 +247,9 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
414 case MMC_RSP_R1:
415 rsp_type = SD_RSP_TYPE_R1;
416 break;
417 + case MMC_RSP_R1 & ~MMC_RSP_CRC:
418 + rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
419 + break;
420 case MMC_RSP_R1B:
421 rsp_type = SD_RSP_TYPE_R1b;
422 break;
423 diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
424 index 20657209a472..c31d183820c5 100644
425 --- a/drivers/mtd/nand/fsl_elbc_nand.c
426 +++ b/drivers/mtd/nand/fsl_elbc_nand.c
427 @@ -725,6 +725,19 @@ static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
428 return 0;
429 }
430
431 +/* ECC will be calculated automatically, and errors will be detected in
432 + * waitfunc.
433 + */
434 +static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip,
435 + uint32_t offset, uint32_t data_len,
436 + const uint8_t *buf, int oob_required)
437 +{
438 + fsl_elbc_write_buf(mtd, buf, mtd->writesize);
439 + fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
440 +
441 + return 0;
442 +}
443 +
444 static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
445 {
446 struct fsl_lbc_ctrl *ctrl = priv->ctrl;
447 @@ -763,6 +776,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
448
449 chip->ecc.read_page = fsl_elbc_read_page;
450 chip->ecc.write_page = fsl_elbc_write_page;
451 + chip->ecc.write_subpage = fsl_elbc_write_subpage;
452
453 /* If CS Base Register selects full hardware ECC then use it */
454 if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
455 diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
456 index 81b80af55872..8c4eb287bbdb 100644
457 --- a/drivers/mtd/nand/omap2.c
458 +++ b/drivers/mtd/nand/omap2.c
459 @@ -1463,7 +1463,7 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
460
461 /* Check if any error reported */
462 if (!is_error_reported)
463 - return 0;
464 + return stat;
465
466 /* Decode BCH error using ELM module */
467 elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
468 diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
469 index e85d34b76039..ebcce00ce067 100644
470 --- a/drivers/net/wireless/b43/xmit.c
471 +++ b/drivers/net/wireless/b43/xmit.c
472 @@ -810,9 +810,13 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
473 break;
474 case B43_PHYTYPE_G:
475 status.band = IEEE80211_BAND_2GHZ;
476 - /* chanid is the radio channel cookie value as used
477 - * to tune the radio. */
478 - status.freq = chanid + 2400;
479 + /* Somewhere between 478.104 and 508.1084 firmware for G-PHY
480 + * has been modified to be compatible with N-PHY and others.
481 + */
482 + if (dev->fw.rev >= 508)
483 + status.freq = ieee80211_channel_to_frequency(chanid, status.band);
484 + else
485 + status.freq = chanid + 2400;
486 break;
487 case B43_PHYTYPE_N:
488 case B43_PHYTYPE_LP:
489 diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
490 index 4088dd5e9244..ff04135d37af 100644
491 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c
492 +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
493 @@ -339,6 +339,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
494 {
495 int ret;
496 int t = 0;
497 + int iter;
498
499 IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
500
501 @@ -347,18 +348,23 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
502 if (ret >= 0)
503 return 0;
504
505 - /* If HW is not ready, prepare the conditions to check again */
506 - iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
507 - CSR_HW_IF_CONFIG_REG_PREPARE);
508 + for (iter = 0; iter < 10; iter++) {
509 + /* If HW is not ready, prepare the conditions to check again */
510 + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
511 + CSR_HW_IF_CONFIG_REG_PREPARE);
512 +
513 + do {
514 + ret = iwl_pcie_set_hw_ready(trans);
515 + if (ret >= 0)
516 + return 0;
517
518 - do {
519 - ret = iwl_pcie_set_hw_ready(trans);
520 - if (ret >= 0)
521 - return 0;
522 + usleep_range(200, 1000);
523 + t += 200;
524 + } while (t < 150000);
525 + msleep(25);
526 + }
527
528 - usleep_range(200, 1000);
529 - t += 200;
530 - } while (t < 150000);
531 + IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter);
532
533 return ret;
534 }
535 diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
536 index 77e45b223d15..d582febbfba2 100644
537 --- a/drivers/net/wireless/rt2x00/rt2500pci.c
538 +++ b/drivers/net/wireless/rt2x00/rt2500pci.c
539 @@ -1684,8 +1684,13 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
540 /*
541 * Detect if this device has an hardware controlled radio.
542 */
543 - if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
544 + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) {
545 __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);
546 + /*
547 + * On this device RFKILL initialized during probe does not work.
548 + */
549 + __set_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags);
550 + }
551
552 /*
553 * Check if the BBP tuning should be enabled.
554 diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
555 index 7510723a8c37..1e716ff0f19e 100644
556 --- a/drivers/net/wireless/rt2x00/rt2x00.h
557 +++ b/drivers/net/wireless/rt2x00/rt2x00.h
558 @@ -708,6 +708,7 @@ enum rt2x00_capability_flags {
559 REQUIRE_SW_SEQNO,
560 REQUIRE_HT_TX_DESC,
561 REQUIRE_PS_AUTOWAKE,
562 + REQUIRE_DELAYED_RFKILL,
563
564 /*
565 * Capabilities
566 diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
567 index a2889d1cfe37..e22942bc2bb1 100644
568 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c
569 +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
570 @@ -1128,9 +1128,10 @@ static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev)
571 return;
572
573 /*
574 - * Unregister extra components.
575 + * Stop rfkill polling.
576 */
577 - rt2x00rfkill_unregister(rt2x00dev);
578 + if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
579 + rt2x00rfkill_unregister(rt2x00dev);
580
581 /*
582 * Allow the HW to uninitialize.
583 @@ -1168,6 +1169,12 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
584
585 set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags);
586
587 + /*
588 + * Start rfkill polling.
589 + */
590 + if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
591 + rt2x00rfkill_register(rt2x00dev);
592 +
593 return 0;
594 }
595
596 @@ -1363,7 +1370,12 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
597 rt2x00link_register(rt2x00dev);
598 rt2x00leds_register(rt2x00dev);
599 rt2x00debug_register(rt2x00dev);
600 - rt2x00rfkill_register(rt2x00dev);
601 +
602 + /*
603 + * Start rfkill polling.
604 + */
605 + if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
606 + rt2x00rfkill_register(rt2x00dev);
607
608 return 0;
609
610 @@ -1379,6 +1391,12 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
611 clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
612
613 /*
614 + * Stop rfkill polling.
615 + */
616 + if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
617 + rt2x00rfkill_unregister(rt2x00dev);
618 +
619 + /*
620 * Disable radio.
621 */
622 rt2x00lib_disable_radio(rt2x00dev);
623 diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
624 index 2b724fc4e306..c03748dafd49 100644
625 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c
626 +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
627 @@ -489,6 +489,8 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
628 crypto.cipher = rt2x00crypto_key_to_cipher(key);
629 if (crypto.cipher == CIPHER_NONE)
630 return -EOPNOTSUPP;
631 + if (crypto.cipher == CIPHER_TKIP && rt2x00_is_usb(rt2x00dev))
632 + return -EOPNOTSUPP;
633
634 crypto.cmd = cmd;
635
636 diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
637 index d0fa4b6c551f..c62b3e5d44bd 100644
638 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c
639 +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
640 @@ -185,6 +185,11 @@ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
641 if (crq->valid & 0x80) {
642 if (++queue->cur == queue->size)
643 queue->cur = 0;
644 +
645 + /* Ensure the read of the valid bit occurs before reading any
646 + * other bits of the CRQ entry
647 + */
648 + rmb();
649 } else
650 crq = NULL;
651 spin_unlock_irqrestore(&queue->lock, flags);
652 @@ -203,6 +208,11 @@ static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
653 {
654 struct vio_dev *vdev = to_vio_dev(hostdata->dev);
655
656 + /*
657 + * Ensure the command buffer is flushed to memory before handing it
658 + * over to the VIOS to prevent it from fetching any stale data.
659 + */
660 + mb();
661 return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
662 }
663
664 @@ -794,7 +804,8 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
665 evt->hostdata->dev);
666 if (evt->cmnd_done)
667 evt->cmnd_done(evt->cmnd);
668 - } else if (evt->done)
669 + } else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT &&
670 + evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
671 evt->done(evt);
672 free_event_struct(&evt->hostdata->pool, evt);
673 spin_lock_irqsave(hostdata->host->host_lock, flags);
674 diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
675 index d92fe4037e94..6b349e301869 100644
676 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
677 +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
678 @@ -3000,7 +3000,11 @@ sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task
679 if ((target == -1 || cp->target == target) &&
680 (lun == -1 || cp->lun == lun) &&
681 (task == -1 || cp->tag == task)) {
682 +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
683 sym_set_cam_status(cp->cmd, DID_SOFT_ERROR);
684 +#else
685 + sym_set_cam_status(cp->cmd, DID_REQUEUE);
686 +#endif
687 sym_remque(&cp->link_ccbq);
688 sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
689 }
690 diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
691 index 95a5d73e675c..11f5326f449f 100644
692 --- a/drivers/scsi/virtio_scsi.c
693 +++ b/drivers/scsi/virtio_scsi.c
694 @@ -270,6 +270,16 @@ static void virtscsi_req_done(struct virtqueue *vq)
695 virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
696 };
697
698 +static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
699 +{
700 + int i, num_vqs;
701 +
702 + num_vqs = vscsi->num_queues;
703 + for (i = 0; i < num_vqs; i++)
704 + virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
705 + virtscsi_complete_cmd);
706 +}
707 +
708 static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
709 {
710 struct virtio_scsi_cmd *cmd = buf;
711 @@ -288,6 +298,8 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
712 virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
713 };
714
715 +static void virtscsi_handle_event(struct work_struct *work);
716 +
717 static int virtscsi_kick_event(struct virtio_scsi *vscsi,
718 struct virtio_scsi_event_node *event_node)
719 {
720 @@ -295,6 +307,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
721 struct scatterlist sg;
722 unsigned long flags;
723
724 + INIT_WORK(&event_node->work, virtscsi_handle_event);
725 sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
726
727 spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
728 @@ -412,7 +425,6 @@ static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
729 {
730 struct virtio_scsi_event_node *event_node = buf;
731
732 - INIT_WORK(&event_node->work, virtscsi_handle_event);
733 schedule_work(&event_node->work);
734 }
735
736 @@ -602,6 +614,18 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
737 cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
738 ret = SUCCESS;
739
740 + /*
741 + * The spec guarantees that all requests related to the TMF have
742 + * been completed, but the callback might not have run yet if
743 + * we're using independent interrupts (e.g. MSI). Poll the
744 + * virtqueues once.
745 + *
746 + * In the abort case, sc->scsi_done will do nothing, because
747 + * the block layer must have detected a timeout and as a result
748 + * REQ_ATOM_COMPLETE has been set.
749 + */
750 + virtscsi_poll_requests(vscsi);
751 +
752 out:
753 mempool_free(cmd, virtscsi_cmd_pool);
754 return ret;
755 diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
756 index 5d880917850f..345b5ddcb1a0 100644
757 --- a/drivers/tty/serial/8250/8250_dw.c
758 +++ b/drivers/tty/serial/8250/8250_dw.c
759 @@ -54,58 +54,100 @@
760
761
762 struct dw8250_data {
763 - int last_lcr;
764 + int last_mcr;
765 int line;
766 struct clk *clk;
767 };
768
769 +static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
770 +{
771 + struct dw8250_data *d = p->private_data;
772 +
773 + /* If reading MSR, report CTS asserted when auto-CTS/RTS enabled */
774 + if (offset == UART_MSR && d->last_mcr & UART_MCR_AFE) {
775 + value |= UART_MSR_CTS;
776 + value &= ~UART_MSR_DCTS;
777 + }
778 +
779 + return value;
780 +}
781 +
782 +static void dw8250_force_idle(struct uart_port *p)
783 +{
784 + serial8250_clear_and_reinit_fifos(container_of
785 + (p, struct uart_8250_port, port));
786 + (void)p->serial_in(p, UART_RX);
787 +}
788 +
789 static void dw8250_serial_out(struct uart_port *p, int offset, int value)
790 {
791 struct dw8250_data *d = p->private_data;
792
793 - if (offset == UART_LCR)
794 - d->last_lcr = value;
795 + if (offset == UART_MCR)
796 + d->last_mcr = value;
797 +
798 + writeb(value, p->membase + (offset << p->regshift));
799
800 - offset <<= p->regshift;
801 - writeb(value, p->membase + offset);
802 + /* Make sure LCR write wasn't ignored */
803 + if (offset == UART_LCR) {
804 + int tries = 1000;
805 + while (tries--) {
806 + unsigned int lcr = p->serial_in(p, UART_LCR);
807 + if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
808 + return;
809 + dw8250_force_idle(p);
810 + writeb(value, p->membase + (UART_LCR << p->regshift));
811 + }
812 + dev_err(p->dev, "Couldn't set LCR to %d\n", value);
813 + }
814 }
815
816 static unsigned int dw8250_serial_in(struct uart_port *p, int offset)
817 {
818 - offset <<= p->regshift;
819 + unsigned int value = readb(p->membase + (offset << p->regshift));
820
821 - return readb(p->membase + offset);
822 + return dw8250_modify_msr(p, offset, value);
823 }
824
825 static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
826 {
827 struct dw8250_data *d = p->private_data;
828
829 - if (offset == UART_LCR)
830 - d->last_lcr = value;
831 + if (offset == UART_MCR)
832 + d->last_mcr = value;
833
834 - offset <<= p->regshift;
835 - writel(value, p->membase + offset);
836 + writel(value, p->membase + (offset << p->regshift));
837 +
838 + /* Make sure LCR write wasn't ignored */
839 + if (offset == UART_LCR) {
840 + int tries = 1000;
841 + while (tries--) {
842 + unsigned int lcr = p->serial_in(p, UART_LCR);
843 + if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
844 + return;
845 + dw8250_force_idle(p);
846 + writel(value, p->membase + (UART_LCR << p->regshift));
847 + }
848 + dev_err(p->dev, "Couldn't set LCR to %d\n", value);
849 + }
850 }
851
852 static unsigned int dw8250_serial_in32(struct uart_port *p, int offset)
853 {
854 - offset <<= p->regshift;
855 + unsigned int value = readl(p->membase + (offset << p->regshift));
856
857 - return readl(p->membase + offset);
858 + return dw8250_modify_msr(p, offset, value);
859 }
860
861 static int dw8250_handle_irq(struct uart_port *p)
862 {
863 - struct dw8250_data *d = p->private_data;
864 unsigned int iir = p->serial_in(p, UART_IIR);
865
866 if (serial8250_handle_irq(p, iir)) {
867 return 1;
868 } else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
869 - /* Clear the USR and write the LCR again. */
870 + /* Clear the USR */
871 (void)p->serial_in(p, DW_UART_USR);
872 - p->serial_out(p, UART_LCR, d->last_lcr);
873
874 return 1;
875 }
876 diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
877 index b6e9d917221e..84219f656051 100644
878 --- a/drivers/usb/gadget/f_fs.c
879 +++ b/drivers/usb/gadget/f_fs.c
880 @@ -1389,11 +1389,13 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
881 ffs->ep0req->context = ffs;
882
883 lang = ffs->stringtabs;
884 - for (lang = ffs->stringtabs; *lang; ++lang) {
885 - struct usb_string *str = (*lang)->strings;
886 - int id = first_id;
887 - for (; str->s; ++id, ++str)
888 - str->id = id;
889 + if (lang) {
890 + for (; *lang; ++lang) {
891 + struct usb_string *str = (*lang)->strings;
892 + int id = first_id;
893 + for (; str->s; ++id, ++str)
894 + str->id = id;
895 + }
896 }
897
898 ffs->gadget = cdev->gadget;
899 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
900 index bcfb08e41eb6..fe42cae6d1ef 100644
901 --- a/drivers/usb/host/xhci-ring.c
902 +++ b/drivers/usb/host/xhci-ring.c
903 @@ -3590,7 +3590,7 @@ static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
904 return 0;
905
906 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
907 - return roundup(total_packet_count, max_burst + 1) - 1;
908 + return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
909 }
910
911 /*
912 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
913 index 4b46de842175..9a7088bc634d 100644
914 --- a/drivers/usb/host/xhci.c
915 +++ b/drivers/usb/host/xhci.c
916 @@ -960,7 +960,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
917 */
918 int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
919 {
920 - u32 command, temp = 0;
921 + u32 command, temp = 0, status;
922 struct usb_hcd *hcd = xhci_to_hcd(xhci);
923 struct usb_hcd *secondary_hcd;
924 int retval = 0;
925 @@ -1084,8 +1084,12 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
926
927 done:
928 if (retval == 0) {
929 - usb_hcd_resume_root_hub(hcd);
930 - usb_hcd_resume_root_hub(xhci->shared_hcd);
931 + /* Resume root hubs only when have pending events. */
932 + status = readl(&xhci->op_regs->status);
933 + if (status & STS_EINT) {
934 + usb_hcd_resume_root_hub(hcd);
935 + usb_hcd_resume_root_hub(xhci->shared_hcd);
936 + }
937 }
938
939 /*
940 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
941 index b9e663ac9a35..3e315de9bbd4 100644
942 --- a/drivers/usb/serial/ftdi_sio.c
943 +++ b/drivers/usb/serial/ftdi_sio.c
944 @@ -1577,14 +1577,17 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
945 struct usb_device *udev = serial->dev;
946
947 struct usb_interface *interface = serial->interface;
948 - struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc;
949 + struct usb_endpoint_descriptor *ep_desc;
950
951 unsigned num_endpoints;
952 - int i;
953 + unsigned i;
954
955 num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
956 dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
957
958 + if (!num_endpoints)
959 + return;
960 +
961 /* NOTE: some customers have programmed FT232R/FT245R devices
962 * with an endpoint size of 0 - not good. In this case, we
963 * want to override the endpoint descriptor setting and use a
964 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
965 index 70ede84f4f6b..e25e8ca09fe2 100644
966 --- a/drivers/usb/serial/option.c
967 +++ b/drivers/usb/serial/option.c
968 @@ -352,6 +352,9 @@ static void option_instat_callback(struct urb *urb);
969 /* Zoom */
970 #define ZOOM_PRODUCT_4597 0x9607
971
972 +/* SpeedUp SU9800 usb 3g modem */
973 +#define SPEEDUP_PRODUCT_SU9800 0x9800
974 +
975 /* Haier products */
976 #define HAIER_VENDOR_ID 0x201e
977 #define HAIER_PRODUCT_CE100 0x2009
978 @@ -372,8 +375,12 @@ static void option_instat_callback(struct urb *urb);
979 /* Olivetti products */
980 #define OLIVETTI_VENDOR_ID 0x0b3c
981 #define OLIVETTI_PRODUCT_OLICARD100 0xc000
982 +#define OLIVETTI_PRODUCT_OLICARD120 0xc001
983 +#define OLIVETTI_PRODUCT_OLICARD140 0xc002
984 #define OLIVETTI_PRODUCT_OLICARD145 0xc003
985 +#define OLIVETTI_PRODUCT_OLICARD155 0xc004
986 #define OLIVETTI_PRODUCT_OLICARD200 0xc005
987 +#define OLIVETTI_PRODUCT_OLICARD160 0xc00a
988 #define OLIVETTI_PRODUCT_OLICARD500 0xc00b
989
990 /* Celot products */
991 @@ -1577,6 +1584,7 @@ static const struct usb_device_id option_ids[] = {
992 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
993 .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
994 },
995 + { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
996 { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
997 { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
998 { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
999 @@ -1611,15 +1619,21 @@ static const struct usb_device_id option_ids[] = {
1000 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
1001 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
1002 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
1003 -
1004 - { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
1005 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
1006 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1007 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
1008 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1009 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140),
1010 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1011 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
1012 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155),
1013 + .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
1014 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
1015 - .driver_info = (kernel_ulong_t)&net_intf6_blacklist
1016 - },
1017 + .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
1018 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160),
1019 + .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
1020 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
1021 - .driver_info = (kernel_ulong_t)&net_intf4_blacklist
1022 - },
1023 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1024 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
1025 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
1026 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
1027 diff --git a/drivers/video/fb-puv3.c b/drivers/video/fb-puv3.c
1028 index 27fc956166fa..520112531eb0 100644
1029 --- a/drivers/video/fb-puv3.c
1030 +++ b/drivers/video/fb-puv3.c
1031 @@ -18,8 +18,10 @@
1032 #include <linux/fb.h>
1033 #include <linux/init.h>
1034 #include <linux/console.h>
1035 +#include <linux/mm.h>
1036
1037 #include <asm/sizes.h>
1038 +#include <asm/pgtable.h>
1039 #include <mach/hardware.h>
1040
1041 /* Platform_data reserved for unifb registers. */
1042 diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
1043 index 0227b45ef00a..15e9505aa35f 100644
1044 --- a/fs/cifs/cifs_unicode.c
1045 +++ b/fs/cifs/cifs_unicode.c
1046 @@ -290,7 +290,8 @@ int
1047 cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
1048 const struct nls_table *cp, int mapChars)
1049 {
1050 - int i, j, charlen;
1051 + int i, charlen;
1052 + int j = 0;
1053 char src_char;
1054 __le16 dst_char;
1055 wchar_t tmp;
1056 @@ -298,12 +299,11 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
1057 if (!mapChars)
1058 return cifs_strtoUTF16(target, source, PATH_MAX, cp);
1059
1060 - for (i = 0, j = 0; i < srclen; j++) {
1061 + for (i = 0; i < srclen; j++) {
1062 src_char = source[i];
1063 charlen = 1;
1064 switch (src_char) {
1065 case 0:
1066 - put_unaligned(0, &target[j]);
1067 goto ctoUTF16_out;
1068 case ':':
1069 dst_char = cpu_to_le16(UNI_COLON);
1070 @@ -350,6 +350,7 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
1071 }
1072
1073 ctoUTF16_out:
1074 + put_unaligned(0, &target[j]); /* Null terminate target unicode string */
1075 return j;
1076 }
1077
1078 diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
1079 index b8d5d351e24f..589061469687 100644
1080 --- a/fs/ext4/indirect.c
1081 +++ b/fs/ext4/indirect.c
1082 @@ -390,7 +390,13 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
1083 return 0;
1084 failed:
1085 for (; i >= 0; i--) {
1086 - if (i != indirect_blks && branch[i].bh)
1087 + /*
1088 + * We want to ext4_forget() only freshly allocated indirect
1089 + * blocks. Buffer for new_blocks[i-1] is at branch[i].bh and
1090 + * buffer at branch[0].bh is indirect block / inode already
1091 + * existing before ext4_alloc_branch() was called.
1092 + */
1093 + if (i > 0 && i != indirect_blks && branch[i].bh)
1094 ext4_forget(handle, 1, inode, branch[i].bh,
1095 branch[i].bh->b_blocknr);
1096 ext4_free_blocks(handle, inode, NULL, new_blocks[i],
1097 @@ -1325,16 +1331,24 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,
1098 blk = *i_data;
1099 if (level > 0) {
1100 ext4_lblk_t first2;
1101 + ext4_lblk_t count2;
1102 +
1103 bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
1104 if (!bh) {
1105 EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
1106 "Read failure");
1107 return -EIO;
1108 }
1109 - first2 = (first > offset) ? first - offset : 0;
1110 + if (first > offset) {
1111 + first2 = first - offset;
1112 + count2 = count;
1113 + } else {
1114 + first2 = 0;
1115 + count2 = count - (offset - first);
1116 + }
1117 ret = free_hole_blocks(handle, inode, bh,
1118 (__le32 *)bh->b_data, level - 1,
1119 - first2, count - offset,
1120 + first2, count2,
1121 inode->i_sb->s_blocksize >> 2);
1122 if (ret) {
1123 brelse(bh);
1124 diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
1125 index f7d7d04674fb..0f9ce13972d0 100644
1126 --- a/fs/nfsd/nfs4proc.c
1127 +++ b/fs/nfsd/nfs4proc.c
1128 @@ -576,15 +576,6 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1129
1130 switch (create->cr_type) {
1131 case NF4LNK:
1132 - /* ugh! we have to null-terminate the linktext, or
1133 - * vfs_symlink() will choke. it is always safe to
1134 - * null-terminate by brute force, since at worst we
1135 - * will overwrite the first byte of the create namelen
1136 - * in the XDR buffer, which has already been extracted
1137 - * during XDR decode.
1138 - */
1139 - create->cr_linkname[create->cr_linklen] = 0;
1140 -
1141 status = nfsd_symlink(rqstp, &cstate->current_fh,
1142 create->cr_name, create->cr_namelen,
1143 create->cr_linkname, create->cr_linklen,
1144 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
1145 index d4890a96421e..9b45f0666cfc 100644
1146 --- a/fs/nfsd/nfs4xdr.c
1147 +++ b/fs/nfsd/nfs4xdr.c
1148 @@ -553,7 +553,18 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create
1149 READ_BUF(4);
1150 READ32(create->cr_linklen);
1151 READ_BUF(create->cr_linklen);
1152 - SAVEMEM(create->cr_linkname, create->cr_linklen);
1153 + /*
1154 + * The VFS will want a null-terminated string, and
1155 + * null-terminating in place isn't safe since this might
1156 + * end on a page boundary:
1157 + */
1158 + create->cr_linkname =
1159 + kmalloc(create->cr_linklen + 1, GFP_KERNEL);
1160 + if (!create->cr_linkname)
1161 + return nfserr_jukebox;
1162 + memcpy(create->cr_linkname, p, create->cr_linklen);
1163 + create->cr_linkname[create->cr_linklen] = '\0';
1164 + defer_free(argp, kfree, create->cr_linkname);
1165 break;
1166 case NF4BLK:
1167 case NF4CHR:
1168 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1169 index 60f49637b4d5..21920add7972 100644
1170 --- a/kernel/trace/trace.c
1171 +++ b/kernel/trace/trace.c
1172 @@ -1306,7 +1306,6 @@ void tracing_start(void)
1173
1174 arch_spin_unlock(&ftrace_max_lock);
1175
1176 - ftrace_start();
1177 out:
1178 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1179 }
1180 @@ -1353,7 +1352,6 @@ void tracing_stop(void)
1181 struct ring_buffer *buffer;
1182 unsigned long flags;
1183
1184 - ftrace_stop();
1185 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1186 if (global_trace.stop_count++)
1187 goto out;
1188 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1189 index de73c9d144e1..dbc949c409c7 100644
1190 --- a/mm/hugetlb.c
1191 +++ b/mm/hugetlb.c
1192 @@ -2328,6 +2328,31 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
1193 update_mmu_cache(vma, address, ptep);
1194 }
1195
1196 +static int is_hugetlb_entry_migration(pte_t pte)
1197 +{
1198 + swp_entry_t swp;
1199 +
1200 + if (huge_pte_none(pte) || pte_present(pte))
1201 + return 0;
1202 + swp = pte_to_swp_entry(pte);
1203 + if (non_swap_entry(swp) && is_migration_entry(swp))
1204 + return 1;
1205 + else
1206 + return 0;
1207 +}
1208 +
1209 +static int is_hugetlb_entry_hwpoisoned(pte_t pte)
1210 +{
1211 + swp_entry_t swp;
1212 +
1213 + if (huge_pte_none(pte) || pte_present(pte))
1214 + return 0;
1215 + swp = pte_to_swp_entry(pte);
1216 + if (non_swap_entry(swp) && is_hwpoison_entry(swp))
1217 + return 1;
1218 + else
1219 + return 0;
1220 +}
1221
1222 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
1223 struct vm_area_struct *vma)
1224 @@ -2355,10 +2380,26 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
1225
1226 spin_lock(&dst->page_table_lock);
1227 spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
1228 - if (!huge_pte_none(huge_ptep_get(src_pte))) {
1229 + entry = huge_ptep_get(src_pte);
1230 + if (huge_pte_none(entry)) { /* skip none entry */
1231 + ;
1232 + } else if (unlikely(is_hugetlb_entry_migration(entry) ||
1233 + is_hugetlb_entry_hwpoisoned(entry))) {
1234 + swp_entry_t swp_entry = pte_to_swp_entry(entry);
1235 +
1236 + if (is_write_migration_entry(swp_entry) && cow) {
1237 + /*
1238 + * COW mappings require pages in both
1239 + * parent and child to be set to read.
1240 + */
1241 + make_migration_entry_read(&swp_entry);
1242 + entry = swp_entry_to_pte(swp_entry);
1243 + set_huge_pte_at(src, addr, src_pte, entry);
1244 + }
1245 + set_huge_pte_at(dst, addr, dst_pte, entry);
1246 + } else {
1247 if (cow)
1248 huge_ptep_set_wrprotect(src, addr, src_pte);
1249 - entry = huge_ptep_get(src_pte);
1250 ptepage = pte_page(entry);
1251 get_page(ptepage);
1252 page_dup_rmap(ptepage);
1253 @@ -2373,32 +2414,6 @@ nomem:
1254 return -ENOMEM;
1255 }
1256
1257 -static int is_hugetlb_entry_migration(pte_t pte)
1258 -{
1259 - swp_entry_t swp;
1260 -
1261 - if (huge_pte_none(pte) || pte_present(pte))
1262 - return 0;
1263 - swp = pte_to_swp_entry(pte);
1264 - if (non_swap_entry(swp) && is_migration_entry(swp))
1265 - return 1;
1266 - else
1267 - return 0;
1268 -}
1269 -
1270 -static int is_hugetlb_entry_hwpoisoned(pte_t pte)
1271 -{
1272 - swp_entry_t swp;
1273 -
1274 - if (huge_pte_none(pte) || pte_present(pte))
1275 - return 0;
1276 - swp = pte_to_swp_entry(pte);
1277 - if (non_swap_entry(swp) && is_hwpoison_entry(swp))
1278 - return 1;
1279 - else
1280 - return 0;
1281 -}
1282 -
1283 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
1284 unsigned long start, unsigned long end,
1285 struct page *ref_page)
1286 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
1287 index 6c2dace665aa..1124d5fc06e9 100644
1288 --- a/mm/mempolicy.c
1289 +++ b/mm/mempolicy.c
1290 @@ -608,19 +608,18 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
1291 * If pagelist != NULL then isolate pages from the LRU and
1292 * put them on the pagelist.
1293 */
1294 -static struct vm_area_struct *
1295 +static int
1296 check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
1297 const nodemask_t *nodes, unsigned long flags, void *private)
1298 {
1299 - int err;
1300 - struct vm_area_struct *first, *vma, *prev;
1301 -
1302 + int err = 0;
1303 + struct vm_area_struct *vma, *prev;
1304
1305 - first = find_vma(mm, start);
1306 - if (!first)
1307 - return ERR_PTR(-EFAULT);
1308 + vma = find_vma(mm, start);
1309 + if (!vma)
1310 + return -EFAULT;
1311 prev = NULL;
1312 - for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
1313 + for (; vma && vma->vm_start < end; vma = vma->vm_next) {
1314 unsigned long endvma = vma->vm_end;
1315
1316 if (endvma > end)
1317 @@ -630,9 +629,9 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
1318
1319 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
1320 if (!vma->vm_next && vma->vm_end < end)
1321 - return ERR_PTR(-EFAULT);
1322 + return -EFAULT;
1323 if (prev && prev->vm_end < vma->vm_start)
1324 - return ERR_PTR(-EFAULT);
1325 + return -EFAULT;
1326 }
1327
1328 if (is_vm_hugetlb_page(vma))
1329 @@ -649,15 +648,13 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
1330
1331 err = check_pgd_range(vma, start, endvma, nodes,
1332 flags, private);
1333 - if (err) {
1334 - first = ERR_PTR(err);
1335 + if (err)
1336 break;
1337 - }
1338 }
1339 next:
1340 prev = vma;
1341 }
1342 - return first;
1343 + return err;
1344 }
1345
1346 /*
1347 @@ -1138,16 +1135,17 @@ out:
1348
1349 /*
1350 * Allocate a new page for page migration based on vma policy.
1351 - * Start assuming that page is mapped by vma pointed to by @private.
1352 + * Start by assuming the page is mapped by the same vma as contains @start.
1353 * Search forward from there, if not. N.B., this assumes that the
1354 * list of pages handed to migrate_pages()--which is how we get here--
1355 * is in virtual address order.
1356 */
1357 -static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1358 +static struct page *new_page(struct page *page, unsigned long start, int **x)
1359 {
1360 - struct vm_area_struct *vma = (struct vm_area_struct *)private;
1361 + struct vm_area_struct *vma;
1362 unsigned long uninitialized_var(address);
1363
1364 + vma = find_vma(current->mm, start);
1365 while (vma) {
1366 address = page_address_in_vma(page, vma);
1367 if (address != -EFAULT)
1368 @@ -1173,7 +1171,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1369 return -ENOSYS;
1370 }
1371
1372 -static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1373 +static struct page *new_page(struct page *page, unsigned long start, int **x)
1374 {
1375 return NULL;
1376 }
1377 @@ -1183,7 +1181,6 @@ static long do_mbind(unsigned long start, unsigned long len,
1378 unsigned short mode, unsigned short mode_flags,
1379 nodemask_t *nmask, unsigned long flags)
1380 {
1381 - struct vm_area_struct *vma;
1382 struct mm_struct *mm = current->mm;
1383 struct mempolicy *new;
1384 unsigned long end;
1385 @@ -1249,11 +1246,9 @@ static long do_mbind(unsigned long start, unsigned long len,
1386 if (err)
1387 goto mpol_out;
1388
1389 - vma = check_range(mm, start, end, nmask,
1390 + err = check_range(mm, start, end, nmask,
1391 flags | MPOL_MF_INVERT, &pagelist);
1392 -
1393 - err = PTR_ERR(vma); /* maybe ... */
1394 - if (!IS_ERR(vma))
1395 + if (!err)
1396 err = mbind_range(mm, start, end, new);
1397
1398 if (!err) {
1399 @@ -1261,9 +1256,8 @@ static long do_mbind(unsigned long start, unsigned long len,
1400
1401 if (!list_empty(&pagelist)) {
1402 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1403 - nr_failed = migrate_pages(&pagelist, new_vma_page,
1404 - (unsigned long)vma,
1405 - MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1406 + nr_failed = migrate_pages(&pagelist, new_page,
1407 + start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1408 if (nr_failed)
1409 putback_lru_pages(&pagelist);
1410 }
1411 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
1412 index 4c51c055d00f..8e7290aea8f8 100644
1413 --- a/net/bluetooth/hci_conn.c
1414 +++ b/net/bluetooth/hci_conn.c
1415 @@ -659,7 +659,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
1416 /* If we're already encrypted set the REAUTH_PEND flag,
1417 * otherwise set the ENCRYPT_PEND.
1418 */
1419 - if (conn->key_type != 0xff)
1420 + if (conn->link_mode & HCI_LM_ENCRYPT)
1421 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1422 else
1423 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1424 diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
1425 index ab2ec7c414cb..5daf7ab26710 100644
1426 --- a/net/bluetooth/hci_event.c
1427 +++ b/net/bluetooth/hci_event.c
1428 @@ -3218,8 +3218,11 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
1429
1430 /* If we're not the initiators request authorization to
1431 * proceed from user space (mgmt_user_confirm with
1432 - * confirm_hint set to 1). */
1433 - if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1434 + * confirm_hint set to 1). The exception is if neither
1435 + * side had MITM in which case we do auto-accept.
1436 + */
1437 + if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
1438 + (loc_mitm || rem_mitm)) {
1439 BT_DBG("Confirming auto-accept as acceptor");
1440 confirm_hint = 1;
1441 goto confirm;
1442 diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
1443 index f8ecbc70293d..8208a13a9837 100644
1444 --- a/net/bluetooth/mgmt.c
1445 +++ b/net/bluetooth/mgmt.c
1446 @@ -2333,8 +2333,13 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
1447 }
1448
1449 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
1450 - /* Continue with pairing via SMP */
1451 + /* Continue with pairing via SMP. The hdev lock must be
1452 + * released as SMP may try to recquire it for crypto
1453 + * purposes.
1454 + */
1455 + hci_dev_unlock(hdev);
1456 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
1457 + hci_dev_lock(hdev);
1458
1459 if (!err)
1460 err = cmd_complete(sk, hdev->id, mgmt_op,
1461 diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
1462 index 14abcf44f974..2d5b4f65c519 100644
1463 --- a/net/mac80211/debugfs_netdev.c
1464 +++ b/net/mac80211/debugfs_netdev.c
1465 @@ -34,8 +34,7 @@ static ssize_t ieee80211_if_read(
1466 ssize_t ret = -EINVAL;
1467
1468 read_lock(&dev_base_lock);
1469 - if (sdata->dev->reg_state == NETREG_REGISTERED)
1470 - ret = (*format)(sdata, buf, sizeof(buf));
1471 + ret = (*format)(sdata, buf, sizeof(buf));
1472 read_unlock(&dev_base_lock);
1473
1474 if (ret >= 0)
1475 @@ -62,8 +61,7 @@ static ssize_t ieee80211_if_write(
1476
1477 ret = -ENODEV;
1478 rtnl_lock();
1479 - if (sdata->dev->reg_state == NETREG_REGISTERED)
1480 - ret = (*write)(sdata, buf, count);
1481 + ret = (*write)(sdata, buf, count);
1482 rtnl_unlock();
1483
1484 return ret;
1485 diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
1486 index 0418777c361f..557a5760f9f6 100644
1487 --- a/net/mac80211/sta_info.c
1488 +++ b/net/mac80211/sta_info.c
1489 @@ -270,6 +270,7 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
1490
1491 sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);
1492
1493 + kfree(rcu_dereference_raw(sta->sta.rates));
1494 kfree(sta);
1495 }
1496
1497 diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
1498 index 93b6e32cfead..0d7a872dab36 100644
1499 --- a/sound/usb/pcm.c
1500 +++ b/sound/usb/pcm.c
1501 @@ -1420,7 +1420,8 @@ static void retire_playback_urb(struct snd_usb_substream *subs,
1502 * on two reads of a counter updated every ms.
1503 */
1504 if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
1505 - snd_printk(KERN_DEBUG "delay: estimated %d, actual %d\n",
1506 + dev_dbg_ratelimited(&subs->dev->dev,
1507 + "delay: estimated %d, actual %d\n",
1508 est_delay, subs->last_delay);
1509
1510 if (!subs->running) {
1511 diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c
1512 index fe1e66b6ef40..a87e99f37c52 100644
1513 --- a/tools/usb/ffs-test.c
1514 +++ b/tools/usb/ffs-test.c
1515 @@ -116,8 +116,8 @@ static const struct {
1516 .header = {
1517 .magic = cpu_to_le32(FUNCTIONFS_DESCRIPTORS_MAGIC),
1518 .length = cpu_to_le32(sizeof descriptors),
1519 - .fs_count = 3,
1520 - .hs_count = 3,
1521 + .fs_count = cpu_to_le32(3),
1522 + .hs_count = cpu_to_le32(3),
1523 },
1524 .fs_descs = {
1525 .intf = {