Magellan Linux

Contents of /trunk/kernel-magellan/patches-3.2/0108-3.2.9-all-fixes.patch

Revision 1663
Fri Mar 2 10:03:33 2012 UTC by niro
File size: 132007 bytes
-added more upstream patches
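
A patch in this series is normally applied from the top of the kernel source tree that patches-3.2 targets (the exact base tree is an assumption here, not stated in the log). A minimal sketch:

    patch -p1 < 0108-3.2.9-all-fixes.patch

The -p1 strips the leading a/ and b/ path components used in the diff headers below.
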
1 diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
2 index 8d8df74..67abef5 100644
3 --- a/arch/arm/common/pl330.c
4 +++ b/arch/arm/common/pl330.c
5 @@ -1496,12 +1496,13 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
6 struct pl330_thread *thrd = ch_id;
7 struct pl330_dmac *pl330;
8 unsigned long flags;
9 - int ret = 0, active = thrd->req_running;
10 + int ret = 0, active;
11
12 if (!thrd || thrd->free || thrd->dmac->state == DYING)
13 return -EINVAL;
14
15 pl330 = thrd->dmac;
16 + active = thrd->req_running;
17
18 spin_lock_irqsave(&pl330->lock, flags);
19
20 diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
21 index 29035e8..7bb8bf9 100644
22 --- a/arch/arm/include/asm/assembler.h
23 +++ b/arch/arm/include/asm/assembler.h
24 @@ -137,6 +137,11 @@
25 disable_irq
26 .endm
27
28 + .macro save_and_disable_irqs_notrace, oldcpsr
29 + mrs \oldcpsr, cpsr
30 + disable_irq_notrace
31 + .endm
32 +
33 /*
34 * Restore interrupt state previously stored in a register. We don't
35 * guarantee that this will preserve the flags.
36 diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
37 index ad93068..143eebb 100644
38 --- a/arch/arm/mach-at91/at91rm9200_devices.c
39 +++ b/arch/arm/mach-at91/at91rm9200_devices.c
40 @@ -83,7 +83,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
41 * USB Device (Gadget)
42 * -------------------------------------------------------------------- */
43
44 -#ifdef CONFIG_USB_AT91
45 +#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE)
46 static struct at91_udc_data udc_data;
47
48 static struct resource udc_resources[] = {
49 diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
50 index 629fa97..2590988 100644
51 --- a/arch/arm/mach-at91/at91sam9260_devices.c
52 +++ b/arch/arm/mach-at91/at91sam9260_devices.c
53 @@ -84,7 +84,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
54 * USB Device (Gadget)
55 * -------------------------------------------------------------------- */
56
57 -#ifdef CONFIG_USB_AT91
58 +#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE)
59 static struct at91_udc_data udc_data;
60
61 static struct resource udc_resources[] = {
62 diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
63 index a178b58..daf3e66 100644
64 --- a/arch/arm/mach-at91/at91sam9261_devices.c
65 +++ b/arch/arm/mach-at91/at91sam9261_devices.c
66 @@ -87,7 +87,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
67 * USB Device (Gadget)
68 * -------------------------------------------------------------------- */
69
70 -#ifdef CONFIG_USB_AT91
71 +#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE)
72 static struct at91_udc_data udc_data;
73
74 static struct resource udc_resources[] = {
75 diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
76 index d5fbac9..32a7e43 100644
77 --- a/arch/arm/mach-at91/at91sam9263_devices.c
78 +++ b/arch/arm/mach-at91/at91sam9263_devices.c
79 @@ -92,7 +92,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
80 * USB Device (Gadget)
81 * -------------------------------------------------------------------- */
82
83 -#ifdef CONFIG_USB_AT91
84 +#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE)
85 static struct at91_udc_data udc_data;
86
87 static struct resource udc_resources[] = {
88 diff --git a/arch/arm/mach-omap2/vp.c b/arch/arm/mach-omap2/vp.c
89 index 66bd700..3b52027 100644
90 --- a/arch/arm/mach-omap2/vp.c
91 +++ b/arch/arm/mach-omap2/vp.c
92 @@ -41,6 +41,11 @@ void __init omap_vp_init(struct voltagedomain *voltdm)
93 u32 val, sys_clk_rate, timeout, waittime;
94 u32 vddmin, vddmax, vstepmin, vstepmax;
95
96 + if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) {
97 + pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name);
98 + return;
99 + }
100 +
101 if (!voltdm->read || !voltdm->write) {
102 pr_err("%s: No read/write API for accessing vdd_%s regs\n",
103 __func__, voltdm->name);
104 diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
105 index 07c4bc8..a655d3d 100644
106 --- a/arch/arm/mm/cache-v7.S
107 +++ b/arch/arm/mm/cache-v7.S
108 @@ -54,9 +54,15 @@ loop1:
109 and r1, r1, #7 @ mask of the bits for current cache only
110 cmp r1, #2 @ see what cache we have at this level
111 blt skip @ skip if no cache, or just i-cache
112 +#ifdef CONFIG_PREEMPT
113 + save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic
114 +#endif
115 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
116 isb @ isb to sych the new cssr&csidr
117 mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
118 +#ifdef CONFIG_PREEMPT
119 + restore_irqs_notrace r9
120 +#endif
121 and r2, r1, #7 @ extract the length of the cache lines
122 add r2, r2, #4 @ add 4 (line length offset)
123 ldr r4, =0x3ff
124 diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
125 index 10a140f..64483fd 100644
126 --- a/arch/powerpc/kernel/perf_event.c
127 +++ b/arch/powerpc/kernel/perf_event.c
128 @@ -865,6 +865,7 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
129 {
130 unsigned long flags;
131 s64 left;
132 + unsigned long val;
133
134 if (!event->hw.idx || !event->hw.sample_period)
135 return;
136 @@ -880,7 +881,12 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
137
138 event->hw.state = 0;
139 left = local64_read(&event->hw.period_left);
140 - write_pmc(event->hw.idx, left);
141 +
142 + val = 0;
143 + if (left < 0x80000000L)
144 + val = 0x80000000L - left;
145 +
146 + write_pmc(event->hw.idx, val);
147
148 perf_event_update_userpage(event);
149 perf_pmu_enable(event->pmu);
150 diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
151 index ebbfab3..e03c555 100644
152 --- a/arch/s390/kernel/time.c
153 +++ b/arch/s390/kernel/time.c
154 @@ -113,11 +113,14 @@ static void fixup_clock_comparator(unsigned long long delta)
155 static int s390_next_ktime(ktime_t expires,
156 struct clock_event_device *evt)
157 {
158 + struct timespec ts;
159 u64 nsecs;
160
161 - nsecs = ktime_to_ns(ktime_sub(expires, ktime_get_monotonic_offset()));
162 + ts.tv_sec = ts.tv_nsec = 0;
163 + monotonic_to_bootbased(&ts);
164 + nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
165 do_div(nsecs, 125);
166 - S390_lowcore.clock_comparator = TOD_UNIX_EPOCH + (nsecs << 9);
167 + S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
168 set_clock_comparator(S390_lowcore.clock_comparator);
169 return 0;
170 }
171 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
172 index a3b0811..0e89635 100644
173 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
174 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
175 @@ -326,8 +326,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
176 l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
177 }
178
179 -static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
180 - int index)
181 +static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
182 {
183 int node;
184
185 @@ -725,14 +724,16 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
186 #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
187
188 #ifdef CONFIG_SMP
189 -static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
190 +
191 +static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
192 {
193 - struct _cpuid4_info *this_leaf, *sibling_leaf;
194 - unsigned long num_threads_sharing;
195 - int index_msb, i, sibling;
196 + struct _cpuid4_info *this_leaf;
197 + int ret, i, sibling;
198 struct cpuinfo_x86 *c = &cpu_data(cpu);
199
200 - if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
201 + ret = 0;
202 + if (index == 3) {
203 + ret = 1;
204 for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
205 if (!per_cpu(ici_cpuid4_info, i))
206 continue;
207 @@ -743,8 +744,35 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
208 set_bit(sibling, this_leaf->shared_cpu_map);
209 }
210 }
211 - return;
212 + } else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) {
213 + ret = 1;
214 + for_each_cpu(i, cpu_sibling_mask(cpu)) {
215 + if (!per_cpu(ici_cpuid4_info, i))
216 + continue;
217 + this_leaf = CPUID4_INFO_IDX(i, index);
218 + for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
219 + if (!cpu_online(sibling))
220 + continue;
221 + set_bit(sibling, this_leaf->shared_cpu_map);
222 + }
223 + }
224 }
225 +
226 + return ret;
227 +}
228 +
229 +static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
230 +{
231 + struct _cpuid4_info *this_leaf, *sibling_leaf;
232 + unsigned long num_threads_sharing;
233 + int index_msb, i;
234 + struct cpuinfo_x86 *c = &cpu_data(cpu);
235 +
236 + if (c->x86_vendor == X86_VENDOR_AMD) {
237 + if (cache_shared_amd_cpu_map_setup(cpu, index))
238 + return;
239 + }
240 +
241 this_leaf = CPUID4_INFO_IDX(cpu, index);
242 num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
243
244 diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
245 index 666f6f5..64004b0 100644
246 --- a/drivers/base/regmap/regcache.c
247 +++ b/drivers/base/regmap/regcache.c
248 @@ -54,7 +54,7 @@ static int regcache_hw_init(struct regmap *map)
249 for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
250 val = regcache_get_val(map->reg_defaults_raw,
251 i, map->cache_word_size);
252 - if (!val)
253 + if (regmap_volatile(map, i))
254 continue;
255 count++;
256 }
257 @@ -69,7 +69,7 @@ static int regcache_hw_init(struct regmap *map)
258 for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
259 val = regcache_get_val(map->reg_defaults_raw,
260 i, map->cache_word_size);
261 - if (!val)
262 + if (regmap_volatile(map, i))
263 continue;
264 map->reg_defaults[j].reg = i;
265 map->reg_defaults[j].def = val;
266 diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
267 index cedb231..2678b6f 100644
268 --- a/drivers/cdrom/cdrom.c
269 +++ b/drivers/cdrom/cdrom.c
270 @@ -2120,11 +2120,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
271 if (!nr)
272 return -ENOMEM;
273
274 - if (!access_ok(VERIFY_WRITE, ubuf, nframes * CD_FRAMESIZE_RAW)) {
275 - ret = -EFAULT;
276 - goto out;
277 - }
278 -
279 cgc.data_direction = CGC_DATA_READ;
280 while (nframes > 0) {
281 if (nr > nframes)
282 @@ -2133,7 +2128,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
283 ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW);
284 if (ret)
285 break;
286 - if (__copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
287 + if (copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
288 ret = -EFAULT;
289 break;
290 }
291 @@ -2141,7 +2136,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
292 nframes -= nr;
293 lba += nr;
294 }
295 -out:
296 kfree(cgc.buffer);
297 return ret;
298 }
299 diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
300 index 31b0d1a..fad7cd1 100644
301 --- a/drivers/gpu/drm/radeon/r100.c
302 +++ b/drivers/gpu/drm/radeon/r100.c
303 @@ -789,9 +789,7 @@ int r100_irq_process(struct radeon_device *rdev)
304 WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
305 break;
306 default:
307 - msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
308 - WREG32(RADEON_MSI_REARM_EN, msi_rearm);
309 - WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
310 + WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
311 break;
312 }
313 }
314 diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
315 index c259e21..ee898e9 100644
316 --- a/drivers/gpu/drm/radeon/rs600.c
317 +++ b/drivers/gpu/drm/radeon/rs600.c
318 @@ -693,9 +693,7 @@ int rs600_irq_process(struct radeon_device *rdev)
319 WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
320 break;
321 default:
322 - msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
323 - WREG32(RADEON_MSI_REARM_EN, msi_rearm);
324 - WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
325 + WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
326 break;
327 }
328 }
329 diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
330 index eedca3c..dd87ae9 100644
331 --- a/drivers/hwmon/ads1015.c
332 +++ b/drivers/hwmon/ads1015.c
333 @@ -271,7 +271,7 @@ static int ads1015_probe(struct i2c_client *client,
334 continue;
335 err = device_create_file(&client->dev, &ads1015_in[k].dev_attr);
336 if (err)
337 - goto exit_free;
338 + goto exit_remove;
339 }
340
341 data->hwmon_dev = hwmon_device_register(&client->dev);
342 @@ -285,7 +285,6 @@ static int ads1015_probe(struct i2c_client *client,
343 exit_remove:
344 for (k = 0; k < ADS1015_CHANNELS; ++k)
345 device_remove_file(&client->dev, &ads1015_in[k].dev_attr);
346 -exit_free:
347 kfree(data);
348 exit:
349 return err;
350 diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
351 index e4ab491..040a820 100644
352 --- a/drivers/hwmon/f75375s.c
353 +++ b/drivers/hwmon/f75375s.c
354 @@ -304,8 +304,6 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
355 case 0: /* Full speed */
356 fanmode |= (3 << FAN_CTRL_MODE(nr));
357 data->pwm[nr] = 255;
358 - f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
359 - data->pwm[nr]);
360 break;
361 case 1: /* PWM */
362 fanmode |= (3 << FAN_CTRL_MODE(nr));
363 @@ -318,6 +316,9 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
364 }
365 f75375_write8(client, F75375_REG_FAN_TIMER, fanmode);
366 data->pwm_enable[nr] = val;
367 + if (val == 0)
368 + f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
369 + data->pwm[nr]);
370 return 0;
371 }
372
373 diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
374 index f20d997..8c3df04 100644
375 --- a/drivers/hwmon/max6639.c
376 +++ b/drivers/hwmon/max6639.c
377 @@ -72,8 +72,8 @@ static unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END };
378
379 static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
380
381 -#define FAN_FROM_REG(val, div, rpm_range) ((val) == 0 ? -1 : \
382 - (val) == 255 ? 0 : (rpm_ranges[rpm_range] * 30) / ((div + 1) * (val)))
383 +#define FAN_FROM_REG(val, rpm_range) ((val) == 0 || (val) == 255 ? \
384 + 0 : (rpm_ranges[rpm_range] * 30) / (val))
385 #define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) / 1000, 0, 255)
386
387 /*
388 @@ -333,7 +333,7 @@ static ssize_t show_fan_input(struct device *dev,
389 return PTR_ERR(data);
390
391 return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[attr->index],
392 - data->ppr, data->rpm_range));
393 + data->rpm_range));
394 }
395
396 static ssize_t show_alarm(struct device *dev,
397 @@ -429,9 +429,9 @@ static int max6639_init_client(struct i2c_client *client)
398 struct max6639_data *data = i2c_get_clientdata(client);
399 struct max6639_platform_data *max6639_info =
400 client->dev.platform_data;
401 - int i = 0;
402 + int i;
403 int rpm_range = 1; /* default: 4000 RPM */
404 - int err = 0;
405 + int err;
406
407 /* Reset chip to default values, see below for GCONFIG setup */
408 err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG,
409 @@ -446,11 +446,6 @@ static int max6639_init_client(struct i2c_client *client)
410 else
411 data->ppr = 2;
412 data->ppr -= 1;
413 - err = i2c_smbus_write_byte_data(client,
414 - MAX6639_REG_FAN_PPR(i),
415 - data->ppr << 5);
416 - if (err)
417 - goto exit;
418
419 if (max6639_info)
420 rpm_range = rpm_range_to_reg(max6639_info->rpm_range);
421 @@ -458,6 +453,13 @@ static int max6639_init_client(struct i2c_client *client)
422
423 for (i = 0; i < 2; i++) {
424
425 + /* Set Fan pulse per revolution */
426 + err = i2c_smbus_write_byte_data(client,
427 + MAX6639_REG_FAN_PPR(i),
428 + data->ppr << 6);
429 + if (err)
430 + goto exit;
431 +
432 /* Fans config PWM, RPM */
433 err = i2c_smbus_write_byte_data(client,
434 MAX6639_REG_FAN_CONFIG1(i),
435 diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
436 index b3cc1e0..86df632 100644
437 --- a/drivers/infiniband/ulp/ipoib/ipoib.h
438 +++ b/drivers/infiniband/ulp/ipoib/ipoib.h
439 @@ -44,6 +44,7 @@
440 #include <linux/mutex.h>
441
442 #include <net/neighbour.h>
443 +#include <net/sch_generic.h>
444
445 #include <linux/atomic.h>
446
447 @@ -117,8 +118,9 @@ struct ipoib_header {
448 u16 reserved;
449 };
450
451 -struct ipoib_pseudoheader {
452 - u8 hwaddr[INFINIBAND_ALEN];
453 +struct ipoib_cb {
454 + struct qdisc_skb_cb qdisc_cb;
455 + u8 hwaddr[INFINIBAND_ALEN];
456 };
457
458 /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
459 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
460 index 83695b4..fe2fdbb 100644
461 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
462 +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
463 @@ -658,7 +658,7 @@ static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
464 }
465
466 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
467 - struct ipoib_pseudoheader *phdr)
468 + struct ipoib_cb *cb)
469 {
470 struct ipoib_dev_priv *priv = netdev_priv(dev);
471 struct ipoib_path *path;
472 @@ -666,17 +666,15 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
473
474 spin_lock_irqsave(&priv->lock, flags);
475
476 - path = __path_find(dev, phdr->hwaddr + 4);
477 + path = __path_find(dev, cb->hwaddr + 4);
478 if (!path || !path->valid) {
479 int new_path = 0;
480
481 if (!path) {
482 - path = path_rec_create(dev, phdr->hwaddr + 4);
483 + path = path_rec_create(dev, cb->hwaddr + 4);
484 new_path = 1;
485 }
486 if (path) {
487 - /* put pseudoheader back on for next time */
488 - skb_push(skb, sizeof *phdr);
489 __skb_queue_tail(&path->queue, skb);
490
491 if (!path->query && path_rec_start(dev, path)) {
492 @@ -700,12 +698,10 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
493 be16_to_cpu(path->pathrec.dlid));
494
495 spin_unlock_irqrestore(&priv->lock, flags);
496 - ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
497 + ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
498 return;
499 } else if ((path->query || !path_rec_start(dev, path)) &&
500 skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
501 - /* put pseudoheader back on for next time */
502 - skb_push(skb, sizeof *phdr);
503 __skb_queue_tail(&path->queue, skb);
504 } else {
505 ++dev->stats.tx_dropped;
506 @@ -774,16 +770,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
507 dev_kfree_skb_any(skb);
508 }
509 } else {
510 - struct ipoib_pseudoheader *phdr =
511 - (struct ipoib_pseudoheader *) skb->data;
512 - skb_pull(skb, sizeof *phdr);
513 + struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
514
515 - if (phdr->hwaddr[4] == 0xff) {
516 + if (cb->hwaddr[4] == 0xff) {
517 /* Add in the P_Key for multicast*/
518 - phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
519 - phdr->hwaddr[9] = priv->pkey & 0xff;
520 + cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
521 + cb->hwaddr[9] = priv->pkey & 0xff;
522
523 - ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
524 + ipoib_mcast_send(dev, cb->hwaddr + 4, skb);
525 } else {
526 /* unicast GID -- should be ARP or RARP reply */
527
528 @@ -792,14 +786,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
529 ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n",
530 skb_dst(skb) ? "neigh" : "dst",
531 be16_to_cpup((__be16 *) skb->data),
532 - IPOIB_QPN(phdr->hwaddr),
533 - phdr->hwaddr + 4);
534 + IPOIB_QPN(cb->hwaddr),
535 + cb->hwaddr + 4);
536 dev_kfree_skb_any(skb);
537 ++dev->stats.tx_dropped;
538 goto unlock;
539 }
540
541 - unicast_arp_send(skb, dev, phdr);
542 + unicast_arp_send(skb, dev, cb);
543 }
544 }
545 unlock:
546 @@ -825,8 +819,6 @@ static int ipoib_hard_header(struct sk_buff *skb,
547 const void *daddr, const void *saddr, unsigned len)
548 {
549 struct ipoib_header *header;
550 - struct dst_entry *dst;
551 - struct neighbour *n;
552
553 header = (struct ipoib_header *) skb_push(skb, sizeof *header);
554
555 @@ -834,18 +826,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
556 header->reserved = 0;
557
558 /*
559 - * If we don't have a neighbour structure, stuff the
560 - * destination address onto the front of the skb so we can
561 - * figure out where to send the packet later.
562 + * If we don't have a dst_entry structure, stuff the
563 + * destination address into skb->cb so we can figure out where
564 + * to send the packet later.
565 */
566 - dst = skb_dst(skb);
567 - n = NULL;
568 - if (dst)
569 - n = dst_get_neighbour_raw(dst);
570 - if ((!dst || !n) && daddr) {
571 - struct ipoib_pseudoheader *phdr =
572 - (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
573 - memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
574 + if (!skb_dst(skb)) {
575 + struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
576 + memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
577 }
578
579 return 0;
580 @@ -1021,11 +1008,7 @@ static void ipoib_setup(struct net_device *dev)
581
582 dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
583
584 - /*
585 - * We add in INFINIBAND_ALEN to allow for the destination
586 - * address "pseudoheader" for skbs without neighbour struct.
587 - */
588 - dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
589 + dev->hard_header_len = IPOIB_ENCAP_LEN;
590 dev->addr_len = INFINIBAND_ALEN;
591 dev->type = ARPHRD_INFINIBAND;
592 dev->tx_queue_len = ipoib_sendq_size * 2;
593 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
594 index 873bff9..e5069b4 100644
595 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
596 +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
597 @@ -262,21 +262,14 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
598 netif_tx_lock_bh(dev);
599 while (!skb_queue_empty(&mcast->pkt_queue)) {
600 struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
601 - struct dst_entry *dst = skb_dst(skb);
602 - struct neighbour *n = NULL;
603
604 netif_tx_unlock_bh(dev);
605
606 skb->dev = dev;
607 - if (dst)
608 - n = dst_get_neighbour_raw(dst);
609 - if (!dst || !n) {
610 - /* put pseudoheader back on for next time */
611 - skb_push(skb, sizeof (struct ipoib_pseudoheader));
612 - }
613
614 if (dev_queue_xmit(skb))
615 ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
616 +
617 netif_tx_lock_bh(dev);
618 }
619 netif_tx_unlock_bh(dev);
620 diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
621 index 6ed9646..3f175eb 100644
622 --- a/drivers/media/rc/imon.c
623 +++ b/drivers/media/rc/imon.c
624 @@ -47,7 +47,7 @@
625 #define MOD_AUTHOR "Jarod Wilson <jarod@wilsonet.com>"
626 #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display"
627 #define MOD_NAME "imon"
628 -#define MOD_VERSION "0.9.3"
629 +#define MOD_VERSION "0.9.4"
630
631 #define DISPLAY_MINOR_BASE 144
632 #define DEVICE_NAME "lcd%d"
633 @@ -1658,9 +1658,17 @@ static void usb_rx_callback_intf0(struct urb *urb)
634 return;
635
636 ictx = (struct imon_context *)urb->context;
637 - if (!ictx || !ictx->dev_present_intf0)
638 + if (!ictx)
639 return;
640
641 + /*
642 + * if we get a callback before we're done configuring the hardware, we
643 + * can't yet process the data, as there's nowhere to send it, but we
644 + * still need to submit a new rx URB to avoid wedging the hardware
645 + */
646 + if (!ictx->dev_present_intf0)
647 + goto out;
648 +
649 switch (urb->status) {
650 case -ENOENT: /* usbcore unlink successful! */
651 return;
652 @@ -1678,6 +1686,7 @@ static void usb_rx_callback_intf0(struct urb *urb)
653 break;
654 }
655
656 +out:
657 usb_submit_urb(ictx->rx_urb_intf0, GFP_ATOMIC);
658 }
659
660 @@ -1690,9 +1699,17 @@ static void usb_rx_callback_intf1(struct urb *urb)
661 return;
662
663 ictx = (struct imon_context *)urb->context;
664 - if (!ictx || !ictx->dev_present_intf1)
665 + if (!ictx)
666 return;
667
668 + /*
669 + * if we get a callback before we're done configuring the hardware, we
670 + * can't yet process the data, as there's nowhere to send it, but we
671 + * still need to submit a new rx URB to avoid wedging the hardware
672 + */
673 + if (!ictx->dev_present_intf1)
674 + goto out;
675 +
676 switch (urb->status) {
677 case -ENOENT: /* usbcore unlink successful! */
678 return;
679 @@ -1710,6 +1727,7 @@ static void usb_rx_callback_intf1(struct urb *urb)
680 break;
681 }
682
683 +out:
684 usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC);
685 }
686
687 @@ -2242,7 +2260,7 @@ find_endpoint_failed:
688 mutex_unlock(&ictx->lock);
689 usb_free_urb(rx_urb);
690 rx_urb_alloc_failed:
691 - dev_err(ictx->dev, "unable to initialize intf0, err %d\n", ret);
692 + dev_err(ictx->dev, "unable to initialize intf1, err %d\n", ret);
693
694 return NULL;
695 }
696 diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
697 index 087f7c0..41fd57b 100644
698 --- a/drivers/media/video/hdpvr/hdpvr-video.c
699 +++ b/drivers/media/video/hdpvr/hdpvr-video.c
700 @@ -283,12 +283,13 @@ static int hdpvr_start_streaming(struct hdpvr_device *dev)
701
702 hdpvr_config_call(dev, CTRL_START_STREAMING_VALUE, 0x00);
703
704 + dev->status = STATUS_STREAMING;
705 +
706 INIT_WORK(&dev->worker, hdpvr_transmit_buffers);
707 queue_work(dev->workqueue, &dev->worker);
708
709 v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev,
710 "streaming started\n");
711 - dev->status = STATUS_STREAMING;
712
713 return 0;
714 }
715 diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
716 index 1e0e27c..e15e47d 100644
717 --- a/drivers/mmc/card/block.c
718 +++ b/drivers/mmc/card/block.c
719 @@ -266,6 +266,9 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
720 goto idata_err;
721 }
722
723 + if (!idata->buf_bytes)
724 + return idata;
725 +
726 idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
727 if (!idata->buf) {
728 err = -ENOMEM;
729 @@ -312,25 +315,6 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
730 if (IS_ERR(idata))
731 return PTR_ERR(idata);
732
733 - cmd.opcode = idata->ic.opcode;
734 - cmd.arg = idata->ic.arg;
735 - cmd.flags = idata->ic.flags;
736 -
737 - data.sg = &sg;
738 - data.sg_len = 1;
739 - data.blksz = idata->ic.blksz;
740 - data.blocks = idata->ic.blocks;
741 -
742 - sg_init_one(data.sg, idata->buf, idata->buf_bytes);
743 -
744 - if (idata->ic.write_flag)
745 - data.flags = MMC_DATA_WRITE;
746 - else
747 - data.flags = MMC_DATA_READ;
748 -
749 - mrq.cmd = &cmd;
750 - mrq.data = &data;
751 -
752 md = mmc_blk_get(bdev->bd_disk);
753 if (!md) {
754 err = -EINVAL;
755 @@ -343,6 +327,48 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
756 goto cmd_done;
757 }
758
759 + cmd.opcode = idata->ic.opcode;
760 + cmd.arg = idata->ic.arg;
761 + cmd.flags = idata->ic.flags;
762 +
763 + if (idata->buf_bytes) {
764 + data.sg = &sg;
765 + data.sg_len = 1;
766 + data.blksz = idata->ic.blksz;
767 + data.blocks = idata->ic.blocks;
768 +
769 + sg_init_one(data.sg, idata->buf, idata->buf_bytes);
770 +
771 + if (idata->ic.write_flag)
772 + data.flags = MMC_DATA_WRITE;
773 + else
774 + data.flags = MMC_DATA_READ;
775 +
776 + /* data.flags must already be set before doing this. */
777 + mmc_set_data_timeout(&data, card);
778 +
779 + /* Allow overriding the timeout_ns for empirical tuning. */
780 + if (idata->ic.data_timeout_ns)
781 + data.timeout_ns = idata->ic.data_timeout_ns;
782 +
783 + if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
784 + /*
785 + * Pretend this is a data transfer and rely on the
786 + * host driver to compute timeout. When all host
787 + * drivers support cmd.cmd_timeout for R1B, this
788 + * can be changed to:
789 + *
790 + * mrq.data = NULL;
791 + * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
792 + */
793 + data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
794 + }
795 +
796 + mrq.data = &data;
797 + }
798 +
799 + mrq.cmd = &cmd;
800 +
801 mmc_claim_host(card->host);
802
803 if (idata->ic.is_acmd) {
804 @@ -351,24 +377,6 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
805 goto cmd_rel_host;
806 }
807
808 - /* data.flags must already be set before doing this. */
809 - mmc_set_data_timeout(&data, card);
810 - /* Allow overriding the timeout_ns for empirical tuning. */
811 - if (idata->ic.data_timeout_ns)
812 - data.timeout_ns = idata->ic.data_timeout_ns;
813 -
814 - if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
815 - /*
816 - * Pretend this is a data transfer and rely on the host driver
817 - * to compute timeout. When all host drivers support
818 - * cmd.cmd_timeout for R1B, this can be changed to:
819 - *
820 - * mrq.data = NULL;
821 - * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
822 - */
823 - data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
824 - }
825 -
826 mmc_wait_for_req(card->host, &mrq);
827
828 if (cmd.error) {
829 diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
830 index 04a3f1b..192b0d1 100644
831 --- a/drivers/net/can/sja1000/sja1000.c
832 +++ b/drivers/net/can/sja1000/sja1000.c
833 @@ -95,11 +95,16 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
834 spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
835 }
836
837 +static int sja1000_is_absent(struct sja1000_priv *priv)
838 +{
839 + return (priv->read_reg(priv, REG_MOD) == 0xFF);
840 +}
841 +
842 static int sja1000_probe_chip(struct net_device *dev)
843 {
844 struct sja1000_priv *priv = netdev_priv(dev);
845
846 - if (priv->reg_base && (priv->read_reg(priv, 0) == 0xFF)) {
847 + if (priv->reg_base && sja1000_is_absent(priv)) {
848 printk(KERN_INFO "%s: probing @0x%lX failed\n",
849 DRV_NAME, dev->base_addr);
850 return 0;
851 @@ -493,6 +498,9 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
852 while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
853 n++;
854 status = priv->read_reg(priv, REG_SR);
855 + /* check for absent controller due to hw unplug */
856 + if (status == 0xFF && sja1000_is_absent(priv))
857 + return IRQ_NONE;
858
859 if (isrc & IRQ_WUI)
860 dev_warn(dev->dev.parent, "wakeup interrupt\n");
861 @@ -509,6 +517,9 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
862 while (status & SR_RBS) {
863 sja1000_rx(dev);
864 status = priv->read_reg(priv, REG_SR);
865 + /* check for absent controller */
866 + if (status == 0xFF && sja1000_is_absent(priv))
867 + return IRQ_NONE;
868 }
869 }
870 if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) {
871 diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
872 index b42c06b..e0c5529 100644
873 --- a/drivers/net/ethernet/3com/3c59x.c
874 +++ b/drivers/net/ethernet/3com/3c59x.c
875 @@ -1842,7 +1842,7 @@ vortex_timer(unsigned long data)
876 ok = 1;
877 }
878
879 - if (!netif_carrier_ok(dev))
880 + if (dev->flags & IFF_SLAVE || !netif_carrier_ok(dev))
881 next_tick = 5*HZ;
882
883 if (vp->medialock)
884 diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
885 index 76b8457..ab784e0 100644
886 --- a/drivers/net/ethernet/jme.c
887 +++ b/drivers/net/ethernet/jme.c
888 @@ -2328,19 +2328,11 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
889 ((new_mtu) < IPV6_MIN_MTU))
890 return -EINVAL;
891
892 - if (new_mtu > 4000) {
893 - jme->reg_rxcs &= ~RXCS_FIFOTHNP;
894 - jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
895 - jme_restart_rx_engine(jme);
896 - } else {
897 - jme->reg_rxcs &= ~RXCS_FIFOTHNP;
898 - jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
899 - jme_restart_rx_engine(jme);
900 - }
901
902 netdev->mtu = new_mtu;
903 netdev_update_features(netdev);
904
905 + jme_restart_rx_engine(jme);
906 jme_reset_link(jme);
907
908 return 0;
909 diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
910 index 4304072..3efc897 100644
911 --- a/drivers/net/ethernet/jme.h
912 +++ b/drivers/net/ethernet/jme.h
913 @@ -730,7 +730,7 @@ enum jme_rxcs_values {
914 RXCS_RETRYCNT_60 = 0x00000F00,
915
916 RXCS_DEFAULT = RXCS_FIFOTHTP_128T |
917 - RXCS_FIFOTHNP_128QW |
918 + RXCS_FIFOTHNP_16QW |
919 RXCS_DMAREQSZ_128B |
920 RXCS_RETRYGAP_256ns |
921 RXCS_RETRYCNT_32,
922 diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
923 index 815c797..22f2788 100644
924 --- a/drivers/net/ethernet/ti/davinci_emac.c
925 +++ b/drivers/net/ethernet/ti/davinci_emac.c
926 @@ -1007,7 +1007,7 @@ static void emac_rx_handler(void *token, int len, int status)
927 int ret;
928
929 /* free and bail if we are shutting down */
930 - if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
931 + if (unlikely(!netif_running(ndev))) {
932 dev_kfree_skb_any(skb);
933 return;
934 }
935 @@ -1036,7 +1036,9 @@ static void emac_rx_handler(void *token, int len, int status)
936 recycle:
937 ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
938 skb_tailroom(skb), GFP_KERNEL);
939 - if (WARN_ON(ret < 0))
940 +
941 + WARN_ON(ret == -ENOMEM);
942 + if (unlikely(ret < 0))
943 dev_kfree_skb_any(skb);
944 }
945
946 diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
947 index 4535d7c..e015a57 100644
948 --- a/drivers/net/ethernet/via/via-velocity.c
949 +++ b/drivers/net/ethernet/via/via-velocity.c
950 @@ -2489,9 +2489,6 @@ static int velocity_close(struct net_device *dev)
951 if (dev->irq != 0)
952 free_irq(dev->irq, dev);
953
954 - /* Power down the chip */
955 - pci_set_power_state(vptr->pdev, PCI_D3hot);
956 -
957 velocity_free_rings(vptr);
958
959 vptr->flags &= (~VELOCITY_FLAGS_OPENED);
960 diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
961 index 13c1f04..ad96164 100644
962 --- a/drivers/net/usb/ipheth.c
963 +++ b/drivers/net/usb/ipheth.c
964 @@ -60,6 +60,7 @@
965 #define USB_PRODUCT_IPHONE_3GS 0x1294
966 #define USB_PRODUCT_IPHONE_4 0x1297
967 #define USB_PRODUCT_IPHONE_4_VZW 0x129c
968 +#define USB_PRODUCT_IPHONE_4S 0x12a0
969
970 #define IPHETH_USBINTF_CLASS 255
971 #define IPHETH_USBINTF_SUBCLASS 253
972 @@ -103,6 +104,10 @@ static struct usb_device_id ipheth_table[] = {
973 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
974 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
975 IPHETH_USBINTF_PROTO) },
976 + { USB_DEVICE_AND_INTERFACE_INFO(
977 + USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S,
978 + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
979 + IPHETH_USBINTF_PROTO) },
980 { }
981 };
982 MODULE_DEVICE_TABLE(usb, ipheth_table);
983 diff --git a/drivers/net/veth.c b/drivers/net/veth.c
984 index ef883e9..b907398 100644
985 --- a/drivers/net/veth.c
986 +++ b/drivers/net/veth.c
987 @@ -423,7 +423,9 @@ static void veth_dellink(struct net_device *dev, struct list_head *head)
988 unregister_netdevice_queue(peer, head);
989 }
990
991 -static const struct nla_policy veth_policy[VETH_INFO_MAX + 1];
992 +static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
993 + [VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
994 +};
995
996 static struct rtnl_link_ops veth_link_ops = {
997 .kind = DRV_NAME,
998 diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
999 index 528d5f3..64af11f 100644
1000 --- a/drivers/net/wireless/ath/ath9k/rc.c
1001 +++ b/drivers/net/wireless/ath/ath9k/rc.c
1002 @@ -1347,7 +1347,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1003 fc = hdr->frame_control;
1004 for (i = 0; i < sc->hw->max_rates; i++) {
1005 struct ieee80211_tx_rate *rate = &tx_info->status.rates[i];
1006 - if (!rate->count)
1007 + if (rate->idx < 0 || !rate->count)
1008 break;
1009
1010 final_ts_idx = i;
1011 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
1012 index 04e74f4..dfee1b3 100644
1013 --- a/drivers/pci/probe.c
1014 +++ b/drivers/pci/probe.c
1015 @@ -651,6 +651,11 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1016 dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
1017 secondary, subordinate, pass);
1018
1019 + if (!primary && (primary != bus->number) && secondary && subordinate) {
1020 + dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
1021 + primary = bus->number;
1022 + }
1023 +
1024 /* Check if setup is sensible at all */
1025 if (!pass &&
1026 (primary != bus->number || secondary <= bus->number)) {
1027 diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
1028 index d329f8b..2aeaf5c 100644
1029 --- a/drivers/scsi/scsi_pm.c
1030 +++ b/drivers/scsi/scsi_pm.c
1031 @@ -7,6 +7,7 @@
1032
1033 #include <linux/pm_runtime.h>
1034 #include <linux/export.h>
1035 +#include <linux/async.h>
1036
1037 #include <scsi/scsi.h>
1038 #include <scsi/scsi_device.h>
1039 @@ -69,6 +70,19 @@ static int scsi_bus_resume_common(struct device *dev)
1040 return err;
1041 }
1042
1043 +static int scsi_bus_prepare(struct device *dev)
1044 +{
1045 + if (scsi_is_sdev_device(dev)) {
1046 + /* sd probing uses async_schedule. Wait until it finishes. */
1047 + async_synchronize_full();
1048 +
1049 + } else if (scsi_is_host_device(dev)) {
1050 + /* Wait until async scanning is finished */
1051 + scsi_complete_async_scans();
1052 + }
1053 + return 0;
1054 +}
1055 +
1056 static int scsi_bus_suspend(struct device *dev)
1057 {
1058 return scsi_bus_suspend_common(dev, PMSG_SUSPEND);
1059 @@ -87,6 +101,7 @@ static int scsi_bus_poweroff(struct device *dev)
1060 #else /* CONFIG_PM_SLEEP */
1061
1062 #define scsi_bus_resume_common NULL
1063 +#define scsi_bus_prepare NULL
1064 #define scsi_bus_suspend NULL
1065 #define scsi_bus_freeze NULL
1066 #define scsi_bus_poweroff NULL
1067 @@ -195,6 +210,7 @@ void scsi_autopm_put_host(struct Scsi_Host *shost)
1068 #endif /* CONFIG_PM_RUNTIME */
1069
1070 const struct dev_pm_ops scsi_bus_pm_ops = {
1071 + .prepare = scsi_bus_prepare,
1072 .suspend = scsi_bus_suspend,
1073 .resume = scsi_bus_resume_common,
1074 .freeze = scsi_bus_freeze,
1075 diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
1076 index 2a58895..5b475d0 100644
1077 --- a/drivers/scsi/scsi_priv.h
1078 +++ b/drivers/scsi/scsi_priv.h
1079 @@ -110,6 +110,7 @@ extern void scsi_exit_procfs(void);
1080 #endif /* CONFIG_PROC_FS */
1081
1082 /* scsi_scan.c */
1083 +extern int scsi_complete_async_scans(void);
1084 extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
1085 unsigned int, unsigned int, int);
1086 extern void scsi_forget_host(struct Scsi_Host *);
1087 diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
1088 index b3c6d95..6e7ea4a 100644
1089 --- a/drivers/scsi/scsi_scan.c
1090 +++ b/drivers/scsi/scsi_scan.c
1091 @@ -1815,6 +1815,7 @@ static void scsi_finish_async_scan(struct async_scan_data *data)
1092 }
1093 spin_unlock(&async_scan_lock);
1094
1095 + scsi_autopm_put_host(shost);
1096 scsi_host_put(shost);
1097 kfree(data);
1098 }
1099 @@ -1841,7 +1842,6 @@ static int do_scan_async(void *_data)
1100
1101 do_scsi_scan_host(shost);
1102 scsi_finish_async_scan(data);
1103 - scsi_autopm_put_host(shost);
1104 return 0;
1105 }
1106
1107 @@ -1869,7 +1869,7 @@ void scsi_scan_host(struct Scsi_Host *shost)
1108 p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no);
1109 if (IS_ERR(p))
1110 do_scan_async(data);
1111 - /* scsi_autopm_put_host(shost) is called in do_scan_async() */
1112 + /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
1113 }
1114 EXPORT_SYMBOL(scsi_scan_host);
1115
1116 diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
1117 index 1dcbef4..1d24512 100644
1118 --- a/drivers/target/target_core_alua.c
1119 +++ b/drivers/target/target_core_alua.c
1120 @@ -79,7 +79,7 @@ int target_emulate_report_target_port_groups(struct se_task *task)
1121 return -EINVAL;
1122 }
1123
1124 - buf = transport_kmap_first_data_page(cmd);
1125 + buf = transport_kmap_data_sg(cmd);
1126
1127 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1128 list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
1129 @@ -164,7 +164,7 @@ int target_emulate_report_target_port_groups(struct se_task *task)
1130 buf[2] = ((rd_len >> 8) & 0xff);
1131 buf[3] = (rd_len & 0xff);
1132
1133 - transport_kunmap_first_data_page(cmd);
1134 + transport_kunmap_data_sg(cmd);
1135
1136 task->task_scsi_status = GOOD;
1137 transport_complete_task(task, 1);
1138 @@ -195,7 +195,7 @@ int target_emulate_set_target_port_groups(struct se_task *task)
1139 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1140 return -EINVAL;
1141 }
1142 - buf = transport_kmap_first_data_page(cmd);
1143 + buf = transport_kmap_data_sg(cmd);
1144
1145 /*
1146 * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
1147 @@ -352,7 +352,7 @@ int target_emulate_set_target_port_groups(struct se_task *task)
1148 }
1149
1150 out:
1151 - transport_kunmap_first_data_page(cmd);
1152 + transport_kunmap_data_sg(cmd);
1153 task->task_scsi_status = GOOD;
1154 transport_complete_task(task, 1);
1155 return 0;
1156 diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
1157 index 251e48f..8facd33 100644
1158 --- a/drivers/target/target_core_cdb.c
1159 +++ b/drivers/target/target_core_cdb.c
1160 @@ -82,7 +82,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
1161 return -EINVAL;
1162 }
1163
1164 - buf = transport_kmap_first_data_page(cmd);
1165 + buf = transport_kmap_data_sg(cmd);
1166
1167 if (dev == tpg->tpg_virt_lun0.lun_se_dev) {
1168 buf[0] = 0x3f; /* Not connected */
1169 @@ -135,7 +135,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
1170 buf[4] = 31; /* Set additional length to 31 */
1171
1172 out:
1173 - transport_kunmap_first_data_page(cmd);
1174 + transport_kunmap_data_sg(cmd);
1175 return 0;
1176 }
1177
1178 @@ -726,7 +726,7 @@ int target_emulate_inquiry(struct se_task *task)
1179 return -EINVAL;
1180 }
1181
1182 - buf = transport_kmap_first_data_page(cmd);
1183 + buf = transport_kmap_data_sg(cmd);
1184
1185 buf[0] = dev->transport->get_device_type(dev);
1186
1187 @@ -743,7 +743,7 @@ int target_emulate_inquiry(struct se_task *task)
1188 ret = -EINVAL;
1189
1190 out_unmap:
1191 - transport_kunmap_first_data_page(cmd);
1192 + transport_kunmap_data_sg(cmd);
1193 out:
1194 if (!ret) {
1195 task->task_scsi_status = GOOD;
1196 @@ -765,7 +765,7 @@ int target_emulate_readcapacity(struct se_task *task)
1197 else
1198 blocks = (u32)blocks_long;
1199
1200 - buf = transport_kmap_first_data_page(cmd);
1201 + buf = transport_kmap_data_sg(cmd);
1202
1203 buf[0] = (blocks >> 24) & 0xff;
1204 buf[1] = (blocks >> 16) & 0xff;
1205 @@ -781,7 +781,7 @@ int target_emulate_readcapacity(struct se_task *task)
1206 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
1207 put_unaligned_be32(0xFFFFFFFF, &buf[0]);
1208
1209 - transport_kunmap_first_data_page(cmd);
1210 + transport_kunmap_data_sg(cmd);
1211
1212 task->task_scsi_status = GOOD;
1213 transport_complete_task(task, 1);
1214 @@ -795,7 +795,7 @@ int target_emulate_readcapacity_16(struct se_task *task)
1215 unsigned char *buf;
1216 unsigned long long blocks = dev->transport->get_blocks(dev);
1217
1218 - buf = transport_kmap_first_data_page(cmd);
1219 + buf = transport_kmap_data_sg(cmd);
1220
1221 buf[0] = (blocks >> 56) & 0xff;
1222 buf[1] = (blocks >> 48) & 0xff;
1223 @@ -816,7 +816,7 @@ int target_emulate_readcapacity_16(struct se_task *task)
1224 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
1225 buf[14] = 0x80;
1226
1227 - transport_kunmap_first_data_page(cmd);
1228 + transport_kunmap_data_sg(cmd);
1229
1230 task->task_scsi_status = GOOD;
1231 transport_complete_task(task, 1);
1232 @@ -1029,9 +1029,9 @@ int target_emulate_modesense(struct se_task *task)
1233 offset = cmd->data_length;
1234 }
1235
1236 - rbuf = transport_kmap_first_data_page(cmd);
1237 + rbuf = transport_kmap_data_sg(cmd);
1238 memcpy(rbuf, buf, offset);
1239 - transport_kunmap_first_data_page(cmd);
1240 + transport_kunmap_data_sg(cmd);
1241
1242 task->task_scsi_status = GOOD;
1243 transport_complete_task(task, 1);
1244 @@ -1053,7 +1053,7 @@ int target_emulate_request_sense(struct se_task *task)
1245 return -ENOSYS;
1246 }
1247
1248 - buf = transport_kmap_first_data_page(cmd);
1249 + buf = transport_kmap_data_sg(cmd);
1250
1251 if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
1252 /*
1253 @@ -1099,7 +1099,7 @@ int target_emulate_request_sense(struct se_task *task)
1254 }
1255
1256 end:
1257 - transport_kunmap_first_data_page(cmd);
1258 + transport_kunmap_data_sg(cmd);
1259 task->task_scsi_status = GOOD;
1260 transport_complete_task(task, 1);
1261 return 0;
1262 @@ -1133,7 +1133,7 @@ int target_emulate_unmap(struct se_task *task)
1263 dl = get_unaligned_be16(&cdb[0]);
1264 bd_dl = get_unaligned_be16(&cdb[2]);
1265
1266 - buf = transport_kmap_first_data_page(cmd);
1267 + buf = transport_kmap_data_sg(cmd);
1268
1269 ptr = &buf[offset];
1270 pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
1271 @@ -1157,7 +1157,7 @@ int target_emulate_unmap(struct se_task *task)
1272 }
1273
1274 err:
1275 - transport_kunmap_first_data_page(cmd);
1276 + transport_kunmap_data_sg(cmd);
1277 if (!ret) {
1278 task->task_scsi_status = GOOD;
1279 transport_complete_task(task, 1);
1280 diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
1281 index 9b86394..19f8aca 100644
1282 --- a/drivers/target/target_core_device.c
1283 +++ b/drivers/target/target_core_device.c
1284 @@ -658,7 +658,7 @@ int target_report_luns(struct se_task *se_task)
1285 unsigned char *buf;
1286 u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
1287
1288 - buf = transport_kmap_first_data_page(se_cmd);
1289 + buf = (unsigned char *) transport_kmap_data_sg(se_cmd);
1290
1291 /*
1292 * If no struct se_session pointer is present, this struct se_cmd is
1293 @@ -696,7 +696,7 @@ int target_report_luns(struct se_task *se_task)
1294 * See SPC3 r07, page 159.
1295 */
1296 done:
1297 - transport_kunmap_first_data_page(se_cmd);
1298 + transport_kunmap_data_sg(se_cmd);
1299 lun_count *= 8;
1300 buf[0] = ((lun_count >> 24) & 0xff);
1301 buf[1] = ((lun_count >> 16) & 0xff);
1302 diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
1303 index 9119d92..778c1a6 100644
1304 --- a/drivers/target/target_core_pr.c
1305 +++ b/drivers/target/target_core_pr.c
1306 @@ -1538,7 +1538,7 @@ static int core_scsi3_decode_spec_i_port(
1307 tidh_new->dest_local_nexus = 1;
1308 list_add_tail(&tidh_new->dest_list, &tid_dest_list);
1309
1310 - buf = transport_kmap_first_data_page(cmd);
1311 + buf = transport_kmap_data_sg(cmd);
1312 /*
1313 * For a PERSISTENT RESERVE OUT specify initiator ports payload,
1314 * first extract TransportID Parameter Data Length, and make sure
1315 @@ -1789,7 +1789,7 @@ static int core_scsi3_decode_spec_i_port(
1316
1317 }
1318
1319 - transport_kunmap_first_data_page(cmd);
1320 + transport_kunmap_data_sg(cmd);
1321
1322 /*
1323 * Go ahead and create a registrations from tid_dest_list for the
1324 @@ -1837,7 +1837,7 @@ static int core_scsi3_decode_spec_i_port(
1325
1326 return 0;
1327 out:
1328 - transport_kunmap_first_data_page(cmd);
1329 + transport_kunmap_data_sg(cmd);
1330 /*
1331 * For the failure case, release everything from tid_dest_list
1332 * including *dest_pr_reg and the configfs dependances..
1333 @@ -3429,14 +3429,14 @@ static int core_scsi3_emulate_pro_register_and_move(
1334 * will be moved to for the TransportID containing SCSI initiator WWN
1335 * information.
1336 */
1337 - buf = transport_kmap_first_data_page(cmd);
1338 + buf = transport_kmap_data_sg(cmd);
1339 rtpi = (buf[18] & 0xff) << 8;
1340 rtpi |= buf[19] & 0xff;
1341 tid_len = (buf[20] & 0xff) << 24;
1342 tid_len |= (buf[21] & 0xff) << 16;
1343 tid_len |= (buf[22] & 0xff) << 8;
1344 tid_len |= buf[23] & 0xff;
1345 - transport_kunmap_first_data_page(cmd);
1346 + transport_kunmap_data_sg(cmd);
1347 buf = NULL;
1348
1349 if ((tid_len + 24) != cmd->data_length) {
1350 @@ -3488,7 +3488,7 @@ static int core_scsi3_emulate_pro_register_and_move(
1351 return -EINVAL;
1352 }
1353
1354 - buf = transport_kmap_first_data_page(cmd);
1355 + buf = transport_kmap_data_sg(cmd);
1356 proto_ident = (buf[24] & 0x0f);
1357 #if 0
1358 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
1359 @@ -3522,7 +3522,7 @@ static int core_scsi3_emulate_pro_register_and_move(
1360 goto out;
1361 }
1362
1363 - transport_kunmap_first_data_page(cmd);
1364 + transport_kunmap_data_sg(cmd);
1365 buf = NULL;
1366
1367 pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s"
1368 @@ -3787,13 +3787,13 @@ after_iport_check:
1369 " REGISTER_AND_MOVE\n");
1370 }
1371
1372 - transport_kunmap_first_data_page(cmd);
1373 + transport_kunmap_data_sg(cmd);
1374
1375 core_scsi3_put_pr_reg(dest_pr_reg);
1376 return 0;
1377 out:
1378 if (buf)
1379 - transport_kunmap_first_data_page(cmd);
1380 + transport_kunmap_data_sg(cmd);
1381 if (dest_se_deve)
1382 core_scsi3_lunacl_undepend_item(dest_se_deve);
1383 if (dest_node_acl)
1384 @@ -3867,7 +3867,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
1385 scope = (cdb[2] & 0xf0);
1386 type = (cdb[2] & 0x0f);
1387
1388 - buf = transport_kmap_first_data_page(cmd);
1389 + buf = transport_kmap_data_sg(cmd);
1390 /*
1391 * From PERSISTENT_RESERVE_OUT parameter list (payload)
1392 */
1393 @@ -3885,7 +3885,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
1394 aptpl = (buf[17] & 0x01);
1395 unreg = (buf[17] & 0x02);
1396 }
1397 - transport_kunmap_first_data_page(cmd);
1398 + transport_kunmap_data_sg(cmd);
1399 buf = NULL;
1400
1401 /*
1402 @@ -3985,7 +3985,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
1403 return -EINVAL;
1404 }
1405
1406 - buf = transport_kmap_first_data_page(cmd);
1407 + buf = transport_kmap_data_sg(cmd);
1408 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
1409 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
1410 buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
1411 @@ -4019,7 +4019,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
1412 buf[6] = ((add_len >> 8) & 0xff);
1413 buf[7] = (add_len & 0xff);
1414
1415 - transport_kunmap_first_data_page(cmd);
1416 + transport_kunmap_data_sg(cmd);
1417
1418 return 0;
1419 }
1420 @@ -4045,7 +4045,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
1421 return -EINVAL;
1422 }
1423
1424 - buf = transport_kmap_first_data_page(cmd);
1425 + buf = transport_kmap_data_sg(cmd);
1426 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
1427 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
1428 buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
1429 @@ -4104,7 +4104,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
1430
1431 err:
1432 spin_unlock(&se_dev->dev_reservation_lock);
1433 - transport_kunmap_first_data_page(cmd);
1434 + transport_kunmap_data_sg(cmd);
1435
1436 return 0;
1437 }
1438 @@ -4128,7 +4128,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
1439 return -EINVAL;
1440 }
1441
1442 - buf = transport_kmap_first_data_page(cmd);
1443 + buf = transport_kmap_data_sg(cmd);
1444
1445 buf[0] = ((add_len << 8) & 0xff);
1446 buf[1] = (add_len & 0xff);
1447 @@ -4160,7 +4160,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
1448 buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
1449 buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
1450
1451 - transport_kunmap_first_data_page(cmd);
1452 + transport_kunmap_data_sg(cmd);
1453
1454 return 0;
1455 }
1456 @@ -4190,7 +4190,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
1457 return -EINVAL;
1458 }
1459
1460 - buf = transport_kmap_first_data_page(cmd);
1461 + buf = transport_kmap_data_sg(cmd);
1462
1463 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
1464 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
1465 @@ -4311,7 +4311,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
1466 buf[6] = ((add_len >> 8) & 0xff);
1467 buf[7] = (add_len & 0xff);
1468
1469 - transport_kunmap_first_data_page(cmd);
1470 + transport_kunmap_data_sg(cmd);
1471
1472 return 0;
1473 }
1474 diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
1475 index 8b15e56..5c12137 100644
1476 --- a/drivers/target/target_core_pscsi.c
1477 +++ b/drivers/target/target_core_pscsi.c
1478 @@ -695,7 +695,7 @@ static int pscsi_transport_complete(struct se_task *task)
1479
1480 if (task->task_se_cmd->se_deve->lun_flags &
1481 TRANSPORT_LUNFLAGS_READ_ONLY) {
1482 - unsigned char *buf = transport_kmap_first_data_page(task->task_se_cmd);
1483 + unsigned char *buf = transport_kmap_data_sg(task->task_se_cmd);
1484
1485 if (cdb[0] == MODE_SENSE_10) {
1486 if (!(buf[3] & 0x80))
1487 @@ -705,7 +705,7 @@ static int pscsi_transport_complete(struct se_task *task)
1488 buf[2] |= 0x80;
1489 }
1490
1491 - transport_kunmap_first_data_page(task->task_se_cmd);
1492 + transport_kunmap_data_sg(task->task_se_cmd);
1493 }
1494 }
1495 after_mode_sense:
1496 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
1497 index 861628e..e4ddb93 100644
1498 --- a/drivers/target/target_core_transport.c
1499 +++ b/drivers/target/target_core_transport.c
1500 @@ -3053,11 +3053,6 @@ static int transport_generic_cmd_sequencer(
1501 (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
1502 goto out_unsupported_cdb;
1503
1504 - /* Let's limit control cdbs to a page, for simplicity's sake. */
1505 - if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
1506 - size > PAGE_SIZE)
1507 - goto out_invalid_cdb_field;
1508 -
1509 transport_set_supported_SAM_opcode(cmd);
1510 return ret;
1511
1512 @@ -3435,9 +3430,11 @@ int transport_generic_map_mem_to_cmd(
1513 }
1514 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
1515
1516 -void *transport_kmap_first_data_page(struct se_cmd *cmd)
1517 +void *transport_kmap_data_sg(struct se_cmd *cmd)
1518 {
1519 struct scatterlist *sg = cmd->t_data_sg;
1520 + struct page **pages;
1521 + int i;
1522
1523 BUG_ON(!sg);
1524 /*
1525 @@ -3445,15 +3442,41 @@ void *transport_kmap_first_data_page(struct se_cmd *cmd)
1526 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
1527 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
1528 */
1529 - return kmap(sg_page(sg)) + sg->offset;
1530 + if (!cmd->t_data_nents)
1531 + return NULL;
1532 + else if (cmd->t_data_nents == 1)
1533 + return kmap(sg_page(sg)) + sg->offset;
1534 +
1535 + /* >1 page. use vmap */
1536 + pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
1537 + if (!pages)
1538 + return NULL;
1539 +
1540 + /* convert sg[] to pages[] */
1541 + for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
1542 + pages[i] = sg_page(sg);
1543 + }
1544 +
1545 + cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
1546 + kfree(pages);
1547 + if (!cmd->t_data_vmap)
1548 + return NULL;
1549 +
1550 + return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
1551 }
1552 -EXPORT_SYMBOL(transport_kmap_first_data_page);
1553 +EXPORT_SYMBOL(transport_kmap_data_sg);
1554
1555 -void transport_kunmap_first_data_page(struct se_cmd *cmd)
1556 +void transport_kunmap_data_sg(struct se_cmd *cmd)
1557 {
1558 - kunmap(sg_page(cmd->t_data_sg));
1559 + if (!cmd->t_data_nents)
1560 + return;
1561 + else if (cmd->t_data_nents == 1)
1562 + kunmap(sg_page(cmd->t_data_sg));
1563 +
1564 + vunmap(cmd->t_data_vmap);
1565 + cmd->t_data_vmap = NULL;
1566 }
1567 -EXPORT_SYMBOL(transport_kunmap_first_data_page);
1568 +EXPORT_SYMBOL(transport_kunmap_data_sg);
1569
1570 static int
1571 transport_generic_get_mem(struct se_cmd *cmd)
1572 diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
1573 index a004db3..61d08dd 100644
1574 --- a/drivers/usb/core/hcd-pci.c
1575 +++ b/drivers/usb/core/hcd-pci.c
1576 @@ -187,7 +187,10 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1577 return -ENODEV;
1578 dev->current_state = PCI_D0;
1579
1580 - if (!dev->irq) {
1581 + /* The xHCI driver supports MSI and MSI-X,
1582 + * so don't fail if the BIOS doesn't provide a legacy IRQ.
1583 + */
1584 + if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) {
1585 dev_err(&dev->dev,
1586 "Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
1587 pci_name(dev));
1588 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
1589 index 179e364..8cb9304 100644
1590 --- a/drivers/usb/core/hcd.c
1591 +++ b/drivers/usb/core/hcd.c
1592 @@ -2465,8 +2465,10 @@ int usb_add_hcd(struct usb_hcd *hcd,
1593 && device_can_wakeup(&hcd->self.root_hub->dev))
1594 dev_dbg(hcd->self.controller, "supports USB remote wakeup\n");
1595
1596 - /* enable irqs just before we start the controller */
1597 - if (usb_hcd_is_primary_hcd(hcd)) {
1598 + /* enable irqs just before we start the controller,
1599 + * if the BIOS provides legacy PCI irqs.
1600 + */
1601 + if (usb_hcd_is_primary_hcd(hcd) && irqnum) {
1602 retval = usb_hcd_request_irqs(hcd, irqnum, irqflags);
1603 if (retval)
1604 goto err_request_irq;
1605 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
1606 index 7978146..bc06a8f 100644
1607 --- a/drivers/usb/core/hub.c
1608 +++ b/drivers/usb/core/hub.c
1609 @@ -705,10 +705,26 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1610 if (type == HUB_INIT3)
1611 goto init3;
1612
1613 - /* After a resume, port power should still be on.
1614 + /* The superspeed hub except for root hub has to use Hub Depth
1615 + * value as an offset into the route string to locate the bits
1616 + * it uses to determine the downstream port number. So hub driver
1617 + * should send a set hub depth request to superspeed hub after
1618 + * the superspeed hub is set configuration in initialization or
1619 + * reset procedure.
1620 + *
1621 + * After a resume, port power should still be on.
1622 * For any other type of activation, turn it on.
1623 */
1624 if (type != HUB_RESUME) {
1625 + if (hdev->parent && hub_is_superspeed(hdev)) {
1626 + ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
1627 + HUB_SET_DEPTH, USB_RT_HUB,
1628 + hdev->level - 1, 0, NULL, 0,
1629 + USB_CTRL_SET_TIMEOUT);
1630 + if (ret < 0)
1631 + dev_err(hub->intfdev,
1632 + "set hub depth failed\n");
1633 + }
1634
1635 /* Speed up system boot by using a delayed_work for the
1636 * hub's initial power-up delays. This is pretty awkward
1637 @@ -987,18 +1003,6 @@ static int hub_configure(struct usb_hub *hub,
1638 goto fail;
1639 }
1640
1641 - if (hub_is_superspeed(hdev) && (hdev->parent != NULL)) {
1642 - ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
1643 - HUB_SET_DEPTH, USB_RT_HUB,
1644 - hdev->level - 1, 0, NULL, 0,
1645 - USB_CTRL_SET_TIMEOUT);
1646 -
1647 - if (ret < 0) {
1648 - message = "can't set hub depth";
1649 - goto fail;
1650 - }
1651 - }
1652 -
1653 /* Request the entire hub descriptor.
1654 * hub->descriptor can handle USB_MAXCHILDREN ports,
1655 * but the hub can/will return fewer bytes here.
1656 diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
1657 index ac53a66..7732d69 100644
1658 --- a/drivers/usb/host/pci-quirks.c
1659 +++ b/drivers/usb/host/pci-quirks.c
1660 @@ -872,7 +872,17 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
1661 */
1662 if (pdev->vendor == 0x184e) /* vendor Netlogic */
1663 return;
1664 + if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
1665 + pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
1666 + pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
1667 + pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
1668 + return;
1669
1670 + if (pci_enable_device(pdev) < 0) {
1671 + dev_warn(&pdev->dev, "Can't enable PCI device, "
1672 + "BIOS handoff failed.\n");
1673 + return;
1674 + }
1675 if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
1676 quirk_usb_handoff_uhci(pdev);
1677 else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
1678 @@ -881,5 +891,6 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
1679 quirk_usb_disable_ehci(pdev);
1680 else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
1681 quirk_usb_handoff_xhci(pdev);
1682 + pci_disable_device(pdev);
1683 }
1684 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
1685 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
1686 index 430e88f..a8b2980 100644
1687 --- a/drivers/usb/host/xhci-hub.c
1688 +++ b/drivers/usb/host/xhci-hub.c
1689 @@ -95,7 +95,7 @@ static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
1690 */
1691 memset(port_removable, 0, sizeof(port_removable));
1692 for (i = 0; i < ports; i++) {
1693 - portsc = xhci_readl(xhci, xhci->usb3_ports[i]);
1694 + portsc = xhci_readl(xhci, xhci->usb2_ports[i]);
1695 /* If a device is removable, PORTSC reports a 0, same as in the
1696 * hub descriptor DeviceRemovable bits.
1697 */
1698 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
1699 index 0e4b25f..c69cf54 100644
1700 --- a/drivers/usb/host/xhci-mem.c
1701 +++ b/drivers/usb/host/xhci-mem.c
1702 @@ -1140,26 +1140,42 @@ static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
1703 }
1704
1705 /*
1706 - * Convert bInterval expressed in frames (in 1-255 range) to exponent of
1707 + * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
1708 * microframes, rounded down to nearest power of 2.
1709 */
1710 -static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
1711 - struct usb_host_endpoint *ep)
1712 +static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
1713 + struct usb_host_endpoint *ep, unsigned int desc_interval,
1714 + unsigned int min_exponent, unsigned int max_exponent)
1715 {
1716 unsigned int interval;
1717
1718 - interval = fls(8 * ep->desc.bInterval) - 1;
1719 - interval = clamp_val(interval, 3, 10);
1720 - if ((1 << interval) != 8 * ep->desc.bInterval)
1721 + interval = fls(desc_interval) - 1;
1722 + interval = clamp_val(interval, min_exponent, max_exponent);
1723 + if ((1 << interval) != desc_interval)
1724 dev_warn(&udev->dev,
1725 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
1726 ep->desc.bEndpointAddress,
1727 1 << interval,
1728 - 8 * ep->desc.bInterval);
1729 + desc_interval);
1730
1731 return interval;
1732 }
1733
1734 +static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
1735 + struct usb_host_endpoint *ep)
1736 +{
1737 + return xhci_microframes_to_exponent(udev, ep,
1738 + ep->desc.bInterval, 0, 15);
1739 +}
1740 +
1741 +
1742 +static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
1743 + struct usb_host_endpoint *ep)
1744 +{
1745 + return xhci_microframes_to_exponent(udev, ep,
1746 + ep->desc.bInterval * 8, 3, 10);
1747 +}
1748 +
1749 /* Return the polling or NAK interval.
1750 *
1751 * The polling interval is expressed in "microframes". If xHCI's Interval field
1752 @@ -1178,7 +1194,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
1753 /* Max NAK rate */
1754 if (usb_endpoint_xfer_control(&ep->desc) ||
1755 usb_endpoint_xfer_bulk(&ep->desc)) {
1756 - interval = ep->desc.bInterval;
1757 + interval = xhci_parse_microframe_interval(udev, ep);
1758 break;
1759 }
1760 /* Fall through - SS and HS isoc/int have same decoding */
1761 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
1762 index b33f059..034f554 100644
1763 --- a/drivers/usb/host/xhci.c
1764 +++ b/drivers/usb/host/xhci.c
1765 @@ -352,6 +352,11 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
1766 /* hcd->irq is -1, we have MSI */
1767 return 0;
1768
1769 + if (!pdev->irq) {
1770 + xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
1771 + return -EINVAL;
1772 + }
1773 +
1774 /* fall back to legacy interrupt*/
1775 ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
1776 hcd->irq_descr, hcd);
1777 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1778 index a515237..33d25d4 100644
1779 --- a/drivers/usb/serial/cp210x.c
1780 +++ b/drivers/usb/serial/cp210x.c
1781 @@ -136,6 +136,8 @@ static const struct usb_device_id id_table[] = {
1782 { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
1783 { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */
1784 { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
1785 + { USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */
1786 + { USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */
1787 { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
1788 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
1789 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
1790 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1791 index 338d082..68fa8c7 100644
1792 --- a/drivers/usb/serial/option.c
1793 +++ b/drivers/usb/serial/option.c
1794 @@ -788,7 +788,6 @@ static const struct usb_device_id option_ids[] = {
1795 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff),
1796 .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
1797 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
1798 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) },
1799 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
1800 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
1801 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
1802 @@ -803,7 +802,6 @@ static const struct usb_device_id option_ids[] = {
1803 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
1804 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff),
1805 .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
1806 - /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, */
1807 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
1808 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
1809 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
1810 @@ -828,7 +826,6 @@ static const struct usb_device_id option_ids[] = {
1811 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
1812 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff),
1813 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1814 - /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0053, 0xff, 0xff, 0xff) }, */
1815 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
1816 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff),
1817 .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
1818 @@ -836,7 +833,6 @@ static const struct usb_device_id option_ids[] = {
1819 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
1820 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff),
1821 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1822 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
1823 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
1824 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
1825 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff),
1826 @@ -846,7 +842,6 @@ static const struct usb_device_id option_ids[] = {
1827 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
1828 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0067, 0xff, 0xff, 0xff) },
1829 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) },
1830 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
1831 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) },
1832 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0077, 0xff, 0xff, 0xff) },
1833 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) },
1834 @@ -865,8 +860,6 @@ static const struct usb_device_id option_ids[] = {
1835 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0095, 0xff, 0xff, 0xff) },
1836 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) },
1837 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) },
1838 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0098, 0xff, 0xff, 0xff) },
1839 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0099, 0xff, 0xff, 0xff) },
1840 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff),
1841 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1842 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
1843 @@ -887,28 +880,18 @@ static const struct usb_device_id option_ids[] = {
1844 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
1845 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) },
1846 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0145, 0xff, 0xff, 0xff) },
1847 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0146, 0xff, 0xff, 0xff) },
1848 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
1849 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0148, 0xff, 0xff, 0xff) },
1850 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0149, 0xff, 0xff, 0xff) },
1851 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0150, 0xff, 0xff, 0xff) },
1852 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) },
1853 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
1854 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) },
1855 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
1856 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
1857 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) },
1858 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
1859 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
1860 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
1861 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
1862 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
1863 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) },
1864 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
1865 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) },
1866 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
1867 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) },
1868 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) },
1869 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
1870 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
1871 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
1872 @@ -1083,127 +1066,27 @@ static const struct usb_device_id option_ids[] = {
1873 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
1874 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
1875 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
1876 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff) },
1877 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff) },
1878 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1403, 0xff, 0xff, 0xff) },
1879 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1404, 0xff, 0xff, 0xff) },
1880 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1405, 0xff, 0xff, 0xff) },
1881 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1406, 0xff, 0xff, 0xff) },
1882 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1407, 0xff, 0xff, 0xff) },
1883 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1408, 0xff, 0xff, 0xff) },
1884 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1409, 0xff, 0xff, 0xff) },
1885 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1410, 0xff, 0xff, 0xff) },
1886 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1411, 0xff, 0xff, 0xff) },
1887 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1412, 0xff, 0xff, 0xff) },
1888 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1413, 0xff, 0xff, 0xff) },
1889 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1414, 0xff, 0xff, 0xff) },
1890 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1415, 0xff, 0xff, 0xff) },
1891 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1416, 0xff, 0xff, 0xff) },
1892 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1417, 0xff, 0xff, 0xff) },
1893 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1418, 0xff, 0xff, 0xff) },
1894 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1419, 0xff, 0xff, 0xff) },
1895 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1420, 0xff, 0xff, 0xff) },
1896 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1421, 0xff, 0xff, 0xff) },
1897 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1422, 0xff, 0xff, 0xff) },
1898 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1423, 0xff, 0xff, 0xff) },
1899 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff) },
1900 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff) },
1901 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff) },
1902 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1427, 0xff, 0xff, 0xff) },
1903 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff) },
1904 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1429, 0xff, 0xff, 0xff) },
1905 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1430, 0xff, 0xff, 0xff) },
1906 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1431, 0xff, 0xff, 0xff) },
1907 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1432, 0xff, 0xff, 0xff) },
1908 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1433, 0xff, 0xff, 0xff) },
1909 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1434, 0xff, 0xff, 0xff) },
1910 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1435, 0xff, 0xff, 0xff) },
1911 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1436, 0xff, 0xff, 0xff) },
1912 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1437, 0xff, 0xff, 0xff) },
1913 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1438, 0xff, 0xff, 0xff) },
1914 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1439, 0xff, 0xff, 0xff) },
1915 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1440, 0xff, 0xff, 0xff) },
1916 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1441, 0xff, 0xff, 0xff) },
1917 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1442, 0xff, 0xff, 0xff) },
1918 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1443, 0xff, 0xff, 0xff) },
1919 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1444, 0xff, 0xff, 0xff) },
1920 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1445, 0xff, 0xff, 0xff) },
1921 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1446, 0xff, 0xff, 0xff) },
1922 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1447, 0xff, 0xff, 0xff) },
1923 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1448, 0xff, 0xff, 0xff) },
1924 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1449, 0xff, 0xff, 0xff) },
1925 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1450, 0xff, 0xff, 0xff) },
1926 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1451, 0xff, 0xff, 0xff) },
1927 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1452, 0xff, 0xff, 0xff) },
1928 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1453, 0xff, 0xff, 0xff) },
1929 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1454, 0xff, 0xff, 0xff) },
1930 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1455, 0xff, 0xff, 0xff) },
1931 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1456, 0xff, 0xff, 0xff) },
1932 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1457, 0xff, 0xff, 0xff) },
1933 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1458, 0xff, 0xff, 0xff) },
1934 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1459, 0xff, 0xff, 0xff) },
1935 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1460, 0xff, 0xff, 0xff) },
1936 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1461, 0xff, 0xff, 0xff) },
1937 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1462, 0xff, 0xff, 0xff) },
1938 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1463, 0xff, 0xff, 0xff) },
1939 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1464, 0xff, 0xff, 0xff) },
1940 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1465, 0xff, 0xff, 0xff) },
1941 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1466, 0xff, 0xff, 0xff) },
1942 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1467, 0xff, 0xff, 0xff) },
1943 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1468, 0xff, 0xff, 0xff) },
1944 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1469, 0xff, 0xff, 0xff) },
1945 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1470, 0xff, 0xff, 0xff) },
1946 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1471, 0xff, 0xff, 0xff) },
1947 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1472, 0xff, 0xff, 0xff) },
1948 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1473, 0xff, 0xff, 0xff) },
1949 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1474, 0xff, 0xff, 0xff) },
1950 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1475, 0xff, 0xff, 0xff) },
1951 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1476, 0xff, 0xff, 0xff) },
1952 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1477, 0xff, 0xff, 0xff) },
1953 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1478, 0xff, 0xff, 0xff) },
1954 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1479, 0xff, 0xff, 0xff) },
1955 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1480, 0xff, 0xff, 0xff) },
1956 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0xff, 0xff) },
1957 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1482, 0xff, 0xff, 0xff) },
1958 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1483, 0xff, 0xff, 0xff) },
1959 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1484, 0xff, 0xff, 0xff) },
1960 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1485, 0xff, 0xff, 0xff) },
1961 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1486, 0xff, 0xff, 0xff) },
1962 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1487, 0xff, 0xff, 0xff) },
1963 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1488, 0xff, 0xff, 0xff) },
1964 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1489, 0xff, 0xff, 0xff) },
1965 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1490, 0xff, 0xff, 0xff) },
1966 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1491, 0xff, 0xff, 0xff) },
1967 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1492, 0xff, 0xff, 0xff) },
1968 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1493, 0xff, 0xff, 0xff) },
1969 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1494, 0xff, 0xff, 0xff) },
1970 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1495, 0xff, 0xff, 0xff) },
1971 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1496, 0xff, 0xff, 0xff) },
1972 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1497, 0xff, 0xff, 0xff) },
1973 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1498, 0xff, 0xff, 0xff) },
1974 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1499, 0xff, 0xff, 0xff) },
1975 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1500, 0xff, 0xff, 0xff) },
1976 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1501, 0xff, 0xff, 0xff) },
1977 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1502, 0xff, 0xff, 0xff) },
1978 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1503, 0xff, 0xff, 0xff) },
1979 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1504, 0xff, 0xff, 0xff) },
1980 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1505, 0xff, 0xff, 0xff) },
1981 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1506, 0xff, 0xff, 0xff) },
1982 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1507, 0xff, 0xff, 0xff) },
1983 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1508, 0xff, 0xff, 0xff) },
1984 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1509, 0xff, 0xff, 0xff) },
1985 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1510, 0xff, 0xff, 0xff) },
1986 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
1987 + 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
1988 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
1989 +
1990 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
1991 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
1992 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
1993 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
1994 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
1995 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
1996 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
1997 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
1998 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff) },
1999 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
2000 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
2001 - 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
2002 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
2003 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
2004 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
2005 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) },
2006 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
2007 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) },
2008 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) },
2009 +
2010 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
2011 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
2012 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
2013 diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
2014 index ea84456..21c82b0 100644
2015 --- a/drivers/usb/serial/ti_usb_3410_5052.c
2016 +++ b/drivers/usb/serial/ti_usb_3410_5052.c
2017 @@ -165,7 +165,7 @@ static unsigned int product_5052_count;
2018 /* the array dimension is the number of default entries plus */
2019 /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */
2020 /* null entry */
2021 -static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = {
2022 +static struct usb_device_id ti_id_table_3410[14+TI_EXTRA_VID_PID_COUNT+1] = {
2023 { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
2024 { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
2025 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
2026 @@ -179,6 +179,7 @@ static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = {
2027 { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
2028 { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
2029 { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
2030 + { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
2031 };
2032
2033 static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
2034 @@ -188,7 +189,7 @@ static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
2035 { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
2036 };
2037
2038 -static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1] = {
2039 +static struct usb_device_id ti_id_table_combined[18+2*TI_EXTRA_VID_PID_COUNT+1] = {
2040 { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
2041 { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
2042 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
2043 @@ -206,6 +207,7 @@ static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1]
2044 { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
2045 { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
2046 { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
2047 + { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
2048 { }
2049 };
2050
2051 diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
2052 index 2aac195..f140f1b 100644
2053 --- a/drivers/usb/serial/ti_usb_3410_5052.h
2054 +++ b/drivers/usb/serial/ti_usb_3410_5052.h
2055 @@ -49,6 +49,10 @@
2056 #define MTS_MT9234ZBA_PRODUCT_ID 0xF115
2057 #define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319
2058
2059 +/* Abbott Diabetics vendor and product ids */
2060 +#define ABBOTT_VENDOR_ID 0x1a61
2061 +#define ABBOTT_PRODUCT_ID 0x3410
2062 +
2063 /* Commands */
2064 #define TI_GET_VERSION 0x01
2065 #define TI_GET_PORT_STATUS 0x02
2066 diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
2067 index 9e069ef..db51ba1 100644
2068 --- a/drivers/usb/storage/usb.c
2069 +++ b/drivers/usb/storage/usb.c
2070 @@ -788,15 +788,19 @@ static void quiesce_and_remove_host(struct us_data *us)
2071 struct Scsi_Host *host = us_to_host(us);
2072
2073 /* If the device is really gone, cut short reset delays */
2074 - if (us->pusb_dev->state == USB_STATE_NOTATTACHED)
2075 + if (us->pusb_dev->state == USB_STATE_NOTATTACHED) {
2076 set_bit(US_FLIDX_DISCONNECTING, &us->dflags);
2077 + wake_up(&us->delay_wait);
2078 + }
2079
2080 - /* Prevent SCSI-scanning (if it hasn't started yet)
2081 - * and wait for the SCSI-scanning thread to stop.
2082 + /* Prevent SCSI scanning (if it hasn't started yet)
2083 + * or wait for the SCSI-scanning routine to stop.
2084 */
2085 - set_bit(US_FLIDX_DONT_SCAN, &us->dflags);
2086 - wake_up(&us->delay_wait);
2087 - wait_for_completion(&us->scanning_done);
2088 + cancel_delayed_work_sync(&us->scan_dwork);
2089 +
2090 + /* Balance autopm calls if scanning was cancelled */
2091 + if (test_bit(US_FLIDX_SCAN_PENDING, &us->dflags))
2092 + usb_autopm_put_interface_no_suspend(us->pusb_intf);
2093
2094 /* Removing the host will perform an orderly shutdown: caches
2095 * synchronized, disks spun down, etc.
2096 @@ -823,52 +827,28 @@ static void release_everything(struct us_data *us)
2097 scsi_host_put(us_to_host(us));
2098 }
2099
2100 -/* Thread to carry out delayed SCSI-device scanning */
2101 -static int usb_stor_scan_thread(void * __us)
2102 +/* Delayed-work routine to carry out SCSI-device scanning */
2103 +static void usb_stor_scan_dwork(struct work_struct *work)
2104 {
2105 - struct us_data *us = (struct us_data *)__us;
2106 + struct us_data *us = container_of(work, struct us_data,
2107 + scan_dwork.work);
2108 struct device *dev = &us->pusb_intf->dev;
2109
2110 - dev_dbg(dev, "device found\n");
2111 + dev_dbg(dev, "starting scan\n");
2112
2113 - set_freezable_with_signal();
2114 - /*
2115 - * Wait for the timeout to expire or for a disconnect
2116 - *
2117 - * We can't freeze in this thread or we risk causing khubd to
2118 - * fail to freeze, but we can't be non-freezable either. Nor can
2119 - * khubd freeze while waiting for scanning to complete as it may
2120 - * hold the device lock, causing a hang when suspending devices.
2121 - * So we request a fake signal when freezing and use
2122 - * interruptible sleep to kick us out of our wait early when
2123 - * freezing happens.
2124 - */
2125 - if (delay_use > 0) {
2126 - dev_dbg(dev, "waiting for device to settle "
2127 - "before scanning\n");
2128 - wait_event_interruptible_timeout(us->delay_wait,
2129 - test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
2130 - delay_use * HZ);
2131 + /* For bulk-only devices, determine the max LUN value */
2132 + if (us->protocol == USB_PR_BULK && !(us->fflags & US_FL_SINGLE_LUN)) {
2133 + mutex_lock(&us->dev_mutex);
2134 + us->max_lun = usb_stor_Bulk_max_lun(us);
2135 + mutex_unlock(&us->dev_mutex);
2136 }
2137 + scsi_scan_host(us_to_host(us));
2138 + dev_dbg(dev, "scan complete\n");
2139
2140 - /* If the device is still connected, perform the scanning */
2141 - if (!test_bit(US_FLIDX_DONT_SCAN, &us->dflags)) {
2142 -
2143 - /* For bulk-only devices, determine the max LUN value */
2144 - if (us->protocol == USB_PR_BULK &&
2145 - !(us->fflags & US_FL_SINGLE_LUN)) {
2146 - mutex_lock(&us->dev_mutex);
2147 - us->max_lun = usb_stor_Bulk_max_lun(us);
2148 - mutex_unlock(&us->dev_mutex);
2149 - }
2150 - scsi_scan_host(us_to_host(us));
2151 - dev_dbg(dev, "scan complete\n");
2152 -
2153 - /* Should we unbind if no devices were detected? */
2154 - }
2155 + /* Should we unbind if no devices were detected? */
2156
2157 usb_autopm_put_interface(us->pusb_intf);
2158 - complete_and_exit(&us->scanning_done, 0);
2159 + clear_bit(US_FLIDX_SCAN_PENDING, &us->dflags);
2160 }
2161
2162 static unsigned int usb_stor_sg_tablesize(struct usb_interface *intf)
2163 @@ -915,7 +895,7 @@ int usb_stor_probe1(struct us_data **pus,
2164 init_completion(&us->cmnd_ready);
2165 init_completion(&(us->notify));
2166 init_waitqueue_head(&us->delay_wait);
2167 - init_completion(&us->scanning_done);
2168 + INIT_DELAYED_WORK(&us->scan_dwork, usb_stor_scan_dwork);
2169
2170 /* Associate the us_data structure with the USB device */
2171 result = associate_dev(us, intf);
2172 @@ -946,7 +926,6 @@ EXPORT_SYMBOL_GPL(usb_stor_probe1);
2173 /* Second part of general USB mass-storage probing */
2174 int usb_stor_probe2(struct us_data *us)
2175 {
2176 - struct task_struct *th;
2177 int result;
2178 struct device *dev = &us->pusb_intf->dev;
2179
2180 @@ -987,20 +966,14 @@ int usb_stor_probe2(struct us_data *us)
2181 goto BadDevice;
2182 }
2183
2184 - /* Start up the thread for delayed SCSI-device scanning */
2185 - th = kthread_create(usb_stor_scan_thread, us, "usb-stor-scan");
2186 - if (IS_ERR(th)) {
2187 - dev_warn(dev,
2188 - "Unable to start the device-scanning thread\n");
2189 - complete(&us->scanning_done);
2190 - quiesce_and_remove_host(us);
2191 - result = PTR_ERR(th);
2192 - goto BadDevice;
2193 - }
2194 -
2195 + /* Submit the delayed_work for SCSI-device scanning */
2196 usb_autopm_get_interface_no_resume(us->pusb_intf);
2197 - wake_up_process(th);
2198 + set_bit(US_FLIDX_SCAN_PENDING, &us->dflags);
2199
2200 + if (delay_use > 0)
2201 + dev_dbg(dev, "waiting for device to settle before scanning\n");
2202 + queue_delayed_work(system_freezable_wq, &us->scan_dwork,
2203 + delay_use * HZ);
2204 return 0;
2205
2206 /* We come here if there are any problems */
2207 diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
2208 index 7b0f211..75f70f0 100644
2209 --- a/drivers/usb/storage/usb.h
2210 +++ b/drivers/usb/storage/usb.h
2211 @@ -47,6 +47,7 @@
2212 #include <linux/blkdev.h>
2213 #include <linux/completion.h>
2214 #include <linux/mutex.h>
2215 +#include <linux/workqueue.h>
2216 #include <scsi/scsi_host.h>
2217
2218 struct us_data;
2219 @@ -72,7 +73,7 @@ struct us_unusual_dev {
2220 #define US_FLIDX_DISCONNECTING 3 /* disconnect in progress */
2221 #define US_FLIDX_RESETTING 4 /* device reset in progress */
2222 #define US_FLIDX_TIMED_OUT 5 /* SCSI midlayer timed out */
2223 -#define US_FLIDX_DONT_SCAN 6 /* don't scan (disconnect) */
2224 +#define US_FLIDX_SCAN_PENDING 6 /* scanning not yet done */
2225 #define US_FLIDX_REDO_READ10 7 /* redo READ(10) command */
2226 #define US_FLIDX_READ10_WORKED 8 /* previous READ(10) succeeded */
2227
2228 @@ -147,8 +148,8 @@ struct us_data {
2229 /* mutual exclusion and synchronization structures */
2230 struct completion cmnd_ready; /* to sleep thread on */
2231 struct completion notify; /* thread begin/end */
2232 - wait_queue_head_t delay_wait; /* wait during scan, reset */
2233 - struct completion scanning_done; /* wait for scan thread */
2234 + wait_queue_head_t delay_wait; /* wait during reset */
2235 + struct delayed_work scan_dwork; /* for async scanning */
2236
2237 /* subdriver information */
2238 void *extra; /* Any extra data */
2239 diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
2240 index 976ac23..c04205c 100644
2241 --- a/drivers/video/omap2/dss/dpi.c
2242 +++ b/drivers/video/omap2/dss/dpi.c
2243 @@ -180,6 +180,11 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
2244 {
2245 int r;
2246
2247 + if (cpu_is_omap34xx() && !dpi.vdds_dsi_reg) {
2248 + DSSERR("no VDSS_DSI regulator\n");
2249 + return -ENODEV;
2250 + }
2251 +
2252 if (dssdev->manager == NULL) {
2253 DSSERR("failed to enable display: no manager\n");
2254 return -ENODEV;
2255 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
2256 index d2039ca..af11098 100644
2257 --- a/fs/ecryptfs/inode.c
2258 +++ b/fs/ecryptfs/inode.c
2259 @@ -1104,6 +1104,8 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
2260 }
2261
2262 rc = vfs_setxattr(lower_dentry, name, value, size, flags);
2263 + if (!rc)
2264 + fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode);
2265 out:
2266 return rc;
2267 }
2268 diff --git a/fs/eventpoll.c b/fs/eventpoll.c
2269 index 828e750..ea54cde 100644
2270 --- a/fs/eventpoll.c
2271 +++ b/fs/eventpoll.c
2272 @@ -197,6 +197,12 @@ struct eventpoll {
2273
2274 /* The user that created the eventpoll descriptor */
2275 struct user_struct *user;
2276 +
2277 + struct file *file;
2278 +
2279 + /* used to optimize loop detection check */
2280 + int visited;
2281 + struct list_head visited_list_link;
2282 };
2283
2284 /* Wait structure used by the poll hooks */
2285 @@ -255,6 +261,15 @@ static struct kmem_cache *epi_cache __read_mostly;
2286 /* Slab cache used to allocate "struct eppoll_entry" */
2287 static struct kmem_cache *pwq_cache __read_mostly;
2288
2289 +/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
2290 +static LIST_HEAD(visited_list);
2291 +
2292 +/*
2293 + * List of files with newly added links, where we may need to limit the number
2294 + * of emanating paths. Protected by the epmutex.
2295 + */
2296 +static LIST_HEAD(tfile_check_list);
2297 +
2298 #ifdef CONFIG_SYSCTL
2299
2300 #include <linux/sysctl.h>
2301 @@ -276,6 +291,12 @@ ctl_table epoll_table[] = {
2302 };
2303 #endif /* CONFIG_SYSCTL */
2304
2305 +static const struct file_operations eventpoll_fops;
2306 +
2307 +static inline int is_file_epoll(struct file *f)
2308 +{
2309 + return f->f_op == &eventpoll_fops;
2310 +}
2311
2312 /* Setup the structure that is used as key for the RB tree */
2313 static inline void ep_set_ffd(struct epoll_filefd *ffd,
2314 @@ -299,6 +320,11 @@ static inline int ep_is_linked(struct list_head *p)
2315 return !list_empty(p);
2316 }
2317
2318 +static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p)
2319 +{
2320 + return container_of(p, struct eppoll_entry, wait);
2321 +}
2322 +
2323 /* Get the "struct epitem" from a wait queue pointer */
2324 static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
2325 {
2326 @@ -446,6 +472,18 @@ static void ep_poll_safewake(wait_queue_head_t *wq)
2327 put_cpu();
2328 }
2329
2330 +static void ep_remove_wait_queue(struct eppoll_entry *pwq)
2331 +{
2332 + wait_queue_head_t *whead;
2333 +
2334 + rcu_read_lock();
2335 + /* If it is cleared by POLLFREE, it should be rcu-safe */
2336 + whead = rcu_dereference(pwq->whead);
2337 + if (whead)
2338 + remove_wait_queue(whead, &pwq->wait);
2339 + rcu_read_unlock();
2340 +}
2341 +
2342 /*
2343 * This function unregisters poll callbacks from the associated file
2344 * descriptor. Must be called with "mtx" held (or "epmutex" if called from
2345 @@ -460,7 +498,7 @@ static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
2346 pwq = list_first_entry(lsthead, struct eppoll_entry, llink);
2347
2348 list_del(&pwq->llink);
2349 - remove_wait_queue(pwq->whead, &pwq->wait);
2350 + ep_remove_wait_queue(pwq);
2351 kmem_cache_free(pwq_cache, pwq);
2352 }
2353 }
2354 @@ -711,12 +749,6 @@ static const struct file_operations eventpoll_fops = {
2355 .llseek = noop_llseek,
2356 };
2357
2358 -/* Fast test to see if the file is an eventpoll file */
2359 -static inline int is_file_epoll(struct file *f)
2360 -{
2361 - return f->f_op == &eventpoll_fops;
2362 -}
2363 -
2364 /*
2365 * This is called from eventpoll_release() to unlink files from the eventpoll
2366 * interface. We need to have this facility to cleanup correctly files that are
2367 @@ -827,6 +859,17 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
2368 struct epitem *epi = ep_item_from_wait(wait);
2369 struct eventpoll *ep = epi->ep;
2370
2371 + if ((unsigned long)key & POLLFREE) {
2372 + ep_pwq_from_wait(wait)->whead = NULL;
2373 + /*
2374 + * whead = NULL above can race with ep_remove_wait_queue()
2375 + * which can do another remove_wait_queue() after us, so we
2376 + * can't use __remove_wait_queue(). whead->lock is held by
2377 + * the caller.
2378 + */
2379 + list_del_init(&wait->task_list);
2380 + }
2381 +
2382 spin_lock_irqsave(&ep->lock, flags);
2383
2384 /*
2385 @@ -926,6 +969,99 @@ static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
2386 rb_insert_color(&epi->rbn, &ep->rbr);
2387 }
2388
2389 +
2390 +
2391 +#define PATH_ARR_SIZE 5
2392 +/*
2393 + * These are the number paths of length 1 to 5, that we are allowing to emanate
2394 + * from a single file of interest. For example, we allow 1000 paths of length
2395 + * 1, to emanate from each file of interest. This essentially represents the
2396 + * potential wakeup paths, which need to be limited in order to avoid massive
2397 + * uncontrolled wakeup storms. The common use case should be a single ep which
2398 + * is connected to n file sources. In this case each file source has 1 path
2399 + * of length 1. Thus, the numbers below should be more than sufficient. These
2400 + * path limits are enforced during an EPOLL_CTL_ADD operation, since a modify
2401 + * and delete can't add additional paths. Protected by the epmutex.
2402 + */
2403 +static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
2404 +static int path_count[PATH_ARR_SIZE];
2405 +
2406 +static int path_count_inc(int nests)
2407 +{
2408 + if (++path_count[nests] > path_limits[nests])
2409 + return -1;
2410 + return 0;
2411 +}
2412 +
2413 +static void path_count_init(void)
2414 +{
2415 + int i;
2416 +
2417 + for (i = 0; i < PATH_ARR_SIZE; i++)
2418 + path_count[i] = 0;
2419 +}
2420 +
2421 +static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
2422 +{
2423 + int error = 0;
2424 + struct file *file = priv;
2425 + struct file *child_file;
2426 + struct epitem *epi;
2427 +
2428 + list_for_each_entry(epi, &file->f_ep_links, fllink) {
2429 + child_file = epi->ep->file;
2430 + if (is_file_epoll(child_file)) {
2431 + if (list_empty(&child_file->f_ep_links)) {
2432 + if (path_count_inc(call_nests)) {
2433 + error = -1;
2434 + break;
2435 + }
2436 + } else {
2437 + error = ep_call_nested(&poll_loop_ncalls,
2438 + EP_MAX_NESTS,
2439 + reverse_path_check_proc,
2440 + child_file, child_file,
2441 + current);
2442 + }
2443 + if (error != 0)
2444 + break;
2445 + } else {
2446 + printk(KERN_ERR "reverse_path_check_proc: "
2447 + "file is not an ep!\n");
2448 + }
2449 + }
2450 + return error;
2451 +}
2452 +
2453 +/**
2454 + * reverse_path_check - The tfile_check_list is list of file *, which have
2455 + * links that are proposed to be newly added. We need to
2456 + * make sure that those added links don't add too many
2457 + * paths such that we will spend all our time waking up
2458 + * eventpoll objects.
2459 + *
2460 + * Returns: Returns zero if the proposed links don't create too many paths,
2461 + * -1 otherwise.
2462 + */
2463 +static int reverse_path_check(void)
2464 +{
2465 + int length = 0;
2466 + int error = 0;
2467 + struct file *current_file;
2468 +
2469 + /* let's call this for all tfiles */
2470 + list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) {
2471 + length++;
2472 + path_count_init();
2473 + error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
2474 + reverse_path_check_proc, current_file,
2475 + current_file, current);
2476 + if (error)
2477 + break;
2478 + }
2479 + return error;
2480 +}
2481 +
2482 /*
2483 * Must be called with "mtx" held.
2484 */
2485 @@ -987,6 +1123,11 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
2486 */
2487 ep_rbtree_insert(ep, epi);
2488
2489 + /* now check if we've created too many backpaths */
2490 + error = -EINVAL;
2491 + if (reverse_path_check())
2492 + goto error_remove_epi;
2493 +
2494 /* We have to drop the new item inside our item list to keep track of it */
2495 spin_lock_irqsave(&ep->lock, flags);
2496
2497 @@ -1011,6 +1152,14 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
2498
2499 return 0;
2500
2501 +error_remove_epi:
2502 + spin_lock(&tfile->f_lock);
2503 + if (ep_is_linked(&epi->fllink))
2504 + list_del_init(&epi->fllink);
2505 + spin_unlock(&tfile->f_lock);
2506 +
2507 + rb_erase(&epi->rbn, &ep->rbr);
2508 +
2509 error_unregister:
2510 ep_unregister_pollwait(ep, epi);
2511
2512 @@ -1275,18 +1424,36 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
2513 int error = 0;
2514 struct file *file = priv;
2515 struct eventpoll *ep = file->private_data;
2516 + struct eventpoll *ep_tovisit;
2517 struct rb_node *rbp;
2518 struct epitem *epi;
2519
2520 mutex_lock_nested(&ep->mtx, call_nests + 1);
2521 + ep->visited = 1;
2522 + list_add(&ep->visited_list_link, &visited_list);
2523 for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
2524 epi = rb_entry(rbp, struct epitem, rbn);
2525 if (unlikely(is_file_epoll(epi->ffd.file))) {
2526 + ep_tovisit = epi->ffd.file->private_data;
2527 + if (ep_tovisit->visited)
2528 + continue;
2529 error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
2530 - ep_loop_check_proc, epi->ffd.file,
2531 - epi->ffd.file->private_data, current);
2532 + ep_loop_check_proc, epi->ffd.file,
2533 + ep_tovisit, current);
2534 if (error != 0)
2535 break;
2536 + } else {
2537 + /*
2538 + * If we've reached a file that is not associated with
2539 + * an ep, then we need to check if the newly added
2540 + * links are going to add too many wakeup paths. We do
2541 + * this by adding it to the tfile_check_list, if it's
2542 + * not already there, and calling reverse_path_check()
2543 + * during ep_insert().
2544 + */
2545 + if (list_empty(&epi->ffd.file->f_tfile_llink))
2546 + list_add(&epi->ffd.file->f_tfile_llink,
2547 + &tfile_check_list);
2548 }
2549 }
2550 mutex_unlock(&ep->mtx);
2551 @@ -1307,8 +1474,31 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
2552 */
2553 static int ep_loop_check(struct eventpoll *ep, struct file *file)
2554 {
2555 - return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
2556 + int ret;
2557 + struct eventpoll *ep_cur, *ep_next;
2558 +
2559 + ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
2560 ep_loop_check_proc, file, ep, current);
2561 + /* clear visited list */
2562 + list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
2563 + visited_list_link) {
2564 + ep_cur->visited = 0;
2565 + list_del(&ep_cur->visited_list_link);
2566 + }
2567 + return ret;
2568 +}
2569 +
2570 +static void clear_tfile_check_list(void)
2571 +{
2572 + struct file *file;
2573 +
2574 + /* first clear the tfile_check_list */
2575 + while (!list_empty(&tfile_check_list)) {
2576 + file = list_first_entry(&tfile_check_list, struct file,
2577 + f_tfile_llink);
2578 + list_del_init(&file->f_tfile_llink);
2579 + }
2580 + INIT_LIST_HEAD(&tfile_check_list);
2581 }
2582
2583 /*
2584 @@ -1316,8 +1506,9 @@ static int ep_loop_check(struct eventpoll *ep, struct file *file)
2585 */
2586 SYSCALL_DEFINE1(epoll_create1, int, flags)
2587 {
2588 - int error;
2589 + int error, fd;
2590 struct eventpoll *ep = NULL;
2591 + struct file *file;
2592
2593 /* Check the EPOLL_* constant for consistency. */
2594 BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
2595 @@ -1334,11 +1525,25 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
2596 * Creates all the items needed to setup an eventpoll file. That is,
2597 * a file structure and a free file descriptor.
2598 */
2599 - error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep,
2600 + fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
2601 + if (fd < 0) {
2602 + error = fd;
2603 + goto out_free_ep;
2604 + }
2605 + file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
2606 O_RDWR | (flags & O_CLOEXEC));
2607 - if (error < 0)
2608 - ep_free(ep);
2609 -
2610 + if (IS_ERR(file)) {
2611 + error = PTR_ERR(file);
2612 + goto out_free_fd;
2613 + }
2614 + fd_install(fd, file);
2615 + ep->file = file;
2616 + return fd;
2617 +
2618 +out_free_fd:
2619 + put_unused_fd(fd);
2620 +out_free_ep:
2621 + ep_free(ep);
2622 return error;
2623 }
2624
2625 @@ -1404,21 +1609,27 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
2626 /*
2627 * When we insert an epoll file descriptor, inside another epoll file
2628 * descriptor, there is the chance of creating closed loops, which are
2629 - * better be handled here, than in more critical paths.
2630 + * better be handled here, than in more critical paths. While we are
2631 + * checking for loops we also determine the list of files reachable
2632 + * and hang them on the tfile_check_list, so we can check that we
2633 + * haven't created too many possible wakeup paths.
2634 *
2635 - * We hold epmutex across the loop check and the insert in this case, in
2636 - * order to prevent two separate inserts from racing and each doing the
2637 - * insert "at the same time" such that ep_loop_check passes on both
2638 - * before either one does the insert, thereby creating a cycle.
2639 + * We need to hold the epmutex across both ep_insert and ep_remove
2640 + * b/c we want to make sure we are looking at a coherent view of
2641 + * epoll network.
2642 */
2643 - if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) {
2644 + if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) {
2645 mutex_lock(&epmutex);
2646 did_lock_epmutex = 1;
2647 - error = -ELOOP;
2648 - if (ep_loop_check(ep, tfile) != 0)
2649 - goto error_tgt_fput;
2650 }
2651 -
2652 + if (op == EPOLL_CTL_ADD) {
2653 + if (is_file_epoll(tfile)) {
2654 + error = -ELOOP;
2655 + if (ep_loop_check(ep, tfile) != 0)
2656 + goto error_tgt_fput;
2657 + } else
2658 + list_add(&tfile->f_tfile_llink, &tfile_check_list);
2659 + }
2660
2661 mutex_lock_nested(&ep->mtx, 0);
2662
2663 @@ -1437,6 +1648,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
2664 error = ep_insert(ep, &epds, tfile, fd);
2665 } else
2666 error = -EEXIST;
2667 + clear_tfile_check_list();
2668 break;
2669 case EPOLL_CTL_DEL:
2670 if (epi)
2671 @@ -1455,7 +1667,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
2672 mutex_unlock(&ep->mtx);
2673
2674 error_tgt_fput:
2675 - if (unlikely(did_lock_epmutex))
2676 + if (did_lock_epmutex)
2677 mutex_unlock(&epmutex);
2678
2679 fput(tfile);
2680 diff --git a/fs/namei.c b/fs/namei.c
2681 index 5008f01..744e942 100644
2682 --- a/fs/namei.c
2683 +++ b/fs/namei.c
2684 @@ -1094,8 +1094,10 @@ static struct dentry *d_inode_lookup(struct dentry *parent, struct dentry *dentr
2685 struct dentry *old;
2686
2687 /* Don't create child dentry for a dead directory. */
2688 - if (unlikely(IS_DEADDIR(inode)))
2689 + if (unlikely(IS_DEADDIR(inode))) {
2690 + dput(dentry);
2691 return ERR_PTR(-ENOENT);
2692 + }
2693
2694 old = inode->i_op->lookup(inode, dentry, nd);
2695 if (unlikely(old)) {
2696 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2697 index 055d702..e527030 100644
2698 --- a/fs/nfs/nfs4proc.c
2699 +++ b/fs/nfs/nfs4proc.c
2700 @@ -3568,8 +3568,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
2701 }
2702 if (npages > 1) {
2703 /* for decoding across pages */
2704 - args.acl_scratch = alloc_page(GFP_KERNEL);
2705 - if (!args.acl_scratch)
2706 + res.acl_scratch = alloc_page(GFP_KERNEL);
2707 + if (!res.acl_scratch)
2708 goto out_free;
2709 }
2710 args.acl_len = npages * PAGE_SIZE;
2711 @@ -3605,8 +3605,8 @@ out_free:
2712 for (i = 0; i < npages; i++)
2713 if (pages[i])
2714 __free_page(pages[i]);
2715 - if (args.acl_scratch)
2716 - __free_page(args.acl_scratch);
2717 + if (res.acl_scratch)
2718 + __free_page(res.acl_scratch);
2719 return ret;
2720 }
2721
2722 @@ -4876,8 +4876,10 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
2723 clp->cl_rpcclient->cl_auth->au_flavor);
2724
2725 res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL);
2726 - if (unlikely(!res.server_scope))
2727 - return -ENOMEM;
2728 + if (unlikely(!res.server_scope)) {
2729 + status = -ENOMEM;
2730 + goto out;
2731 + }
2732
2733 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
2734 if (!status)
2735 @@ -4894,12 +4896,13 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
2736 clp->server_scope = NULL;
2737 }
2738
2739 - if (!clp->server_scope)
2740 + if (!clp->server_scope) {
2741 clp->server_scope = res.server_scope;
2742 - else
2743 - kfree(res.server_scope);
2744 + goto out;
2745 + }
2746 }
2747 -
2748 + kfree(res.server_scope);
2749 +out:
2750 dprintk("<-- %s status= %d\n", __func__, status);
2751 return status;
2752 }
2753 diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
2754 index 6a7107a..a58eed7 100644
2755 --- a/fs/nfs/nfs4state.c
2756 +++ b/fs/nfs/nfs4state.c
2757 @@ -1071,6 +1071,8 @@ void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4
2758 {
2759 struct nfs_client *clp = server->nfs_client;
2760
2761 + if (test_and_clear_bit(NFS_DELEGATED_STATE, &state->flags))
2762 + nfs_async_inode_return_delegation(state->inode, &state->stateid);
2763 nfs4_state_mark_reclaim_nograce(clp, state);
2764 nfs4_schedule_state_manager(clp);
2765 }
2766 diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
2767 index dcaf693..68adab4 100644
2768 --- a/fs/nfs/nfs4xdr.c
2769 +++ b/fs/nfs/nfs4xdr.c
2770 @@ -2522,7 +2522,6 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
2771
2772 xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
2773 args->acl_pages, args->acl_pgbase, args->acl_len);
2774 - xdr_set_scratch_buffer(xdr, page_address(args->acl_scratch), PAGE_SIZE);
2775
2776 encode_nops(&hdr);
2777 }
2778 @@ -6034,6 +6033,10 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
2779 struct compound_hdr hdr;
2780 int status;
2781
2782 + if (res->acl_scratch != NULL) {
2783 + void *p = page_address(res->acl_scratch);
2784 + xdr_set_scratch_buffer(xdr, p, PAGE_SIZE);
2785 + }
2786 status = decode_compound_hdr(xdr, &hdr);
2787 if (status)
2788 goto out;
2789 diff --git a/fs/signalfd.c b/fs/signalfd.c
2790 index 492465b..7ae2a57 100644
2791 --- a/fs/signalfd.c
2792 +++ b/fs/signalfd.c
2793 @@ -30,6 +30,21 @@
2794 #include <linux/signalfd.h>
2795 #include <linux/syscalls.h>
2796
2797 +void signalfd_cleanup(struct sighand_struct *sighand)
2798 +{
2799 + wait_queue_head_t *wqh = &sighand->signalfd_wqh;
2800 + /*
2801 + * The lockless check can race with remove_wait_queue() in progress,
2802 + * but in this case its caller should run under rcu_read_lock() and
2803 + * sighand_cachep is SLAB_DESTROY_BY_RCU, we can safely return.
2804 + */
2805 + if (likely(!waitqueue_active(wqh)))
2806 + return;
2807 +
2808 + /* wait_queue_t->func(POLLFREE) should do remove_wait_queue() */
2809 + wake_up_poll(wqh, POLLHUP | POLLFREE);
2810 +}
2811 +
2812 struct signalfd_ctx {
2813 sigset_t sigmask;
2814 };
2815 diff --git a/include/asm-generic/poll.h b/include/asm-generic/poll.h
2816 index 44bce83..9ce7f44 100644
2817 --- a/include/asm-generic/poll.h
2818 +++ b/include/asm-generic/poll.h
2819 @@ -28,6 +28,8 @@
2820 #define POLLRDHUP 0x2000
2821 #endif
2822
2823 +#define POLLFREE 0x4000 /* currently only for epoll */
2824 +
2825 struct pollfd {
2826 int fd;
2827 short events;
2828 diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
2829 index f362733..657ab55 100644
2830 --- a/include/linux/eventpoll.h
2831 +++ b/include/linux/eventpoll.h
2832 @@ -61,6 +61,7 @@ struct file;
2833 static inline void eventpoll_init_file(struct file *file)
2834 {
2835 INIT_LIST_HEAD(&file->f_ep_links);
2836 + INIT_LIST_HEAD(&file->f_tfile_llink);
2837 }
2838
2839
2840 diff --git a/include/linux/fs.h b/include/linux/fs.h
2841 index e0bc4ff..10b2288 100644
2842 --- a/include/linux/fs.h
2843 +++ b/include/linux/fs.h
2844 @@ -1001,6 +1001,7 @@ struct file {
2845 #ifdef CONFIG_EPOLL
2846 /* Used by fs/eventpoll.c to link all the hooks to this file */
2847 struct list_head f_ep_links;
2848 + struct list_head f_tfile_llink;
2849 #endif /* #ifdef CONFIG_EPOLL */
2850 struct address_space *f_mapping;
2851 #ifdef CONFIG_DEBUG_WRITECOUNT
2852 diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
2853 index 6c898af..41116ab 100644
2854 --- a/include/linux/nfs_xdr.h
2855 +++ b/include/linux/nfs_xdr.h
2856 @@ -602,7 +602,6 @@ struct nfs_getaclargs {
2857 size_t acl_len;
2858 unsigned int acl_pgbase;
2859 struct page ** acl_pages;
2860 - struct page * acl_scratch;
2861 struct nfs4_sequence_args seq_args;
2862 };
2863
2864 @@ -612,6 +611,7 @@ struct nfs_getaclres {
2865 size_t acl_len;
2866 size_t acl_data_offset;
2867 int acl_flags;
2868 + struct page * acl_scratch;
2869 struct nfs4_sequence_res seq_res;
2870 };
2871
2872 diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h
2873 index 3ff4961..247399b 100644
2874 --- a/include/linux/signalfd.h
2875 +++ b/include/linux/signalfd.h
2876 @@ -61,13 +61,16 @@ static inline void signalfd_notify(struct task_struct *tsk, int sig)
2877 wake_up(&tsk->sighand->signalfd_wqh);
2878 }
2879
2880 +extern void signalfd_cleanup(struct sighand_struct *sighand);
2881 +
2882 #else /* CONFIG_SIGNALFD */
2883
2884 static inline void signalfd_notify(struct task_struct *tsk, int sig) { }
2885
2886 +static inline void signalfd_cleanup(struct sighand_struct *sighand) { }
2887 +
2888 #endif /* CONFIG_SIGNALFD */
2889
2890 #endif /* __KERNEL__ */
2891
2892 #endif /* _LINUX_SIGNALFD_H */
2893 -
2894 diff --git a/include/linux/usb/ch11.h b/include/linux/usb/ch11.h
2895 index 4ebaf08..1eb735b 100644
2896 --- a/include/linux/usb/ch11.h
2897 +++ b/include/linux/usb/ch11.h
2898 @@ -62,12 +62,6 @@
2899 #define USB_PORT_FEAT_TEST 21
2900 #define USB_PORT_FEAT_INDICATOR 22
2901 #define USB_PORT_FEAT_C_PORT_L1 23
2902 -#define USB_PORT_FEAT_C_PORT_LINK_STATE 25
2903 -#define USB_PORT_FEAT_C_PORT_CONFIG_ERROR 26
2904 -#define USB_PORT_FEAT_PORT_REMOTE_WAKE_MASK 27
2905 -#define USB_PORT_FEAT_BH_PORT_RESET 28
2906 -#define USB_PORT_FEAT_C_BH_PORT_RESET 29
2907 -#define USB_PORT_FEAT_FORCE_LINKPM_ACCEPT 30
2908
2909 /*
2910 * Port feature selectors added by USB 3.0 spec.
2911 @@ -76,8 +70,8 @@
2912 #define USB_PORT_FEAT_LINK_STATE 5
2913 #define USB_PORT_FEAT_U1_TIMEOUT 23
2914 #define USB_PORT_FEAT_U2_TIMEOUT 24
2915 -#define USB_PORT_FEAT_C_LINK_STATE 25
2916 -#define USB_PORT_FEAT_C_CONFIG_ERR 26
2917 +#define USB_PORT_FEAT_C_PORT_LINK_STATE 25
2918 +#define USB_PORT_FEAT_C_PORT_CONFIG_ERROR 26
2919 #define USB_PORT_FEAT_REMOTE_WAKE_MASK 27
2920 #define USB_PORT_FEAT_BH_PORT_RESET 28
2921 #define USB_PORT_FEAT_C_BH_PORT_RESET 29
2922 diff --git a/include/net/flow.h b/include/net/flow.h
2923 index 57f15a7..2a7eefd 100644
2924 --- a/include/net/flow.h
2925 +++ b/include/net/flow.h
2926 @@ -90,6 +90,16 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
2927 fl4->fl4_dport = dport;
2928 fl4->fl4_sport = sport;
2929 }
2930 +
2931 +/* Reset some input parameters after previous lookup */
2932 +static inline void flowi4_update_output(struct flowi4 *fl4, int oif, __u8 tos,
2933 + __be32 daddr, __be32 saddr)
2934 +{
2935 + fl4->flowi4_oif = oif;
2936 + fl4->flowi4_tos = tos;
2937 + fl4->daddr = daddr;
2938 + fl4->saddr = saddr;
2939 +}
2940
2941
2942 struct flowi6 {
2943 diff --git a/include/net/route.h b/include/net/route.h
2944 index 91855d1..b1c0d5b 100644
2945 --- a/include/net/route.h
2946 +++ b/include/net/route.h
2947 @@ -270,6 +270,7 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
2948 if (IS_ERR(rt))
2949 return rt;
2950 ip_rt_put(rt);
2951 + flowi4_update_output(fl4, oif, tos, fl4->daddr, fl4->saddr);
2952 }
2953 security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
2954 return ip_route_output_flow(net, fl4, sk);
2955 @@ -284,6 +285,9 @@ static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable
2956 fl4->fl4_dport = dport;
2957 fl4->fl4_sport = sport;
2958 ip_rt_put(rt);
2959 + flowi4_update_output(fl4, sk->sk_bound_dev_if,
2960 + RT_CONN_FLAGS(sk), fl4->daddr,
2961 + fl4->saddr);
2962 security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
2963 return ip_route_output_flow(sock_net(sk), fl4, sk);
2964 }
2965 diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
2966 index f6bb08b..55ce96b 100644
2967 --- a/include/net/sch_generic.h
2968 +++ b/include/net/sch_generic.h
2969 @@ -220,9 +220,16 @@ struct tcf_proto {
2970
2971 struct qdisc_skb_cb {
2972 unsigned int pkt_len;
2973 - long data[];
2974 + unsigned char data[24];
2975 };
2976
2977 +static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
2978 +{
2979 + struct qdisc_skb_cb *qcb;
2980 + BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz);
2981 + BUILD_BUG_ON(sizeof(qcb->data) < sz);
2982 +}
2983 +
2984 static inline int qdisc_qlen(const struct Qdisc *q)
2985 {
2986 return q->q.qlen;
2987 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
2988 index a79886c..94bbec3 100644
2989 --- a/include/target/target_core_base.h
2990 +++ b/include/target/target_core_base.h
2991 @@ -486,6 +486,7 @@ struct se_cmd {
2992
2993 struct scatterlist *t_data_sg;
2994 unsigned int t_data_nents;
2995 + void *t_data_vmap;
2996 struct scatterlist *t_bidi_data_sg;
2997 unsigned int t_bidi_data_nents;
2998
2999 diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
3000 index dac4f2d..72751e8 100644
3001 --- a/include/target/target_core_transport.h
3002 +++ b/include/target/target_core_transport.h
3003 @@ -129,8 +129,8 @@ extern void transport_init_se_cmd(struct se_cmd *,
3004 struct target_core_fabric_ops *,
3005 struct se_session *, u32, int, int,
3006 unsigned char *);
3007 -void *transport_kmap_first_data_page(struct se_cmd *cmd);
3008 -void transport_kunmap_first_data_page(struct se_cmd *cmd);
3009 +void *transport_kmap_data_sg(struct se_cmd *);
3010 +void transport_kunmap_data_sg(struct se_cmd *);
3011 extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
3012 extern int transport_handle_cdb_direct(struct se_cmd *);
3013 extern int transport_generic_handle_cdb_map(struct se_cmd *);
3014 diff --git a/kernel/fork.c b/kernel/fork.c
3015 index da4a6a1..0acf42c0 100644
3016 --- a/kernel/fork.c
3017 +++ b/kernel/fork.c
3018 @@ -66,6 +66,7 @@
3019 #include <linux/user-return-notifier.h>
3020 #include <linux/oom.h>
3021 #include <linux/khugepaged.h>
3022 +#include <linux/signalfd.h>
3023
3024 #include <asm/pgtable.h>
3025 #include <asm/pgalloc.h>
3026 @@ -910,8 +911,10 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
3027
3028 void __cleanup_sighand(struct sighand_struct *sighand)
3029 {
3030 - if (atomic_dec_and_test(&sighand->count))
3031 + if (atomic_dec_and_test(&sighand->count)) {
3032 + signalfd_cleanup(sighand);
3033 kmem_cache_free(sighand_cachep, sighand);
3034 + }
3035 }
3036
3037
3038 diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
3039 index 342d8f4..0119b9d 100644
3040 --- a/kernel/irq/autoprobe.c
3041 +++ b/kernel/irq/autoprobe.c
3042 @@ -53,7 +53,7 @@ unsigned long probe_irq_on(void)
3043 if (desc->irq_data.chip->irq_set_type)
3044 desc->irq_data.chip->irq_set_type(&desc->irq_data,
3045 IRQ_TYPE_PROBE);
3046 - irq_startup(desc);
3047 + irq_startup(desc, false);
3048 }
3049 raw_spin_unlock_irq(&desc->lock);
3050 }
3051 @@ -70,7 +70,7 @@ unsigned long probe_irq_on(void)
3052 raw_spin_lock_irq(&desc->lock);
3053 if (!desc->action && irq_settings_can_probe(desc)) {
3054 desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
3055 - if (irq_startup(desc))
3056 + if (irq_startup(desc, false))
3057 desc->istate |= IRQS_PENDING;
3058 }
3059 raw_spin_unlock_irq(&desc->lock);
3060 diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
3061 index f7c543a..fb7db75 100644
3062 --- a/kernel/irq/chip.c
3063 +++ b/kernel/irq/chip.c
3064 @@ -157,19 +157,22 @@ static void irq_state_set_masked(struct irq_desc *desc)
3065 irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
3066 }
3067
3068 -int irq_startup(struct irq_desc *desc)
3069 +int irq_startup(struct irq_desc *desc, bool resend)
3070 {
3071 + int ret = 0;
3072 +
3073 irq_state_clr_disabled(desc);
3074 desc->depth = 0;
3075
3076 if (desc->irq_data.chip->irq_startup) {
3077 - int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
3078 + ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
3079 irq_state_clr_masked(desc);
3080 - return ret;
3081 + } else {
3082 + irq_enable(desc);
3083 }
3084 -
3085 - irq_enable(desc);
3086 - return 0;
3087 + if (resend)
3088 + check_irq_resend(desc, desc->irq_data.irq);
3089 + return ret;
3090 }
3091
3092 void irq_shutdown(struct irq_desc *desc)
3093 @@ -330,6 +333,24 @@ out_unlock:
3094 }
3095 EXPORT_SYMBOL_GPL(handle_simple_irq);
3096
3097 +/*
3098 + * Called unconditionally from handle_level_irq() and only for oneshot
3099 + * interrupts from handle_fasteoi_irq()
3100 + */
3101 +static void cond_unmask_irq(struct irq_desc *desc)
3102 +{
3103 + /*
3104 + * We need to unmask in the following cases:
3105 + * - Standard level irq (IRQF_ONESHOT is not set)
3106 + * - Oneshot irq which did not wake the thread (caused by a
3107 + * spurious interrupt or a primary handler handling it
3108 + * completely).
3109 + */
3110 + if (!irqd_irq_disabled(&desc->irq_data) &&
3111 + irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
3112 + unmask_irq(desc);
3113 +}
3114 +
3115 /**
3116 * handle_level_irq - Level type irq handler
3117 * @irq: the interrupt number
3118 @@ -362,8 +383,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
3119
3120 handle_irq_event(desc);
3121
3122 - if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
3123 - unmask_irq(desc);
3124 + cond_unmask_irq(desc);
3125 +
3126 out_unlock:
3127 raw_spin_unlock(&desc->lock);
3128 }
3129 @@ -417,6 +438,9 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
3130 preflow_handler(desc);
3131 handle_irq_event(desc);
3132
3133 + if (desc->istate & IRQS_ONESHOT)
3134 + cond_unmask_irq(desc);
3135 +
3136 out_eoi:
3137 desc->irq_data.chip->irq_eoi(&desc->irq_data);
3138 out_unlock:
3139 @@ -625,7 +649,7 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
3140 irq_settings_set_noprobe(desc);
3141 irq_settings_set_norequest(desc);
3142 irq_settings_set_nothread(desc);
3143 - irq_startup(desc);
3144 + irq_startup(desc, true);
3145 }
3146 out:
3147 irq_put_desc_busunlock(desc, flags);
3148 diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
3149 index a73dd6c..e1a8b64 100644
3150 --- a/kernel/irq/internals.h
3151 +++ b/kernel/irq/internals.h
3152 @@ -67,7 +67,7 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
3153 extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
3154 extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
3155
3156 -extern int irq_startup(struct irq_desc *desc);
3157 +extern int irq_startup(struct irq_desc *desc, bool resend);
3158 extern void irq_shutdown(struct irq_desc *desc);
3159 extern void irq_enable(struct irq_desc *desc);
3160 extern void irq_disable(struct irq_desc *desc);
3161 diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
3162 index 1da999f..cf2d7ae 100644
3163 --- a/kernel/irq/manage.c
3164 +++ b/kernel/irq/manage.c
3165 @@ -1027,7 +1027,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
3166 desc->istate |= IRQS_ONESHOT;
3167
3168 if (irq_settings_can_autoenable(desc))
3169 - irq_startup(desc);
3170 + irq_startup(desc, true);
3171 else
3172 /* Undo nested disables: */
3173 desc->depth = 1;
3174 diff --git a/mm/nommu.c b/mm/nommu.c
3175 index b982290..ee7e57e 100644
3176 --- a/mm/nommu.c
3177 +++ b/mm/nommu.c
3178 @@ -696,9 +696,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
3179 if (vma->vm_file) {
3180 mapping = vma->vm_file->f_mapping;
3181
3182 + mutex_lock(&mapping->i_mmap_mutex);
3183 flush_dcache_mmap_lock(mapping);
3184 vma_prio_tree_insert(vma, &mapping->i_mmap);
3185 flush_dcache_mmap_unlock(mapping);
3186 + mutex_unlock(&mapping->i_mmap_mutex);
3187 }
3188
3189 /* add the VMA to the tree */
3190 @@ -760,9 +762,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
3191 if (vma->vm_file) {
3192 mapping = vma->vm_file->f_mapping;
3193
3194 + mutex_lock(&mapping->i_mmap_mutex);
3195 flush_dcache_mmap_lock(mapping);
3196 vma_prio_tree_remove(vma, &mapping->i_mmap);
3197 flush_dcache_mmap_unlock(mapping);
3198 + mutex_unlock(&mapping->i_mmap_mutex);
3199 }
3200
3201 /* remove from the MM's tree and list */
3202 @@ -2052,6 +2056,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
3203 high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
3204
3205 down_write(&nommu_region_sem);
3206 + mutex_lock(&inode->i_mapping->i_mmap_mutex);
3207
3208 /* search for VMAs that fall within the dead zone */
3209 vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
3210 @@ -2059,6 +2064,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
3211 /* found one - only interested if it's shared out of the page
3212 * cache */
3213 if (vma->vm_flags & VM_SHARED) {
3214 + mutex_unlock(&inode->i_mapping->i_mmap_mutex);
3215 up_write(&nommu_region_sem);
3216 return -ETXTBSY; /* not quite true, but near enough */
3217 }
3218 @@ -2086,6 +2092,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
3219 }
3220 }
3221
3222 + mutex_unlock(&inode->i_mapping->i_mmap_mutex);
3223 up_write(&nommu_region_sem);
3224 return 0;
3225 }
3226 diff --git a/net/core/dev.c b/net/core/dev.c
3227 index 5a13edf..c56cacf 100644
3228 --- a/net/core/dev.c
3229 +++ b/net/core/dev.c
3230 @@ -3565,14 +3565,20 @@ static inline gro_result_t
3231 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3232 {
3233 struct sk_buff *p;
3234 + unsigned int maclen = skb->dev->hard_header_len;
3235
3236 for (p = napi->gro_list; p; p = p->next) {
3237 unsigned long diffs;
3238
3239 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3240 diffs |= p->vlan_tci ^ skb->vlan_tci;
3241 - diffs |= compare_ether_header(skb_mac_header(p),
3242 - skb_gro_mac_header(skb));
3243 + if (maclen == ETH_HLEN)
3244 + diffs |= compare_ether_header(skb_mac_header(p),
3245 + skb_gro_mac_header(skb));
3246 + else if (!diffs)
3247 + diffs = memcmp(skb_mac_header(p),
3248 + skb_gro_mac_header(skb),
3249 + maclen);
3250 NAPI_GRO_CB(p)->same_flow = !diffs;
3251 NAPI_GRO_CB(p)->flush = 0;
3252 }
3253 diff --git a/net/core/netpoll.c b/net/core/netpoll.c
3254 index 5d4d896..ab0633f 100644
3255 --- a/net/core/netpoll.c
3256 +++ b/net/core/netpoll.c
3257 @@ -194,7 +194,7 @@ static void netpoll_poll_dev(struct net_device *dev)
3258
3259 poll_napi(dev);
3260
3261 - if (dev->priv_flags & IFF_SLAVE) {
3262 + if (dev->flags & IFF_SLAVE) {
3263 if (dev->npinfo) {
3264 struct net_device *bond_dev = dev->master;
3265 struct sk_buff *skb;
3266 diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
3267 index 96a164a..59a7041 100644
3268 --- a/net/ipv4/arp.c
3269 +++ b/net/ipv4/arp.c
3270 @@ -867,7 +867,8 @@ static int arp_process(struct sk_buff *skb)
3271 if (addr_type == RTN_UNICAST &&
3272 (arp_fwd_proxy(in_dev, dev, rt) ||
3273 arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
3274 - pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
3275 + (rt->dst.dev != dev &&
3276 + pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))) {
3277 n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
3278 if (n)
3279 neigh_release(n);
3280 diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
3281 index 1e60f76..42dd1a9 100644
3282 --- a/net/ipv4/ip_options.c
3283 +++ b/net/ipv4/ip_options.c
3284 @@ -573,8 +573,8 @@ void ip_forward_options(struct sk_buff *skb)
3285 }
3286 if (srrptr + 3 <= srrspace) {
3287 opt->is_changed = 1;
3288 - ip_rt_get_source(&optptr[srrptr-1], skb, rt);
3289 ip_hdr(skb)->daddr = opt->nexthop;
3290 + ip_rt_get_source(&optptr[srrptr-1], skb, rt);
3291 optptr[2] = srrptr+4;
3292 } else if (net_ratelimit())
3293 printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
3294 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
3295 index 52b5c2d..53113b9 100644
3296 --- a/net/ipv4/tcp_input.c
3297 +++ b/net/ipv4/tcp_input.c
3298 @@ -1310,25 +1310,26 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
3299 return in_sack;
3300 }
3301
3302 -static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
3303 - struct tcp_sacktag_state *state,
3304 +/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
3305 +static u8 tcp_sacktag_one(struct sock *sk,
3306 + struct tcp_sacktag_state *state, u8 sacked,
3307 + u32 start_seq, u32 end_seq,
3308 int dup_sack, int pcount)
3309 {
3310 struct tcp_sock *tp = tcp_sk(sk);
3311 - u8 sacked = TCP_SKB_CB(skb)->sacked;
3312 int fack_count = state->fack_count;
3313
3314 /* Account D-SACK for retransmitted packet. */
3315 if (dup_sack && (sacked & TCPCB_RETRANS)) {
3316 if (tp->undo_marker && tp->undo_retrans &&
3317 - after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
3318 + after(end_seq, tp->undo_marker))
3319 tp->undo_retrans--;
3320 if (sacked & TCPCB_SACKED_ACKED)
3321 state->reord = min(fack_count, state->reord);
3322 }
3323
3324 /* Nothing to do; acked frame is about to be dropped (was ACKed). */
3325 - if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
3326 + if (!after(end_seq, tp->snd_una))
3327 return sacked;
3328
3329 if (!(sacked & TCPCB_SACKED_ACKED)) {
3330 @@ -1347,13 +1348,13 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
3331 /* New sack for not retransmitted frame,
3332 * which was in hole. It is reordering.
3333 */
3334 - if (before(TCP_SKB_CB(skb)->seq,
3335 + if (before(start_seq,
3336 tcp_highest_sack_seq(tp)))
3337 state->reord = min(fack_count,
3338 state->reord);
3339
3340 /* SACK enhanced F-RTO (RFC4138; Appendix B) */
3341 - if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
3342 + if (!after(end_seq, tp->frto_highmark))
3343 state->flag |= FLAG_ONLY_ORIG_SACKED;
3344 }
3345
3346 @@ -1371,8 +1372,7 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
3347
3348 /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
3349 if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
3350 - before(TCP_SKB_CB(skb)->seq,
3351 - TCP_SKB_CB(tp->lost_skb_hint)->seq))
3352 + before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
3353 tp->lost_cnt_hint += pcount;
3354
3355 if (fack_count > tp->fackets_out)
3356 @@ -1391,6 +1391,9 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
3357 return sacked;
3358 }
3359
3360 +/* Shift newly-SACKed bytes from this skb to the immediately previous
3361 + * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
3362 + */
3363 static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
3364 struct tcp_sacktag_state *state,
3365 unsigned int pcount, int shifted, int mss,
3366 @@ -1398,10 +1401,13 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
3367 {
3368 struct tcp_sock *tp = tcp_sk(sk);
3369 struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
3370 + u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */
3371 + u32 end_seq = start_seq + shifted; /* end of newly-SACKed */
3372
3373 BUG_ON(!pcount);
3374
3375 - if (skb == tp->lost_skb_hint)
3376 + /* Adjust hint for FACK. Non-FACK is handled in tcp_sacktag_one(). */
3377 + if (tcp_is_fack(tp) && (skb == tp->lost_skb_hint))
3378 tp->lost_cnt_hint += pcount;
3379
3380 TCP_SKB_CB(prev)->end_seq += shifted;
3381 @@ -1427,8 +1433,11 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
3382 skb_shinfo(skb)->gso_type = 0;
3383 }
3384
3385 - /* We discard results */
3386 - tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
3387 + /* Adjust counters and hints for the newly sacked sequence range but
3388 + * discard the return value since prev is already marked.
3389 + */
3390 + tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
3391 + start_seq, end_seq, dup_sack, pcount);
3392
3393 /* Difference in this won't matter, both ACKed by the same cumul. ACK */
3394 TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
3395 @@ -1667,10 +1676,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
3396 break;
3397
3398 if (in_sack) {
3399 - TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk,
3400 - state,
3401 - dup_sack,
3402 - tcp_skb_pcount(skb));
3403 + TCP_SKB_CB(skb)->sacked =
3404 + tcp_sacktag_one(sk,
3405 + state,
3406 + TCP_SKB_CB(skb)->sacked,
3407 + TCP_SKB_CB(skb)->seq,
3408 + TCP_SKB_CB(skb)->end_seq,
3409 + dup_sack,
3410 + tcp_skb_pcount(skb));
3411
3412 if (!before(TCP_SKB_CB(skb)->seq,
3413 tcp_highest_sack_seq(tp)))
3414 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
3415 index c89e354..eb90aa8 100644
3416 --- a/net/ipv4/tcp_ipv4.c
3417 +++ b/net/ipv4/tcp_ipv4.c
3418 @@ -650,6 +650,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
3419 arg.iov[0].iov_len, IPPROTO_TCP, 0);
3420 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
3421 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
3422 + /* When socket is gone, all binding information is lost.
3423 + * routing might fail in this case. using iif for oif to
3424 + * make sure we can deliver it
3425 + */
3426 + arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
3427
3428 net = dev_net(skb_dst(skb)->dev);
3429 arg.tos = ip_hdr(skb)->tos;
3430 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
3431 index a7536fd..7d9b21d 100644
3432 --- a/net/mac80211/main.c
3433 +++ b/net/mac80211/main.c
3434 @@ -885,6 +885,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
3435 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
3436 result);
3437
3438 + ieee80211_led_init(local);
3439 +
3440 rtnl_lock();
3441
3442 result = ieee80211_init_rate_ctrl_alg(local,
3443 @@ -906,8 +908,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
3444
3445 rtnl_unlock();
3446
3447 - ieee80211_led_init(local);
3448 -
3449 local->network_latency_notifier.notifier_call =
3450 ieee80211_max_network_latency;
3451 result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
3452 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
3453 index 093cc32..6dc7d7d 100644
3454 --- a/net/netfilter/ipvs/ip_vs_core.c
3455 +++ b/net/netfilter/ipvs/ip_vs_core.c
3456 @@ -232,6 +232,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
3457 __be16 dport = 0; /* destination port to forward */
3458 unsigned int flags;
3459 struct ip_vs_conn_param param;
3460 + const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
3461 union nf_inet_addr snet; /* source network of the client,
3462 after masking */
3463
3464 @@ -267,7 +268,6 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
3465 {
3466 int protocol = iph.protocol;
3467 const union nf_inet_addr *vaddr = &iph.daddr;
3468 - const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
3469 __be16 vport = 0;
3470
3471 if (dst_port == svc->port) {
3472 diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
3473 index 3422b25..081ffb9 100644
3474 --- a/net/sched/sch_choke.c
3475 +++ b/net/sched/sch_choke.c
3476 @@ -225,8 +225,7 @@ struct choke_skb_cb {
3477
3478 static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
3479 {
3480 - BUILD_BUG_ON(sizeof(skb->cb) <
3481 - sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb));
3482 + qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
3483 return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
3484 }
3485
3486 diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
3487 index a4ab207..7801b15 100644
3488 --- a/net/sched/sch_netem.c
3489 +++ b/net/sched/sch_netem.c
3490 @@ -118,8 +118,7 @@ struct netem_skb_cb {
3491
3492 static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
3493 {
3494 - BUILD_BUG_ON(sizeof(skb->cb) <
3495 - sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
3496 + qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
3497 return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
3498 }
3499
3500 @@ -383,8 +382,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
3501 q->counter = 0;
3502
3503 __skb_queue_head(&q->qdisc->q, skb);
3504 - q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
3505 - q->qdisc->qstats.requeues++;
3506 + sch->qstats.backlog += qdisc_pkt_len(skb);
3507 + sch->qstats.requeues++;
3508 ret = NET_XMIT_SUCCESS;
3509 }
3510
3511 diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
3512 index e83c272..17859ea 100644
3513 --- a/net/sched/sch_sfb.c
3514 +++ b/net/sched/sch_sfb.c
3515 @@ -93,8 +93,7 @@ struct sfb_skb_cb {
3516
3517 static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
3518 {
3519 - BUILD_BUG_ON(sizeof(skb->cb) <
3520 - sizeof(struct qdisc_skb_cb) + sizeof(struct sfb_skb_cb));
3521 + qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
3522 return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
3523 }
3524
3525 diff --git a/scripts/package/builddeb b/scripts/package/builddeb
3526 index f6cbc3d..3c6c0b1 100644
3527 --- a/scripts/package/builddeb
3528 +++ b/scripts/package/builddeb
3529 @@ -238,14 +238,14 @@ EOF
3530 fi
3531
3532 # Build header package
3533 -(cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > /tmp/files$$)
3534 -(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> /tmp/files$$)
3535 -(cd $objtree; find .config Module.symvers include scripts -type f >> /tmp/objfiles$$)
3536 +(cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
3537 +(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
3538 +(cd $objtree; find .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
3539 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
3540 mkdir -p "$destdir"
3541 -(cd $srctree; tar -c -f - -T /tmp/files$$) | (cd $destdir; tar -xf -)
3542 -(cd $objtree; tar -c -f - -T /tmp/objfiles$$) | (cd $destdir; tar -xf -)
3543 -rm -f /tmp/files$$ /tmp/objfiles$$
3544 +(cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
3545 +(cd $objtree; tar -c -f - -T "$objtree/debian/hdrobjfiles") | (cd $destdir; tar -xf -)
3546 +rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
3547 arch=$(dpkg --print-architecture)
3548
3549 cat <<EOF >> debian/control
3550 diff --git a/security/tomoyo/.gitignore b/security/tomoyo/.gitignore
3551 new file mode 100644
3552 index 0000000..5caf1a6
3553 --- /dev/null
3554 +++ b/security/tomoyo/.gitignore
3555 @@ -0,0 +1,2 @@
3556 +builtin-policy.h
3557 +policy/
3558 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
3559 index 7072251..08bad5b 100644
3560 --- a/sound/pci/hda/patch_conexant.c
3561 +++ b/sound/pci/hda/patch_conexant.c
3562 @@ -1899,6 +1899,10 @@ static void cxt5051_init_mic_port(struct hda_codec *codec, hda_nid_t nid,
3563 snd_hda_codec_write(codec, nid, 0,
3564 AC_VERB_SET_UNSOLICITED_ENABLE,
3565 AC_USRSP_EN | event);
3566 +}
3567 +
3568 +static void cxt5051_init_mic_jack(struct hda_codec *codec, hda_nid_t nid)
3569 +{
3570 snd_hda_input_jack_add(codec, nid, SND_JACK_MICROPHONE, NULL);
3571 snd_hda_input_jack_report(codec, nid);
3572 }
3573 @@ -1916,7 +1920,6 @@ static int cxt5051_init(struct hda_codec *codec)
3574 struct conexant_spec *spec = codec->spec;
3575
3576 conexant_init(codec);
3577 - conexant_init_jacks(codec);
3578
3579 if (spec->auto_mic & AUTO_MIC_PORTB)
3580 cxt5051_init_mic_port(codec, 0x17, CXT5051_PORTB_EVENT);
3581 @@ -2037,6 +2040,12 @@ static int patch_cxt5051(struct hda_codec *codec)
3582 if (spec->beep_amp)
3583 snd_hda_attach_beep_device(codec, spec->beep_amp);
3584
3585 + conexant_init_jacks(codec);
3586 + if (spec->auto_mic & AUTO_MIC_PORTB)
3587 + cxt5051_init_mic_jack(codec, 0x17);
3588 + if (spec->auto_mic & AUTO_MIC_PORTC)
3589 + cxt5051_init_mic_jack(codec, 0x18);
3590 +
3591 return 0;
3592 }
3593
3594 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3595 index 9c197d4..c4c8d78 100644
3596 --- a/sound/pci/hda/patch_realtek.c
3597 +++ b/sound/pci/hda/patch_realtek.c
3598 @@ -79,6 +79,8 @@ enum {
3599 ALC_AUTOMUTE_MIXER, /* mute/unmute mixer widget AMP */
3600 };
3601
3602 +#define MAX_VOL_NIDS 0x40
3603 +
3604 struct alc_spec {
3605 /* codec parameterization */
3606 const struct snd_kcontrol_new *mixers[5]; /* mixer arrays */
3607 @@ -117,8 +119,8 @@ struct alc_spec {
3608 const hda_nid_t *capsrc_nids;
3609 hda_nid_t dig_in_nid; /* digital-in NID; optional */
3610 hda_nid_t mixer_nid; /* analog-mixer NID */
3611 - DECLARE_BITMAP(vol_ctls, 0x20 << 1);
3612 - DECLARE_BITMAP(sw_ctls, 0x20 << 1);
3613 + DECLARE_BITMAP(vol_ctls, MAX_VOL_NIDS << 1);
3614 + DECLARE_BITMAP(sw_ctls, MAX_VOL_NIDS << 1);
3615
3616 /* capture setup for dynamic dual-adc switch */
3617 hda_nid_t cur_adc;
3618 @@ -3068,7 +3070,10 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
3619 static inline unsigned int get_ctl_pos(unsigned int data)
3620 {
3621 hda_nid_t nid = get_amp_nid_(data);
3622 - unsigned int dir = get_amp_direction_(data);
3623 + unsigned int dir;
3624 + if (snd_BUG_ON(nid >= MAX_VOL_NIDS))
3625 + return 0;
3626 + dir = get_amp_direction_(data);
3627 return (nid << 1) | dir;
3628 }
3629
3630 @@ -4224,12 +4229,20 @@ static void alc889_fixup_dac_route(struct hda_codec *codec,
3631 const struct alc_fixup *fix, int action)
3632 {
3633 if (action == ALC_FIXUP_ACT_PRE_PROBE) {
3634 + /* fake the connections during parsing the tree */
3635 hda_nid_t conn1[2] = { 0x0c, 0x0d };
3636 hda_nid_t conn2[2] = { 0x0e, 0x0f };
3637 snd_hda_override_conn_list(codec, 0x14, 2, conn1);
3638 snd_hda_override_conn_list(codec, 0x15, 2, conn1);
3639 snd_hda_override_conn_list(codec, 0x18, 2, conn2);
3640 snd_hda_override_conn_list(codec, 0x1a, 2, conn2);
3641 + } else if (action == ALC_FIXUP_ACT_PROBE) {
3642 + /* restore the connections */
3643 + hda_nid_t conn[5] = { 0x0c, 0x0d, 0x0e, 0x0f, 0x26 };
3644 + snd_hda_override_conn_list(codec, 0x14, 5, conn);
3645 + snd_hda_override_conn_list(codec, 0x15, 5, conn);
3646 + snd_hda_override_conn_list(codec, 0x18, 5, conn);
3647 + snd_hda_override_conn_list(codec, 0x1a, 5, conn);
3648 }
3649 }
3650
3651 diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
3652 index d795294..07dd7eb 100644
3653 --- a/sound/soc/codecs/wm8962.c
3654 +++ b/sound/soc/codecs/wm8962.c
3655 @@ -2559,7 +2559,7 @@ static int dsp2_event(struct snd_soc_dapm_widget *w,
3656 return 0;
3657 }
3658
3659 -static const char *st_text[] = { "None", "Right", "Left" };
3660 +static const char *st_text[] = { "None", "Left", "Right" };
3661
3662 static const struct soc_enum str_enum =
3663 SOC_ENUM_SINGLE(WM8962_DAC_DSP_MIXING_1, 2, 3, st_text);