Magellan Linux

Contents of /trunk/kernel-lts/patches-3.4/0137-3.4.38-all-fixes.patch



Revision 2144
Sun Mar 31 12:45:45 2013 UTC by niro
File size: 92602 byte(s)
linux-3.4.38
1 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
2 index 0d0326f..d70b9a5 100644
3 --- a/arch/x86/lib/usercopy_64.c
4 +++ b/arch/x86/lib/usercopy_64.c
5 @@ -120,10 +120,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
6 char c;
7 unsigned zero_len;
8
9 - for (; len; --len) {
10 + for (; len; --len, to++) {
11 if (__get_user_nocheck(c, from++, sizeof(char)))
12 break;
13 - if (__put_user_nocheck(c, to++, sizeof(char)))
14 + if (__put_user_nocheck(c, to, sizeof(char)))
15 break;
16 }
17
18 diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
19 index 384edc6..8b77fd3 100644
20 --- a/drivers/gpu/drm/drm_edid.c
21 +++ b/drivers/gpu/drm/drm_edid.c
22 @@ -836,7 +836,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
23 unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
24 unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
25 unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
26 - unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
27 + unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
28 unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
29
30 /* ignore tiny modes */
31 @@ -917,6 +917,7 @@ set_size:
32 }
33
34 mode->type = DRM_MODE_TYPE_DRIVER;
35 + mode->vrefresh = drm_mode_vrefresh(mode);
36 drm_mode_set_name(mode);
37
38 return mode;
39 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
40 index e6162a1..34791fb 100644
41 --- a/drivers/gpu/drm/i915/i915_debugfs.c
42 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
43 @@ -122,7 +122,7 @@ static const char *cache_level_str(int type)
44 static void
45 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
46 {
47 - seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
48 + seq_printf(m, "%pK: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
49 &obj->base,
50 get_pin_flag(obj),
51 get_tiling_flag(obj),
52 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
53 index d4417e3..e97ed61 100644
54 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
55 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
56 @@ -957,15 +957,20 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
57 int count)
58 {
59 int i;
60 + int relocs_total = 0;
61 + int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
62
63 for (i = 0; i < count; i++) {
64 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
65 int length; /* limited by fault_in_pages_readable() */
66
67 - /* First check for malicious input causing overflow */
68 - if (exec[i].relocation_count >
69 - INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
70 + /* First check for malicious input causing overflow in
71 + * the worst case where we need to allocate the entire
72 + * relocation tree as a single array.
73 + */
74 + if (exec[i].relocation_count > relocs_max - relocs_total)
75 return -EINVAL;
76 + relocs_total += exec[i].relocation_count;
77
78 length = exec[i].relocation_count *
79 sizeof(struct drm_i915_gem_relocation_entry);
80 diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
81 index 289140b..cffb007 100644
82 --- a/drivers/gpu/drm/i915/intel_opregion.c
83 +++ b/drivers/gpu/drm/i915/intel_opregion.c
84 @@ -419,6 +419,25 @@ blind_set:
85 goto end;
86 }
87
88 +static void intel_setup_cadls(struct drm_device *dev)
89 +{
90 + struct drm_i915_private *dev_priv = dev->dev_private;
91 + struct intel_opregion *opregion = &dev_priv->opregion;
92 + int i = 0;
93 + u32 disp_id;
94 +
95 + /* Initialize the CADL field by duplicating the DIDL values.
96 + * Technically, this is not always correct as display outputs may exist,
97 + * but not active. This initialization is necessary for some Clevo
98 + * laptops that check this field before processing the brightness and
99 + * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
100 + * there are less than eight devices. */
101 + do {
102 + disp_id = ioread32(&opregion->acpi->didl[i]);
103 + iowrite32(disp_id, &opregion->acpi->cadl[i]);
104 + } while (++i < 8 && disp_id != 0);
105 +}
106 +
107 void intel_opregion_init(struct drm_device *dev)
108 {
109 struct drm_i915_private *dev_priv = dev->dev_private;
110 @@ -428,8 +447,10 @@ void intel_opregion_init(struct drm_device *dev)
111 return;
112
113 if (opregion->acpi) {
114 - if (drm_core_check_feature(dev, DRIVER_MODESET))
115 + if (drm_core_check_feature(dev, DRIVER_MODESET)) {
116 intel_didl_outputs(dev);
117 + intel_setup_cadls(dev);
118 + }
119
120 /* Notify BIOS we are ready to handle ACPI video ext notifs.
121 * Right now, all the events are handled by the ACPI video module.
122 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
123 index 9934c9d..7dffc57 100644
124 --- a/drivers/gpu/drm/radeon/ni.c
125 +++ b/drivers/gpu/drm/radeon/ni.c
126 @@ -663,13 +663,19 @@ static void cayman_gpu_init(struct radeon_device *rdev)
127 (rdev->pdev->device == 0x9907) ||
128 (rdev->pdev->device == 0x9908) ||
129 (rdev->pdev->device == 0x9909) ||
130 + (rdev->pdev->device == 0x990B) ||
131 + (rdev->pdev->device == 0x990C) ||
132 + (rdev->pdev->device == 0x990F) ||
133 (rdev->pdev->device == 0x9910) ||
134 - (rdev->pdev->device == 0x9917)) {
135 + (rdev->pdev->device == 0x9917) ||
136 + (rdev->pdev->device == 0x9999)) {
137 rdev->config.cayman.max_simds_per_se = 6;
138 rdev->config.cayman.max_backends_per_se = 2;
139 } else if ((rdev->pdev->device == 0x9903) ||
140 (rdev->pdev->device == 0x9904) ||
141 (rdev->pdev->device == 0x990A) ||
142 + (rdev->pdev->device == 0x990D) ||
143 + (rdev->pdev->device == 0x990E) ||
144 (rdev->pdev->device == 0x9913) ||
145 (rdev->pdev->device == 0x9918)) {
146 rdev->config.cayman.max_simds_per_se = 4;
147 @@ -678,6 +684,9 @@ static void cayman_gpu_init(struct radeon_device *rdev)
148 (rdev->pdev->device == 0x9990) ||
149 (rdev->pdev->device == 0x9991) ||
150 (rdev->pdev->device == 0x9994) ||
151 + (rdev->pdev->device == 0x9995) ||
152 + (rdev->pdev->device == 0x9996) ||
153 + (rdev->pdev->device == 0x999A) ||
154 (rdev->pdev->device == 0x99A0)) {
155 rdev->config.cayman.max_simds_per_se = 3;
156 rdev->config.cayman.max_backends_per_se = 1;
157 diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
158 index fef7b72..89a0eec 100644
159 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c
160 +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
161 @@ -141,13 +141,15 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
162 sdomain, ddomain, "dma");
163 }
164
165 - time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
166 - RADEON_BENCHMARK_COPY_BLIT, n);
167 - if (time < 0)
168 - goto out_cleanup;
169 - if (time > 0)
170 - radeon_benchmark_log_results(n, size, time,
171 - sdomain, ddomain, "blit");
172 + if (rdev->asic->copy.blit) {
173 + time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
174 + RADEON_BENCHMARK_COPY_BLIT, n);
175 + if (time < 0)
176 + goto out_cleanup;
177 + if (time > 0)
178 + radeon_benchmark_log_results(n, size, time,
179 + sdomain, ddomain, "blit");
180 + }
181
182 out_cleanup:
183 if (sobj) {
184 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
185 index 014504d..3767853 100644
186 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
187 +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
188 @@ -755,9 +755,13 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
189 if (++priv->tx_outstanding == ipoib_sendq_size) {
190 ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
191 tx->qp->qp_num);
192 - if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
193 - ipoib_warn(priv, "request notify on send CQ failed\n");
194 netif_stop_queue(dev);
195 + rc = ib_req_notify_cq(priv->send_cq,
196 + IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
197 + if (rc < 0)
198 + ipoib_warn(priv, "request notify on send CQ failed\n");
199 + else if (rc)
200 + ipoib_send_comp_handler(priv->send_cq, dev);
201 }
202 }
203 }
204 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
205 index 6df52c9..dadf337 100644
206 --- a/drivers/net/bonding/bond_main.c
207 +++ b/drivers/net/bonding/bond_main.c
208 @@ -1737,6 +1737,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
209
210 bond_compute_features(bond);
211
212 + bond_update_speed_duplex(new_slave);
213 +
214 read_lock(&bond->lock);
215
216 new_slave->last_arp_rx = jiffies;
217 @@ -1780,8 +1782,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
218 new_slave->link = BOND_LINK_DOWN;
219 }
220
221 - bond_update_speed_duplex(new_slave);
222 -
223 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
224 /* if there is a primary slave, remember it */
225 if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
226 @@ -2462,8 +2462,6 @@ static void bond_miimon_commit(struct bonding *bond)
227 bond_set_backup_slave(slave);
228 }
229
230 - bond_update_speed_duplex(slave);
231 -
232 pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
233 bond->dev->name, slave->dev->name,
234 slave->speed, slave->duplex ? "full" : "half");
235 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
236 index 2b46e1e..5905caa 100644
237 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
238 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
239 @@ -439,8 +439,9 @@ struct bnx2x_fw_port_stats_old {
240
241 #define UPDATE_QSTAT(s, t) \
242 do { \
243 - qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi); \
244 qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \
245 + qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \
246 + + ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \
247 } while (0)
248
249 #define UPDATE_QSTAT_OLD(f) \
250 diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
251 index e143d8c..15ed91f 100644
252 --- a/drivers/net/ethernet/broadcom/tg3.c
253 +++ b/drivers/net/ethernet/broadcom/tg3.c
254 @@ -3946,6 +3946,14 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
255 tp->link_config.active_speed = tp->link_config.speed;
256 tp->link_config.active_duplex = tp->link_config.duplex;
257
258 + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
259 + /* With autoneg disabled, 5715 only links up when the
260 + * advertisement register has the configured speed
261 + * enabled.
262 + */
263 + tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
264 + }
265 +
266 bmcr = 0;
267 switch (tp->link_config.speed) {
268 default:
269 diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
270 index b26a954..5400a33 100644
271 --- a/drivers/net/ethernet/sfc/bitfield.h
272 +++ b/drivers/net/ethernet/sfc/bitfield.h
273 @@ -120,10 +120,10 @@ typedef union efx_oword {
274 * [0,high-low), with garbage in bits [high-low+1,...).
275 */
276 #define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \
277 - (((low > max) || (high < min)) ? 0 : \
278 - ((low > min) ? \
279 - ((native_element) >> (low - min)) : \
280 - ((native_element) << (min - low))))
281 + ((low) > (max) || (high) < (min) ? 0 : \
282 + (low) > (min) ? \
283 + (native_element) >> ((low) - (min)) : \
284 + (native_element) << ((min) - (low)))
285
286 /*
287 * Extract bit field portion [low,high) from the 64-bit little-endian
288 @@ -142,27 +142,27 @@ typedef union efx_oword {
289 #define EFX_EXTRACT_OWORD64(oword, low, high) \
290 ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
291 EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \
292 - EFX_MASK64(high + 1 - low))
293 + EFX_MASK64((high) + 1 - (low)))
294
295 #define EFX_EXTRACT_QWORD64(qword, low, high) \
296 (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \
297 - EFX_MASK64(high + 1 - low))
298 + EFX_MASK64((high) + 1 - (low)))
299
300 #define EFX_EXTRACT_OWORD32(oword, low, high) \
301 ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
302 EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
303 EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
304 EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \
305 - EFX_MASK32(high + 1 - low))
306 + EFX_MASK32((high) + 1 - (low)))
307
308 #define EFX_EXTRACT_QWORD32(qword, low, high) \
309 ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
310 EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \
311 - EFX_MASK32(high + 1 - low))
312 + EFX_MASK32((high) + 1 - (low)))
313
314 #define EFX_EXTRACT_DWORD(dword, low, high) \
315 (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \
316 - EFX_MASK32(high + 1 - low))
317 + EFX_MASK32((high) + 1 - (low)))
318
319 #define EFX_OWORD_FIELD64(oword, field) \
320 EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \
321 @@ -442,10 +442,10 @@ typedef union efx_oword {
322 cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value))
323
324 #define EFX_INPLACE_MASK64(min, max, low, high) \
325 - EFX_INSERT64(min, max, low, high, EFX_MASK64(high + 1 - low))
326 + EFX_INSERT64(min, max, low, high, EFX_MASK64((high) + 1 - (low)))
327
328 #define EFX_INPLACE_MASK32(min, max, low, high) \
329 - EFX_INSERT32(min, max, low, high, EFX_MASK32(high + 1 - low))
330 + EFX_INSERT32(min, max, low, high, EFX_MASK32((high) + 1 - (low)))
331
332 #define EFX_SET_OWORD64(oword, low, high, value) do { \
333 (oword).u64[0] = (((oword).u64[0] \
334 diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
335 index 954b885..a87f351 100644
336 --- a/drivers/net/ethernet/sfc/efx.c
337 +++ b/drivers/net/ethernet/sfc/efx.c
338 @@ -656,25 +656,30 @@ static void efx_stop_datapath(struct efx_nic *efx)
339 struct efx_channel *channel;
340 struct efx_tx_queue *tx_queue;
341 struct efx_rx_queue *rx_queue;
342 + struct pci_dev *dev = efx->pci_dev;
343 int rc;
344
345 EFX_ASSERT_RESET_SERIALISED(efx);
346 BUG_ON(efx->port_enabled);
347
348 - rc = efx_nic_flush_queues(efx);
349 - if (rc && EFX_WORKAROUND_7803(efx)) {
350 - /* Schedule a reset to recover from the flush failure. The
351 - * descriptor caches reference memory we're about to free,
352 - * but falcon_reconfigure_mac_wrapper() won't reconnect
353 - * the MACs because of the pending reset. */
354 - netif_err(efx, drv, efx->net_dev,
355 - "Resetting to recover from flush failure\n");
356 - efx_schedule_reset(efx, RESET_TYPE_ALL);
357 - } else if (rc) {
358 - netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
359 - } else {
360 - netif_dbg(efx, drv, efx->net_dev,
361 - "successfully flushed all queues\n");
362 + /* Only perform flush if dma is enabled */
363 + if (dev->is_busmaster) {
364 + rc = efx_nic_flush_queues(efx);
365 +
366 + if (rc && EFX_WORKAROUND_7803(efx)) {
367 + /* Schedule a reset to recover from the flush failure. The
368 + * descriptor caches reference memory we're about to free,
369 + * but falcon_reconfigure_mac_wrapper() won't reconnect
370 + * the MACs because of the pending reset. */
371 + netif_err(efx, drv, efx->net_dev,
372 + "Resetting to recover from flush failure\n");
373 + efx_schedule_reset(efx, RESET_TYPE_ALL);
374 + } else if (rc) {
375 + netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
376 + } else {
377 + netif_dbg(efx, drv, efx->net_dev,
378 + "successfully flushed all queues\n");
379 + }
380 }
381
382 efx_for_each_channel(channel, efx) {
383 @@ -749,6 +754,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
384 tx_queue->txd.entries);
385 }
386
387 + efx_device_detach_sync(efx);
388 efx_stop_all(efx);
389 efx_stop_interrupts(efx, true);
390
391 @@ -802,6 +808,7 @@ out:
392
393 efx_start_interrupts(efx, true);
394 efx_start_all(efx);
395 + netif_device_attach(efx->net_dev);
396 return rc;
397
398 rollback:
399 @@ -1596,8 +1603,12 @@ static void efx_stop_all(struct efx_nic *efx)
400 /* Flush efx_mac_work(), refill_workqueue, monitor_work */
401 efx_flush_all(efx);
402
403 - /* Stop the kernel transmit interface late, so the watchdog
404 - * timer isn't ticking over the flush */
405 + /* Stop the kernel transmit interface. This is only valid if
406 + * the device is stopped or detached; otherwise the watchdog
407 + * may fire immediately.
408 + */
409 + WARN_ON(netif_running(efx->net_dev) &&
410 + netif_device_present(efx->net_dev));
411 netif_tx_disable(efx->net_dev);
412
413 efx_stop_datapath(efx);
414 @@ -1916,10 +1927,11 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
415 if (new_mtu > EFX_MAX_MTU)
416 return -EINVAL;
417
418 - efx_stop_all(efx);
419 -
420 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
421
422 + efx_device_detach_sync(efx);
423 + efx_stop_all(efx);
424 +
425 mutex_lock(&efx->mac_lock);
426 /* Reconfigure the MAC before enabling the dma queues so that
427 * the RX buffers don't overflow */
428 @@ -1928,6 +1940,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
429 mutex_unlock(&efx->mac_lock);
430
431 efx_start_all(efx);
432 + netif_device_attach(efx->net_dev);
433 return 0;
434 }
435
436 @@ -2219,7 +2232,7 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
437 netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
438 RESET_TYPE(method));
439
440 - netif_device_detach(efx->net_dev);
441 + efx_device_detach_sync(efx);
442 efx_reset_down(efx, method);
443
444 rc = efx->type->reset(efx, method);
445 @@ -2713,7 +2726,7 @@ static int efx_pm_freeze(struct device *dev)
446
447 efx->state = STATE_FINI;
448
449 - netif_device_detach(efx->net_dev);
450 + efx_device_detach_sync(efx);
451
452 efx_stop_all(efx);
453 efx_stop_interrupts(efx, false);
454 diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
455 index 70755c9..7db0e5b 100644
456 --- a/drivers/net/ethernet/sfc/efx.h
457 +++ b/drivers/net/ethernet/sfc/efx.h
458 @@ -162,4 +162,17 @@ extern void efx_link_status_changed(struct efx_nic *efx);
459 extern void efx_link_set_advertising(struct efx_nic *efx, u32);
460 extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
461
462 +static inline void efx_device_detach_sync(struct efx_nic *efx)
463 +{
464 + struct net_device *dev = efx->net_dev;
465 +
466 + /* Lock/freeze all TX queues so that we can be sure the
467 + * TX scheduler is stopped when we're done and before
468 + * netif_device_present() becomes false.
469 + */
470 + netif_tx_lock_bh(dev);
471 + netif_device_detach(dev);
472 + netif_tx_unlock_bh(dev);
473 +}
474 +
475 #endif /* EFX_EFX_H */
476 diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
477 index 3a1ca2b..49bcd19 100644
478 --- a/drivers/net/ethernet/sfc/falcon.c
479 +++ b/drivers/net/ethernet/sfc/falcon.c
480 @@ -25,9 +25,12 @@
481 #include "io.h"
482 #include "phy.h"
483 #include "workarounds.h"
484 +#include "selftest.h"
485
486 /* Hardware control for SFC4000 (aka Falcon). */
487
488 +static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
489 +
490 static const unsigned int
491 /* "Large" EEPROM device: Atmel AT25640 or similar
492 * 8 KB, 16-bit address, 32 B write block */
493 @@ -1034,10 +1037,34 @@ static const struct efx_nic_register_test falcon_b0_register_tests[] = {
494 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
495 };
496
497 -static int falcon_b0_test_registers(struct efx_nic *efx)
498 +static int
499 +falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
500 {
501 - return efx_nic_test_registers(efx, falcon_b0_register_tests,
502 - ARRAY_SIZE(falcon_b0_register_tests));
503 + enum reset_type reset_method = RESET_TYPE_INVISIBLE;
504 + int rc, rc2;
505 +
506 + mutex_lock(&efx->mac_lock);
507 + if (efx->loopback_modes) {
508 + /* We need the 312 clock from the PHY to test the XMAC
509 + * registers, so move into XGMII loopback if available */
510 + if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
511 + efx->loopback_mode = LOOPBACK_XGMII;
512 + else
513 + efx->loopback_mode = __ffs(efx->loopback_modes);
514 + }
515 + __efx_reconfigure_port(efx);
516 + mutex_unlock(&efx->mac_lock);
517 +
518 + efx_reset_down(efx, reset_method);
519 +
520 + tests->registers =
521 + efx_nic_test_registers(efx, falcon_b0_register_tests,
522 + ARRAY_SIZE(falcon_b0_register_tests))
523 + ? -1 : 1;
524 +
525 + rc = falcon_reset_hw(efx, reset_method);
526 + rc2 = efx_reset_up(efx, reset_method, rc == 0);
527 + return rc ? rc : rc2;
528 }
529
530 /**************************************************************************
531 @@ -1765,6 +1792,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
532 .remove_port = falcon_remove_port,
533 .handle_global_event = falcon_handle_global_event,
534 .prepare_flush = falcon_prepare_flush,
535 + .finish_flush = efx_port_dummy_op_void,
536 .update_stats = falcon_update_nic_stats,
537 .start_stats = falcon_start_nic_stats,
538 .stop_stats = falcon_stop_nic_stats,
539 @@ -1807,6 +1835,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
540 .remove_port = falcon_remove_port,
541 .handle_global_event = falcon_handle_global_event,
542 .prepare_flush = falcon_prepare_flush,
543 + .finish_flush = efx_port_dummy_op_void,
544 .update_stats = falcon_update_nic_stats,
545 .start_stats = falcon_start_nic_stats,
546 .stop_stats = falcon_stop_nic_stats,
547 @@ -1818,7 +1847,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
548 .get_wol = falcon_get_wol,
549 .set_wol = falcon_set_wol,
550 .resume_wol = efx_port_dummy_op_void,
551 - .test_registers = falcon_b0_test_registers,
552 + .test_chip = falcon_b0_test_chip,
553 .test_nvram = falcon_test_nvram,
554
555 .revision = EFX_REV_FALCON_B0,
556 diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
557 index 17b6463..eb85217 100644
558 --- a/drivers/net/ethernet/sfc/mcdi.c
559 +++ b/drivers/net/ethernet/sfc/mcdi.c
560 @@ -22,7 +22,7 @@
561 **************************************************************************
562 */
563
564 -#define MCDI_RPC_TIMEOUT 10 /*seconds */
565 +#define MCDI_RPC_TIMEOUT (10 * HZ)
566
567 #define MCDI_PDU(efx) \
568 (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
569 @@ -120,7 +120,7 @@ static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
570 static int efx_mcdi_poll(struct efx_nic *efx)
571 {
572 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
573 - unsigned int time, finish;
574 + unsigned long time, finish;
575 unsigned int respseq, respcmd, error;
576 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
577 unsigned int rc, spins;
578 @@ -136,7 +136,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
579 * and poll once a jiffy (approximately)
580 */
581 spins = TICK_USEC;
582 - finish = get_seconds() + MCDI_RPC_TIMEOUT;
583 + finish = jiffies + MCDI_RPC_TIMEOUT;
584
585 while (1) {
586 if (spins != 0) {
587 @@ -146,7 +146,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
588 schedule_timeout_uninterruptible(1);
589 }
590
591 - time = get_seconds();
592 + time = jiffies;
593
594 rmb();
595 efx_readd(efx, &reg, pdu);
596 @@ -158,7 +158,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
597 EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
598 break;
599
600 - if (time >= finish)
601 + if (time_after(time, finish))
602 return -ETIMEDOUT;
603 }
604
605 @@ -250,7 +250,7 @@ static int efx_mcdi_await_completion(struct efx_nic *efx)
606 if (wait_event_timeout(
607 mcdi->wq,
608 atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
609 - msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0)
610 + MCDI_RPC_TIMEOUT) == 0)
611 return -ETIMEDOUT;
612
613 /* Check if efx_mcdi_set_mode() switched us back to polled completions.
614 @@ -641,9 +641,8 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
615 u16 *fw_subtype_list, u32 *capabilities)
616 {
617 uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN];
618 - size_t outlen;
619 + size_t outlen, offset, i;
620 int port_num = efx_port_num(efx);
621 - int offset;
622 int rc;
623
624 BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
625 @@ -663,11 +662,16 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
626 : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
627 if (mac_address)
628 memcpy(mac_address, outbuf + offset, ETH_ALEN);
629 - if (fw_subtype_list)
630 - memcpy(fw_subtype_list,
631 - outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST,
632 - MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM *
633 - sizeof(fw_subtype_list[0]));
634 + if (fw_subtype_list) {
635 + offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST;
636 + for (i = 0;
637 + i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM;
638 + i++) {
639 + fw_subtype_list[i] =
640 + le16_to_cpup((__le16 *)(outbuf + offset));
641 + offset += 2;
642 + }
643 + }
644 if (capabilities) {
645 if (port_num)
646 *capabilities = MCDI_DWORD(outbuf,
647 @@ -1164,6 +1168,9 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx)
648 __le32 *qid;
649 int rc, count;
650
651 + BUILD_BUG_ON(EFX_MAX_CHANNELS >
652 + MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
653 +
654 qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL);
655 if (qid == NULL)
656 return -ENOMEM;
657 diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
658 index 0bdf3e3..96f43d2 100644
659 --- a/drivers/net/ethernet/sfc/mcdi.h
660 +++ b/drivers/net/ethernet/sfc/mcdi.h
661 @@ -107,11 +107,13 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
662 #define MCDI_EVENT_FIELD(_ev, _field) \
663 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
664 #define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
665 - EFX_DWORD_FIELD( \
666 + EFX_EXTRACT_DWORD( \
667 *((efx_dword_t *) \
668 (MCDI_ARRAY_PTR(_buf, _field1, _type, _index) + \
669 (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _OFST & ~3))), \
670 - MC_CMD_ ## _type ## _TYPEDEF_ ## _field2)
671 + MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f, \
672 + (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f) + \
673 + MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _WIDTH - 1)
674
675 extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
676 extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
677 diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
678 index f0385e1..b71ef14 100644
679 --- a/drivers/net/ethernet/sfc/net_driver.h
680 +++ b/drivers/net/ethernet/sfc/net_driver.h
681 @@ -68,6 +68,8 @@
682 #define EFX_TXQ_TYPES 4
683 #define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
684
685 +struct efx_self_tests;
686 +
687 /**
688 * struct efx_special_buffer - An Efx special buffer
689 * @addr: CPU base address of the buffer
690 @@ -194,6 +196,7 @@ struct efx_tx_queue {
691 /* Members shared between paths and sometimes updated */
692 unsigned int empty_read_count ____cacheline_aligned_in_smp;
693 #define EFX_EMPTY_COUNT_VALID 0x80000000
694 + atomic_t flush_outstanding;
695 };
696
697 /**
698 @@ -203,6 +206,7 @@ struct efx_tx_queue {
699 * Will be %NULL if the buffer slot is currently free.
700 * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE.
701 * Will be %NULL if the buffer slot is currently free.
702 + * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
703 * @len: Buffer length, in bytes.
704 * @flags: Flags for buffer and packet state.
705 */
706 @@ -212,7 +216,8 @@ struct efx_rx_buffer {
707 struct sk_buff *skb;
708 struct page *page;
709 } u;
710 - unsigned int len;
711 + u16 page_offset;
712 + u16 len;
713 u16 flags;
714 };
715 #define EFX_RX_BUF_PAGE 0x0001
716 @@ -887,6 +892,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
717 * @remove_port: Free resources allocated by probe_port()
718 * @handle_global_event: Handle a "global" event (may be %NULL)
719 * @prepare_flush: Prepare the hardware for flushing the DMA queues
720 + * @finish_flush: Clean up after flushing the DMA queues
721 * @update_stats: Update statistics not provided by event handling
722 * @start_stats: Start the regular fetching of statistics
723 * @stop_stats: Stop the regular fetching of statistics
724 @@ -899,7 +905,8 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
725 * @get_wol: Get WoL configuration from driver state
726 * @set_wol: Push WoL configuration to the NIC
727 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
728 - * @test_registers: Test read/write functionality of control registers
729 + * @test_chip: Test registers. Should use efx_nic_test_registers(), and is
730 + * expected to reset the NIC.
731 * @test_nvram: Test validity of NVRAM contents
732 * @revision: Hardware architecture revision
733 * @mem_map_size: Memory BAR mapped size
734 @@ -933,6 +940,7 @@ struct efx_nic_type {
735 void (*remove_port)(struct efx_nic *efx);
736 bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
737 void (*prepare_flush)(struct efx_nic *efx);
738 + void (*finish_flush)(struct efx_nic *efx);
739 void (*update_stats)(struct efx_nic *efx);
740 void (*start_stats)(struct efx_nic *efx);
741 void (*stop_stats)(struct efx_nic *efx);
742 @@ -944,7 +952,7 @@ struct efx_nic_type {
743 void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
744 int (*set_wol)(struct efx_nic *efx, u32 type);
745 void (*resume_wol)(struct efx_nic *efx);
746 - int (*test_registers)(struct efx_nic *efx);
747 + int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
748 int (*test_nvram)(struct efx_nic *efx);
749
750 int revision;
751 diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
752 index 4a9a5be..578e52a6 100644
753 --- a/drivers/net/ethernet/sfc/nic.c
754 +++ b/drivers/net/ethernet/sfc/nic.c
755 @@ -73,6 +73,8 @@
756 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
757 (_tx_queue)->queue)
758
759 +static void efx_magic_event(struct efx_channel *channel, u32 magic);
760 +
761 /**************************************************************************
762 *
763 * Solarstorm hardware access
764 @@ -124,9 +126,6 @@ int efx_nic_test_registers(struct efx_nic *efx,
765 unsigned address = 0, i, j;
766 efx_oword_t mask, imask, original, reg, buf;
767
768 - /* Falcon should be in loopback to isolate the XMAC from the PHY */
769 - WARN_ON(!LOOPBACK_INTERNAL(efx));
770 -
771 for (i = 0; i < n_regs; ++i) {
772 address = regs[i].address;
773 mask = imask = regs[i].mask;
774 @@ -380,7 +379,8 @@ efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
775 return false;
776
777 tx_queue->empty_read_count = 0;
778 - return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
779 + return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
780 + && tx_queue->write_count - write_count == 1;
781 }
782
783 /* For each entry inserted into the software descriptor ring, create a
784 @@ -495,6 +495,9 @@ static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
785 struct efx_nic *efx = tx_queue->efx;
786 efx_oword_t tx_flush_descq;
787
788 + WARN_ON(atomic_read(&tx_queue->flush_outstanding));
789 + atomic_set(&tx_queue->flush_outstanding, 1);
790 +
791 EFX_POPULATE_OWORD_2(tx_flush_descq,
792 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
793 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
794 @@ -670,6 +673,47 @@ static bool efx_flush_wake(struct efx_nic *efx)
795 && atomic_read(&efx->rxq_flush_pending) > 0));
796 }
797
798 +static bool efx_check_tx_flush_complete(struct efx_nic *efx)
799 +{
800 + bool i = true;
801 + efx_oword_t txd_ptr_tbl;
802 + struct efx_channel *channel;
803 + struct efx_tx_queue *tx_queue;
804 +
805 + efx_for_each_channel(channel, efx) {
806 + efx_for_each_channel_tx_queue(tx_queue, channel) {
807 + efx_reado_table(efx, &txd_ptr_tbl,
808 + FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
809 + if (EFX_OWORD_FIELD(txd_ptr_tbl,
810 + FRF_AZ_TX_DESCQ_FLUSH) ||
811 + EFX_OWORD_FIELD(txd_ptr_tbl,
812 + FRF_AZ_TX_DESCQ_EN)) {
813 + netif_dbg(efx, hw, efx->net_dev,
814 + "flush did not complete on TXQ %d\n",
815 + tx_queue->queue);
816 + i = false;
817 + } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
818 + 1, 0)) {
819 + /* The flush is complete, but we didn't
820 + * receive a flush completion event
821 + */
822 + netif_dbg(efx, hw, efx->net_dev,
823 + "flush complete on TXQ %d, so drain "
824 + "the queue\n", tx_queue->queue);
825 + /* Don't need to increment drain_pending as it
826 + * has already been incremented for the queues
827 + * which did not drain
828 + */
829 + efx_magic_event(channel,
830 + EFX_CHANNEL_MAGIC_TX_DRAIN(
831 + tx_queue));
832 + }
833 + }
834 + }
835 +
836 + return i;
837 +}
838 +
839 /* Flush all the transmit queues, and continue flushing receive queues until
840 * they're all flushed. Wait for the DRAIN events to be recieved so that there
841 * are no more RX and TX events left on any channel. */
842 @@ -681,7 +725,6 @@ int efx_nic_flush_queues(struct efx_nic *efx)
843 struct efx_tx_queue *tx_queue;
844 int rc = 0;
845
846 - efx->fc_disable++;
847 efx->type->prepare_flush(efx);
848
849 efx_for_each_channel(channel, efx) {
850 @@ -731,7 +774,8 @@ int efx_nic_flush_queues(struct efx_nic *efx)
851 timeout);
852 }
853
854 - if (atomic_read(&efx->drain_pending)) {
855 + if (atomic_read(&efx->drain_pending) &&
856 + !efx_check_tx_flush_complete(efx)) {
857 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
858 "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
859 atomic_read(&efx->rxq_flush_outstanding),
860 @@ -743,7 +787,7 @@ int efx_nic_flush_queues(struct efx_nic *efx)
861 atomic_set(&efx->rxq_flush_outstanding, 0);
862 }
863
864 - efx->fc_disable--;
865 + efx->type->finish_flush(efx);
866
867 return rc;
868 }
869 @@ -1018,9 +1062,10 @@ efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
870 if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
871 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
872 qid % EFX_TXQ_TYPES);
873 -
874 - efx_magic_event(tx_queue->channel,
875 - EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
876 + if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
877 + efx_magic_event(tx_queue->channel,
878 + EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
879 + }
880 }
881 }
882
883 diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
884 index f48ccf6..bbdfcd6 100644
885 --- a/drivers/net/ethernet/sfc/nic.h
886 +++ b/drivers/net/ethernet/sfc/nic.h
887 @@ -316,6 +316,8 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
888
889 /* Global Resources */
890 extern int efx_nic_flush_queues(struct efx_nic *efx);
891 +extern void siena_prepare_flush(struct efx_nic *efx);
892 +extern void siena_finish_flush(struct efx_nic *efx);
893 extern void falcon_start_nic_stats(struct efx_nic *efx);
894 extern void falcon_stop_nic_stats(struct efx_nic *efx);
895 extern void falcon_setup_xaui(struct efx_nic *efx);
896 diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
897 index 763fa2f..7b3c068 100644
898 --- a/drivers/net/ethernet/sfc/rx.c
899 +++ b/drivers/net/ethernet/sfc/rx.c
900 @@ -95,11 +95,7 @@ static unsigned int rx_refill_limit = 95;
901 static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
902 struct efx_rx_buffer *buf)
903 {
904 - /* Offset is always within one page, so we don't need to consider
905 - * the page order.
906 - */
907 - return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
908 - efx->type->rx_buffer_hash_size;
909 + return buf->page_offset + efx->type->rx_buffer_hash_size;
910 }
911 static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
912 {
913 @@ -193,6 +189,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
914 struct efx_rx_buffer *rx_buf;
915 struct page *page;
916 void *page_addr;
917 + unsigned int page_offset;
918 struct efx_rx_page_state *state;
919 dma_addr_t dma_addr;
920 unsigned index, count;
921 @@ -219,12 +216,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
922
923 page_addr += sizeof(struct efx_rx_page_state);
924 dma_addr += sizeof(struct efx_rx_page_state);
925 + page_offset = sizeof(struct efx_rx_page_state);
926
927 split:
928 index = rx_queue->added_count & rx_queue->ptr_mask;
929 rx_buf = efx_rx_buffer(rx_queue, index);
930 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
931 rx_buf->u.page = page;
932 + rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
933 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
934 rx_buf->flags = EFX_RX_BUF_PAGE;
935 ++rx_queue->added_count;
936 @@ -236,6 +235,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
937 get_page(page);
938 dma_addr += (PAGE_SIZE >> 1);
939 page_addr += (PAGE_SIZE >> 1);
940 + page_offset += (PAGE_SIZE >> 1);
941 ++count;
942 goto split;
943 }
944 @@ -245,7 +245,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
945 }
946
947 static void efx_unmap_rx_buffer(struct efx_nic *efx,
948 - struct efx_rx_buffer *rx_buf)
949 + struct efx_rx_buffer *rx_buf,
950 + unsigned int used_len)
951 {
952 if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
953 struct efx_rx_page_state *state;
954 @@ -256,6 +257,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
955 state->dma_addr,
956 efx_rx_buf_size(efx),
957 PCI_DMA_FROMDEVICE);
958 + } else if (used_len) {
959 + dma_sync_single_for_cpu(&efx->pci_dev->dev,
960 + rx_buf->dma_addr, used_len,
961 + DMA_FROM_DEVICE);
962 }
963 } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
964 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
965 @@ -278,7 +283,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
966 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
967 struct efx_rx_buffer *rx_buf)
968 {
969 - efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
970 + efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
971 efx_free_rx_buffer(rx_queue->efx, rx_buf);
972 }
973
974 @@ -544,10 +549,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
975 goto out;
976 }
977
978 - /* Release card resources - assumes all RX buffers consumed in-order
979 - * per RX queue
980 + /* Release and/or sync DMA mapping - assumes all RX buffers
981 + * consumed in-order per RX queue
982 */
983 - efx_unmap_rx_buffer(efx, rx_buf);
984 + efx_unmap_rx_buffer(efx, rx_buf, len);
985
986 /* Prefetch nice and early so data will (hopefully) be in cache by
987 * the time we look at it.
988 diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
989 index de4c006..4a51ea3 100644
990 --- a/drivers/net/ethernet/sfc/selftest.c
991 +++ b/drivers/net/ethernet/sfc/selftest.c
992 @@ -120,19 +120,6 @@ static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
993 return rc;
994 }
995
996 -static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
997 -{
998 - int rc = 0;
999 -
1000 - /* Test register access */
1001 - if (efx->type->test_registers) {
1002 - rc = efx->type->test_registers(efx);
1003 - tests->registers = rc ? -1 : 1;
1004 - }
1005 -
1006 - return rc;
1007 -}
1008 -
1009 /**************************************************************************
1010 *
1011 * Interrupt and event queue testing
1012 @@ -699,8 +686,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
1013 {
1014 enum efx_loopback_mode loopback_mode = efx->loopback_mode;
1015 int phy_mode = efx->phy_mode;
1016 - enum reset_type reset_method = RESET_TYPE_INVISIBLE;
1017 - int rc_test = 0, rc_reset = 0, rc;
1018 + int rc_test = 0, rc_reset, rc;
1019
1020 efx_selftest_async_cancel(efx);
1021
1022 @@ -735,46 +721,28 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
1023 /* Detach the device so the kernel doesn't transmit during the
1024 * loopback test and the watchdog timeout doesn't fire.
1025 */
1026 - netif_device_detach(efx->net_dev);
1027 + efx_device_detach_sync(efx);
1028 +
1029 + if (efx->type->test_chip) {
1030 + rc_reset = efx->type->test_chip(efx, tests);
1031 + if (rc_reset) {
1032 + netif_err(efx, hw, efx->net_dev,
1033 + "Unable to recover from chip test\n");
1034 + efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1035 + return rc_reset;
1036 + }
1037
1038 - mutex_lock(&efx->mac_lock);
1039 - if (efx->loopback_modes) {
1040 - /* We need the 312 clock from the PHY to test the XMAC
1041 - * registers, so move into XGMII loopback if available */
1042 - if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
1043 - efx->loopback_mode = LOOPBACK_XGMII;
1044 - else
1045 - efx->loopback_mode = __ffs(efx->loopback_modes);
1046 + if ((tests->registers < 0) && !rc_test)
1047 + rc_test = -EIO;
1048 }
1049
1050 - __efx_reconfigure_port(efx);
1051 - mutex_unlock(&efx->mac_lock);
1052 -
1053 - /* free up all consumers of SRAM (including all the queues) */
1054 - efx_reset_down(efx, reset_method);
1055 -
1056 - rc = efx_test_chip(efx, tests);
1057 - if (rc && !rc_test)
1058 - rc_test = rc;
1059 -
1060 - /* reset the chip to recover from the register test */
1061 - rc_reset = efx->type->reset(efx, reset_method);
1062 -
1063 /* Ensure that the phy is powered and out of loopback
1064 * for the bist and loopback tests */
1065 + mutex_lock(&efx->mac_lock);
1066 efx->phy_mode &= ~PHY_MODE_LOW_POWER;
1067 efx->loopback_mode = LOOPBACK_NONE;
1068 -
1069 - rc = efx_reset_up(efx, reset_method, rc_reset == 0);
1070 - if (rc && !rc_reset)
1071 - rc_reset = rc;
1072 -
1073 - if (rc_reset) {
1074 - netif_err(efx, drv, efx->net_dev,
1075 - "Unable to recover from chip test\n");
1076 - efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1077 - return rc_reset;
1078 - }
1079 + __efx_reconfigure_port(efx);
1080 + mutex_unlock(&efx->mac_lock);
1081
1082 rc = efx_test_phy(efx, tests, flags);
1083 if (rc && !rc_test)
1084 diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
1085 index 9f8d7ce..b09ff79 100644
1086 --- a/drivers/net/ethernet/sfc/siena.c
1087 +++ b/drivers/net/ethernet/sfc/siena.c
1088 @@ -25,10 +25,12 @@
1089 #include "workarounds.h"
1090 #include "mcdi.h"
1091 #include "mcdi_pcol.h"
1092 +#include "selftest.h"
1093
1094 /* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
1095
1096 static void siena_init_wol(struct efx_nic *efx);
1097 +static int siena_reset_hw(struct efx_nic *efx, enum reset_type method);
1098
1099
1100 static void siena_push_irq_moderation(struct efx_channel *channel)
1101 @@ -125,6 +127,18 @@ static void siena_remove_port(struct efx_nic *efx)
1102 efx_nic_free_buffer(efx, &efx->stats_buffer);
1103 }
1104
1105 +void siena_prepare_flush(struct efx_nic *efx)
1106 +{
1107 + if (efx->fc_disable++ == 0)
1108 + efx_mcdi_set_mac(efx);
1109 +}
1110 +
1111 +void siena_finish_flush(struct efx_nic *efx)
1112 +{
1113 + if (--efx->fc_disable == 0)
1114 + efx_mcdi_set_mac(efx);
1115 +}
1116 +
1117 static const struct efx_nic_register_test siena_register_tests[] = {
1118 { FR_AZ_ADR_REGION,
1119 EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
1120 @@ -154,10 +168,29 @@ static const struct efx_nic_register_test siena_register_tests[] = {
1121 EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) },
1122 };
1123
1124 -static int siena_test_registers(struct efx_nic *efx)
1125 +static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
1126 {
1127 - return efx_nic_test_registers(efx, siena_register_tests,
1128 - ARRAY_SIZE(siena_register_tests));
1129 + enum reset_type reset_method = RESET_TYPE_ALL;
1130 + int rc, rc2;
1131 +
1132 + efx_reset_down(efx, reset_method);
1133 +
1134 + /* Reset the chip immediately so that it is completely
1135 + * quiescent regardless of what any VF driver does.
1136 + */
1137 + rc = siena_reset_hw(efx, reset_method);
1138 + if (rc)
1139 + goto out;
1140 +
1141 + tests->registers =
1142 + efx_nic_test_registers(efx, siena_register_tests,
1143 + ARRAY_SIZE(siena_register_tests))
1144 + ? -1 : 1;
1145 +
1146 + rc = siena_reset_hw(efx, reset_method);
1147 +out:
1148 + rc2 = efx_reset_up(efx, reset_method, rc == 0);
1149 + return rc ? rc : rc2;
1150 }
1151
1152 /**************************************************************************
1153 @@ -637,7 +670,8 @@ const struct efx_nic_type siena_a0_nic_type = {
1154 .reset = siena_reset_hw,
1155 .probe_port = siena_probe_port,
1156 .remove_port = siena_remove_port,
1157 - .prepare_flush = efx_port_dummy_op_void,
1158 + .prepare_flush = siena_prepare_flush,
1159 + .finish_flush = siena_finish_flush,
1160 .update_stats = siena_update_nic_stats,
1161 .start_stats = siena_start_nic_stats,
1162 .stop_stats = siena_stop_nic_stats,
1163 @@ -649,7 +683,7 @@ const struct efx_nic_type siena_a0_nic_type = {
1164 .get_wol = siena_get_wol,
1165 .set_wol = siena_set_wol,
1166 .resume_wol = siena_init_wol,
1167 - .test_registers = siena_test_registers,
1168 + .test_chip = siena_test_chip,
1169 .test_nvram = efx_mcdi_nvram_test_all,
1170
1171 .revision = EFX_REV_SIENA_A0,
1172 diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
1173 index 9cb3b84..4ad8082 100644
1174 --- a/drivers/net/ethernet/sfc/siena_sriov.c
1175 +++ b/drivers/net/ethernet/sfc/siena_sriov.c
1176 @@ -21,6 +21,9 @@
1177 /* Number of longs required to track all the VIs in a VF */
1178 #define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)
1179
1180 +/* Maximum number of RX queues supported */
1181 +#define VF_MAX_RX_QUEUES 63
1182 +
1183 /**
1184 * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour
1185 * @VF_TX_FILTER_OFF: Disabled
1186 @@ -578,6 +581,7 @@ static int efx_vfdi_init_rxq(struct efx_vf *vf)
1187 efx_oword_t reg;
1188
1189 if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
1190 + vf_rxq >= VF_MAX_RX_QUEUES ||
1191 bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
1192 if (net_ratelimit())
1193 netif_err(efx, hw, efx->net_dev,
1194 @@ -683,13 +687,15 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
1195 __le32 *rxqs;
1196 int rc;
1197
1198 + BUILD_BUG_ON(VF_MAX_RX_QUEUES >
1199 + MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
1200 +
1201 rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
1202 if (rxqs == NULL)
1203 return VFDI_RC_ENOMEM;
1204
1205 rtnl_lock();
1206 - if (efx->fc_disable++ == 0)
1207 - efx_mcdi_set_mac(efx);
1208 + siena_prepare_flush(efx);
1209 rtnl_unlock();
1210
1211 /* Flush all the initialized queues */
1212 @@ -726,8 +732,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
1213 }
1214
1215 rtnl_lock();
1216 - if (--efx->fc_disable == 0)
1217 - efx_mcdi_set_mac(efx);
1218 + siena_finish_flush(efx);
1219 rtnl_unlock();
1220
1221 /* Irrespective of success/failure, fini the queues */
1222 diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
1223 index 63ffbdf..dfffe8d 100644
1224 --- a/drivers/net/netconsole.c
1225 +++ b/drivers/net/netconsole.c
1226 @@ -626,6 +626,7 @@ static int netconsole_netdev_event(struct notifier_block *this,
1227 goto done;
1228
1229 spin_lock_irqsave(&target_list_lock, flags);
1230 +restart:
1231 list_for_each_entry(nt, &target_list, list) {
1232 netconsole_target_get(nt);
1233 if (nt->np.dev == dev) {
1234 @@ -637,21 +638,18 @@ static int netconsole_netdev_event(struct notifier_block *this,
1235 case NETDEV_JOIN:
1236 case NETDEV_UNREGISTER:
1237 /*
1238 + * we might sleep in __netpoll_cleanup()
1239 * rtnl_lock already held
1240 */
1241 - if (nt->np.dev) {
1242 - spin_unlock_irqrestore(
1243 - &target_list_lock,
1244 - flags);
1245 - __netpoll_cleanup(&nt->np);
1246 - spin_lock_irqsave(&target_list_lock,
1247 - flags);
1248 - dev_put(nt->np.dev);
1249 - nt->np.dev = NULL;
1250 - }
1251 + spin_unlock_irqrestore(&target_list_lock, flags);
1252 + __netpoll_cleanup(&nt->np);
1253 + spin_lock_irqsave(&target_list_lock, flags);
1254 + dev_put(nt->np.dev);
1255 + nt->np.dev = NULL;
1256 nt->enabled = 0;
1257 stopped = true;
1258 - break;
1259 + netconsole_target_put(nt);
1260 + goto restart;
1261 }
1262 }
1263 netconsole_target_put(nt);
1264 diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
1265 index 8f9382b..b968b35 100644
1266 --- a/drivers/net/wireless/mwifiex/join.c
1267 +++ b/drivers/net/wireless/mwifiex/join.c
1268 @@ -1049,10 +1049,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
1269 adhoc_join->bss_descriptor.bssid,
1270 adhoc_join->bss_descriptor.ssid);
1271
1272 - for (i = 0; bss_desc->supported_rates[i] &&
1273 - i < MWIFIEX_SUPPORTED_RATES;
1274 - i++)
1275 - ;
1276 + for (i = 0; i < MWIFIEX_SUPPORTED_RATES &&
1277 + bss_desc->supported_rates[i]; i++)
1278 + ;
1279 rates_size = i;
1280
1281 /* Copy Data Rates from the Rates recorded in scan response */
1282 diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
1283 index 12b1ff5..8f2c88e 100644
1284 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c
1285 +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
1286 @@ -1157,7 +1157,9 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1287 rt2x00dev->hw->wiphy->interface_modes |=
1288 BIT(NL80211_IFTYPE_ADHOC) |
1289 BIT(NL80211_IFTYPE_AP) |
1290 +#ifdef CONFIG_MAC80211_MESH
1291 BIT(NL80211_IFTYPE_MESH_POINT) |
1292 +#endif
1293 BIT(NL80211_IFTYPE_WDS);
1294
1295 /*
1296 diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
1297 index 0c74d4f..91d2e28 100644
1298 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
1299 +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
1300 @@ -1377,74 +1377,57 @@ void rtl92cu_card_disable(struct ieee80211_hw *hw)
1301
1302 void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
1303 {
1304 - /* dummy routine needed for callback from rtl_op_configure_filter() */
1305 -}
1306 -
1307 -/*========================================================================== */
1308 -
1309 -static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw,
1310 - enum nl80211_iftype type)
1311 -{
1312 struct rtl_priv *rtlpriv = rtl_priv(hw);
1313 - u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
1314 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1315 - struct rtl_phy *rtlphy = &(rtlpriv->phy);
1316 - u8 filterout_non_associated_bssid = false;
1317 + u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
1318
1319 - switch (type) {
1320 - case NL80211_IFTYPE_ADHOC:
1321 - case NL80211_IFTYPE_STATION:
1322 - filterout_non_associated_bssid = true;
1323 - break;
1324 - case NL80211_IFTYPE_UNSPECIFIED:
1325 - case NL80211_IFTYPE_AP:
1326 - default:
1327 - break;
1328 - }
1329 - if (filterout_non_associated_bssid) {
1330 + if (rtlpriv->psc.rfpwr_state != ERFON)
1331 + return;
1332 +
1333 + if (check_bssid) {
1334 + u8 tmp;
1335 if (IS_NORMAL_CHIP(rtlhal->version)) {
1336 - switch (rtlphy->current_io_type) {
1337 - case IO_CMD_RESUME_DM_BY_SCAN:
1338 - reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1339 - rtlpriv->cfg->ops->set_hw_reg(hw,
1340 - HW_VAR_RCR, (u8 *)(&reg_rcr));
1341 - /* enable update TSF */
1342 - _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
1343 - break;
1344 - case IO_CMD_PAUSE_DM_BY_SCAN:
1345 - reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1346 - rtlpriv->cfg->ops->set_hw_reg(hw,
1347 - HW_VAR_RCR, (u8 *)(&reg_rcr));
1348 - /* disable update TSF */
1349 - _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
1350 - break;
1351 - }
1352 + reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1353 + tmp = BIT(4);
1354 } else {
1355 - reg_rcr |= (RCR_CBSSID);
1356 - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1357 - (u8 *)(&reg_rcr));
1358 - _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5)));
1359 + reg_rcr |= RCR_CBSSID;
1360 + tmp = BIT(4) | BIT(5);
1361 }
1362 - } else if (filterout_non_associated_bssid == false) {
1363 + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1364 + (u8 *) (&reg_rcr));
1365 + _rtl92cu_set_bcn_ctrl_reg(hw, 0, tmp);
1366 + } else {
1367 + u8 tmp;
1368 if (IS_NORMAL_CHIP(rtlhal->version)) {
1369 - reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
1370 - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1371 - (u8 *)(&reg_rcr));
1372 - _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
1373 + reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1374 + tmp = BIT(4);
1375 } else {
1376 - reg_rcr &= (~RCR_CBSSID);
1377 - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1378 - (u8 *)(&reg_rcr));
1379 - _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0);
1380 + reg_rcr &= ~RCR_CBSSID;
1381 + tmp = BIT(4) | BIT(5);
1382 }
1383 + reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
1384 + rtlpriv->cfg->ops->set_hw_reg(hw,
1385 + HW_VAR_RCR, (u8 *) (&reg_rcr));
1386 + _rtl92cu_set_bcn_ctrl_reg(hw, tmp, 0);
1387 }
1388 }
1389
1390 +/*========================================================================== */
1391 +
1392 int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
1393 {
1394 + struct rtl_priv *rtlpriv = rtl_priv(hw);
1395 +
1396 if (_rtl92cu_set_media_status(hw, type))
1397 return -EOPNOTSUPP;
1398 - _rtl92cu_set_check_bssid(hw, type);
1399 +
1400 + if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
1401 + if (type != NL80211_IFTYPE_AP)
1402 + rtl92cu_set_check_bssid(hw, true);
1403 + } else {
1404 + rtl92cu_set_check_bssid(hw, false);
1405 + }
1406 +
1407 return 0;
1408 }
1409
1410 @@ -2059,8 +2042,6 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
1411 (shortgi_rate << 4) | (shortgi_rate);
1412 }
1413 rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
1414 - RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
1415 - rtl_read_dword(rtlpriv, REG_ARFR0));
1416 }
1417
1418 void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
1419 diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
1420 index 110c777..12da810 100644
1421 --- a/drivers/platform/x86/asus-laptop.c
1422 +++ b/drivers/platform/x86/asus-laptop.c
1423 @@ -860,8 +860,10 @@ static ssize_t show_infos(struct device *dev,
1424 /*
1425 * The HWRS method return informations about the hardware.
1426 * 0x80 bit is for WLAN, 0x100 for Bluetooth.
1427 + * 0x40 for WWAN, 0x10 for WIMAX.
1428 * The significance of others is yet to be found.
1429 - * If we don't find the method, we assume the device are present.
1430 + * We don't currently use this for device detection, and it
1431 + * takes several seconds to run on some systems.
1432 */
1433 rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp);
1434 if (!ACPI_FAILURE(rv))
1435 @@ -1682,7 +1684,7 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
1436 {
1437 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1438 union acpi_object *model = NULL;
1439 - unsigned long long bsts_result, hwrs_result;
1440 + unsigned long long bsts_result;
1441 char *string = NULL;
1442 acpi_status status;
1443
1444 @@ -1744,17 +1746,6 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
1445 if (*string)
1446 pr_notice(" %s model detected\n", string);
1447
1448 - /*
1449 - * The HWRS method return informations about the hardware.
1450 - * 0x80 bit is for WLAN, 0x100 for Bluetooth,
1451 - * 0x40 for WWAN, 0x10 for WIMAX.
1452 - * The significance of others is yet to be found.
1453 - */
1454 - status =
1455 - acpi_evaluate_integer(asus->handle, "HWRS", NULL, &hwrs_result);
1456 - if (!ACPI_FAILURE(status))
1457 - pr_notice(" HWRS returned %x", (int)hwrs_result);
1458 -
1459 if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
1460 asus->have_rsts = true;
1461
1462 diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
1463 index db0cf7c..a0fc7b9 100644
1464 --- a/drivers/target/iscsi/iscsi_target_auth.c
1465 +++ b/drivers/target/iscsi/iscsi_target_auth.c
1466 @@ -166,6 +166,7 @@ static int chap_server_compute_md5(
1467 {
1468 char *endptr;
1469 unsigned long id;
1470 + unsigned char id_as_uchar;
1471 unsigned char digest[MD5_SIGNATURE_SIZE];
1472 unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
1473 unsigned char identifier[10], *challenge = NULL;
1474 @@ -355,7 +356,9 @@ static int chap_server_compute_md5(
1475 goto out;
1476 }
1477
1478 - sg_init_one(&sg, &id, 1);
1479 + /* To handle both endiannesses */
1480 + id_as_uchar = id;
1481 + sg_init_one(&sg, &id_as_uchar, 1);
1482 ret = crypto_hash_update(&desc, &sg, 1);
1483 if (ret < 0) {
1484 pr_err("crypto_hash_update() failed for id\n");
1485 diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
1486 index 675303b..76fa1fa 100644
1487 --- a/drivers/tty/serial/sunsu.c
1488 +++ b/drivers/tty/serial/sunsu.c
1489 @@ -968,6 +968,7 @@ static struct uart_ops sunsu_pops = {
1490 #define UART_NR 4
1491
1492 static struct uart_sunsu_port sunsu_ports[UART_NR];
1493 +static int nr_inst; /* Number of already registered ports */
1494
1495 #ifdef CONFIG_SERIO
1496
1497 @@ -1337,13 +1338,8 @@ static int __init sunsu_console_setup(struct console *co, char *options)
1498 printk("Console: ttyS%d (SU)\n",
1499 (sunsu_reg.minor - 64) + co->index);
1500
1501 - /*
1502 - * Check whether an invalid uart number has been specified, and
1503 - * if so, search for the first available port that does have
1504 - * console support.
1505 - */
1506 - if (co->index >= UART_NR)
1507 - co->index = 0;
1508 + if (co->index > nr_inst)
1509 + return -ENODEV;
1510 port = &sunsu_ports[co->index].port;
1511
1512 /*
1513 @@ -1408,7 +1404,6 @@ static enum su_type __devinit su_get_type(struct device_node *dp)
1514
1515 static int __devinit su_probe(struct platform_device *op)
1516 {
1517 - static int inst;
1518 struct device_node *dp = op->dev.of_node;
1519 struct uart_sunsu_port *up;
1520 struct resource *rp;
1521 @@ -1418,16 +1413,16 @@ static int __devinit su_probe(struct platform_device *op)
1522
1523 type = su_get_type(dp);
1524 if (type == SU_PORT_PORT) {
1525 - if (inst >= UART_NR)
1526 + if (nr_inst >= UART_NR)
1527 return -EINVAL;
1528 - up = &sunsu_ports[inst];
1529 + up = &sunsu_ports[nr_inst];
1530 } else {
1531 up = kzalloc(sizeof(*up), GFP_KERNEL);
1532 if (!up)
1533 return -ENOMEM;
1534 }
1535
1536 - up->port.line = inst;
1537 + up->port.line = nr_inst;
1538
1539 spin_lock_init(&up->port.lock);
1540
1541 @@ -1461,6 +1456,8 @@ static int __devinit su_probe(struct platform_device *op)
1542 }
1543 dev_set_drvdata(&op->dev, up);
1544
1545 + nr_inst++;
1546 +
1547 return 0;
1548 }
1549
1550 @@ -1488,7 +1485,7 @@ static int __devinit su_probe(struct platform_device *op)
1551
1552 dev_set_drvdata(&op->dev, up);
1553
1554 - inst++;
1555 + nr_inst++;
1556
1557 return 0;
1558
1559 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1560 index 3a7a950..e411f18 100644
1561 --- a/drivers/usb/class/cdc-acm.c
1562 +++ b/drivers/usb/class/cdc-acm.c
1563 @@ -601,7 +601,6 @@ static void acm_port_destruct(struct tty_port *port)
1564
1565 dev_dbg(&acm->control->dev, "%s\n", __func__);
1566
1567 - tty_unregister_device(acm_tty_driver, acm->minor);
1568 acm_release_minor(acm);
1569 usb_put_intf(acm->control);
1570 kfree(acm->country_codes);
1571 @@ -1418,6 +1417,8 @@ static void acm_disconnect(struct usb_interface *intf)
1572
1573 stop_data_traffic(acm);
1574
1575 + tty_unregister_device(acm_tty_driver, acm->minor);
1576 +
1577 usb_free_urb(acm->ctrlurb);
1578 for (i = 0; i < ACM_NW; i++)
1579 usb_free_urb(acm->wb[i].urb);
1580 diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
1581 index 622b4a4..2b487d4 100644
1582 --- a/drivers/usb/core/hcd-pci.c
1583 +++ b/drivers/usb/core/hcd-pci.c
1584 @@ -173,6 +173,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1585 struct hc_driver *driver;
1586 struct usb_hcd *hcd;
1587 int retval;
1588 + int hcd_irq = 0;
1589
1590 if (usb_disabled())
1591 return -ENODEV;
1592 @@ -187,15 +188,19 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1593 return -ENODEV;
1594 dev->current_state = PCI_D0;
1595
1596 - /* The xHCI driver supports MSI and MSI-X,
1597 - * so don't fail if the BIOS doesn't provide a legacy IRQ.
1598 + /*
1599 + * The xHCI driver has its own irq management
1600 + * make sure irq setup is not touched for xhci in generic hcd code
1601 */
1602 - if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) {
1603 - dev_err(&dev->dev,
1604 - "Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
1605 - pci_name(dev));
1606 - retval = -ENODEV;
1607 - goto disable_pci;
1608 + if ((driver->flags & HCD_MASK) != HCD_USB3) {
1609 + if (!dev->irq) {
1610 + dev_err(&dev->dev,
1611 + "Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
1612 + pci_name(dev));
1613 + retval = -ENODEV;
1614 + goto disable_pci;
1615 + }
1616 + hcd_irq = dev->irq;
1617 }
1618
1619 hcd = usb_create_hcd(driver, &dev->dev, pci_name(dev));
1620 @@ -245,7 +250,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1621
1622 pci_set_master(dev);
1623
1624 - retval = usb_add_hcd(hcd, dev->irq, IRQF_SHARED);
1625 + retval = usb_add_hcd(hcd, hcd_irq, IRQF_SHARED);
1626 if (retval != 0)
1627 goto unmap_registers;
1628 set_hs_companion(dev, hcd);
1629 diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
1630 index e305689..7261e8f 100644
1631 --- a/drivers/usb/host/ehci-q.c
1632 +++ b/drivers/usb/host/ehci-q.c
1633 @@ -135,7 +135,7 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
1634 * qtd is updated in qh_completions(). Update the QH
1635 * overlay here.
1636 */
1637 - if (qh->hw->hw_token & ACTIVE_BIT(ehci)) {
1638 + if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) {
1639 qh->hw->hw_qtd_next = qtd->hw_next;
1640 qtd = NULL;
1641 }
1642 @@ -459,19 +459,11 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
1643 else if (last_status == -EINPROGRESS && !urb->unlinked)
1644 continue;
1645
1646 - /*
1647 - * If this was the active qtd when the qh was unlinked
1648 - * and the overlay's token is active, then the overlay
1649 - * hasn't been written back to the qtd yet so use its
1650 - * token instead of the qtd's. After the qtd is
1651 - * processed and removed, the overlay won't be valid
1652 - * any more.
1653 - */
1654 - if (state == QH_STATE_IDLE &&
1655 - qh->qtd_list.next == &qtd->qtd_list &&
1656 - (hw->hw_token & ACTIVE_BIT(ehci))) {
1657 + /* qh unlinked; token in overlay may be most current */
1658 + if (state == QH_STATE_IDLE
1659 + && cpu_to_hc32(ehci, qtd->qtd_dma)
1660 + == hw->hw_current) {
1661 token = hc32_to_cpu(ehci, hw->hw_token);
1662 - hw->hw_token &= ~ACTIVE_BIT(ehci);
1663
1664 /* An unlink may leave an incomplete
1665 * async transaction in the TT buffer.
1666 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
1667 index b04134a..ec2c89f 100644
1668 --- a/drivers/usb/host/xhci.c
1669 +++ b/drivers/usb/host/xhci.c
1670 @@ -350,7 +350,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
1671 * generate interrupts. Don't even try to enable MSI.
1672 */
1673 if (xhci->quirks & XHCI_BROKEN_MSI)
1674 - return 0;
1675 + goto legacy_irq;
1676
1677 /* unregister the legacy interrupt */
1678 if (hcd->irq)
1679 @@ -371,6 +371,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
1680 return -EINVAL;
1681 }
1682
1683 + legacy_irq:
1684 /* fall back to legacy interrupt*/
1685 ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
1686 hcd->irq_descr, hcd);
1687 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
1688 index 5361fd8..81d839f 100644
1689 --- a/drivers/usb/host/xhci.h
1690 +++ b/drivers/usb/host/xhci.h
1691 @@ -206,8 +206,8 @@ struct xhci_op_regs {
1692 /* bits 12:31 are reserved (and should be preserved on writes). */
1693
1694 /* IMAN - Interrupt Management Register */
1695 -#define IMAN_IP (1 << 1)
1696 -#define IMAN_IE (1 << 0)
1697 +#define IMAN_IE (1 << 1)
1698 +#define IMAN_IP (1 << 0)
1699
1700 /* USBSTS - USB status - status bitmasks */
1701 /* HC not running - set to 1 when run/stop bit is cleared. */
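
The xhci.h hunk above swaps the IMAN bit definitions so they match the xHCI specification: Interrupt Pending (IP) is bit 0 and is write-1-to-clear, Interrupt Enable (IE) is bit 1. A small standalone sketch of how such a register is typically acknowledged, assuming the corrected definitions (illustration only, not the driver's actual register access code):

    #include <stdint.h>
    #include <stdio.h>

    #define IMAN_IE (1u << 1)   /* Interrupt Enable */
    #define IMAN_IP (1u << 0)   /* Interrupt Pending, write 1 to clear */

    /* Acknowledge a pending interrupt while keeping interrupts enabled:
     * writing the value back with IP set clears the pending bit. */
    static uint32_t iman_ack(uint32_t iman)
    {
        return iman | IMAN_IP | IMAN_IE;
    }

    int main(void)
    {
        uint32_t iman = IMAN_IP | IMAN_IE;   /* pretend an interrupt fired */
        printf("write back 0x%x\n", iman_ack(iman));
        return 0;
    }
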
1702 diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
1703 index e8eb634..ba5b527 100644
1704 --- a/drivers/usb/serial/garmin_gps.c
1705 +++ b/drivers/usb/serial/garmin_gps.c
1706 @@ -971,10 +971,7 @@ static void garmin_close(struct usb_serial_port *port)
1707 if (!serial)
1708 return;
1709
1710 - mutex_lock(&port->serial->disc_mutex);
1711 -
1712 - if (!port->serial->disconnected)
1713 - garmin_clear(garmin_data_p);
1714 + garmin_clear(garmin_data_p);
1715
1716 /* shutdown our urbs */
1717 usb_kill_urb(port->read_urb);
1718 @@ -983,8 +980,6 @@ static void garmin_close(struct usb_serial_port *port)
1719 /* keep reset state so we know that we must start a new session */
1720 if (garmin_data_p->state != STATE_RESET)
1721 garmin_data_p->state = STATE_DISCONNECTED;
1722 -
1723 - mutex_unlock(&port->serial->disc_mutex);
1724 }
1725
1726
1727 diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
1728 index 40a95a7..7e8bb8f 100644
1729 --- a/drivers/usb/serial/io_ti.c
1730 +++ b/drivers/usb/serial/io_ti.c
1731 @@ -2770,6 +2770,7 @@ static struct usb_serial_driver edgeport_2port_device = {
1732 .set_termios = edge_set_termios,
1733 .tiocmget = edge_tiocmget,
1734 .tiocmset = edge_tiocmset,
1735 + .get_icount = edge_get_icount,
1736 .write = edge_write,
1737 .write_room = edge_write_room,
1738 .chars_in_buffer = edge_chars_in_buffer,
1739 diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
1740 index e4b199c..810695b 100644
1741 --- a/drivers/usb/serial/usb-serial.c
1742 +++ b/drivers/usb/serial/usb-serial.c
1743 @@ -168,6 +168,7 @@ static void destroy_serial(struct kref *kref)
1744 }
1745 }
1746
1747 + usb_put_intf(serial->interface);
1748 usb_put_dev(serial->dev);
1749 kfree(serial);
1750 }
1751 @@ -625,7 +626,7 @@ static struct usb_serial *create_serial(struct usb_device *dev,
1752 }
1753 serial->dev = usb_get_dev(dev);
1754 serial->type = driver;
1755 - serial->interface = interface;
1756 + serial->interface = usb_get_intf(interface);
1757 kref_init(&serial->kref);
1758 mutex_init(&serial->disc_mutex);
1759 serial->minor = SERIAL_TTY_NO_MINOR;
1760 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
1761 index 5ffa116..8ee632c 100644
1762 --- a/drivers/usb/storage/unusual_devs.h
1763 +++ b/drivers/usb/storage/unusual_devs.h
1764 @@ -488,6 +488,13 @@ UNUSUAL_DEV( 0x04e8, 0x5122, 0x0000, 0x9999,
1765 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1766 US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG),
1767
1768 +/* Added by Dmitry Artamonow <mad_soft@inbox.ru> */
1769 +UNUSUAL_DEV( 0x04e8, 0x5136, 0x0000, 0x9999,
1770 + "Samsung",
1771 + "YP-Z3",
1772 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1773 + US_FL_MAX_SECTORS_64),
1774 +
1775 /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
1776 * Device uses standards-violating 32-byte Bulk Command Block Wrappers and
1777 * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011.
1778 diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
1779 index 4134e53..50a3cb5 100644
1780 --- a/drivers/vhost/net.c
1781 +++ b/drivers/vhost/net.c
1782 @@ -235,7 +235,8 @@ static void handle_tx(struct vhost_net *net)
1783 msg.msg_controllen = 0;
1784 ubufs = NULL;
1785 } else {
1786 - struct ubuf_info *ubuf = &vq->ubuf_info[head];
1787 + struct ubuf_info *ubuf;
1788 + ubuf = vq->ubuf_info + vq->upend_idx;
1789
1790 vq->heads[vq->upend_idx].len = len;
1791 ubuf->callback = vhost_zerocopy_callback;
1792 diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c
1793 index 2790c7e..575796a 100644
1794 --- a/fs/binfmt_em86.c
1795 +++ b/fs/binfmt_em86.c
1796 @@ -42,7 +42,6 @@ static int load_em86(struct linux_binprm *bprm,struct pt_regs *regs)
1797 return -ENOEXEC;
1798 }
1799
1800 - bprm->recursion_depth++; /* Well, the bang-shell is implicit... */
1801 allow_write_access(bprm->file);
1802 fput(bprm->file);
1803 bprm->file = NULL;
1804 diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
1805 index e172439..87fa90d 100644
1806 --- a/fs/binfmt_misc.c
1807 +++ b/fs/binfmt_misc.c
1808 @@ -117,10 +117,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1809 if (!enabled)
1810 goto _ret;
1811
1812 - retval = -ENOEXEC;
1813 - if (bprm->recursion_depth > BINPRM_MAX_RECURSION)
1814 - goto _ret;
1815 -
1816 /* to keep locking time low, we copy the interpreter string */
1817 read_lock(&entries_lock);
1818 fmt = check_file(bprm);
1819 @@ -200,8 +196,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1820 if (retval < 0)
1821 goto _error;
1822
1823 - bprm->recursion_depth++;
1824 -
1825 retval = search_binary_handler (bprm, regs);
1826 if (retval < 0)
1827 goto _error;
1828 diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
1829 index df49d48..8ae4be1 100644
1830 --- a/fs/binfmt_script.c
1831 +++ b/fs/binfmt_script.c
1832 @@ -22,15 +22,13 @@ static int load_script(struct linux_binprm *bprm,struct pt_regs *regs)
1833 char interp[BINPRM_BUF_SIZE];
1834 int retval;
1835
1836 - if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!') ||
1837 - (bprm->recursion_depth > BINPRM_MAX_RECURSION))
1838 + if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
1839 return -ENOEXEC;
1840 /*
1841 * This section does the #! interpretation.
1842 * Sorta complicated, but hopefully it will work. -TYT
1843 */
1844
1845 - bprm->recursion_depth++;
1846 allow_write_access(bprm->file);
1847 fput(bprm->file);
1848 bprm->file = NULL;
1849 diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
1850 index cfd1ce3..1d36db1 100644
1851 --- a/fs/cifs/asn1.c
1852 +++ b/fs/cifs/asn1.c
1853 @@ -614,53 +614,10 @@ decode_negTokenInit(unsigned char *security_blob, int length,
1854 }
1855 }
1856
1857 - /* mechlistMIC */
1858 - if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
1859 - /* Check if we have reached the end of the blob, but with
1860 - no mechListMic (e.g. NTLMSSP instead of KRB5) */
1861 - if (ctx.error == ASN1_ERR_DEC_EMPTY)
1862 - goto decode_negtoken_exit;
1863 - cFYI(1, "Error decoding last part negTokenInit exit3");
1864 - return 0;
1865 - } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
1866 - /* tag = 3 indicating mechListMIC */
1867 - cFYI(1, "Exit 4 cls = %d con = %d tag = %d end = %p (%d)",
1868 - cls, con, tag, end, *end);
1869 - return 0;
1870 - }
1871 -
1872 - /* sequence */
1873 - if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
1874 - cFYI(1, "Error decoding last part negTokenInit exit5");
1875 - return 0;
1876 - } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
1877 - || (tag != ASN1_SEQ)) {
1878 - cFYI(1, "cls = %d con = %d tag = %d end = %p (%d)",
1879 - cls, con, tag, end, *end);
1880 - }
1881 -
1882 - /* sequence of */
1883 - if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
1884 - cFYI(1, "Error decoding last part negTokenInit exit 7");
1885 - return 0;
1886 - } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
1887 - cFYI(1, "Exit 8 cls = %d con = %d tag = %d end = %p (%d)",
1888 - cls, con, tag, end, *end);
1889 - return 0;
1890 - }
1891 -
1892 - /* general string */
1893 - if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
1894 - cFYI(1, "Error decoding last part negTokenInit exit9");
1895 - return 0;
1896 - } else if ((cls != ASN1_UNI) || (con != ASN1_PRI)
1897 - || (tag != ASN1_GENSTR)) {
1898 - cFYI(1, "Exit10 cls = %d con = %d tag = %d end = %p (%d)",
1899 - cls, con, tag, end, *end);
1900 - return 0;
1901 - }
1902 - cFYI(1, "Need to call asn1_octets_decode() function for %s",
1903 - ctx.pointer); /* is this UTF-8 or ASCII? */
1904 -decode_negtoken_exit:
1905 + /*
1906 + * We currently ignore anything at the end of the SPNEGO blob after
1907 + * the mechTypes have been parsed, since none of that info is
1908 + * used at the moment.
1909 + */
1910 return 1;
1911 }
1912 diff --git a/fs/exec.c b/fs/exec.c
1913 index 51d8629..e3a7e36d 100644
1914 --- a/fs/exec.c
1915 +++ b/fs/exec.c
1916 @@ -1389,6 +1389,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
1917 struct linux_binfmt *fmt;
1918 pid_t old_pid, old_vpid;
1919
1920 + /* This allows 4 levels of binfmt rewrites before failing hard. */
1921 + if (depth > 5)
1922 + return -ELOOP;
1923 +
1924 retval = security_bprm_check(bprm);
1925 if (retval)
1926 return retval;
1927 @@ -1413,12 +1417,8 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
1928 if (!try_module_get(fmt->module))
1929 continue;
1930 read_unlock(&binfmt_lock);
1931 + bprm->recursion_depth = depth + 1;
1932 retval = fn(bprm, regs);
1933 - /*
1934 - * Restore the depth counter to its starting value
1935 - * in this call, so we don't have to rely on every
1936 - * load_binary function to restore it on return.
1937 - */
1938 bprm->recursion_depth = depth;
1939 if (retval >= 0) {
1940 if (depth == 0) {
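
The fs/exec.c change above centralizes the interpreter-recursion limit: search_binary_handler() itself refuses to go more than a few levels deep and returns -ELOOP, which is why the per-format BINPRM_MAX_RECURSION bookkeeping in binfmt_script, binfmt_misc and binfmt_em86 can be dropped earlier in this patch. A toy standalone sketch of the same bounded-recursion pattern (hypothetical function, not the kernel code):

    #include <errno.h>
    #include <stdio.h>

    /* Resolve a chain of "interpreters", refusing to follow more than a
     * handful of rewrites before giving up with -ELOOP. */
    static int resolve_interpreter(const char *path, int depth)
    {
        if (depth > 5)
            return -ELOOP;
        if (path[0] == '#')                      /* pretend this is a #! rewrite */
            return resolve_interpreter(path + 1, depth + 1);
        return 0;                                /* native binary: done */
    }

    int main(void)
    {
        printf("%d\n", resolve_interpreter("/bin/true", 0));   /* 0 */
        printf("%d\n", resolve_interpreter("########", 0));    /* -ELOOP */
        return 0;
    }
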
1941 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
1942 index 852d4c2..0efff1e 100644
1943 --- a/fs/ext4/extents.c
1944 +++ b/fs/ext4/extents.c
1945 @@ -2959,6 +2959,7 @@ static int ext4_split_extent(handle_t *handle,
1946 int err = 0;
1947 int uninitialized;
1948 int split_flag1, flags1;
1949 + int allocated = map->m_len;
1950
1951 depth = ext_depth(inode);
1952 ex = path[depth].p_ext;
1953 @@ -2978,6 +2979,8 @@ static int ext4_split_extent(handle_t *handle,
1954 map->m_lblk + map->m_len, split_flag1, flags1);
1955 if (err)
1956 goto out;
1957 + } else {
1958 + allocated = ee_len - (map->m_lblk - ee_block);
1959 }
1960
1961 ext4_ext_drop_refs(path);
1962 @@ -3000,7 +3003,7 @@ static int ext4_split_extent(handle_t *handle,
1963
1964 ext4_ext_show_leaf(inode, path);
1965 out:
1966 - return err ? err : map->m_len;
1967 + return err ? err : allocated;
1968 }
1969
1970 #define EXT4_EXT_ZERO_LEN 7
1971 @@ -3668,6 +3671,7 @@ out:
1972 allocated - map->m_len);
1973 allocated = map->m_len;
1974 }
1975 + map->m_len = allocated;
1976
1977 /*
1978 * If we have done fallocate with the offset that is already
1979 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1980 index a0d7e26..2857e5b 100644
1981 --- a/fs/ext4/inode.c
1982 +++ b/fs/ext4/inode.c
1983 @@ -144,7 +144,8 @@ void ext4_evict_inode(struct inode *inode)
1984 * don't use page cache.
1985 */
1986 if (ext4_should_journal_data(inode) &&
1987 - (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
1988 + (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
1989 + inode->i_ino != EXT4_JOURNAL_INO) {
1990 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
1991 tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
1992
1993 diff --git a/fs/isofs/export.c b/fs/isofs/export.c
1994 index 516eb21..fd88add 100644
1995 --- a/fs/isofs/export.c
1996 +++ b/fs/isofs/export.c
1997 @@ -135,6 +135,7 @@ isofs_export_encode_fh(struct dentry *dentry,
1998 len = 3;
1999 fh32[0] = ei->i_iget5_block;
2000 fh16[2] = (__u16)ei->i_iget5_offset; /* fh16 [sic] */
2001 + fh16[3] = 0; /* avoid leaking uninitialized data */
2002 fh32[2] = inode->i_generation;
2003 if (connectable && !S_ISDIR(inode->i_mode)) {
2004 struct inode *parent;
2005 diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
2006 index de8b4cb..6075ac03 100644
2007 --- a/fs/jbd2/transaction.c
2008 +++ b/fs/jbd2/transaction.c
2009 @@ -1047,9 +1047,12 @@ out:
2010 void jbd2_journal_set_triggers(struct buffer_head *bh,
2011 struct jbd2_buffer_trigger_type *type)
2012 {
2013 - struct journal_head *jh = bh2jh(bh);
2014 + struct journal_head *jh = jbd2_journal_grab_journal_head(bh);
2015
2016 + if (WARN_ON(!jh))
2017 + return;
2018 jh->b_triggers = type;
2019 + jbd2_journal_put_journal_head(jh);
2020 }
2021
2022 void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
2023 @@ -1101,17 +1104,18 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
2024 {
2025 transaction_t *transaction = handle->h_transaction;
2026 journal_t *journal = transaction->t_journal;
2027 - struct journal_head *jh = bh2jh(bh);
2028 + struct journal_head *jh;
2029 int ret = 0;
2030
2031 - jbd_debug(5, "journal_head %p\n", jh);
2032 - JBUFFER_TRACE(jh, "entry");
2033 if (is_handle_aborted(handle))
2034 goto out;
2035 - if (!buffer_jbd(bh)) {
2036 + jh = jbd2_journal_grab_journal_head(bh);
2037 + if (!jh) {
2038 ret = -EUCLEAN;
2039 goto out;
2040 }
2041 + jbd_debug(5, "journal_head %p\n", jh);
2042 + JBUFFER_TRACE(jh, "entry");
2043
2044 jbd_lock_bh_state(bh);
2045
2046 @@ -1202,6 +1206,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
2047 spin_unlock(&journal->j_list_lock);
2048 out_unlock_bh:
2049 jbd_unlock_bh_state(bh);
2050 + jbd2_journal_put_journal_head(jh);
2051 out:
2052 JBUFFER_TRACE(jh, "exit");
2053 WARN_ON(ret); /* All errors are bugs, so dump the stack */
2054 diff --git a/fs/udf/namei.c b/fs/udf/namei.c
2055 index 38de8f2..78bff11 100644
2056 --- a/fs/udf/namei.c
2057 +++ b/fs/udf/namei.c
2058 @@ -1280,6 +1280,7 @@ static int udf_encode_fh(struct dentry *de, __u32 *fh, int *lenp,
2059 *lenp = 3;
2060 fid->udf.block = location.logicalBlockNum;
2061 fid->udf.partref = location.partitionReferenceNum;
2062 + fid->udf.parent_partref = 0;
2063 fid->udf.generation = inode->i_generation;
2064
2065 if (connectable && !S_ISDIR(inode->i_mode)) {
2066 diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
2067 index 42ad69a..9e8e08e 100644
2068 --- a/fs/udf/udf_sb.h
2069 +++ b/fs/udf/udf_sb.h
2070 @@ -82,7 +82,7 @@ struct udf_virtual_data {
2071 struct udf_bitmap {
2072 __u32 s_extLength;
2073 __u32 s_extPosition;
2074 - __u16 s_nr_groups;
2075 + int s_nr_groups;
2076 struct buffer_head **s_block_bitmap;
2077 };
2078
2079 diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
2080 index 7207a99..d9928c1 100644
2081 --- a/include/drm/drm_pciids.h
2082 +++ b/include/drm/drm_pciids.h
2083 @@ -572,7 +572,11 @@
2084 {0x1002, 0x9908, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2085 {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2086 {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2087 - {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2088 + {0x1002, 0x990B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2089 + {0x1002, 0x990C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2090 + {0x1002, 0x990D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2091 + {0x1002, 0x990E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2092 + {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2093 {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2094 {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2095 {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2096 @@ -583,6 +587,13 @@
2097 {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2098 {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2099 {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2100 + {0x1002, 0x9995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2101 + {0x1002, 0x9996, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2102 + {0x1002, 0x9997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2103 + {0x1002, 0x9998, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2104 + {0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2105 + {0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2106 + {0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2107 {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2108 {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2109 {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2110 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
2111 index eb53e15..5bab59b 100644
2112 --- a/include/linux/binfmts.h
2113 +++ b/include/linux/binfmts.h
2114 @@ -68,8 +68,6 @@ struct linux_binprm {
2115 #define BINPRM_FLAGS_EXECFD_BIT 1
2116 #define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT)
2117
2118 -#define BINPRM_MAX_RECURSION 4
2119 -
2120 /* Function parameter for binfmt->coredump */
2121 struct coredump_params {
2122 long signr;
2123 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
2124 index c1bae8d..4aea870 100644
2125 --- a/include/linux/skbuff.h
2126 +++ b/include/linux/skbuff.h
2127 @@ -479,7 +479,7 @@ struct sk_buff {
2128 union {
2129 __u32 mark;
2130 __u32 dropcount;
2131 - __u32 avail_size;
2132 + __u32 reserved_tailroom;
2133 };
2134
2135 sk_buff_data_t transport_header;
2136 @@ -1373,7 +1373,10 @@ static inline int skb_tailroom(const struct sk_buff *skb)
2137 */
2138 static inline int skb_availroom(const struct sk_buff *skb)
2139 {
2140 - return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
2141 + if (skb_is_nonlinear(skb))
2142 + return 0;
2143 +
2144 + return skb->end - skb->tail - skb->reserved_tailroom;
2145 }
2146
2147 /**
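
The skbuff.h hunk above replaces the cached avail_size with reserved_tailroom: instead of remembering how many bytes the caller asked for, the skb remembers how much tailroom is off limits, so skb_availroom() stays correct even after __pskb_trim_head() pulls bytes off the front of the buffer (which is why the avail_size adjustment in tcp_output.c disappears). A standalone arithmetic sketch with hypothetical fields, illustration only:

    #include <stdio.h>

    struct fake_skb {
        unsigned int len, tail, end, reserved_tailroom;
    };

    static int availroom(const struct fake_skb *skb)
    {
        return skb->end - skb->tail - skb->reserved_tailroom;
    }

    int main(void)
    {
        /* Allocate a 2048-byte area but promise the caller exactly 1000 bytes. */
        struct fake_skb skb = { .len = 0, .tail = 0, .end = 2048 };
        skb.reserved_tailroom = skb.end - skb.tail - 1000;

        printf("%d\n", availroom(&skb));  /* 1000 */
        skb.tail += 600; skb.len += 600;  /* queue 600 bytes of payload */
        printf("%d\n", availroom(&skb));  /* 400 */
        return 0;
    }
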
2148 diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
2149 index 16ff29a..b289bd2 100644
2150 --- a/include/net/inet_frag.h
2151 +++ b/include/net/inet_frag.h
2152 @@ -33,6 +33,13 @@ struct inet_frag_queue {
2153
2154 #define INETFRAGS_HASHSZ 64
2155
2156 +/* averaged:
2157 + * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
2158 + * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
2159 + * struct frag_queue))
2160 + */
2161 +#define INETFRAGS_MAXDEPTH 128
2162 +
2163 struct inet_frags {
2164 struct hlist_head hash[INETFRAGS_HASHSZ];
2165 rwlock_t lock;
2166 @@ -64,6 +71,8 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f);
2167 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
2168 struct inet_frags *f, void *key, unsigned int hash)
2169 __releases(&f->lock);
2170 +void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
2171 + const char *prefix);
2172
2173 static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
2174 {
2175 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
2176 index 10422ef..2124004 100644
2177 --- a/include/net/ip_fib.h
2178 +++ b/include/net/ip_fib.h
2179 @@ -129,18 +129,16 @@ struct fib_result_nl {
2180 };
2181
2182 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2183 -
2184 #define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel])
2185 -
2186 -#define FIB_TABLE_HASHSZ 2
2187 -
2188 #else /* CONFIG_IP_ROUTE_MULTIPATH */
2189 -
2190 #define FIB_RES_NH(res) ((res).fi->fib_nh[0])
2191 +#endif /* CONFIG_IP_ROUTE_MULTIPATH */
2192
2193 +#ifdef CONFIG_IP_MULTIPLE_TABLES
2194 #define FIB_TABLE_HASHSZ 256
2195 -
2196 -#endif /* CONFIG_IP_ROUTE_MULTIPATH */
2197 +#else
2198 +#define FIB_TABLE_HASHSZ 2
2199 +#endif
2200
2201 extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
2202
2203 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
2204 index f113755..a13987a 100644
2205 --- a/kernel/time/tick-broadcast.c
2206 +++ b/kernel/time/tick-broadcast.c
2207 @@ -66,7 +66,8 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
2208 */
2209 int tick_check_broadcast_device(struct clock_event_device *dev)
2210 {
2211 - if ((tick_broadcast_device.evtdev &&
2212 + if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
2213 + (tick_broadcast_device.evtdev &&
2214 tick_broadcast_device.evtdev->rating >= dev->rating) ||
2215 (dev->features & CLOCK_EVT_FEAT_C3STOP))
2216 return 0;
2217 diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
2218 index e603477..01d8e62 100644
2219 --- a/kernel/time/tick-sched.c
2220 +++ b/kernel/time/tick-sched.c
2221 @@ -496,12 +496,17 @@ void tick_nohz_idle_enter(void)
2222 */
2223 void tick_nohz_irq_exit(void)
2224 {
2225 + unsigned long flags;
2226 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
2227
2228 if (!ts->inidle)
2229 return;
2230
2231 + local_irq_save(flags);
2232 +
2233 tick_nohz_stop_sched_tick(ts);
2234 +
2235 + local_irq_restore(flags);
2236 }
2237
2238 /**
2239 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
2240 index 4a86e64..62f3751 100644
2241 --- a/kernel/trace/ftrace.c
2242 +++ b/kernel/trace/ftrace.c
2243 @@ -3034,8 +3034,8 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2244 continue;
2245 }
2246
2247 - hlist_del(&entry->node);
2248 - call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2249 + hlist_del_rcu(&entry->node);
2250 + call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
2251 }
2252 }
2253 __disable_ftrace_function_probe();
2254 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2255 index 55e4d4c..681a759 100644
2256 --- a/kernel/trace/trace.c
2257 +++ b/kernel/trace/trace.c
2258 @@ -708,7 +708,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
2259 void
2260 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
2261 {
2262 - struct ring_buffer *buf = tr->buffer;
2263 + struct ring_buffer *buf;
2264
2265 if (trace_stop_count)
2266 return;
2267 @@ -720,6 +720,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
2268 }
2269 arch_spin_lock(&ftrace_max_lock);
2270
2271 + buf = tr->buffer;
2272 tr->buffer = max_tr.buffer;
2273 max_tr.buffer = buf;
2274
2275 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2276 index c384e09..e427969 100644
2277 --- a/mm/hugetlb.c
2278 +++ b/mm/hugetlb.c
2279 @@ -2096,8 +2096,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
2280 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
2281 unsigned long hugetlb_total_pages(void)
2282 {
2283 - struct hstate *h = &default_hstate;
2284 - return h->nr_huge_pages * pages_per_huge_page(h);
2285 + struct hstate *h;
2286 + unsigned long nr_total_pages = 0;
2287 +
2288 + for_each_hstate(h)
2289 + nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2290 + return nr_total_pages;
2291 }
2292
2293 static int hugetlb_acct_memory(struct hstate *h, long delta)
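
The mm/hugetlb.c hunk above makes hugetlb_total_pages() sum over every registered huge page size instead of only the default hstate, so overcommit accounting also sees 1 GiB pages, not just 2 MiB ones. A small standalone sketch of the summation with made-up numbers (illustration only, assuming 4 KiB base pages):

    #include <stdio.h>

    struct hstate { unsigned long nr_huge_pages, pages_per_huge_page; };

    int main(void)
    {
        struct hstate hstates[] = {
            { .nr_huge_pages = 512, .pages_per_huge_page = 512 },    /* 2 MiB pages */
            { .nr_huge_pages = 4,   .pages_per_huge_page = 262144 }, /* 1 GiB pages */
        };
        unsigned long total = 0;

        for (unsigned i = 0; i < sizeof(hstates) / sizeof(hstates[0]); i++)
            total += hstates[i].nr_huge_pages * hstates[i].pages_per_huge_page;

        printf("%lu base pages\n", total);  /* 262144 + 1048576 = 1310720 */
        return 0;
    }
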
2294 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
2295 index da7b3a5..3db960c 100644
2296 --- a/net/core/rtnetlink.c
2297 +++ b/net/core/rtnetlink.c
2298 @@ -2050,7 +2050,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2299 struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
2300
2301 while (RTA_OK(attr, attrlen)) {
2302 - unsigned flavor = attr->rta_type;
2303 + unsigned int flavor = attr->rta_type & NLA_TYPE_MASK;
2304 if (flavor) {
2305 if (flavor > rta_max[sz_idx])
2306 return -EINVAL;
2307 diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
2308 index 5ff2a51..210b710 100644
2309 --- a/net/ipv4/inet_fragment.c
2310 +++ b/net/ipv4/inet_fragment.c
2311 @@ -21,6 +21,7 @@
2312 #include <linux/rtnetlink.h>
2313 #include <linux/slab.h>
2314
2315 +#include <net/sock.h>
2316 #include <net/inet_frag.h>
2317
2318 static void inet_frag_secret_rebuild(unsigned long dummy)
2319 @@ -271,6 +272,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
2320 {
2321 struct inet_frag_queue *q;
2322 struct hlist_node *n;
2323 + int depth = 0;
2324
2325 hlist_for_each_entry(q, n, &f->hash[hash], list) {
2326 if (q->net == nf && f->match(q, key)) {
2327 @@ -278,9 +280,25 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
2328 read_unlock(&f->lock);
2329 return q;
2330 }
2331 + depth++;
2332 }
2333 read_unlock(&f->lock);
2334
2335 - return inet_frag_create(nf, f, key);
2336 + if (depth <= INETFRAGS_MAXDEPTH)
2337 + return inet_frag_create(nf, f, key);
2338 + else
2339 + return ERR_PTR(-ENOBUFS);
2340 }
2341 EXPORT_SYMBOL(inet_frag_find);
2342 +
2343 +void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
2344 + const char *prefix)
2345 +{
2346 + static const char msg[] = "inet_frag_find: Fragment hash bucket"
2347 + " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
2348 + ". Dropping fragment.\n";
2349 +
2350 + if (PTR_ERR(q) == -ENOBUFS)
2351 + LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
2352 +}
2353 +EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
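
The inet_fragment.c hunk above caps how long a fragment hash chain may grow before new queues are refused with ERR_PTR(-ENOBUFS), so an attacker spraying fragments that collide in the hash can no longer make every lookup arbitrarily slow. A minimal standalone sketch of the same walk-and-count policy (hypothetical types, user-space C, illustration only):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAXDEPTH 128

    struct node { int key; struct node *next; };

    /* Look up key in one hash bucket; create it only while the chain is
     * still reasonably short, otherwise report -ENOBUFS so the caller
     * drops the packet instead of growing the chain further. */
    static struct node *find_or_create(struct node **bucket, int key, int *err)
    {
        struct node *n;
        int depth = 0;

        *err = 0;
        for (n = *bucket; n; n = n->next, depth++)
            if (n->key == key)
                return n;

        if (depth > MAXDEPTH) {
            *err = -ENOBUFS;
            return NULL;
        }

        n = malloc(sizeof(*n));
        if (!n) {
            *err = -ENOMEM;
            return NULL;
        }
        n->key = key;
        n->next = *bucket;
        *bucket = n;
        return n;
    }

    int main(void)
    {
        struct node *bucket = NULL;
        int err;

        for (int i = 0; i < 200; i++)
            if (!find_or_create(&bucket, i, &err))
                printf("key %d rejected: %d\n", i, err);
        return 0;
    }
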
2354 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
2355 index b7bf6e3..8e28871 100644
2356 --- a/net/ipv4/ip_fragment.c
2357 +++ b/net/ipv4/ip_fragment.c
2358 @@ -295,14 +295,11 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
2359 hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
2360
2361 q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
2362 - if (q == NULL)
2363 - goto out_nomem;
2364 -
2365 + if (IS_ERR_OR_NULL(q)) {
2366 + inet_frag_maybe_warn_overflow(q, pr_fmt());
2367 + return NULL;
2368 + }
2369 return container_of(q, struct ipq, q);
2370 -
2371 -out_nomem:
2372 - LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n"));
2373 - return NULL;
2374 }
2375
2376 /* Is the fragment too far ahead to be part of ipq? */
2377 diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
2378 index a0d0d9d..b69a370 100644
2379 --- a/net/ipv4/ip_options.c
2380 +++ b/net/ipv4/ip_options.c
2381 @@ -360,7 +360,6 @@ int ip_options_compile(struct net *net,
2382 }
2383 switch (optptr[3]&0xF) {
2384 case IPOPT_TS_TSONLY:
2385 - opt->ts = optptr - iph;
2386 if (skb)
2387 timeptr = &optptr[optptr[2]-1];
2388 opt->ts_needtime = 1;
2389 @@ -371,7 +370,6 @@ int ip_options_compile(struct net *net,
2390 pp_ptr = optptr + 2;
2391 goto error;
2392 }
2393 - opt->ts = optptr - iph;
2394 if (rt) {
2395 memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
2396 timeptr = &optptr[optptr[2]+3];
2397 @@ -385,7 +383,6 @@ int ip_options_compile(struct net *net,
2398 pp_ptr = optptr + 2;
2399 goto error;
2400 }
2401 - opt->ts = optptr - iph;
2402 {
2403 __be32 addr;
2404 memcpy(&addr, &optptr[optptr[2]-1], 4);
2405 @@ -418,12 +415,12 @@ int ip_options_compile(struct net *net,
2406 pp_ptr = optptr + 3;
2407 goto error;
2408 }
2409 - opt->ts = optptr - iph;
2410 if (skb) {
2411 optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4);
2412 opt->is_changed = 1;
2413 }
2414 }
2415 + opt->ts = optptr - iph;
2416 break;
2417 case IPOPT_RA:
2418 if (optlen < 4) {
2419 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2420 index 01870bd..dcb19f5 100644
2421 --- a/net/ipv4/tcp.c
2422 +++ b/net/ipv4/tcp.c
2423 @@ -704,7 +704,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
2424 * Make sure that we have exactly size bytes
2425 * available to the caller, no more, no less.
2426 */
2427 - skb->avail_size = size;
2428 + skb->reserved_tailroom = skb->end - skb->tail - size;
2429 return skb;
2430 }
2431 __kfree_skb(skb);
2432 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
2433 index 2d27e1a..952f7dd 100644
2434 --- a/net/ipv4/tcp_output.c
2435 +++ b/net/ipv4/tcp_output.c
2436 @@ -1096,7 +1096,6 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
2437 eat = min_t(int, len, skb_headlen(skb));
2438 if (eat) {
2439 __skb_pull(skb, eat);
2440 - skb->avail_size -= eat;
2441 len -= eat;
2442 if (!len)
2443 return;
2444 diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
2445 index 38f00b0..52e2f65 100644
2446 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c
2447 +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
2448 @@ -14,6 +14,8 @@
2449 * 2 of the License, or (at your option) any later version.
2450 */
2451
2452 +#define pr_fmt(fmt) "IPv6-nf: " fmt
2453 +
2454 #include <linux/errno.h>
2455 #include <linux/types.h>
2456 #include <linux/string.h>
2457 @@ -176,13 +178,12 @@ fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
2458
2459 q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash);
2460 local_bh_enable();
2461 - if (q == NULL)
2462 - goto oom;
2463 + if (IS_ERR_OR_NULL(q)) {
2464 + inet_frag_maybe_warn_overflow(q, pr_fmt());
2465 + return NULL;
2466 + }
2467
2468 return container_of(q, struct nf_ct_frag6_queue, q);
2469 -
2470 -oom:
2471 - return NULL;
2472 }
2473
2474
2475 diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
2476 index 9447bd6..5ff412f 100644
2477 --- a/net/ipv6/reassembly.c
2478 +++ b/net/ipv6/reassembly.c
2479 @@ -26,6 +26,9 @@
2480 * YOSHIFUJI,H. @USAGI Always remove fragment header to
2481 * calculate ICV correctly.
2482 */
2483 +
2484 +#define pr_fmt(fmt) "IPv6: " fmt
2485 +
2486 #include <linux/errno.h>
2487 #include <linux/types.h>
2488 #include <linux/string.h>
2489 @@ -240,9 +243,10 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6
2490 hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
2491
2492 q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
2493 - if (q == NULL)
2494 + if (IS_ERR_OR_NULL(q)) {
2495 + inet_frag_maybe_warn_overflow(q, pr_fmt());
2496 return NULL;
2497 -
2498 + }
2499 return container_of(q, struct frag_queue, q);
2500 }
2501
2502 diff --git a/net/sctp/associola.c b/net/sctp/associola.c
2503 index acd2edb..3c04692 100644
2504 --- a/net/sctp/associola.c
2505 +++ b/net/sctp/associola.c
2506 @@ -1050,7 +1050,7 @@ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
2507 transports) {
2508
2509 if (transport == active)
2510 - break;
2511 + continue;
2512 list_for_each_entry(chunk, &transport->transmitted,
2513 transmitted_list) {
2514 if (key == chunk->subh.data_hdr->tsn) {
2515 diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
2516 index 891f5db..cb1c430 100644
2517 --- a/net/sctp/sm_statefuns.c
2518 +++ b/net/sctp/sm_statefuns.c
2519 @@ -2044,7 +2044,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
2520 }
2521
2522 /* Delete the tempory new association. */
2523 - sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
2524 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));
2525 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
2526
2527 /* Restore association pointer to provide SCTP command interpeter
2528 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
2529 index fb767c6..dfc8c22 100644
2530 --- a/security/keys/keyctl.c
2531 +++ b/security/keys/keyctl.c
2532 @@ -1081,12 +1081,12 @@ long keyctl_instantiate_key_iov(key_serial_t id,
2533 ret = rw_copy_check_uvector(WRITE, _payload_iov, ioc,
2534 ARRAY_SIZE(iovstack), iovstack, &iov, 1);
2535 if (ret < 0)
2536 - return ret;
2537 + goto err;
2538 if (ret == 0)
2539 goto no_payload_free;
2540
2541 ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
2542 -
2543 +err:
2544 if (iov != iovstack)
2545 kfree(iov);
2546 return ret;
2547 diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
2548 index cec7479..28e070a 100644
2549 --- a/sound/pci/hda/hda_codec.c
2550 +++ b/sound/pci/hda/hda_codec.c
2551 @@ -2910,7 +2910,7 @@ static unsigned int convert_to_spdif_status(unsigned short val)
2552 if (val & AC_DIG1_PROFESSIONAL)
2553 sbits |= IEC958_AES0_PROFESSIONAL;
2554 if (sbits & IEC958_AES0_PROFESSIONAL) {
2555 - if (sbits & AC_DIG1_EMPHASIS)
2556 + if (val & AC_DIG1_EMPHASIS)
2557 sbits |= IEC958_AES0_PRO_EMPHASIS_5015;
2558 } else {
2559 if (val & AC_DIG1_EMPHASIS)
2560 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
2561 index 3897027..033c4c5 100644
2562 --- a/sound/pci/hda/patch_conexant.c
2563 +++ b/sound/pci/hda/patch_conexant.c
2564 @@ -1177,7 +1177,7 @@ static int patch_cxt5045(struct hda_codec *codec)
2565 }
2566
2567 if (spec->beep_amp)
2568 - snd_hda_attach_beep_device(codec, spec->beep_amp);
2569 + snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
2570
2571 return 0;
2572 }
2573 @@ -1960,7 +1960,7 @@ static int patch_cxt5051(struct hda_codec *codec)
2574 }
2575
2576 if (spec->beep_amp)
2577 - snd_hda_attach_beep_device(codec, spec->beep_amp);
2578 + snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
2579
2580 return 0;
2581 }
2582 @@ -3151,7 +3151,7 @@ static int patch_cxt5066(struct hda_codec *codec)
2583 }
2584
2585 if (spec->beep_amp)
2586 - snd_hda_attach_beep_device(codec, spec->beep_amp);
2587 + snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
2588
2589 return 0;
2590 }
2591 @@ -4501,7 +4501,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
2592 spec->capture_stream = &cx_auto_pcm_analog_capture;
2593 codec->patch_ops = cx_auto_patch_ops;
2594 if (spec->beep_amp)
2595 - snd_hda_attach_beep_device(codec, spec->beep_amp);
2596 + snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
2597
2598 /* Some laptops with Conexant chips show stalls in S3 resume,
2599 * which falls into the single-cmd mode.
2600 diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
2601 index 15b0712..f008fc7 100644
2602 --- a/sound/usb/mixer.c
2603 +++ b/sound/usb/mixer.c
2604 @@ -711,8 +711,9 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_
2605 case UAC2_CLOCK_SELECTOR: {
2606 struct uac_selector_unit_descriptor *d = p1;
2607 /* call recursively to retrieve the channel info */
2608 - if (check_input_term(state, d->baSourceID[0], term) < 0)
2609 - return -ENODEV;
2610 + err = check_input_term(state, d->baSourceID[0], term);
2611 + if (err < 0)
2612 + return err;
2613 term->type = d->bDescriptorSubtype << 16; /* virtual type */
2614 term->id = id;
2615 term->name = uac_selector_unit_iSelector(d);
2616 @@ -1263,8 +1264,9 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
2617 return err;
2618
2619 /* determine the input source type and name */
2620 - if (check_input_term(state, hdr->bSourceID, &iterm) < 0)
2621 - return -EINVAL;
2622 + err = check_input_term(state, hdr->bSourceID, &iterm);
2623 + if (err < 0)
2624 + return err;
2625
2626 master_bits = snd_usb_combine_bytes(bmaControls, csize);
2627 /* master configuration quirks */
2628 @@ -2025,7 +2027,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
2629 state.oterm.type = le16_to_cpu(desc->wTerminalType);
2630 state.oterm.name = desc->iTerminal;
2631 err = parse_audio_unit(&state, desc->bSourceID);
2632 - if (err < 0)
2633 + if (err < 0 && err != -EINVAL)
2634 return err;
2635 } else { /* UAC_VERSION_2 */
2636 struct uac2_output_terminal_descriptor *desc = p;
2637 @@ -2037,12 +2039,12 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
2638 state.oterm.type = le16_to_cpu(desc->wTerminalType);
2639 state.oterm.name = desc->iTerminal;
2640 err = parse_audio_unit(&state, desc->bSourceID);
2641 - if (err < 0)
2642 + if (err < 0 && err != -EINVAL)
2643 return err;
2644
2645 /* for UAC2, use the same approach to also add the clock selectors */
2646 err = parse_audio_unit(&state, desc->bCSourceID);
2647 - if (err < 0)
2648 + if (err < 0 && err != -EINVAL)
2649 return err;
2650 }
2651 }
2652 diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
2653 index 2984ffb..60a8e29 100644
2654 --- a/tools/hv/hv_kvp_daemon.c
2655 +++ b/tools/hv/hv_kvp_daemon.c
2656 @@ -727,13 +727,19 @@ int main(void)
2657 len = recvfrom(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0,
2658 addr_p, &addr_l);
2659
2660 - if (len < 0 || addr.nl_pid) {
2661 + if (len < 0) {
2662 syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s",
2663 addr.nl_pid, errno, strerror(errno));
2664 close(fd);
2665 return -1;
2666 }
2667
2668 + if (addr.nl_pid) {
2669 + syslog(LOG_WARNING, "Received packet from untrusted pid:%u",
2670 + addr.nl_pid);
2671 + continue;
2672 + }
2673 +
2674 incoming_msg = (struct nlmsghdr *)kvp_recv_buffer;
2675 incoming_cn_msg = (struct cn_msg *)NLMSG_DATA(incoming_msg);
2676 hv_msg = (struct hv_kvp_msg *)incoming_cn_msg->data;