Annotation of /trunk/kernel-lts/patches-3.4/0159-3.4.60-all-fixes.patch
Revision 2276
Mon Sep 2 08:10:14 2013 UTC by niro
File size: 20355 byte(s)
-linux-3.4.60
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 017d48a..f8b0260 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -213,6 +213,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
 	e820_add_region(start, end - start, type);
 }
 
+void xen_ignore_unusable(struct e820entry *list, size_t map_size)
+{
+	struct e820entry *entry;
+	unsigned int i;
+
+	for (i = 0, entry = list; i < map_size; i++, entry++) {
+		if (entry->type == E820_UNUSABLE)
+			entry->type = E820_RAM;
+	}
+}
+
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
@@ -251,6 +262,17 @@ char * __init xen_memory_setup(void)
 	}
 	BUG_ON(rc);
 
+	/*
+	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
+	 * regions, so if we're using the machine memory map leave the
+	 * region as RAM as it is in the pseudo-physical map.
+	 *
+	 * UNUSABLE regions in domUs are not handled and will need
+	 * a patch in the future.
+	 */
+	if (xen_initial_domain())
+		xen_ignore_unusable(map, memmap.nr_entries);
+
 	/* Make sure the Xen-supplied memory map is well-ordered. */
 	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
 
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index f63a588..f5c35be 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
 
 	/* Disable sending Early R_OK.
 	 * With "cached read" HDD testing and multiple ports busy on a SATA
-	 * host controller, 3726 PMP will very rarely drop a deferred
+	 * host controller, 3x26 PMP will very rarely drop a deferred
 	 * R_OK that was intended for the host. Symptom will be all
 	 * 5 drives under test will timeout, get reset, and recover.
 	 */
-	if (vendor == 0x1095 && devid == 0x3726) {
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
 		u32 reg;
 
 		err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
 		if (err_mask) {
 			rc = -EIO;
-			reason = "failed to read Sil3726 Private Register";
+			reason = "failed to read Sil3x26 Private Register";
 			goto fail;
 		}
 		reg &= ~0x1;
 		err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
 		if (err_mask) {
 			rc = -EIO;
-			reason = "failed to write Sil3726 Private Register";
+			reason = "failed to write Sil3x26 Private Register";
 			goto fail;
 		}
 	}
@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
 	u16 devid = sata_pmp_gscr_devid(gscr);
 	struct ata_link *link;
 
-	if (vendor == 0x1095 && devid == 0x3726) {
-		/* sil3726 quirks */
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+		/* sil3x26 quirks */
 		ata_for_each_link(link, ap, EDGE) {
 			/* link reports offline after LPM */
 			link->flags |= ATA_LFLAG_NO_LPM;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index dde62bf..d031932 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -502,6 +502,8 @@
 					  will not assert AGPBUSY# and will only
 					  be delivered when out of C3. */
 #define   INSTPM_FORCE_ORDERING	(1<<7) /* GEN6+ */
+#define   INSTPM_TLB_INVALIDATE	(1<<9)
+#define   INSTPM_SYNC_FLUSH	(1<<5)
 #define ACTHD	0x020c8
 #define FW_BLC	0x020d8
 #define FW_BLC2	0x020dc
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c17325c..99a9df8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -767,6 +767,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 
 	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
 	POSTING_READ(mmio);
+
+	/* Flush the TLB for this page */
+	if (INTEL_INFO(dev)->gen >= 6) {
+		u32 reg = RING_INSTPM(ring->mmio_base);
+		I915_WRITE(reg,
+			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+					      INSTPM_SYNC_FLUSH));
+		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+			     1000))
+			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+				  ring->name);
+	}
 }
 
 static int
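INSTPM is a masked register: the upper 16 bits of a write select which of the lower 16 bits take effect. The hunk above relies on the i915 helper below (reproduced from i915_reg.h of this era, to the best of my recollection; worth verifying against the tree); the poll after the write then waits for the hardware to clear INSTPM_SYNC_FLUSH once the flush is done.

    /* Masked-register write: bit n changes only if bit n+16 is also set */
    #define _MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))
    #define _MASKED_BIT_DISABLE(a)  ((a) << 16)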
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 18054d9..dbec2ff 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -522,9 +522,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
 
 	data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
 
-	memcpy(extra, &addr, sizeof(struct sockaddr) * data->length);
+	memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
 	data->flags = 1; /* has quality information */
-	memcpy(extra + sizeof(struct sockaddr) * data->length, &qual,
+	memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
 	       sizeof(struct iw_quality) * data->length);
 
 	kfree(addr);
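The bug fixed here is a pointer-vs-pointee slip: addr and qual are kmalloc'ed arrays, so &addr copied the pointer variable itself (plus adjacent stack bytes) rather than the data it points to. A self-contained illustration of the difference:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        char dst[8] = "";
        char *src = malloc(8);

        strcpy(src, "payload");

        memcpy(dst, &src, sizeof(src)); /* WRONG: copies the pointer's bytes */
        memcpy(dst, src, 8);            /* right: copies the pointed-to data */

        printf("%s\n", dst);            /* prints "payload" */
        free(src);
        return 0;
    }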
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index a66b93b..1662fcc 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
 		goto exit;
 
 	err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
-	    USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
+	    USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
 	if (err < 0)
 		goto exit;
 
+	memcpy(&ret, buf, sizeof(ret));
+
 	if (ret & 0x80) {
 		err = -EIO;
 		goto exit;
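The read is routed through buf because usb_control_msg() DMAs into the supplied buffer, and DMA into on-stack storage such as &ret is not safe (later kernels with virtually mapped stacks reject it outright). A sketch of the general pattern in kernel context; the function name and transfer parameters are hypothetical placeholders:

    /* Read one status byte over a control transfer into DMA-safe memory. */
    static int read_status_byte(struct usb_device *dev, u8 *out)
    {
        u8 *dbuf = kmalloc(1, GFP_KERNEL);  /* heap buffer: DMA-safe */
        int err;

        if (!dbuf)
            return -ENOMEM;
        err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
                              USB_DIR_IN | 0x40, 0, 0, dbuf, 1, 1000);
        if (err >= 0)
            *out = *dbuf;                   /* copy out of the DMA buffer */
        kfree(dbuf);
        return err < 0 ? err : 0;
    }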
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 91a375f..17fad3b 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -390,6 +390,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
 	mem = (unsigned long)
 		dt_alloc(size + 4, __alignof__(struct device_node));
 
+	memset((void *)mem, 0, size);
+
 	((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
 
 	pr_debug("  unflattening %lx...\n", mem);
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index e1b4f80..5c87270 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
 
 	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
 		zfcp_erp_action_dismiss(&port->erp_action);
-	else
-		shost_for_each_device(sdev, port->adapter->scsi_host)
+	else {
+		spin_lock(port->adapter->scsi_host->host_lock);
+		__shost_for_each_device(sdev, port->adapter->scsi_host)
 			if (sdev_to_zfcp(sdev)->port == port)
 				zfcp_erp_action_dismiss_lun(sdev);
+		spin_unlock(port->adapter->scsi_host->host_lock);
+	}
 }
 
 static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
 {
 	struct scsi_device *sdev;
 
-	shost_for_each_device(sdev, port->adapter->scsi_host)
+	spin_lock(port->adapter->scsi_host->host_lock);
+	__shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port)
 			_zfcp_erp_lun_reopen(sdev, clear, id, 0);
+	spin_unlock(port->adapter->scsi_host->host_lock);
 }
 
 static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -1435,8 +1440,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
 		atomic_set_mask(common_mask, &port->status);
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
-	shost_for_each_device(sdev, adapter->scsi_host)
+	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, adapter->scsi_host)
 		atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
 }
 
 /**
@@ -1470,11 +1477,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
 	}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
-	shost_for_each_device(sdev, adapter->scsi_host) {
+	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, adapter->scsi_host) {
 		atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
 		if (clear_counter)
 			atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
 	}
+	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
 }
 
 /**
@@ -1488,16 +1497,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
 {
 	struct scsi_device *sdev;
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+	unsigned long flags;
 
 	atomic_set_mask(mask, &port->status);
 
 	if (!common_mask)
 		return;
 
-	shost_for_each_device(sdev, port->adapter->scsi_host)
+	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port)
 			atomic_set_mask(common_mask,
 					&sdev_to_zfcp(sdev)->status);
+	spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
 }
 
 /**
@@ -1512,6 +1524,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
 	struct scsi_device *sdev;
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
+	unsigned long flags;
 
 	atomic_clear_mask(mask, &port->status);
 
@@ -1521,13 +1534,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
 	if (clear_counter)
 		atomic_set(&port->erp_counter, 0);
 
-	shost_for_each_device(sdev, port->adapter->scsi_host)
+	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port) {
 			atomic_clear_mask(common_mask,
 					  &sdev_to_zfcp(sdev)->status);
 			if (clear_counter)
 				atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
 		}
+	spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
 }
 
 /**
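Every hunk above swaps the same idiom: shost_for_each_device() takes and releases a reference on each scsi_device and may therefore sleep, whereas the __ prefixed variant does no reference counting and must instead run with shost->host_lock held. A sketch of the two walks side by side, with a hypothetical per-device operation mark_sdev():

    static void walk_devices(struct Scsi_Host *shost)
    {
        struct scsi_device *sdev;
        unsigned long flags;

        /* refcounted walk: may sleep, no lock required */
        shost_for_each_device(sdev, shost)
            mark_sdev(sdev);

        /* lock-protected walk: no refcounting, must not sleep */
        spin_lock_irqsave(shost->host_lock, flags);
        __shost_for_each_device(sdev, shost)
            mark_sdev(sdev);
        spin_unlock_irqrestore(shost->host_lock, flags);
    }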
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index e76d003..52c6b59 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 
 static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
 {
-	spin_lock_irq(&qdio->req_q_lock);
 	if (atomic_read(&qdio->req_q_free) ||
 	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
 		return 1;
-	spin_unlock_irq(&qdio->req_q_lock);
 	return 0;
 }
 
@@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
 {
 	long ret;
 
-	spin_unlock_irq(&qdio->req_q_lock);
-	ret = wait_event_interruptible_timeout(qdio->req_q_wq,
-			zfcp_qdio_sbal_check(qdio), 5 * HZ);
+	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
+			zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
 
 	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
 		return -EIO;
@@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
 		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
 	}
 
-	spin_lock_irq(&qdio->req_q_lock);
 	return -EIO;
 }
 
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 417c133..33dcad6 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -324,7 +324,7 @@ static void init_evtchn_cpu_bindings(void)
 
 	for_each_possible_cpu(i)
 		memset(per_cpu(cpu_evtchn_mask, i),
-		       (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
+		       (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
 }
 
 static inline void clear_evtchn(int port)
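The old size argument is a sizeof pitfall: cpu_evtchn_mask is, in this era, a per-cpu array of unsigned long, so *per_cpu(cpu_evtchn_mask, i) denotes a single element and sizeof() yields one word, leaving all but the first BITS_PER_LONG event channels uninitialized. A standalone demonstration of the trap (NR_EVENT_CHANNELS hard-coded to its 64-bit value):

    #include <stdio.h>

    #define NR_EVENT_CHANNELS 4096

    static unsigned long mask[NR_EVENT_CHANNELS / (8 * sizeof(unsigned long))];

    int main(void)
    {
        /* one element vs. the whole bitmap: 8 vs. 512 bytes on LP64 */
        printf("sizeof(*mask) = %zu, NR_EVENT_CHANNELS/8 = %d\n",
               sizeof(*mask), NR_EVENT_CHANNELS / 8);
        return 0;
    }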
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index dc9a913..2d8be51 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -345,8 +345,7 @@ static void nilfs_end_bio_write(struct bio *bio, int err)
 
 	if (err == -EOPNOTSUPP) {
 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
-		bio_put(bio);
-		/* to be detected by submit_seg_bio() */
+		/* to be detected by nilfs_segbuf_submit_bio() */
 	}
 
 	if (!uptodate)
@@ -377,12 +376,12 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
 	bio->bi_private = segbuf;
 	bio_get(bio);
 	submit_bio(mode, bio);
+	segbuf->sb_nbio++;
 	if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
 		bio_put(bio);
 		err = -EOPNOTSUPP;
 		goto failed;
 	}
-	segbuf->sb_nbio++;
 	bio_put(bio);
 
 	wi->bio = NULL;
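The reordering is safe because of the reference pattern around submit_bio(): the caller holds an extra reference, so the bio can still be inspected even though the completion handler may already have run, and counting the submission before the EOPNOTSUPP bail-out keeps sb_nbio consistent with what the completion path will see. The corrected flow, condensed with explanatory comments (identifiers as in the function above):

    bio_get(bio);               /* extra ref keeps bio valid past submit */
    submit_bio(mode, bio);      /* nilfs_end_bio_write() may run right away */
    segbuf->sb_nbio++;          /* count the submission before any bail-out */
    if (bio_flagged(bio, BIO_EOPNOTSUPP)) { /* flag set by the completion */
        bio_put(bio);           /* drop the extra ref on the error path */
        err = -EOPNOTSUPP;
        goto failed;
    }
    bio_put(bio);               /* drop the extra ref on success, too */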
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 6c6c20e..b305b31 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -530,6 +530,63 @@ do { \
 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
 
 
+#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
+						    lock, ret)		\
+do {									\
+	DEFINE_WAIT(__wait);						\
+									\
+	for (;;) {							\
+		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
+		if (condition)						\
+			break;						\
+		if (signal_pending(current)) {				\
+			ret = -ERESTARTSYS;				\
+			break;						\
+		}							\
+		spin_unlock_irq(&lock);					\
+		ret = schedule_timeout(ret);				\
+		spin_lock_irq(&lock);					\
+		if (!ret)						\
+			break;						\
+	}								\
+	finish_wait(&wq, &__wait);					\
+} while (0)
+
+/**
+ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
+ *	The condition is checked under the lock. This is expected
+ *	to be called with the lock taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before schedule()
+ *	and reacquired afterwards.
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or signal is received. The @condition is
+ * checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before going to sleep and is reacquired afterwards.
+ *
+ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
+ * was interrupted by a signal, and the remaining jiffies otherwise
+ * if the condition evaluated to true before the timeout elapsed.
+ */
+#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
+						  timeout)		\
+({									\
+	int __ret = timeout;						\
+									\
+	if (!(condition))						\
+		__wait_event_interruptible_lock_irq_timeout(		\
+					wq, condition, lock, __ret);	\
+	__ret;								\
+})
+
 
 #define __wait_event_killable(wq, condition, ret) \
 do { \
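A minimal usage sketch of the new macro, mirroring the zfcp_qdio_sbal_get() conversion earlier in this patch; mydev with its lock, wait queue, and ready flag is hypothetical:

    static int wait_ready(struct mydev *dev)
    {
        long ret;

        spin_lock_irq(&dev->lock);      /* macro expects the lock held */
        ret = wait_event_interruptible_lock_irq_timeout(dev->wq, dev->ready,
                                                        dev->lock, 5 * HZ);
        spin_unlock_irq(&dev->lock);

        if (ret == 0)
            return -ETIMEDOUT;          /* timeout elapsed */
        if (ret < 0)
            return ret;                 /* -ERESTARTSYS: interrupted by signal */
        return 0;                       /* condition true, jiffies remained */
    }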
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a64b94e..575d092 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -128,6 +128,7 @@ struct worker {
 	};
 
 	struct work_struct	*current_work;	/* L: work being processed */
+	work_func_t		current_func;	/* L: current_work's fn */
 	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
 	struct list_head	scheduled;	/* L: scheduled works */
 	struct task_struct	*task;		/* I: worker task */
@@ -838,7 +839,8 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
 	struct hlist_node *tmp;
 
 	hlist_for_each_entry(worker, tmp, bwh, hentry)
-		if (worker->current_work == work)
+		if (worker->current_work == work &&
+		    worker->current_func == work->func)
 			return worker;
 	return NULL;
 }
@@ -848,9 +850,27 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
  * @gcwq: gcwq of interest
  * @work: work to find worker for
  *
- * Find a worker which is executing @work on @gcwq. This function is
- * identical to __find_worker_executing_work() except that this
- * function calculates @bwh itself.
+ * Find a worker which is executing @work on @gcwq by searching
+ * @gcwq->busy_hash which is keyed by the address of @work. For a worker
+ * to match, its current execution should match the address of @work and
+ * its work function. This is to avoid unwanted dependency between
+ * unrelated work executions through a work item being recycled while still
+ * being executed.
+ *
+ * This is a bit tricky. A work item may be freed once its execution
+ * starts and nothing prevents the freed area from being recycled for
+ * another work item. If the same work item address ends up being reused
+ * before the original execution finishes, workqueue will identify the
+ * recycled work item as currently executing and make it wait until the
+ * current execution finishes, introducing an unwanted dependency.
+ *
+ * This function checks the work item address, work function and workqueue
+ * to avoid false positives. Note that this isn't complete as one may
+ * construct a work function which can introduce dependency onto itself
+ * through a recycled work item. Well, if somebody wants to shoot oneself
+ * in the foot that badly, there's only so much we can do, and if such
+ * deadlock actually occurs, it should be easy to locate the culprit work
+ * function.
  *
  * CONTEXT:
  * spin_lock_irq(gcwq->lock).
@@ -1721,10 +1741,9 @@ static void move_linked_works(struct work_struct *work, struct list_head *head,
 		*nextp = n;
 }
 
-static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
+static void cwq_activate_delayed_work(struct work_struct *work)
 {
-	struct work_struct *work = list_first_entry(&cwq->delayed_works,
-						    struct work_struct, entry);
+	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
 	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
 
 	trace_workqueue_activate_work(work);
@@ -1733,6 +1752,14 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
 	cwq->nr_active++;
 }
 
+static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
+{
+	struct work_struct *work = list_first_entry(&cwq->delayed_works,
+						    struct work_struct, entry);
+
+	cwq_activate_delayed_work(work);
+}
+
 /**
  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
  * @cwq: cwq of interest
@@ -1804,7 +1831,6 @@ __acquires(&gcwq->lock)
 	struct global_cwq *gcwq = cwq->gcwq;
 	struct hlist_head *bwh = busy_worker_head(gcwq, work);
 	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
-	work_func_t f = work->func;
 	int work_color;
 	struct worker *collision;
 #ifdef CONFIG_LOCKDEP
@@ -1833,6 +1859,7 @@ __acquires(&gcwq->lock)
 	debug_work_deactivate(work);
 	hlist_add_head(&worker->hentry, bwh);
 	worker->current_work = work;
+	worker->current_func = work->func;
 	worker->current_cwq = cwq;
 	work_color = get_work_color(work);
 
@@ -1870,7 +1897,7 @@ __acquires(&gcwq->lock)
 	lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	trace_workqueue_execute_start(work);
-	f(work);
+	worker->current_func(work);
 	/*
 	 * While we must be careful to not use "work" after this, the trace
 	 * point will only record its address.
@@ -1880,11 +1907,10 @@ __acquires(&gcwq->lock)
 	lock_map_release(&cwq->wq->lockdep_map);
 
 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
-		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
-		       "%s/0x%08x/%d\n",
-		       current->comm, preempt_count(), task_pid_nr(current));
-		printk(KERN_ERR "    last function: ");
-		print_symbol("%s\n", (unsigned long)f);
+		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
+		       "    last function: %pf\n",
+		       current->comm, preempt_count(), task_pid_nr(current),
+		       worker->current_func);
 		debug_show_held_locks(current);
 		dump_stack();
 	}
@@ -1898,6 +1924,7 @@ __acquires(&gcwq->lock)
 	/* we're done with it, release */
 	hlist_del_init(&worker->hentry);
 	worker->current_work = NULL;
+	worker->current_func = NULL;
 	worker->current_cwq = NULL;
 	cwq_dec_nr_in_flight(cwq, work_color, false);
 }
@@ -2625,6 +2652,18 @@ static int try_to_grab_pending(struct work_struct *work)
 		smp_rmb();
 		if (gcwq == get_work_gcwq(work)) {
 			debug_work_deactivate(work);
+
+			/*
+			 * A delayed work item cannot be grabbed directly
+			 * because it might have linked NO_COLOR work items
+			 * which, if left on the delayed_list, will confuse
+			 * cwq->nr_active management later on and cause
+			 * stall.  Make sure the work item is activated
+			 * before grabbing.
+			 */
+			if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
+				cwq_activate_delayed_work(work);
+
 			list_del_init(&work->entry);
 			cwq_dec_nr_in_flight(get_work_cwq(work),
					     get_work_color(work),