Contents of /trunk/kernel-alx-legacy/patches-4.9/0146-4.9.47-all-fixes.patch
Parent Directory | Revision Log
Revision 3608 -
(show annotations)
(download)
Fri Aug 14 07:34:29 2020 UTC (4 years, 1 month ago) by niro
File size: 16062 byte(s)
-added kernel-alx-legacy pkg
1 | diff --git a/Makefile b/Makefile |
2 | index 846ef1b57a02..a0abbfc15a49 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 4 |
7 | PATCHLEVEL = 9 |
8 | -SUBLEVEL = 46 |
9 | +SUBLEVEL = 47 |
10 | EXTRAVERSION = |
11 | NAME = Roaring Lionus |
12 | |
13 | diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c |
14 | index 710511cadd50..0c060c5e844a 100644 |
15 | --- a/arch/arm/kvm/mmu.c |
16 | +++ b/arch/arm/kvm/mmu.c |
17 | @@ -829,22 +829,22 @@ void stage2_unmap_vm(struct kvm *kvm) |
18 | * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all |
19 | * underlying level-2 and level-3 tables before freeing the actual level-1 table |
20 | * and setting the struct pointer to NULL. |
21 | - * |
22 | - * Note we don't need locking here as this is only called when the VM is |
23 | - * destroyed, which can only be done once. |
24 | */ |
25 | void kvm_free_stage2_pgd(struct kvm *kvm) |
26 | { |
27 | - if (kvm->arch.pgd == NULL) |
28 | - return; |
29 | + void *pgd = NULL; |
30 | |
31 | spin_lock(&kvm->mmu_lock); |
32 | - unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); |
33 | + if (kvm->arch.pgd) { |
34 | + unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); |
35 | + pgd = kvm->arch.pgd; |
36 | + kvm->arch.pgd = NULL; |
37 | + } |
38 | spin_unlock(&kvm->mmu_lock); |
39 | |
40 | /* Free the HW pgd, one page at a time */ |
41 | - free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE); |
42 | - kvm->arch.pgd = NULL; |
43 | + if (pgd) |
44 | + free_pages_exact(pgd, S2_PGD_SIZE); |
45 | } |
46 | |
47 | static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, |
48 | diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c |
49 | index 394c61db5566..1d5890f19ca3 100644 |
50 | --- a/arch/arm64/kernel/fpsimd.c |
51 | +++ b/arch/arm64/kernel/fpsimd.c |
52 | @@ -157,9 +157,11 @@ void fpsimd_thread_switch(struct task_struct *next) |
53 | |
54 | void fpsimd_flush_thread(void) |
55 | { |
56 | + preempt_disable(); |
57 | memset(¤t->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); |
58 | fpsimd_flush_task_state(current); |
59 | set_thread_flag(TIF_FOREIGN_FPSTATE); |
60 | + preempt_enable(); |
61 | } |
62 | |
63 | /* |
64 | diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c |
65 | index 0e90c7e0279c..fec5b1ce97f8 100644 |
66 | --- a/arch/arm64/mm/fault.c |
67 | +++ b/arch/arm64/mm/fault.c |
68 | @@ -373,8 +373,11 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, |
69 | * signal first. We do not need to release the mmap_sem because it |
70 | * would already be released in __lock_page_or_retry in mm/filemap.c. |
71 | */ |
72 | - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) |
73 | + if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) { |
74 | + if (!user_mode(regs)) |
75 | + goto no_context; |
76 | return 0; |
77 | + } |
78 | |
79 | /* |
80 | * Major/minor page fault accounting is only done on the initial |
81 | diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h |
82 | index d34bd370074b..6c5020163db0 100644 |
83 | --- a/arch/x86/include/asm/io.h |
84 | +++ b/arch/x86/include/asm/io.h |
85 | @@ -304,13 +304,13 @@ static inline unsigned type in##bwl##_p(int port) \ |
86 | static inline void outs##bwl(int port, const void *addr, unsigned long count) \ |
87 | { \ |
88 | asm volatile("rep; outs" #bwl \ |
89 | - : "+S"(addr), "+c"(count) : "d"(port)); \ |
90 | + : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \ |
91 | } \ |
92 | \ |
93 | static inline void ins##bwl(int port, void *addr, unsigned long count) \ |
94 | { \ |
95 | asm volatile("rep; ins" #bwl \ |
96 | - : "+D"(addr), "+c"(count) : "d"(port)); \ |
97 | + : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \ |
98 | } |
99 | |
100 | BUILDIO(b, b, char) |
101 | diff --git a/drivers/net/wireless/intersil/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c |
102 | index 257a9eadd595..4ac6764f4897 100644 |
103 | --- a/drivers/net/wireless/intersil/p54/fwio.c |
104 | +++ b/drivers/net/wireless/intersil/p54/fwio.c |
105 | @@ -488,7 +488,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell) |
106 | |
107 | entry += sizeof(__le16); |
108 | chan->pa_points_per_curve = 8; |
109 | - memset(chan->curve_data, 0, sizeof(*chan->curve_data)); |
110 | + memset(chan->curve_data, 0, sizeof(chan->curve_data)); |
111 | memcpy(chan->curve_data, entry, |
112 | sizeof(struct p54_pa_curve_data_sample) * |
113 | min((u8)8, curve_data->points_per_channel)); |
114 | diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c |
115 | index 1910100638a2..00602abec0ea 100644 |
116 | --- a/drivers/scsi/isci/remote_node_context.c |
117 | +++ b/drivers/scsi/isci/remote_node_context.c |
118 | @@ -66,6 +66,9 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state) |
119 | { |
120 | static const char * const strings[] = RNC_STATES; |
121 | |
122 | + if (state >= ARRAY_SIZE(strings)) |
123 | + return "UNKNOWN"; |
124 | + |
125 | return strings[state]; |
126 | } |
127 | #undef C |
128 | diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c |
129 | index f753df25ba34..fed37aabf828 100644 |
130 | --- a/drivers/scsi/sg.c |
131 | +++ b/drivers/scsi/sg.c |
132 | @@ -142,6 +142,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */ |
133 | struct sg_device *parentdp; /* owning device */ |
134 | wait_queue_head_t read_wait; /* queue read until command done */ |
135 | rwlock_t rq_list_lock; /* protect access to list in req_arr */ |
136 | + struct mutex f_mutex; /* protect against changes in this fd */ |
137 | int timeout; /* defaults to SG_DEFAULT_TIMEOUT */ |
138 | int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */ |
139 | Sg_scatter_hold reserve; /* buffer held for this file descriptor */ |
140 | @@ -155,6 +156,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */ |
141 | unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */ |
142 | char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ |
143 | char mmap_called; /* 0 -> mmap() never called on this fd */ |
144 | + char res_in_use; /* 1 -> 'reserve' array in use */ |
145 | struct kref f_ref; |
146 | struct execute_work ew; |
147 | } Sg_fd; |
148 | @@ -198,7 +200,6 @@ static void sg_remove_sfp(struct kref *); |
149 | static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id); |
150 | static Sg_request *sg_add_request(Sg_fd * sfp); |
151 | static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); |
152 | -static int sg_res_in_use(Sg_fd * sfp); |
153 | static Sg_device *sg_get_dev(int dev); |
154 | static void sg_device_destroy(struct kref *kref); |
155 | |
156 | @@ -614,6 +615,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) |
157 | } |
158 | buf += SZ_SG_HEADER; |
159 | __get_user(opcode, buf); |
160 | + mutex_lock(&sfp->f_mutex); |
161 | if (sfp->next_cmd_len > 0) { |
162 | cmd_size = sfp->next_cmd_len; |
163 | sfp->next_cmd_len = 0; /* reset so only this write() effected */ |
164 | @@ -622,6 +624,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) |
165 | if ((opcode >= 0xc0) && old_hdr.twelve_byte) |
166 | cmd_size = 12; |
167 | } |
168 | + mutex_unlock(&sfp->f_mutex); |
169 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, |
170 | "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size)); |
171 | /* Determine buffer size. */ |
172 | @@ -721,7 +724,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, |
173 | sg_remove_request(sfp, srp); |
174 | return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */ |
175 | } |
176 | - if (sg_res_in_use(sfp)) { |
177 | + if (sfp->res_in_use) { |
178 | sg_remove_request(sfp, srp); |
179 | return -EBUSY; /* reserve buffer already being used */ |
180 | } |
181 | @@ -892,7 +895,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) |
182 | return result; |
183 | if (val) { |
184 | sfp->low_dma = 1; |
185 | - if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) { |
186 | + if ((0 == sfp->low_dma) && !sfp->res_in_use) { |
187 | val = (int) sfp->reserve.bufflen; |
188 | sg_remove_scat(sfp, &sfp->reserve); |
189 | sg_build_reserve(sfp, val); |
190 | @@ -967,12 +970,18 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) |
191 | return -EINVAL; |
192 | val = min_t(int, val, |
193 | max_sectors_bytes(sdp->device->request_queue)); |
194 | + mutex_lock(&sfp->f_mutex); |
195 | if (val != sfp->reserve.bufflen) { |
196 | - if (sg_res_in_use(sfp) || sfp->mmap_called) |
197 | + if (sfp->mmap_called || |
198 | + sfp->res_in_use) { |
199 | + mutex_unlock(&sfp->f_mutex); |
200 | return -EBUSY; |
201 | + } |
202 | + |
203 | sg_remove_scat(sfp, &sfp->reserve); |
204 | sg_build_reserve(sfp, val); |
205 | } |
206 | + mutex_unlock(&sfp->f_mutex); |
207 | return 0; |
208 | case SG_GET_RESERVED_SIZE: |
209 | val = min_t(int, sfp->reserve.bufflen, |
210 | @@ -1727,13 +1736,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd) |
211 | md = &map_data; |
212 | |
213 | if (md) { |
214 | - if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen) |
215 | + mutex_lock(&sfp->f_mutex); |
216 | + if (dxfer_len <= rsv_schp->bufflen && |
217 | + !sfp->res_in_use) { |
218 | + sfp->res_in_use = 1; |
219 | sg_link_reserve(sfp, srp, dxfer_len); |
220 | - else { |
221 | + } else if ((hp->flags & SG_FLAG_MMAP_IO) && sfp->res_in_use) { |
222 | + mutex_unlock(&sfp->f_mutex); |
223 | + return -EBUSY; |
224 | + } else { |
225 | res = sg_build_indirect(req_schp, sfp, dxfer_len); |
226 | - if (res) |
227 | + if (res) { |
228 | + mutex_unlock(&sfp->f_mutex); |
229 | return res; |
230 | + } |
231 | } |
232 | + mutex_unlock(&sfp->f_mutex); |
233 | |
234 | md->pages = req_schp->pages; |
235 | md->page_order = req_schp->page_order; |
236 | @@ -2024,6 +2042,8 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) |
237 | req_schp->sglist_len = 0; |
238 | sfp->save_scat_len = 0; |
239 | srp->res_used = 0; |
240 | + /* Called without mutex lock to avoid deadlock */ |
241 | + sfp->res_in_use = 0; |
242 | } |
243 | |
244 | static Sg_request * |
245 | @@ -2135,6 +2155,7 @@ sg_add_sfp(Sg_device * sdp) |
246 | rwlock_init(&sfp->rq_list_lock); |
247 | |
248 | kref_init(&sfp->f_ref); |
249 | + mutex_init(&sfp->f_mutex); |
250 | sfp->timeout = SG_DEFAULT_TIMEOUT; |
251 | sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER; |
252 | sfp->force_packid = SG_DEF_FORCE_PACK_ID; |
253 | @@ -2210,20 +2231,6 @@ sg_remove_sfp(struct kref *kref) |
254 | schedule_work(&sfp->ew.work); |
255 | } |
256 | |
257 | -static int |
258 | -sg_res_in_use(Sg_fd * sfp) |
259 | -{ |
260 | - const Sg_request *srp; |
261 | - unsigned long iflags; |
262 | - |
263 | - read_lock_irqsave(&sfp->rq_list_lock, iflags); |
264 | - for (srp = sfp->headrp; srp; srp = srp->nextrp) |
265 | - if (srp->res_used) |
266 | - break; |
267 | - read_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
268 | - return srp ? 1 : 0; |
269 | -} |
270 | - |
271 | #ifdef CONFIG_SCSI_PROC_FS |
272 | static int |
273 | sg_idr_max_id(int id, void *p, void *data) |
274 | diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c |
275 | index 6370a5efe343..defffa75ae1c 100644 |
276 | --- a/drivers/staging/wilc1000/linux_wlan.c |
277 | +++ b/drivers/staging/wilc1000/linux_wlan.c |
278 | @@ -269,23 +269,12 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header) |
279 | |
280 | int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode) |
281 | { |
282 | - int i = 0; |
283 | - int ret = -1; |
284 | - struct wilc_vif *vif; |
285 | - struct wilc *wilc; |
286 | - |
287 | - vif = netdev_priv(wilc_netdev); |
288 | - wilc = vif->wilc; |
289 | + struct wilc_vif *vif = netdev_priv(wilc_netdev); |
290 | |
291 | - for (i = 0; i < wilc->vif_num; i++) |
292 | - if (wilc->vif[i]->ndev == wilc_netdev) { |
293 | - memcpy(wilc->vif[i]->bssid, bssid, 6); |
294 | - wilc->vif[i]->mode = mode; |
295 | - ret = 0; |
296 | - break; |
297 | - } |
298 | + memcpy(vif->bssid, bssid, 6); |
299 | + vif->mode = mode; |
300 | |
301 | - return ret; |
302 | + return 0; |
303 | } |
304 | |
305 | int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc) |
306 | @@ -1212,16 +1201,11 @@ void WILC_WFI_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size) |
307 | |
308 | void wilc_netdev_cleanup(struct wilc *wilc) |
309 | { |
310 | - int i = 0; |
311 | - struct wilc_vif *vif[NUM_CONCURRENT_IFC]; |
312 | + int i; |
313 | |
314 | - if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) { |
315 | + if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) |
316 | unregister_inetaddr_notifier(&g_dev_notifier); |
317 | |
318 | - for (i = 0; i < NUM_CONCURRENT_IFC; i++) |
319 | - vif[i] = netdev_priv(wilc->vif[i]->ndev); |
320 | - } |
321 | - |
322 | if (wilc && wilc->firmware) { |
323 | release_firmware(wilc->firmware); |
324 | wilc->firmware = NULL; |
325 | @@ -1230,7 +1214,7 @@ void wilc_netdev_cleanup(struct wilc *wilc) |
326 | if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) { |
327 | for (i = 0; i < NUM_CONCURRENT_IFC; i++) |
328 | if (wilc->vif[i]->ndev) |
329 | - if (vif[i]->mac_opened) |
330 | + if (wilc->vif[i]->mac_opened) |
331 | wilc_mac_close(wilc->vif[i]->ndev); |
332 | |
333 | for (i = 0; i < NUM_CONCURRENT_IFC; i++) { |
334 | @@ -1278,9 +1262,9 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type, |
335 | |
336 | vif->idx = wl->vif_num; |
337 | vif->wilc = *wilc; |
338 | + vif->ndev = ndev; |
339 | wl->vif[i] = vif; |
340 | - wl->vif[wl->vif_num]->ndev = ndev; |
341 | - wl->vif_num++; |
342 | + wl->vif_num = i; |
343 | ndev->netdev_ops = &wilc_netdev_ops; |
344 | |
345 | { |
346 | diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c |
347 | index 2f9df37940a0..c51a49c9be70 100644 |
348 | --- a/kernel/gcov/base.c |
349 | +++ b/kernel/gcov/base.c |
350 | @@ -98,6 +98,12 @@ void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters) |
351 | } |
352 | EXPORT_SYMBOL(__gcov_merge_icall_topn); |
353 | |
354 | +void __gcov_exit(void) |
355 | +{ |
356 | + /* Unused. */ |
357 | +} |
358 | +EXPORT_SYMBOL(__gcov_exit); |
359 | + |
360 | /** |
361 | * gcov_enable_events - enable event reporting through gcov_event() |
362 | * |
363 | diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c |
364 | index 6a5c239c7669..46a18e72bce6 100644 |
365 | --- a/kernel/gcov/gcc_4_7.c |
366 | +++ b/kernel/gcov/gcc_4_7.c |
367 | @@ -18,7 +18,9 @@ |
368 | #include <linux/vmalloc.h> |
369 | #include "gcov.h" |
370 | |
371 | -#if (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) |
372 | +#if (__GNUC__ >= 7) |
373 | +#define GCOV_COUNTERS 9 |
374 | +#elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) |
375 | #define GCOV_COUNTERS 10 |
376 | #elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9 |
377 | #define GCOV_COUNTERS 9 |
378 | diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c |
379 | index 0374a596cffa..9aa0fccd5d43 100644 |
380 | --- a/kernel/locking/spinlock_debug.c |
381 | +++ b/kernel/locking/spinlock_debug.c |
382 | @@ -103,38 +103,14 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock) |
383 | lock->owner_cpu = -1; |
384 | } |
385 | |
386 | -static void __spin_lock_debug(raw_spinlock_t *lock) |
387 | -{ |
388 | - u64 i; |
389 | - u64 loops = loops_per_jiffy * HZ; |
390 | - |
391 | - for (i = 0; i < loops; i++) { |
392 | - if (arch_spin_trylock(&lock->raw_lock)) |
393 | - return; |
394 | - __delay(1); |
395 | - } |
396 | - /* lockup suspected: */ |
397 | - spin_dump(lock, "lockup suspected"); |
398 | -#ifdef CONFIG_SMP |
399 | - trigger_all_cpu_backtrace(); |
400 | -#endif |
401 | - |
402 | - /* |
403 | - * The trylock above was causing a livelock. Give the lower level arch |
404 | - * specific lock code a chance to acquire the lock. We have already |
405 | - * printed a warning/backtrace at this point. The non-debug arch |
406 | - * specific code might actually succeed in acquiring the lock. If it is |
407 | - * not successful, the end-result is the same - there is no forward |
408 | - * progress. |
409 | - */ |
410 | - arch_spin_lock(&lock->raw_lock); |
411 | -} |
412 | - |
413 | +/* |
414 | + * We are now relying on the NMI watchdog to detect lockup instead of doing |
415 | + * the detection here with an unfair lock which can cause problem of its own. |
416 | + */ |
417 | void do_raw_spin_lock(raw_spinlock_t *lock) |
418 | { |
419 | debug_spin_lock_before(lock); |
420 | - if (unlikely(!arch_spin_trylock(&lock->raw_lock))) |
421 | - __spin_lock_debug(lock); |
422 | + arch_spin_lock(&lock->raw_lock); |
423 | debug_spin_lock_after(lock); |
424 | } |
425 | |
426 | @@ -172,32 +148,6 @@ static void rwlock_bug(rwlock_t *lock, const char *msg) |
427 | |
428 | #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg) |
429 | |
430 | -#if 0 /* __write_lock_debug() can lock up - maybe this can too? */ |
431 | -static void __read_lock_debug(rwlock_t *lock) |
432 | -{ |
433 | - u64 i; |
434 | - u64 loops = loops_per_jiffy * HZ; |
435 | - int print_once = 1; |
436 | - |
437 | - for (;;) { |
438 | - for (i = 0; i < loops; i++) { |
439 | - if (arch_read_trylock(&lock->raw_lock)) |
440 | - return; |
441 | - __delay(1); |
442 | - } |
443 | - /* lockup suspected: */ |
444 | - if (print_once) { |
445 | - print_once = 0; |
446 | - printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, " |
447 | - "%s/%d, %p\n", |
448 | - raw_smp_processor_id(), current->comm, |
449 | - current->pid, lock); |
450 | - dump_stack(); |
451 | - } |
452 | - } |
453 | -} |
454 | -#endif |
455 | - |
456 | void do_raw_read_lock(rwlock_t *lock) |
457 | { |
458 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); |
459 | @@ -247,32 +197,6 @@ static inline void debug_write_unlock(rwlock_t *lock) |
460 | lock->owner_cpu = -1; |
461 | } |
462 | |
463 | -#if 0 /* This can cause lockups */ |
464 | -static void __write_lock_debug(rwlock_t *lock) |
465 | -{ |
466 | - u64 i; |
467 | - u64 loops = loops_per_jiffy * HZ; |
468 | - int print_once = 1; |
469 | - |
470 | - for (;;) { |
471 | - for (i = 0; i < loops; i++) { |
472 | - if (arch_write_trylock(&lock->raw_lock)) |
473 | - return; |
474 | - __delay(1); |
475 | - } |
476 | - /* lockup suspected: */ |
477 | - if (print_once) { |
478 | - print_once = 0; |
479 | - printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, " |
480 | - "%s/%d, %p\n", |
481 | - raw_smp_processor_id(), current->comm, |
482 | - current->pid, lock); |
483 | - dump_stack(); |
484 | - } |
485 | - } |
486 | -} |
487 | -#endif |
488 | - |
489 | void do_raw_write_lock(rwlock_t *lock) |
490 | { |
491 | debug_write_lock_before(lock); |
492 | diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c |
493 | index f344f76b6559..6b2e046a9c61 100644 |
494 | --- a/lib/lz4/lz4hc_compress.c |
495 | +++ b/lib/lz4/lz4hc_compress.c |
496 | @@ -131,7 +131,7 @@ static inline int lz4hc_insertandfindbestmatch(struct lz4hc_data *hc4, |
497 | #endif |
498 | int nbattempts = MAX_NB_ATTEMPTS; |
499 | size_t repl = 0, ml = 0; |
500 | - u16 delta; |
501 | + u16 delta = 0; |
502 | |
503 | /* HC4 match finder */ |
504 | lz4hc_insert(hc4, ip); |