Magellan Linux

Contents of /trunk/kernel-alx-legacy/patches-4.9/0179-4.9.80-all-fixes.patch


Revision 3608
Fri Aug 14 07:34:29 2020 UTC by niro
File size: 81551 byte(s)
-added kernel-alx-legacy pkg
1 diff --git a/Makefile b/Makefile
2 index 4a7e6dff1c2e..9550b6939076 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 79
9 +SUBLEVEL = 80
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
14 index 7c9e0fae9bb9..65e0db1d3bd7 100644
15 --- a/arch/arm/boot/dts/bcm-nsp.dtsi
16 +++ b/arch/arm/boot/dts/bcm-nsp.dtsi
17 @@ -85,7 +85,7 @@
18 timer@20200 {
19 compatible = "arm,cortex-a9-global-timer";
20 reg = <0x20200 0x100>;
21 - interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
22 + interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
23 clocks = <&periph_clk>;
24 };
25
26 @@ -93,7 +93,7 @@
27 compatible = "arm,cortex-a9-twd-timer";
28 reg = <0x20600 0x20>;
29 interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
30 - IRQ_TYPE_LEVEL_HIGH)>;
31 + IRQ_TYPE_EDGE_RISING)>;
32 clocks = <&periph_clk>;
33 };
34
35 diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
36 index aa8b0672f87a..d9ae404f08c9 100644
37 --- a/arch/x86/crypto/aesni-intel_glue.c
38 +++ b/arch/x86/crypto/aesni-intel_glue.c
39 @@ -906,7 +906,7 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
40
41 if (sg_is_last(req->src) &&
42 req->src->offset + req->src->length <= PAGE_SIZE &&
43 - sg_is_last(req->dst) &&
44 + sg_is_last(req->dst) && req->dst->length &&
45 req->dst->offset + req->dst->length <= PAGE_SIZE) {
46 one_entry_in_sg = 1;
47 scatterwalk_start(&src_sg_walk, req->src);
48 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
49 index cbd1d44da2d3..20cfeeb681c6 100644
50 --- a/arch/x86/include/asm/kvm_host.h
51 +++ b/arch/x86/include/asm/kvm_host.h
52 @@ -1113,7 +1113,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
53 static inline int emulate_instruction(struct kvm_vcpu *vcpu,
54 int emulation_type)
55 {
56 - return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
57 + return x86_emulate_instruction(vcpu, 0,
58 + emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
59 }
60
61 void kvm_enable_efer_bits(u64);
62 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
63 index c8f8dd8ca0a1..6f5a3b076341 100644
64 --- a/arch/x86/kvm/emulate.c
65 +++ b/arch/x86/kvm/emulate.c
66 @@ -4990,6 +4990,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
67 bool op_prefix = false;
68 bool has_seg_override = false;
69 struct opcode opcode;
70 + u16 dummy;
71 + struct desc_struct desc;
72
73 ctxt->memop.type = OP_NONE;
74 ctxt->memopp = NULL;
75 @@ -5008,6 +5010,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
76 switch (mode) {
77 case X86EMUL_MODE_REAL:
78 case X86EMUL_MODE_VM86:
79 + def_op_bytes = def_ad_bytes = 2;
80 + ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
81 + if (desc.d)
82 + def_op_bytes = def_ad_bytes = 4;
83 + break;
84 case X86EMUL_MODE_PROT16:
85 def_op_bytes = def_ad_bytes = 2;
86 break;
87 diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
88 index 6e219e5c07d2..5f810bb80802 100644
89 --- a/arch/x86/kvm/ioapic.c
90 +++ b/arch/x86/kvm/ioapic.c
91 @@ -257,8 +257,7 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
92 index == RTC_GSI) {
93 if (kvm_apic_match_dest(vcpu, NULL, 0,
94 e->fields.dest_id, e->fields.dest_mode) ||
95 - (e->fields.trig_mode == IOAPIC_EDGE_TRIG &&
96 - kvm_apic_pending_eoi(vcpu, e->fields.vector)))
97 + kvm_apic_pending_eoi(vcpu, e->fields.vector))
98 __set_bit(e->fields.vector,
99 ioapic_handled_vectors);
100 }
101 @@ -279,6 +278,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
102 {
103 unsigned index;
104 bool mask_before, mask_after;
105 + int old_remote_irr, old_delivery_status;
106 union kvm_ioapic_redirect_entry *e;
107
108 switch (ioapic->ioregsel) {
109 @@ -301,14 +301,28 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
110 return;
111 e = &ioapic->redirtbl[index];
112 mask_before = e->fields.mask;
113 + /* Preserve read-only fields */
114 + old_remote_irr = e->fields.remote_irr;
115 + old_delivery_status = e->fields.delivery_status;
116 if (ioapic->ioregsel & 1) {
117 e->bits &= 0xffffffff;
118 e->bits |= (u64) val << 32;
119 } else {
120 e->bits &= ~0xffffffffULL;
121 e->bits |= (u32) val;
122 - e->fields.remote_irr = 0;
123 }
124 + e->fields.remote_irr = old_remote_irr;
125 + e->fields.delivery_status = old_delivery_status;
126 +
127 + /*
128 + * Some OSes (Linux, Xen) assume that Remote IRR bit will
129 + * be cleared by IOAPIC hardware when the entry is configured
130 + * as edge-triggered. This behavior is used to simulate an
131 + * explicit EOI on IOAPICs that don't have the EOI register.
132 + */
133 + if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
134 + e->fields.remote_irr = 0;
135 +
136 mask_after = e->fields.mask;
137 if (mask_before != mask_after)
138 kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
139 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
140 index 3ca6d15994e4..178a344f55f8 100644
141 --- a/arch/x86/kvm/vmx.c
142 +++ b/arch/x86/kvm/vmx.c
143 @@ -5194,7 +5194,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
144 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
145 }
146
147 - vmcs_writel(GUEST_RFLAGS, 0x02);
148 + kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
149 kvm_rip_write(vcpu, 0xfff0);
150
151 vmcs_writel(GUEST_GDTR_BASE, 0);
152 @@ -6257,7 +6257,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
153 if (test_bit(KVM_REQ_EVENT, &vcpu->requests))
154 return 1;
155
156 - err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
157 + err = emulate_instruction(vcpu, 0);
158
159 if (err == EMULATE_USER_EXIT) {
160 ++vcpu->stat.mmio_exits;
161 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
162 index d3f80cccb9aa..e023ef981feb 100644
163 --- a/arch/x86/kvm/x86.c
164 +++ b/arch/x86/kvm/x86.c
165 @@ -1751,10 +1751,13 @@ static u64 __get_kvmclock_ns(struct kvm *kvm)
166 /* both __this_cpu_read() and rdtsc() should be on the same cpu */
167 get_cpu();
168
169 - kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
170 - &hv_clock.tsc_shift,
171 - &hv_clock.tsc_to_system_mul);
172 - ret = __pvclock_read_cycles(&hv_clock, rdtsc());
173 + if (__this_cpu_read(cpu_tsc_khz)) {
174 + kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
175 + &hv_clock.tsc_shift,
176 + &hv_clock.tsc_to_system_mul);
177 + ret = __pvclock_read_cycles(&hv_clock, rdtsc());
178 + } else
179 + ret = ktime_get_boot_ns() + ka->kvmclock_offset;
180
181 put_cpu();
182
183 @@ -5308,7 +5311,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
184 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
185 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
186 vcpu->run->internal.ndata = 0;
187 - r = EMULATE_FAIL;
188 + r = EMULATE_USER_EXIT;
189 }
190 kvm_queue_exception(vcpu, UD_VECTOR);
191
192 diff --git a/crypto/Kconfig b/crypto/Kconfig
193 index 84d71482bf08..ab0d93ab5695 100644
194 --- a/crypto/Kconfig
195 +++ b/crypto/Kconfig
196 @@ -120,7 +120,7 @@ config CRYPTO_DH
197
198 config CRYPTO_ECDH
199 tristate "ECDH algorithm"
200 - select CRYTPO_KPP
201 + select CRYPTO_KPP
202 help
203 Generic implementation of the ECDH algorithm
204
205 diff --git a/crypto/af_alg.c b/crypto/af_alg.c
206 index f5e18c2a4852..ca50eeb13097 100644
207 --- a/crypto/af_alg.c
208 +++ b/crypto/af_alg.c
209 @@ -149,7 +149,7 @@ EXPORT_SYMBOL_GPL(af_alg_release_parent);
210
211 static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
212 {
213 - const u32 forbidden = CRYPTO_ALG_INTERNAL;
214 + const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
215 struct sock *sk = sock->sk;
216 struct alg_sock *ask = alg_sk(sk);
217 struct sockaddr_alg *sa = (void *)uaddr;
218 @@ -157,6 +157,10 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
219 void *private;
220 int err;
221
222 + /* If caller uses non-allowed flag, return error. */
223 + if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
224 + return -EINVAL;
225 +
226 if (sock->state == SS_CONNECTED)
227 return -EINVAL;
228
229 @@ -175,9 +179,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
230 if (IS_ERR(type))
231 return PTR_ERR(type);
232
233 - private = type->bind(sa->salg_name,
234 - sa->salg_feat & ~forbidden,
235 - sa->salg_mask & ~forbidden);
236 + private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
237 if (IS_ERR(private)) {
238 module_put(type->owner);
239 return PTR_ERR(private);
240 diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
241 index 7e8ed96236ce..a68be626017c 100644
242 --- a/crypto/sha3_generic.c
243 +++ b/crypto/sha3_generic.c
244 @@ -18,6 +18,7 @@
245 #include <linux/types.h>
246 #include <crypto/sha3.h>
247 #include <asm/byteorder.h>
248 +#include <asm/unaligned.h>
249
250 #define KECCAK_ROUNDS 24
251
252 @@ -149,7 +150,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
253 unsigned int i;
254
255 for (i = 0; i < sctx->rsizw; i++)
256 - sctx->st[i] ^= ((u64 *) src)[i];
257 + sctx->st[i] ^= get_unaligned_le64(src + 8 * i);
258 keccakf(sctx->st);
259
260 done += sctx->rsiz;
261 @@ -174,7 +175,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out)
262 sctx->buf[sctx->rsiz - 1] |= 0x80;
263
264 for (i = 0; i < sctx->rsizw; i++)
265 - sctx->st[i] ^= ((u64 *) sctx->buf)[i];
266 + sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i);
267
268 keccakf(sctx->st);
269
270 diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
271 index 7b2c48fde4e2..201c7ceb7052 100644
272 --- a/drivers/acpi/device_sysfs.c
273 +++ b/drivers/acpi/device_sysfs.c
274 @@ -146,6 +146,10 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
275 int count;
276 struct acpi_hardware_id *id;
277
278 + /* Avoid unnecessarily loading modules for non present devices. */
279 + if (!acpi_device_is_present(acpi_dev))
280 + return 0;
281 +
282 /*
283 * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
284 * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
285 diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
286 index 10e1b9eee10e..f03cf1df8d6b 100644
287 --- a/drivers/auxdisplay/Kconfig
288 +++ b/drivers/auxdisplay/Kconfig
289 @@ -121,6 +121,7 @@ config CFAG12864B_RATE
290
291 config IMG_ASCII_LCD
292 tristate "Imagination Technologies ASCII LCD Display"
293 + depends on HAS_IOMEM
294 default y if MIPS_MALTA || MIPS_SEAD3
295 select SYSCON
296 help
297 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
298 index 24d6cefceb32..402254d26247 100644
299 --- a/drivers/block/loop.c
300 +++ b/drivers/block/loop.c
301 @@ -1558,9 +1558,8 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
302 return err;
303 }
304
305 -static void lo_release(struct gendisk *disk, fmode_t mode)
306 +static void __lo_release(struct loop_device *lo)
307 {
308 - struct loop_device *lo = disk->private_data;
309 int err;
310
311 if (atomic_dec_return(&lo->lo_refcnt))
312 @@ -1586,6 +1585,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
313 mutex_unlock(&lo->lo_ctl_mutex);
314 }
315
316 +static void lo_release(struct gendisk *disk, fmode_t mode)
317 +{
318 + mutex_lock(&loop_index_mutex);
319 + __lo_release(disk->private_data);
320 + mutex_unlock(&loop_index_mutex);
321 +}
322 +
323 static const struct block_device_operations lo_fops = {
324 .owner = THIS_MODULE,
325 .open = lo_open,
326 diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
327 index d8b164a7c4e5..cac26fb22891 100644
328 --- a/drivers/cpufreq/Kconfig
329 +++ b/drivers/cpufreq/Kconfig
330 @@ -273,6 +273,7 @@ endif
331 if MIPS
332 config LOONGSON2_CPUFREQ
333 tristate "Loongson2 CPUFreq Driver"
334 + depends on LEMOTE_MACH2F
335 help
336 This option adds a CPUFreq driver for loongson processors which
337 support software configurable cpu frequency.
338 @@ -285,6 +286,7 @@ config LOONGSON2_CPUFREQ
339
340 config LOONGSON1_CPUFREQ
341 tristate "Loongson1 CPUFreq Driver"
342 + depends on LOONGSON1_LS1B
343 help
344 This option adds a CPUFreq driver for loongson1 processors which
345 support software configurable cpu frequency.
346 diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
347 index dc37dbe4b46d..a83e97e15c14 100644
348 --- a/drivers/gpio/gpio-ath79.c
349 +++ b/drivers/gpio/gpio-ath79.c
350 @@ -323,3 +323,6 @@ static struct platform_driver ath79_gpio_driver = {
351 };
352
353 module_platform_driver(ath79_gpio_driver);
354 +
355 +MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X GPIO API support");
356 +MODULE_LICENSE("GPL v2");
357 diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
358 index 98c7ff2a76e7..8d62db447ec1 100644
359 --- a/drivers/gpio/gpio-iop.c
360 +++ b/drivers/gpio/gpio-iop.c
361 @@ -58,3 +58,7 @@ static int __init iop3xx_gpio_init(void)
362 return platform_driver_register(&iop3xx_gpio_driver);
363 }
364 arch_initcall(iop3xx_gpio_init);
365 +
366 +MODULE_DESCRIPTION("GPIO handling for Intel IOP3xx processors");
367 +MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
368 +MODULE_LICENSE("GPL");
369 diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
370 index adba614b3965..abb5a2752511 100644
371 --- a/drivers/gpio/gpio-stmpe.c
372 +++ b/drivers/gpio/gpio-stmpe.c
373 @@ -190,6 +190,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
374 };
375 int i, j;
376
377 + /*
378 + * STMPE1600: to be able to get IRQ from pins,
379 + * a read must be done on GPMR register, or a write in
380 + * GPSR or GPCR registers
381 + */
382 + if (stmpe->partnum == STMPE1600) {
383 + stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
384 + stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
385 + }
386 +
387 for (i = 0; i < CACHE_NR_REGS; i++) {
388 /* STMPE801 and STMPE1600 don't have RE and FE registers */
389 if ((stmpe->partnum == STMPE801 ||
390 @@ -227,21 +237,11 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d)
391 {
392 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
393 struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
394 - struct stmpe *stmpe = stmpe_gpio->stmpe;
395 int offset = d->hwirq;
396 int regoffset = offset / 8;
397 int mask = BIT(offset % 8);
398
399 stmpe_gpio->regs[REG_IE][regoffset] |= mask;
400 -
401 - /*
402 - * STMPE1600 workaround: to be able to get IRQ from pins,
403 - * a read must be done on GPMR register, or a write in
404 - * GPSR or GPCR registers
405 - */
406 - if (stmpe->partnum == STMPE1600)
407 - stmpe_reg_read(stmpe,
408 - stmpe->regs[STMPE_IDX_GPMR_LSB + regoffset]);
409 }
410
411 static void stmpe_dbg_show_one(struct seq_file *s,
412 diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
413 index 063d176baa24..f3c3680963b9 100644
414 --- a/drivers/gpio/gpiolib.c
415 +++ b/drivers/gpio/gpiolib.c
416 @@ -705,6 +705,9 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
417 struct gpioevent_data ge;
418 int ret, level;
419
420 + /* Do not leak kernel stack to userspace */
421 + memset(&ge, 0, sizeof(ge));
422 +
423 ge.timestamp = ktime_get_real_ns();
424 level = gpiod_get_value_cansleep(le->desc);
425
426 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
427 index 1a0a5f7cccbc..47951f4775b9 100644
428 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
429 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
430 @@ -367,29 +367,50 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
431 {
432 struct amdgpu_device *adev = get_amdgpu_device(kgd);
433 struct cik_sdma_rlc_registers *m;
434 + unsigned long end_jiffies;
435 uint32_t sdma_base_addr;
436 + uint32_t data;
437
438 m = get_sdma_mqd(mqd);
439 sdma_base_addr = get_sdma_base_addr(m);
440
441 - WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
442 - m->sdma_rlc_virtual_addr);
443 + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
444 + m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
445
446 - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
447 - m->sdma_rlc_rb_base);
448 + end_jiffies = msecs_to_jiffies(2000) + jiffies;
449 + while (true) {
450 + data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
451 + if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
452 + break;
453 + if (time_after(jiffies, end_jiffies))
454 + return -ETIME;
455 + usleep_range(500, 1000);
456 + }
457 + if (m->sdma_engine_id) {
458 + data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
459 + data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
460 + RESUME_CTX, 0);
461 + WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
462 + } else {
463 + data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
464 + data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
465 + RESUME_CTX, 0);
466 + WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
467 + }
468
469 + WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
470 + m->sdma_rlc_doorbell);
471 + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
472 + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
473 + WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
474 + m->sdma_rlc_virtual_addr);
475 + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
476 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
477 m->sdma_rlc_rb_base_hi);
478 -
479 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
480 m->sdma_rlc_rb_rptr_addr_lo);
481 -
482 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
483 m->sdma_rlc_rb_rptr_addr_hi);
484 -
485 - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
486 - m->sdma_rlc_doorbell);
487 -
488 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
489 m->sdma_rlc_rb_cntl);
490
491 @@ -493,9 +514,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
492 }
493
494 WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
495 - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
496 - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
497 - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
498 + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
499 + RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
500 + SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
501
502 return 0;
503 }
504 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
505 index f26d1fd53bef..cb505f66d3aa 100644
506 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
507 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
508 @@ -416,6 +416,10 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
509 if (candidate == lobj)
510 break;
511
512 + /* We can't move pinned BOs here */
513 + if (bo->pin_count)
514 + continue;
515 +
516 other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
517
518 /* Check if this BO is in one of the domains we need space for */
519 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
520 index d83de985e88c..8577a563600f 100644
521 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
522 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
523 @@ -215,8 +215,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
524 BUG_ON(!mm || !mqd || !q);
525
526 m = get_sdma_mqd(mqd);
527 - m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
528 - SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
529 + m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
530 + << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
531 q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
532 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
533 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
534 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
535 index e1fb40b84c72..5425c68d0287 100644
536 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
537 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
538 @@ -205,6 +205,24 @@ int pqm_create_queue(struct process_queue_manager *pqm,
539
540 switch (type) {
541 case KFD_QUEUE_TYPE_SDMA:
542 + if (dev->dqm->queue_count >=
543 + CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
544 + pr_err("Over-subscription is not allowed for SDMA.\n");
545 + retval = -EPERM;
546 + goto err_create_queue;
547 + }
548 +
549 + retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
550 + if (retval != 0)
551 + goto err_create_queue;
552 + pqn->q = q;
553 + pqn->kq = NULL;
554 + retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
555 + &q->properties.vmid);
556 + pr_debug("DQM returned %d for create_queue\n", retval);
557 + print_queue(q);
558 + break;
559 +
560 case KFD_QUEUE_TYPE_COMPUTE:
561 /* check if there is over subscription */
562 if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
563 diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
564 index 44d476ea6d2e..f64f35cdc2ff 100644
565 --- a/drivers/gpu/drm/bridge/tc358767.c
566 +++ b/drivers/gpu/drm/bridge/tc358767.c
567 @@ -97,7 +97,7 @@
568 #define DP0_ACTIVEVAL 0x0650
569 #define DP0_SYNCVAL 0x0654
570 #define DP0_MISC 0x0658
571 -#define TU_SIZE_RECOMMENDED (0x3f << 16) /* LSCLK cycles per TU */
572 +#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
573 #define BPC_6 (0 << 5)
574 #define BPC_8 (1 << 5)
575
576 @@ -318,7 +318,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
577 tmp = (tmp << 8) | buf[i];
578 i++;
579 if (((i % 4) == 0) || (i == size)) {
580 - tc_write(DP0_AUXWDATA(i >> 2), tmp);
581 + tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp);
582 tmp = 0;
583 }
584 }
585 @@ -603,8 +603,15 @@ static int tc_get_display_props(struct tc_data *tc)
586 ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
587 if (ret < 0)
588 goto err_dpcd_read;
589 - if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000))
590 - goto err_dpcd_inval;
591 + if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) {
592 + dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n");
593 + tc->link.base.rate = 270000;
594 + }
595 +
596 + if (tc->link.base.num_lanes > 2) {
597 + dev_dbg(tc->dev, "Falling to 2 lanes\n");
598 + tc->link.base.num_lanes = 2;
599 + }
600
601 ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
602 if (ret < 0)
603 @@ -637,9 +644,6 @@ static int tc_get_display_props(struct tc_data *tc)
604 err_dpcd_read:
605 dev_err(tc->dev, "failed to read DPCD: %d\n", ret);
606 return ret;
607 -err_dpcd_inval:
608 - dev_err(tc->dev, "invalid DPCD\n");
609 - return -EINVAL;
610 }
611
612 static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
613 @@ -655,6 +659,14 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
614 int lower_margin = mode->vsync_start - mode->vdisplay;
615 int vsync_len = mode->vsync_end - mode->vsync_start;
616
617 + /*
618 + * Recommended maximum number of symbols transferred in a transfer unit:
619 + * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
620 + * (output active video bandwidth in bytes))
621 + * Must be less than tu_size.
622 + */
623 + max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
624 +
625 dev_dbg(tc->dev, "set mode %dx%d\n",
626 mode->hdisplay, mode->vdisplay);
627 dev_dbg(tc->dev, "H margin %d,%d sync %d\n",
628 @@ -664,13 +676,18 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
629 dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal);
630
631
632 - /* LCD Ctl Frame Size */
633 - tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ |
634 + /*
635 + * LCD Ctl Frame Size
636 + * datasheet is not clear of vsdelay in case of DPI
637 + * assume we do not need any delay when DPI is a source of
638 + * sync signals
639 + */
640 + tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ |
641 OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
642 - tc_write(HTIM01, (left_margin << 16) | /* H back porch */
643 - (hsync_len << 0)); /* Hsync */
644 - tc_write(HTIM02, (right_margin << 16) | /* H front porch */
645 - (mode->hdisplay << 0)); /* width */
646 + tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */
647 + (ALIGN(hsync_len, 2) << 0)); /* Hsync */
648 + tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) | /* H front porch */
649 + (ALIGN(mode->hdisplay, 2) << 0)); /* width */
650 tc_write(VTIM01, (upper_margin << 16) | /* V back porch */
651 (vsync_len << 0)); /* Vsync */
652 tc_write(VTIM02, (lower_margin << 16) | /* V front porch */
653 @@ -689,7 +706,7 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
654 /* DP Main Stream Attributes */
655 vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
656 tc_write(DP0_VIDSYNCDELAY,
657 - (0x003e << 16) | /* thresh_dly */
658 + (max_tu_symbol << 16) | /* thresh_dly */
659 (vid_sync_dly << 0));
660
661 tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal));
662 @@ -705,14 +722,8 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
663 tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
664 DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
665
666 - /*
667 - * Recommended maximum number of symbols transferred in a transfer unit:
668 - * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
669 - * (output active video bandwidth in bytes))
670 - * Must be less than tu_size.
671 - */
672 - max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
673 - tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8);
674 + tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) |
675 + BPC_8);
676
677 return 0;
678 err:
679 @@ -808,8 +819,6 @@ static int tc_main_link_setup(struct tc_data *tc)
680 unsigned int rate;
681 u32 dp_phy_ctrl;
682 int timeout;
683 - bool aligned;
684 - bool ready;
685 u32 value;
686 int ret;
687 u8 tmp[8];
688 @@ -954,16 +963,15 @@ static int tc_main_link_setup(struct tc_data *tc)
689 ret = drm_dp_dpcd_read_link_status(aux, tmp + 2);
690 if (ret < 0)
691 goto err_dpcd_read;
692 - ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */
693 - DP_CHANNEL_EQ_BITS)); /* Lane0 */
694 - aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE;
695 - } while ((--timeout) && !(ready && aligned));
696 + } while ((--timeout) &&
697 + !(drm_dp_channel_eq_ok(tmp + 2, tc->link.base.num_lanes)));
698
699 if (timeout == 0) {
700 /* Read DPCD 0x200-0x201 */
701 ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2);
702 if (ret < 0)
703 goto err_dpcd_read;
704 + dev_err(dev, "channel(s) EQ not ok\n");
705 dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]);
706 dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n",
707 tmp[1]);
708 @@ -974,10 +982,6 @@ static int tc_main_link_setup(struct tc_data *tc)
709 dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n",
710 tmp[6]);
711
712 - if (!ready)
713 - dev_err(dev, "Lane0/1 not ready\n");
714 - if (!aligned)
715 - dev_err(dev, "Lane0/1 not aligned\n");
716 return -EAGAIN;
717 }
718
719 @@ -1105,7 +1109,10 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
720 static int tc_connector_mode_valid(struct drm_connector *connector,
721 struct drm_display_mode *mode)
722 {
723 - /* Accept any mode */
724 + /* DPI interface clock limitation: upto 154 MHz */
725 + if (mode->clock > 154000)
726 + return MODE_CLOCK_HIGH;
727 +
728 return MODE_OK;
729 }
730
731 diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
732 index 4ceed7a9762f..4b83e9eeab06 100644
733 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
734 +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
735 @@ -638,7 +638,8 @@ static int omap_dmm_probe(struct platform_device *dev)
736 match = of_match_node(dmm_of_match, dev->dev.of_node);
737 if (!match) {
738 dev_err(&dev->dev, "failed to find matching device node\n");
739 - return -ENODEV;
740 + ret = -ENODEV;
741 + goto fail;
742 }
743
744 omap_dmm->plat_data = match->data;
745 diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
746 index 094bc6a475c1..d96c084d3a76 100644
747 --- a/drivers/gpu/drm/vc4/vc4_irq.c
748 +++ b/drivers/gpu/drm/vc4/vc4_irq.c
749 @@ -225,6 +225,9 @@ vc4_irq_uninstall(struct drm_device *dev)
750 /* Clear any pending interrupts we might have left. */
751 V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
752
753 + /* Finish any interrupt handler still in flight. */
754 + disable_irq(dev->irq);
755 +
756 cancel_work_sync(&vc4->overflow_mem_work);
757 }
758
759 diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
760 index 7cc346ad9b0b..ce7c21d250cf 100644
761 --- a/drivers/gpu/drm/vc4/vc4_v3d.c
762 +++ b/drivers/gpu/drm/vc4/vc4_v3d.c
763 @@ -173,6 +173,9 @@ static int vc4_v3d_runtime_resume(struct device *dev)
764 struct vc4_dev *vc4 = v3d->vc4;
765
766 vc4_v3d_init_hw(vc4->dev);
767 +
768 + /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
769 + enable_irq(vc4->dev->irq);
770 vc4_irq_postinstall(vc4->dev);
771
772 return 0;
773 diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
774 index d72dfb2bbdb8..7a4d39ce51d9 100644
775 --- a/drivers/hid/wacom_sys.c
776 +++ b/drivers/hid/wacom_sys.c
777 @@ -2192,23 +2192,23 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
778 int i;
779 unsigned long flags;
780
781 - spin_lock_irqsave(&remote->remote_lock, flags);
782 - remote->remotes[index].registered = false;
783 - spin_unlock_irqrestore(&remote->remote_lock, flags);
784 + for (i = 0; i < WACOM_MAX_REMOTES; i++) {
785 + if (remote->remotes[i].serial == serial) {
786
787 - if (remote->remotes[index].battery.battery)
788 - devres_release_group(&wacom->hdev->dev,
789 - &remote->remotes[index].battery.bat_desc);
790 + spin_lock_irqsave(&remote->remote_lock, flags);
791 + remote->remotes[i].registered = false;
792 + spin_unlock_irqrestore(&remote->remote_lock, flags);
793
794 - if (remote->remotes[index].group.name)
795 - devres_release_group(&wacom->hdev->dev,
796 - &remote->remotes[index]);
797 + if (remote->remotes[i].battery.battery)
798 + devres_release_group(&wacom->hdev->dev,
799 + &remote->remotes[i].battery.bat_desc);
800 +
801 + if (remote->remotes[i].group.name)
802 + devres_release_group(&wacom->hdev->dev,
803 + &remote->remotes[i]);
804
805 - for (i = 0; i < WACOM_MAX_REMOTES; i++) {
806 - if (remote->remotes[i].serial == serial) {
807 remote->remotes[i].serial = 0;
808 remote->remotes[i].group.name = NULL;
809 - remote->remotes[i].registered = false;
810 remote->remotes[i].battery.battery = NULL;
811 wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN;
812 }
813 diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
814 index ba59eaef2e07..d013acf3f83a 100644
815 --- a/drivers/hwmon/pmbus/pmbus_core.c
816 +++ b/drivers/hwmon/pmbus/pmbus_core.c
817 @@ -20,6 +20,7 @@
818 */
819
820 #include <linux/kernel.h>
821 +#include <linux/math64.h>
822 #include <linux/module.h>
823 #include <linux/init.h>
824 #include <linux/err.h>
825 @@ -476,8 +477,8 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
826 static long pmbus_reg2data_direct(struct pmbus_data *data,
827 struct pmbus_sensor *sensor)
828 {
829 - long val = (s16) sensor->data;
830 - long m, b, R;
831 + s64 b, val = (s16)sensor->data;
832 + s32 m, R;
833
834 m = data->info->m[sensor->class];
835 b = data->info->b[sensor->class];
836 @@ -505,11 +506,12 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
837 R--;
838 }
839 while (R < 0) {
840 - val = DIV_ROUND_CLOSEST(val, 10);
841 + val = div_s64(val + 5LL, 10L); /* round closest */
842 R++;
843 }
844
845 - return (val - b) / m;
846 + val = div_s64(val - b, m);
847 + return clamp_val(val, LONG_MIN, LONG_MAX);
848 }
849
850 /*
851 @@ -629,7 +631,8 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
852 static u16 pmbus_data2reg_direct(struct pmbus_data *data,
853 struct pmbus_sensor *sensor, long val)
854 {
855 - long m, b, R;
856 + s64 b, val64 = val;
857 + s32 m, R;
858
859 m = data->info->m[sensor->class];
860 b = data->info->b[sensor->class];
861 @@ -646,18 +649,18 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
862 R -= 3; /* Adjust R and b for data in milli-units */
863 b *= 1000;
864 }
865 - val = val * m + b;
866 + val64 = val64 * m + b;
867
868 while (R > 0) {
869 - val *= 10;
870 + val64 *= 10;
871 R--;
872 }
873 while (R < 0) {
874 - val = DIV_ROUND_CLOSEST(val, 10);
875 + val64 = div_s64(val64 + 5LL, 10L); /* round closest */
876 R++;
877 }
878
879 - return val;
880 + return (u16)clamp_val(val64, S16_MIN, S16_MAX);
881 }
882
883 static u16 pmbus_data2reg_vid(struct pmbus_data *data,
884 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
885 index a2120ff0ef4c..5e29fbd3a5a0 100644
886 --- a/drivers/infiniband/hw/mlx5/main.c
887 +++ b/drivers/infiniband/hw/mlx5/main.c
888 @@ -2575,6 +2575,18 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
889 return ret;
890 }
891
892 +static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
893 +{
894 + switch (umr_fence_cap) {
895 + case MLX5_CAP_UMR_FENCE_NONE:
896 + return MLX5_FENCE_MODE_NONE;
897 + case MLX5_CAP_UMR_FENCE_SMALL:
898 + return MLX5_FENCE_MODE_INITIATOR_SMALL;
899 + default:
900 + return MLX5_FENCE_MODE_STRONG_ORDERING;
901 + }
902 +}
903 +
904 static int create_dev_resources(struct mlx5_ib_resources *devr)
905 {
906 struct ib_srq_init_attr attr;
907 @@ -3101,6 +3113,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
908
909 mlx5_ib_internal_fill_odp_caps(dev);
910
911 + dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
912 +
913 if (MLX5_CAP_GEN(mdev, imaicl)) {
914 dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw;
915 dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw;
916 diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
917 index 86e1e08125ff..d5cc954e8ac2 100644
918 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
919 +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
920 @@ -345,7 +345,7 @@ struct mlx5_ib_qp {
921 struct mlx5_ib_wq rq;
922
923 u8 sq_signal_bits;
924 - u8 fm_cache;
925 + u8 next_fence;
926 struct mlx5_ib_wq sq;
927
928 /* serialize qp state modifications
929 @@ -643,6 +643,7 @@ struct mlx5_ib_dev {
930 struct list_head qp_list;
931 /* Array with num_ports elements */
932 struct mlx5_ib_port *port;
933 + u8 umr_fence;
934 };
935
936 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
937 diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
938 index 2665414b4875..fdd156101a72 100644
939 --- a/drivers/infiniband/hw/mlx5/qp.c
940 +++ b/drivers/infiniband/hw/mlx5/qp.c
941 @@ -3755,24 +3755,6 @@ static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
942 }
943 }
944
945 -static u8 get_fence(u8 fence, struct ib_send_wr *wr)
946 -{
947 - if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
948 - wr->send_flags & IB_SEND_FENCE))
949 - return MLX5_FENCE_MODE_STRONG_ORDERING;
950 -
951 - if (unlikely(fence)) {
952 - if (wr->send_flags & IB_SEND_FENCE)
953 - return MLX5_FENCE_MODE_SMALL_AND_FENCE;
954 - else
955 - return fence;
956 - } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
957 - return MLX5_FENCE_MODE_FENCE;
958 - }
959 -
960 - return 0;
961 -}
962 -
963 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
964 struct mlx5_wqe_ctrl_seg **ctrl,
965 struct ib_send_wr *wr, unsigned *idx,
966 @@ -3801,8 +3783,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
967 static void finish_wqe(struct mlx5_ib_qp *qp,
968 struct mlx5_wqe_ctrl_seg *ctrl,
969 u8 size, unsigned idx, u64 wr_id,
970 - int nreq, u8 fence, u8 next_fence,
971 - u32 mlx5_opcode)
972 + int nreq, u8 fence, u32 mlx5_opcode)
973 {
974 u8 opmod = 0;
975
976 @@ -3810,7 +3791,6 @@ static void finish_wqe(struct mlx5_ib_qp *qp,
977 mlx5_opcode | ((u32)opmod << 24));
978 ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
979 ctrl->fm_ce_se |= fence;
980 - qp->fm_cache = next_fence;
981 if (unlikely(qp->wq_sig))
982 ctrl->signature = wq_sig(ctrl);
983
984 @@ -3870,7 +3850,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
985 goto out;
986 }
987
988 - fence = qp->fm_cache;
989 num_sge = wr->num_sge;
990 if (unlikely(num_sge > qp->sq.max_gs)) {
991 mlx5_ib_warn(dev, "\n");
992 @@ -3887,6 +3866,19 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
993 goto out;
994 }
995
996 + if (wr->opcode == IB_WR_LOCAL_INV ||
997 + wr->opcode == IB_WR_REG_MR) {
998 + fence = dev->umr_fence;
999 + next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
1000 + } else if (wr->send_flags & IB_SEND_FENCE) {
1001 + if (qp->next_fence)
1002 + fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
1003 + else
1004 + fence = MLX5_FENCE_MODE_FENCE;
1005 + } else {
1006 + fence = qp->next_fence;
1007 + }
1008 +
1009 switch (ibqp->qp_type) {
1010 case IB_QPT_XRC_INI:
1011 xrc = seg;
1012 @@ -3913,7 +3905,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1013 goto out;
1014
1015 case IB_WR_LOCAL_INV:
1016 - next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
1017 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
1018 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
1019 set_linv_wr(qp, &seg, &size);
1020 @@ -3921,7 +3912,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1021 break;
1022
1023 case IB_WR_REG_MR:
1024 - next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
1025 qp->sq.wr_data[idx] = IB_WR_REG_MR;
1026 ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
1027 err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
1028 @@ -3944,9 +3934,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1029 goto out;
1030 }
1031
1032 - finish_wqe(qp, ctrl, size, idx, wr->wr_id,
1033 - nreq, get_fence(fence, wr),
1034 - next_fence, MLX5_OPCODE_UMR);
1035 + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
1036 + fence, MLX5_OPCODE_UMR);
1037 /*
1038 * SET_PSV WQEs are not signaled and solicited
1039 * on error
1040 @@ -3971,9 +3960,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1041 goto out;
1042 }
1043
1044 - finish_wqe(qp, ctrl, size, idx, wr->wr_id,
1045 - nreq, get_fence(fence, wr),
1046 - next_fence, MLX5_OPCODE_SET_PSV);
1047 + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
1048 + fence, MLX5_OPCODE_SET_PSV);
1049 err = begin_wqe(qp, &seg, &ctrl, wr,
1050 &idx, &size, nreq);
1051 if (err) {
1052 @@ -3983,7 +3971,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1053 goto out;
1054 }
1055
1056 - next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
1057 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
1058 mr->sig->psv_wire.psv_idx, &seg,
1059 &size);
1060 @@ -3993,9 +3980,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1061 goto out;
1062 }
1063
1064 - finish_wqe(qp, ctrl, size, idx, wr->wr_id,
1065 - nreq, get_fence(fence, wr),
1066 - next_fence, MLX5_OPCODE_SET_PSV);
1067 + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
1068 + fence, MLX5_OPCODE_SET_PSV);
1069 + qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
1070 num_sge = 0;
1071 goto skip_psv;
1072
1073 @@ -4100,8 +4087,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1074 }
1075 }
1076
1077 - finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
1078 - get_fence(fence, wr), next_fence,
1079 + qp->next_fence = next_fence;
1080 + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence,
1081 mlx5_ib_opcode[wr->opcode]);
1082 skip_psv:
1083 if (0)
1084 diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
1085 index 2efdce07247c..cac297f8170e 100644
1086 --- a/drivers/md/bcache/btree.c
1087 +++ b/drivers/md/bcache/btree.c
1088 @@ -803,7 +803,10 @@ int bch_btree_cache_alloc(struct cache_set *c)
1089 c->shrink.scan_objects = bch_mca_scan;
1090 c->shrink.seeks = 4;
1091 c->shrink.batch = c->btree_pages * 2;
1092 - register_shrinker(&c->shrink);
1093 +
1094 + if (register_shrinker(&c->shrink))
1095 + pr_warn("bcache: %s: could not register shrinker",
1096 + __func__);
1097
1098 return 0;
1099 }
1100 diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
1101 index dc76fd41e00f..0324633ede42 100644
1102 --- a/drivers/media/usb/usbtv/usbtv-core.c
1103 +++ b/drivers/media/usb/usbtv/usbtv-core.c
1104 @@ -141,6 +141,7 @@ static void usbtv_disconnect(struct usb_interface *intf)
1105
1106 static struct usb_device_id usbtv_id_table[] = {
1107 { USB_DEVICE(0x1b71, 0x3002) },
1108 + { USB_DEVICE(0x1f71, 0x3301) },
1109 {}
1110 };
1111 MODULE_DEVICE_TABLE(usb, usbtv_id_table);
1112 diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
1113 index de31514df282..d38527e0a2f2 100644
1114 --- a/drivers/mtd/nand/denali_pci.c
1115 +++ b/drivers/mtd/nand/denali_pci.c
1116 @@ -119,3 +119,7 @@ static struct pci_driver denali_pci_driver = {
1117 };
1118
1119 module_pci_driver(denali_pci_driver);
1120 +
1121 +MODULE_DESCRIPTION("PCI driver for Denali NAND controller");
1122 +MODULE_AUTHOR("Intel Corporation and its suppliers");
1123 +MODULE_LICENSE("GPL v2");
1124 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1125 index a7e04ff4eaed..cde4b96f3153 100644
1126 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1127 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1128 @@ -1843,8 +1843,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
1129 /* Read A2 portion of the EEPROM */
1130 if (length) {
1131 start -= ETH_MODULE_SFF_8436_LEN;
1132 - bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start,
1133 - length, data);
1134 + rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
1135 + start, length, data);
1136 }
1137 return rc;
1138 }
1139 diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
1140 index ca54f7684668..3a61491421b1 100644
1141 --- a/drivers/net/ethernet/intel/igb/igb_main.c
1142 +++ b/drivers/net/ethernet/intel/igb/igb_main.c
1143 @@ -3273,7 +3273,7 @@ static int __igb_close(struct net_device *netdev, bool suspending)
1144
1145 int igb_close(struct net_device *netdev)
1146 {
1147 - if (netif_device_present(netdev))
1148 + if (netif_device_present(netdev) || netdev->dismantle)
1149 return __igb_close(netdev, false);
1150 return 0;
1151 }
1152 diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
1153 index 6d68c8a8f4f2..da4ec575ccf9 100644
1154 --- a/drivers/net/ethernet/xilinx/Kconfig
1155 +++ b/drivers/net/ethernet/xilinx/Kconfig
1156 @@ -34,6 +34,7 @@ config XILINX_AXI_EMAC
1157 config XILINX_LL_TEMAC
1158 tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
1159 depends on (PPC || MICROBLAZE)
1160 + depends on !64BIT || BROKEN
1161 select PHYLIB
1162 ---help---
1163 This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
1164 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
1165 index d04babd99b53..ff5ce1ed03c4 100644
1166 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
1167 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
1168 @@ -1040,6 +1040,8 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
1169 return le32_to_cpu(txq_timer->p2p_go);
1170 case NL80211_IFTYPE_P2P_DEVICE:
1171 return le32_to_cpu(txq_timer->p2p_device);
1172 + case NL80211_IFTYPE_MONITOR:
1173 + return default_timeout;
1174 default:
1175 WARN_ON(1);
1176 return mvm->cfg->base_params->wd_timeout;
1177 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
1178 index 8d498a997e25..1a9dadf7b3cc 100644
1179 --- a/drivers/net/xen-netfront.c
1180 +++ b/drivers/net/xen-netfront.c
1181 @@ -86,6 +86,8 @@ struct netfront_cb {
1182 /* IRQ name is queue name with "-tx" or "-rx" appended */
1183 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
1184
1185 +static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
1186 +
1187 struct netfront_stats {
1188 u64 packets;
1189 u64 bytes;
1190 @@ -2051,10 +2053,12 @@ static void netback_changed(struct xenbus_device *dev,
1191 break;
1192
1193 case XenbusStateClosed:
1194 + wake_up_all(&module_unload_q);
1195 if (dev->state == XenbusStateClosed)
1196 break;
1197 /* Missed the backend's CLOSING state -- fallthrough */
1198 case XenbusStateClosing:
1199 + wake_up_all(&module_unload_q);
1200 xenbus_frontend_closed(dev);
1201 break;
1202 }
1203 @@ -2160,6 +2164,20 @@ static int xennet_remove(struct xenbus_device *dev)
1204
1205 dev_dbg(&dev->dev, "%s\n", dev->nodename);
1206
1207 + if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
1208 + xenbus_switch_state(dev, XenbusStateClosing);
1209 + wait_event(module_unload_q,
1210 + xenbus_read_driver_state(dev->otherend) ==
1211 + XenbusStateClosing);
1212 +
1213 + xenbus_switch_state(dev, XenbusStateClosed);
1214 + wait_event(module_unload_q,
1215 + xenbus_read_driver_state(dev->otherend) ==
1216 + XenbusStateClosed ||
1217 + xenbus_read_driver_state(dev->otherend) ==
1218 + XenbusStateUnknown);
1219 + }
1220 +
1221 xennet_disconnect_backend(info);
1222
1223 unregister_netdev(info->netdev);
1224 diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
1225 index b0b1eb3a78c2..76153ac0706c 100644
1226 --- a/drivers/power/reset/zx-reboot.c
1227 +++ b/drivers/power/reset/zx-reboot.c
1228 @@ -81,3 +81,7 @@ static struct platform_driver zx_reboot_driver = {
1229 },
1230 };
1231 module_platform_driver(zx_reboot_driver);
1232 +
1233 +MODULE_DESCRIPTION("ZTE SoCs reset driver");
1234 +MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
1235 +MODULE_LICENSE("GPL v2");
1236 diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
1237 index 0aeecec1f5ea..e2962f15c189 100644
1238 --- a/drivers/scsi/aacraid/commsup.c
1239 +++ b/drivers/scsi/aacraid/commsup.c
1240 @@ -1416,13 +1416,13 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
1241 * will ensure that i/o is queisced and the card is flushed in that
1242 * case.
1243 */
1244 + aac_free_irq(aac);
1245 aac_fib_map_free(aac);
1246 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
1247 aac->comm_addr = NULL;
1248 aac->comm_phys = 0;
1249 kfree(aac->queues);
1250 aac->queues = NULL;
1251 - aac_free_irq(aac);
1252 kfree(aac->fsa_dev);
1253 aac->fsa_dev = NULL;
1254 quirks = aac_get_driver_ident(index)->quirks;
1255 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
1256 index 530034bc2d13..2e9341233f66 100644
1257 --- a/drivers/scsi/ufs/ufshcd.c
1258 +++ b/drivers/scsi/ufs/ufshcd.c
1259 @@ -5327,12 +5327,15 @@ static int ufshcd_config_vreg(struct device *dev,
1260 struct ufs_vreg *vreg, bool on)
1261 {
1262 int ret = 0;
1263 - struct regulator *reg = vreg->reg;
1264 - const char *name = vreg->name;
1265 + struct regulator *reg;
1266 + const char *name;
1267 int min_uV, uA_load;
1268
1269 BUG_ON(!vreg);
1270
1271 + reg = vreg->reg;
1272 + name = vreg->name;
1273 +
1274 if (regulator_count_voltages(reg) > 0) {
1275 min_uV = on ? vreg->min_uV : 0;
1276 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
1277 diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
1278 index deb782f6556c..a6e34f05d44d 100644
1279 --- a/drivers/spi/spi-imx.c
1280 +++ b/drivers/spi/spi-imx.c
1281 @@ -1307,12 +1307,23 @@ static int spi_imx_remove(struct platform_device *pdev)
1282 {
1283 struct spi_master *master = platform_get_drvdata(pdev);
1284 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1285 + int ret;
1286
1287 spi_bitbang_stop(&spi_imx->bitbang);
1288
1289 + ret = clk_enable(spi_imx->clk_per);
1290 + if (ret)
1291 + return ret;
1292 +
1293 + ret = clk_enable(spi_imx->clk_ipg);
1294 + if (ret) {
1295 + clk_disable(spi_imx->clk_per);
1296 + return ret;
1297 + }
1298 +
1299 writel(0, spi_imx->base + MXC_CSPICTRL);
1300 - clk_unprepare(spi_imx->clk_ipg);
1301 - clk_unprepare(spi_imx->clk_per);
1302 + clk_disable_unprepare(spi_imx->clk_ipg);
1303 + clk_disable_unprepare(spi_imx->clk_per);
1304 spi_imx_sdma_exit(spi_imx);
1305 spi_master_put(master);
1306
1307 diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
1308 index 9e8802181452..e8d9db4d8179 100644
1309 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
1310 +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
1311 @@ -824,14 +824,15 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm
1312 return conn;
1313
1314 failed_2:
1315 - kiblnd_destroy_conn(conn, true);
1316 + kiblnd_destroy_conn(conn);
1317 + LIBCFS_FREE(conn, sizeof(*conn));
1318 failed_1:
1319 LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
1320 failed_0:
1321 return NULL;
1322 }
1323
1324 -void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn)
1325 +void kiblnd_destroy_conn(struct kib_conn *conn)
1326 {
1327 struct rdma_cm_id *cmid = conn->ibc_cmid;
1328 struct kib_peer *peer = conn->ibc_peer;
1329 @@ -894,8 +895,6 @@ void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn)
1330 rdma_destroy_id(cmid);
1331 atomic_dec(&net->ibn_nconns);
1332 }
1333 -
1334 - LIBCFS_FREE(conn, sizeof(*conn));
1335 }
1336
1337 int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why)
1338 diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
1339 index 14576977200f..30cb2f5b3c15 100644
1340 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
1341 +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
1342 @@ -1018,7 +1018,7 @@ int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why);
1343 struct kib_conn *kiblnd_create_conn(struct kib_peer *peer,
1344 struct rdma_cm_id *cmid,
1345 int state, int version);
1346 -void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn);
1347 +void kiblnd_destroy_conn(struct kib_conn *conn);
1348 void kiblnd_close_conn(struct kib_conn *conn, int error);
1349 void kiblnd_close_conn_locked(struct kib_conn *conn, int error);
1350
1351 diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
1352 index 995f2dac7f26..ea9a0c21d29d 100644
1353 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
1354 +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
1355 @@ -3323,11 +3323,13 @@ kiblnd_connd(void *arg)
1356 spin_unlock_irqrestore(lock, flags);
1357 dropped_lock = 1;
1358
1359 - kiblnd_destroy_conn(conn, !peer);
1360 + kiblnd_destroy_conn(conn);
1361
1362 spin_lock_irqsave(lock, flags);
1363 - if (!peer)
1364 + if (!peer) {
1365 + kfree(conn);
1366 continue;
1367 + }
1368
1369 conn->ibc_peer = peer;
1370 if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE)
1371 diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
1372 index 4de9dbc93380..c7bf8ab26192 100644
1373 --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
1374 +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
1375 @@ -1397,19 +1397,13 @@ static int rtw_wx_get_essid(struct net_device *dev,
1376 if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
1377 (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
1378 len = pcur_bss->Ssid.SsidLength;
1379 -
1380 - wrqu->essid.length = len;
1381 -
1382 memcpy(extra, pcur_bss->Ssid.Ssid, len);
1383 -
1384 - wrqu->essid.flags = 1;
1385 } else {
1386 - ret = -1;
1387 - goto exit;
1388 + len = 0;
1389 + *extra = 0;
1390 }
1391 -
1392 -exit:
1393 -
1394 + wrqu->essid.length = len;
1395 + wrqu->essid.flags = 1;
1396
1397 return ret;
1398 }
1399 diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
1400 index a70356dad1b7..521a6e450755 100644
1401 --- a/drivers/tty/serial/imx.c
1402 +++ b/drivers/tty/serial/imx.c
1403 @@ -2239,12 +2239,14 @@ static void serial_imx_enable_wakeup(struct imx_port *sport, bool on)
1404 val &= ~UCR3_AWAKEN;
1405 writel(val, sport->port.membase + UCR3);
1406
1407 - val = readl(sport->port.membase + UCR1);
1408 - if (on)
1409 - val |= UCR1_RTSDEN;
1410 - else
1411 - val &= ~UCR1_RTSDEN;
1412 - writel(val, sport->port.membase + UCR1);
1413 + if (sport->have_rtscts) {
1414 + val = readl(sport->port.membase + UCR1);
1415 + if (on)
1416 + val |= UCR1_RTSDEN;
1417 + else
1418 + val &= ~UCR1_RTSDEN;
1419 + writel(val, sport->port.membase + UCR1);
1420 + }
1421 }
1422
1423 static int imx_serial_port_suspend_noirq(struct device *dev)
1424 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
1425 index 734a635e7363..8d9f9a803b42 100644
1426 --- a/drivers/tty/tty_io.c
1427 +++ b/drivers/tty/tty_io.c
1428 @@ -1543,6 +1543,9 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
1429 "%s: %s driver does not set tty->port. This will crash the kernel later. Fix the driver!\n",
1430 __func__, tty->driver->name);
1431
1432 + retval = tty_ldisc_lock(tty, 5 * HZ);
1433 + if (retval)
1434 + goto err_release_lock;
1435 tty->port->itty = tty;
1436
1437 /*
1438 @@ -1553,6 +1556,7 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
1439 retval = tty_ldisc_setup(tty, tty->link);
1440 if (retval)
1441 goto err_release_tty;
1442 + tty_ldisc_unlock(tty);
1443 /* Return the tty locked so that it cannot vanish under the caller */
1444 return tty;
1445
1446 @@ -1565,9 +1569,11 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
1447
1448 /* call the tty release_tty routine to clean out this slot */
1449 err_release_tty:
1450 - tty_unlock(tty);
1451 + tty_ldisc_unlock(tty);
1452 tty_info_ratelimited(tty, "ldisc open failed (%d), clearing slot %d\n",
1453 retval, idx);
1454 +err_release_lock:
1455 + tty_unlock(tty);
1456 release_tty(tty, idx);
1457 return ERR_PTR(retval);
1458 }
1459 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
1460 index b0500a0a87b8..3a9e2a2fd4c6 100644
1461 --- a/drivers/tty/tty_ldisc.c
1462 +++ b/drivers/tty/tty_ldisc.c
1463 @@ -336,7 +336,7 @@ static inline void __tty_ldisc_unlock(struct tty_struct *tty)
1464 ldsem_up_write(&tty->ldisc_sem);
1465 }
1466
1467 -static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
1468 +int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
1469 {
1470 int ret;
1471
1472 @@ -347,7 +347,7 @@ static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
1473 return 0;
1474 }
1475
1476 -static void tty_ldisc_unlock(struct tty_struct *tty)
1477 +void tty_ldisc_unlock(struct tty_struct *tty)
1478 {
1479 clear_bit(TTY_LDISC_HALTED, &tty->flags);
1480 __tty_ldisc_unlock(tty);
1481 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1482 index ea20b2cc189f..34d23cc99fbd 100644
1483 --- a/drivers/usb/class/cdc-acm.c
1484 +++ b/drivers/usb/class/cdc-acm.c
1485 @@ -375,7 +375,7 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
1486
1487 res = usb_submit_urb(acm->read_urbs[index], mem_flags);
1488 if (res) {
1489 - if (res != -EPERM) {
1490 + if (res != -EPERM && res != -ENODEV) {
1491 dev_err(&acm->data->dev,
1492 "urb %d failed submission with %d\n",
1493 index, res);
1494 @@ -1706,6 +1706,9 @@ static const struct usb_device_id acm_ids[] = {
1495 { USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */
1496 .driver_info = SINGLE_RX_URB, /* firmware bug */
1497 },
1498 + { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
1499 + .driver_info = SINGLE_RX_URB,
1500 + },
1501 { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
1502 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1503 },
1504 diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
1505 index 325bf21ba13b..406758ed0b23 100644
1506 --- a/drivers/usb/gadget/composite.c
1507 +++ b/drivers/usb/gadget/composite.c
1508 @@ -150,7 +150,6 @@ int config_ep_by_speed(struct usb_gadget *g,
1509 struct usb_function *f,
1510 struct usb_ep *_ep)
1511 {
1512 - struct usb_composite_dev *cdev = get_gadget_data(g);
1513 struct usb_endpoint_descriptor *chosen_desc = NULL;
1514 struct usb_descriptor_header **speed_desc = NULL;
1515
1516 @@ -229,8 +228,12 @@ int config_ep_by_speed(struct usb_gadget *g,
1517 _ep->maxburst = comp_desc->bMaxBurst + 1;
1518 break;
1519 default:
1520 - if (comp_desc->bMaxBurst != 0)
1521 + if (comp_desc->bMaxBurst != 0) {
1522 + struct usb_composite_dev *cdev;
1523 +
1524 + cdev = get_gadget_data(g);
1525 ERROR(cdev, "ep0 bMaxBurst must be 0\n");
1526 + }
1527 _ep->maxburst = 1;
1528 break;
1529 }
1530 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
1531 index 7b107e43b1c4..d90bf57ba30e 100644
1532 --- a/drivers/usb/gadget/function/f_fs.c
1533 +++ b/drivers/usb/gadget/function/f_fs.c
1534 @@ -3725,7 +3725,8 @@ static void ffs_closed(struct ffs_data *ffs)
1535 ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
1536 ffs_dev_unlock();
1537
1538 - unregister_gadget_item(ci);
1539 + if (test_bit(FFS_FL_BOUND, &ffs->flags))
1540 + unregister_gadget_item(ci);
1541 return;
1542 done:
1543 ffs_dev_unlock();
1544 diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
1545 index d685d82dcf48..e97539fc127e 100644
1546 --- a/drivers/usb/gadget/udc/core.c
1547 +++ b/drivers/usb/gadget/udc/core.c
1548 @@ -913,7 +913,7 @@ int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
1549 return 0;
1550
1551 /* "high bandwidth" works only at high speed */
1552 - if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp(desc) & (3<<11))
1553 + if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp_mult(desc) > 1)
1554 return 0;
1555
1556 switch (type) {
1557 diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
1558 index 56ecb8b5115d..584ae8cbaf1c 100644
1559 --- a/drivers/usb/serial/Kconfig
1560 +++ b/drivers/usb/serial/Kconfig
1561 @@ -63,6 +63,7 @@ config USB_SERIAL_SIMPLE
1562 - Google USB serial devices
1563 - HP4x calculators
1564 - a number of Motorola phones
1565 + - Motorola Tetra devices
1566 - Novatel Wireless GPS receivers
1567 - Siemens USB/MPI adapter.
1568 - ViVOtech ViVOpay USB device.
1569 diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
1570 index 464db17b5328..de61271f2ba3 100644
1571 --- a/drivers/usb/serial/io_edgeport.c
1572 +++ b/drivers/usb/serial/io_edgeport.c
1573 @@ -2215,7 +2215,6 @@ static int write_cmd_usb(struct edgeport_port *edge_port,
1574 /* something went wrong */
1575 dev_err(dev, "%s - usb_submit_urb(write command) failed, status = %d\n",
1576 __func__, status);
1577 - usb_kill_urb(urb);
1578 usb_free_urb(urb);
1579 atomic_dec(&CmdUrbs);
1580 return status;
1581 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1582 index a818c43a02ec..1799aa058a5b 100644
1583 --- a/drivers/usb/serial/option.c
1584 +++ b/drivers/usb/serial/option.c
1585 @@ -383,6 +383,9 @@ static void option_instat_callback(struct urb *urb);
1586 #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
1587 #define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01
1588
1589 +/* Fujisoft products */
1590 +#define FUJISOFT_PRODUCT_FS040U 0x9b02
1591 +
1592 /* iBall 3.5G connect wireless modem */
1593 #define IBALL_3_5G_CONNECT 0x9605
1594
1595 @@ -1897,6 +1900,8 @@ static const struct usb_device_id option_ids[] = {
1596 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
1597 .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
1598 },
1599 + {USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U),
1600 + .driver_info = (kernel_ulong_t)&net_intf3_blacklist},
1601 { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
1602 { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
1603 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1604 diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
1605 index a51b28379850..3da25ad267a2 100644
1606 --- a/drivers/usb/serial/pl2303.c
1607 +++ b/drivers/usb/serial/pl2303.c
1608 @@ -39,6 +39,7 @@ static const struct usb_device_id id_table[] = {
1609 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
1610 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) },
1611 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) },
1612 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_CHILITAG) },
1613 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) },
1614 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) },
1615 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
1616 diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
1617 index 3b5a15d1dc0d..123289085ee2 100644
1618 --- a/drivers/usb/serial/pl2303.h
1619 +++ b/drivers/usb/serial/pl2303.h
1620 @@ -17,6 +17,7 @@
1621 #define PL2303_PRODUCT_ID_DCU11 0x1234
1622 #define PL2303_PRODUCT_ID_PHAROS 0xaaa0
1623 #define PL2303_PRODUCT_ID_RSAQ3 0xaaa2
1624 +#define PL2303_PRODUCT_ID_CHILITAG 0xaaa8
1625 #define PL2303_PRODUCT_ID_ALDIGA 0x0611
1626 #define PL2303_PRODUCT_ID_MMX 0x0612
1627 #define PL2303_PRODUCT_ID_GPRS 0x0609
1628 diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
1629 index e98b6e57b703..6aa7ff2c1cf7 100644
1630 --- a/drivers/usb/serial/usb-serial-simple.c
1631 +++ b/drivers/usb/serial/usb-serial-simple.c
1632 @@ -80,6 +80,11 @@ DEVICE(vivopay, VIVOPAY_IDS);
1633 { USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */
1634 DEVICE(moto_modem, MOTO_IDS);
1635
1636 +/* Motorola Tetra driver */
1637 +#define MOTOROLA_TETRA_IDS() \
1638 + { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
1639 +DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
1640 +
1641 /* Novatel Wireless GPS driver */
1642 #define NOVATEL_IDS() \
1643 { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */
1644 @@ -110,6 +115,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
1645 &google_device,
1646 &vivopay_device,
1647 &moto_modem_device,
1648 + &motorola_tetra_device,
1649 &novatel_gps_device,
1650 &hp4x_device,
1651 &suunto_device,
1652 @@ -125,6 +131,7 @@ static const struct usb_device_id id_table[] = {
1653 GOOGLE_IDS(),
1654 VIVOPAY_IDS(),
1655 MOTO_IDS(),
1656 + MOTOROLA_TETRA_IDS(),
1657 NOVATEL_IDS(),
1658 HP4X_IDS(),
1659 SUUNTO_IDS(),
1660 diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
1661 index 9876af4ab64e..6891e9092775 100644
1662 --- a/drivers/usb/storage/uas.c
1663 +++ b/drivers/usb/storage/uas.c
1664 @@ -1076,20 +1076,19 @@ static int uas_post_reset(struct usb_interface *intf)
1665 return 0;
1666
1667 err = uas_configure_endpoints(devinfo);
1668 - if (err) {
1669 + if (err && err != ENODEV)
1670 shost_printk(KERN_ERR, shost,
1671 "%s: alloc streams error %d after reset",
1672 __func__, err);
1673 - return 1;
1674 - }
1675
1676 + /* we must unblock the host in every case lest we deadlock */
1677 spin_lock_irqsave(shost->host_lock, flags);
1678 scsi_report_bus_reset(shost, 0);
1679 spin_unlock_irqrestore(shost->host_lock, flags);
1680
1681 scsi_unblock_requests(shost);
1682
1683 - return 0;
1684 + return err ? 1 : 0;
1685 }
1686
1687 static int uas_suspend(struct usb_interface *intf, pm_message_t message)
1688 diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
1689 index 7f161b095176..dbe615ba07c9 100644
1690 --- a/drivers/usb/usbip/vhci_hcd.c
1691 +++ b/drivers/usb/usbip/vhci_hcd.c
1692 @@ -300,7 +300,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1693 case USB_PORT_FEAT_POWER:
1694 usbip_dbg_vhci_rh(
1695 " ClearPortFeature: USB_PORT_FEAT_POWER\n");
1696 - dum->port_status[rhport] = 0;
1697 + dum->port_status[rhport] &= ~USB_PORT_STAT_POWER;
1698 dum->resuming = 0;
1699 break;
1700 case USB_PORT_FEAT_C_RESET:
1701 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
1702 index e4b48f377d3a..c56253a1e5b4 100644
1703 --- a/fs/btrfs/free-space-cache.c
1704 +++ b/fs/btrfs/free-space-cache.c
1705 @@ -1253,7 +1253,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
1706 /* Lock all pages first so we can lock the extent safely. */
1707 ret = io_ctl_prepare_pages(io_ctl, inode, 0);
1708 if (ret)
1709 - goto out;
1710 + goto out_unlock;
1711
1712 lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
1713 &cached_state);
1714 @@ -1346,6 +1346,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
1715 out_nospc:
1716 cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list);
1717
1718 +out_unlock:
1719 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
1720 up_write(&block_group->data_rwsem);
1721
1722 diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c
1723 index fd8c9a5bcac4..77d136ac8909 100644
1724 --- a/fs/nfs_common/grace.c
1725 +++ b/fs/nfs_common/grace.c
1726 @@ -30,7 +30,11 @@ locks_start_grace(struct net *net, struct lock_manager *lm)
1727 struct list_head *grace_list = net_generic(net, grace_net_id);
1728
1729 spin_lock(&grace_lock);
1730 - list_add(&lm->list, grace_list);
1731 + if (list_empty(&lm->list))
1732 + list_add(&lm->list, grace_list);
1733 + else
1734 + WARN(1, "double list_add attempt detected in net %x %s\n",
1735 + net->ns.inum, (net == &init_net) ? "(init_net)" : "");
1736 spin_unlock(&grace_lock);
1737 }
1738 EXPORT_SYMBOL_GPL(locks_start_grace);
1739 @@ -104,7 +108,9 @@ grace_exit_net(struct net *net)
1740 {
1741 struct list_head *grace_list = net_generic(net, grace_net_id);
1742
1743 - BUG_ON(!list_empty(grace_list));
1744 + WARN_ONCE(!list_empty(grace_list),
1745 + "net %x %s: grace_list is not empty\n",
1746 + net->ns.inum, __func__);
1747 }
1748
1749 static struct pernet_operations grace_net_ops = {
1750 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
1751 index 9ebb2d7c8182..f463c4e0b2ea 100644
1752 --- a/fs/nfsd/nfs4state.c
1753 +++ b/fs/nfsd/nfs4state.c
1754 @@ -63,12 +63,16 @@ static const stateid_t zero_stateid = {
1755 static const stateid_t currentstateid = {
1756 .si_generation = 1,
1757 };
1758 +static const stateid_t close_stateid = {
1759 + .si_generation = 0xffffffffU,
1760 +};
1761
1762 static u64 current_sessionid = 1;
1763
1764 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
1765 #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
1766 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
1767 +#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
1768
1769 /* forward declarations */
1770 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
1771 @@ -4866,7 +4870,8 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
1772 struct nfs4_stid *s;
1773 __be32 status = nfserr_bad_stateid;
1774
1775 - if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
1776 + if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
1777 + CLOSE_STATEID(stateid))
1778 return status;
1779 /* Client debugging aid. */
1780 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
1781 @@ -4924,7 +4929,8 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
1782 else if (typemask & NFS4_DELEG_STID)
1783 typemask |= NFS4_REVOKED_DELEG_STID;
1784
1785 - if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
1786 + if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
1787 + CLOSE_STATEID(stateid))
1788 return nfserr_bad_stateid;
1789 status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
1790 if (status == nfserr_stale_clientid) {
1791 @@ -5175,15 +5181,9 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
1792 status = nfsd4_check_seqid(cstate, sop, seqid);
1793 if (status)
1794 return status;
1795 - if (stp->st_stid.sc_type == NFS4_CLOSED_STID
1796 - || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
1797 - /*
1798 - * "Closed" stateid's exist *only* to return
1799 - * nfserr_replay_me from the previous step, and
1800 - * revoked delegations are kept only for free_stateid.
1801 - */
1802 - return nfserr_bad_stateid;
1803 - mutex_lock(&stp->st_mutex);
1804 + status = nfsd4_lock_ol_stateid(stp);
1805 + if (status != nfs_ok)
1806 + return status;
1807 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
1808 if (status == nfs_ok)
1809 status = nfs4_check_fh(current_fh, &stp->st_stid);
1810 @@ -5407,6 +5407,11 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1811 nfsd4_close_open_stateid(stp);
1812 mutex_unlock(&stp->st_mutex);
1813
1814 +	/* See RFC5661 section 18.2.4 */
1815 + if (stp->st_stid.sc_client->cl_minorversion)
1816 + memcpy(&close->cl_stateid, &close_stateid,
1817 + sizeof(close->cl_stateid));
1818 +
1819 /* put reference from nfs4_preprocess_seqid_op */
1820 nfs4_put_stid(&stp->st_stid);
1821 out:
1822 @@ -7007,6 +7012,10 @@ static int nfs4_state_create_net(struct net *net)
1823 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
1824 nn->conf_name_tree = RB_ROOT;
1825 nn->unconf_name_tree = RB_ROOT;
1826 + nn->boot_time = get_seconds();
1827 + nn->grace_ended = false;
1828 + nn->nfsd4_manager.block_opens = true;
1829 + INIT_LIST_HEAD(&nn->nfsd4_manager.list);
1830 INIT_LIST_HEAD(&nn->client_lru);
1831 INIT_LIST_HEAD(&nn->close_lru);
1832 INIT_LIST_HEAD(&nn->del_recall_lru);
1833 @@ -7064,9 +7073,6 @@ nfs4_state_start_net(struct net *net)
1834 ret = nfs4_state_create_net(net);
1835 if (ret)
1836 return ret;
1837 - nn->boot_time = get_seconds();
1838 - nn->grace_ended = false;
1839 - nn->nfsd4_manager.block_opens = true;
1840 locks_start_grace(net, &nn->nfsd4_manager);
1841 nfsd4_client_tracking_init(net);
1842 printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
1843 diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
1844 index 1bfac28b7e7d..f9246ac4eef8 100644
1845 --- a/fs/quota/dquot.c
1846 +++ b/fs/quota/dquot.c
1847 @@ -2985,7 +2985,8 @@ static int __init dquot_init(void)
1848 pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
1849 " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
1850
1851 - register_shrinker(&dqcache_shrinker);
1852 + if (register_shrinker(&dqcache_shrinker))
1853 + panic("Cannot register dquot shrinker");
1854
1855 return 0;
1856 }
1857 diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
1858 index 0a6ad4e71e88..e101d70d2327 100644
1859 --- a/fs/reiserfs/super.c
1860 +++ b/fs/reiserfs/super.c
1861 @@ -2521,7 +2521,6 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
1862 return err;
1863 if (inode->i_size < off + len - towrite)
1864 i_size_write(inode, off + len - towrite);
1865 - inode->i_version++;
1866 inode->i_mtime = inode->i_ctime = current_time(inode);
1867 mark_inode_dirty(inode);
1868 return len - towrite;
1869 diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
1870 index d31cd1ebd8e9..f3acecf3869d 100644
1871 --- a/fs/xfs/xfs_aops.c
1872 +++ b/fs/xfs/xfs_aops.c
1873 @@ -391,7 +391,7 @@ xfs_map_blocks(
1874 (ip->i_df.if_flags & XFS_IFEXTENTS));
1875 ASSERT(offset <= mp->m_super->s_maxbytes);
1876
1877 - if (offset + count > mp->m_super->s_maxbytes)
1878 + if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes)
1879 count = mp->m_super->s_maxbytes - offset;
1880 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
1881 offset_fsb = XFS_B_TO_FSBT(mp, offset);
1882 @@ -1295,7 +1295,7 @@ xfs_map_trim_size(
1883 if (mapping_size > size)
1884 mapping_size = size;
1885 if (offset < i_size_read(inode) &&
1886 - offset + mapping_size >= i_size_read(inode)) {
1887 + (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) {
1888 /* limit mapping to block that spans EOF */
1889 mapping_size = roundup_64(i_size_read(inode) - offset,
1890 i_blocksize(inode));
1891 @@ -1347,7 +1347,7 @@ __xfs_get_blocks(
1892 lockmode = xfs_ilock_data_map_shared(ip);
1893
1894 ASSERT(offset <= mp->m_super->s_maxbytes);
1895 - if (offset + size > mp->m_super->s_maxbytes)
1896 + if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes)
1897 size = mp->m_super->s_maxbytes - offset;
1898 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
1899 offset_fsb = XFS_B_TO_FSBT(mp, offset);
1900 diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
1901 index eca7baecc9f0..3f45d9867e10 100644
1902 --- a/fs/xfs/xfs_buf.c
1903 +++ b/fs/xfs/xfs_buf.c
1904 @@ -1785,22 +1785,27 @@ xfs_alloc_buftarg(
1905 btp->bt_bdi = blk_get_backing_dev_info(bdev);
1906
1907 if (xfs_setsize_buftarg_early(btp, bdev))
1908 - goto error;
1909 + goto error_free;
1910
1911 if (list_lru_init(&btp->bt_lru))
1912 - goto error;
1913 + goto error_free;
1914
1915 if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
1916 - goto error;
1917 + goto error_lru;
1918
1919 btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
1920 btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
1921 btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1922 btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
1923 - register_shrinker(&btp->bt_shrinker);
1924 + if (register_shrinker(&btp->bt_shrinker))
1925 + goto error_pcpu;
1926 return btp;
1927
1928 -error:
1929 +error_pcpu:
1930 + percpu_counter_destroy(&btp->bt_io_count);
1931 +error_lru:
1932 + list_lru_destroy(&btp->bt_lru);
1933 +error_free:
1934 kmem_free(btp);
1935 return NULL;
1936 }
1937 diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
1938 index 9d06cc30e875..7a7b3ccf2273 100644
1939 --- a/fs/xfs/xfs_dquot.c
1940 +++ b/fs/xfs/xfs_dquot.c
1941 @@ -1004,14 +1004,22 @@ xfs_qm_dqflush_done(
1942 * holding the lock before removing the dquot from the AIL.
1943 */
1944 if ((lip->li_flags & XFS_LI_IN_AIL) &&
1945 - lip->li_lsn == qip->qli_flush_lsn) {
1946 + ((lip->li_lsn == qip->qli_flush_lsn) ||
1947 + (lip->li_flags & XFS_LI_FAILED))) {
1948
1949 /* xfs_trans_ail_delete() drops the AIL lock. */
1950 spin_lock(&ailp->xa_lock);
1951 - if (lip->li_lsn == qip->qli_flush_lsn)
1952 + if (lip->li_lsn == qip->qli_flush_lsn) {
1953 xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
1954 - else
1955 + } else {
1956 + /*
1957 + * Clear the failed state since we are about to drop the
1958 + * flush lock
1959 + */
1960 + if (lip->li_flags & XFS_LI_FAILED)
1961 + xfs_clear_li_failed(lip);
1962 spin_unlock(&ailp->xa_lock);
1963 + }
1964 }
1965
1966 /*
1967 diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
1968 index 2c7a1629e064..664dea105e76 100644
1969 --- a/fs/xfs/xfs_dquot_item.c
1970 +++ b/fs/xfs/xfs_dquot_item.c
1971 @@ -137,6 +137,26 @@ xfs_qm_dqunpin_wait(
1972 wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
1973 }
1974
1975 +/*
1976 + * Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer
1977 + * have been failed during writeback
1978 + *
1979 + * this informs the AIL that the dquot is already flush locked on the next push,
1980 + * and acquires a hold on the buffer to ensure that it isn't reclaimed before
1981 + * dirty data makes it to disk.
1982 + */
1983 +STATIC void
1984 +xfs_dquot_item_error(
1985 + struct xfs_log_item *lip,
1986 + struct xfs_buf *bp)
1987 +{
1988 + struct xfs_dquot *dqp;
1989 +
1990 + dqp = DQUOT_ITEM(lip)->qli_dquot;
1991 + ASSERT(!completion_done(&dqp->q_flush));
1992 + xfs_set_li_failed(lip, bp);
1993 +}
1994 +
1995 STATIC uint
1996 xfs_qm_dquot_logitem_push(
1997 struct xfs_log_item *lip,
1998 @@ -144,13 +164,28 @@ xfs_qm_dquot_logitem_push(
1999 __acquires(&lip->li_ailp->xa_lock)
2000 {
2001 struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
2002 - struct xfs_buf *bp = NULL;
2003 + struct xfs_buf *bp = lip->li_buf;
2004 uint rval = XFS_ITEM_SUCCESS;
2005 int error;
2006
2007 if (atomic_read(&dqp->q_pincount) > 0)
2008 return XFS_ITEM_PINNED;
2009
2010 + /*
2011 + * The buffer containing this item failed to be written back
2012 + * previously. Resubmit the buffer for IO
2013 + */
2014 + if (lip->li_flags & XFS_LI_FAILED) {
2015 + if (!xfs_buf_trylock(bp))
2016 + return XFS_ITEM_LOCKED;
2017 +
2018 + if (!xfs_buf_resubmit_failed_buffers(bp, lip, buffer_list))
2019 + rval = XFS_ITEM_FLUSHING;
2020 +
2021 + xfs_buf_unlock(bp);
2022 + return rval;
2023 + }
2024 +
2025 if (!xfs_dqlock_nowait(dqp))
2026 return XFS_ITEM_LOCKED;
2027
2028 @@ -242,7 +277,8 @@ static const struct xfs_item_ops xfs_dquot_item_ops = {
2029 .iop_unlock = xfs_qm_dquot_logitem_unlock,
2030 .iop_committed = xfs_qm_dquot_logitem_committed,
2031 .iop_push = xfs_qm_dquot_logitem_push,
2032 - .iop_committing = xfs_qm_dquot_logitem_committing
2033 + .iop_committing = xfs_qm_dquot_logitem_committing,
2034 + .iop_error = xfs_dquot_item_error
2035 };
2036
2037 /*
2038 diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
2039 index 98ca9f1b6a07..c5f2f1e3cc4b 100644
2040 --- a/fs/xfs/xfs_inode.c
2041 +++ b/fs/xfs/xfs_inode.c
2042 @@ -2429,6 +2429,24 @@ xfs_ifree_cluster(
2043 return 0;
2044 }
2045
2046 +/*
2047 + * Free any local-format buffers sitting around before we reset to
2048 + * extents format.
2049 + */
2050 +static inline void
2051 +xfs_ifree_local_data(
2052 + struct xfs_inode *ip,
2053 + int whichfork)
2054 +{
2055 + struct xfs_ifork *ifp;
2056 +
2057 + if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
2058 + return;
2059 +
2060 + ifp = XFS_IFORK_PTR(ip, whichfork);
2061 + xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
2062 +}
2063 +
2064 /*
2065 * This is called to return an inode to the inode free list.
2066 * The inode should already be truncated to 0 length and have
2067 @@ -2466,6 +2484,9 @@ xfs_ifree(
2068 if (error)
2069 return error;
2070
2071 + xfs_ifree_local_data(ip, XFS_DATA_FORK);
2072 + xfs_ifree_local_data(ip, XFS_ATTR_FORK);
2073 +
2074 VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
2075 ip->i_d.di_flags = 0;
2076 ip->i_d.di_dmevmask = 0;
2077 diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
2078 index 25ed105bbcfb..20ee90c47cd5 100644
2079 --- a/include/linux/mlx5/mlx5_ifc.h
2080 +++ b/include/linux/mlx5/mlx5_ifc.h
2081 @@ -737,6 +737,12 @@ enum {
2082 MLX5_CAP_PORT_TYPE_ETH = 0x1,
2083 };
2084
2085 +enum {
2086 + MLX5_CAP_UMR_FENCE_STRONG = 0x0,
2087 + MLX5_CAP_UMR_FENCE_SMALL = 0x1,
2088 + MLX5_CAP_UMR_FENCE_NONE = 0x2,
2089 +};
2090 +
2091 struct mlx5_ifc_cmd_hca_cap_bits {
2092 u8 reserved_at_0[0x80];
2093
2094 @@ -838,7 +844,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
2095 u8 striding_rq[0x1];
2096 u8 reserved_at_201[0x2];
2097 u8 ipoib_basic_offloads[0x1];
2098 - u8 reserved_at_205[0xa];
2099 + u8 reserved_at_205[0x5];
2100 + u8 umr_fence[0x2];
2101 + u8 reserved_at_20c[0x3];
2102 u8 drain_sigerr[0x1];
2103 u8 cmdif_checksum[0x2];
2104 u8 sigerr_cqe[0x1];
2105 diff --git a/include/linux/tty.h b/include/linux/tty.h
2106 index 40144f382516..a41244fe58d0 100644
2107 --- a/include/linux/tty.h
2108 +++ b/include/linux/tty.h
2109 @@ -394,6 +394,8 @@ extern struct tty_struct *get_current_tty(void);
2110 /* tty_io.c */
2111 extern int __init tty_init(void);
2112 extern const char *tty_name(const struct tty_struct *tty);
2113 +extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout);
2114 +extern void tty_ldisc_unlock(struct tty_struct *tty);
2115 #else
2116 static inline void console_init(void)
2117 { }
2118 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
2119 index d1380ed93fdf..20cf3be9a5e8 100644
2120 --- a/mm/kmemleak.c
2121 +++ b/mm/kmemleak.c
2122 @@ -1442,6 +1442,8 @@ static void kmemleak_scan(void)
2123 if (page_count(page) == 0)
2124 continue;
2125 scan_block(page, page + 1, NULL);
2126 + if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page))))
2127 + cond_resched();
2128 }
2129 }
2130 put_online_mems();
2131 diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
2132 index b747c9645e43..fed598a202c8 100644
2133 --- a/net/mac80211/mesh_hwmp.c
2134 +++ b/net/mac80211/mesh_hwmp.c
2135 @@ -788,7 +788,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
2136 struct mesh_path *mpath;
2137 u8 ttl, flags, hopcount;
2138 const u8 *orig_addr;
2139 - u32 orig_sn, metric, metric_txsta, interval;
2140 + u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval;
2141 bool root_is_gate;
2142
2143 ttl = rann->rann_ttl;
2144 @@ -799,7 +799,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
2145 interval = le32_to_cpu(rann->rann_interval);
2146 hopcount = rann->rann_hopcount;
2147 hopcount++;
2148 - metric = le32_to_cpu(rann->rann_metric);
2149 + orig_metric = le32_to_cpu(rann->rann_metric);
2150
2151 /* Ignore our own RANNs */
2152 if (ether_addr_equal(orig_addr, sdata->vif.addr))
2153 @@ -816,7 +816,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
2154 return;
2155 }
2156
2157 - metric_txsta = airtime_link_metric_get(local, sta);
2158 + last_hop_metric = airtime_link_metric_get(local, sta);
2159 + new_metric = orig_metric + last_hop_metric;
2160 + if (new_metric < orig_metric)
2161 + new_metric = MAX_METRIC;
2162
2163 mpath = mesh_path_lookup(sdata, orig_addr);
2164 if (!mpath) {
2165 @@ -829,7 +832,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
2166 }
2167
2168 if (!(SN_LT(mpath->sn, orig_sn)) &&
2169 - !(mpath->sn == orig_sn && metric < mpath->rann_metric)) {
2170 + !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) {
2171 rcu_read_unlock();
2172 return;
2173 }
2174 @@ -847,7 +850,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
2175 }
2176
2177 mpath->sn = orig_sn;
2178 - mpath->rann_metric = metric + metric_txsta;
2179 + mpath->rann_metric = new_metric;
2180 mpath->is_root = true;
2181 /* Recording RANNs sender address to send individually
2182 * addressed PREQs destined for root mesh STA */
2183 @@ -867,7 +870,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
2184 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
2185 orig_sn, 0, NULL, 0, broadcast_addr,
2186 hopcount, ttl, interval,
2187 - metric + metric_txsta, 0, sdata);
2188 + new_metric, 0, sdata);
2189 }
2190
2191 rcu_read_unlock();
2192 diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
2193 index 07925418c2a5..1668916bdbde 100644
2194 --- a/net/openvswitch/flow_netlink.c
2195 +++ b/net/openvswitch/flow_netlink.c
2196 @@ -1789,14 +1789,11 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
2197
2198 #define MAX_ACTIONS_BUFSIZE (32 * 1024)
2199
2200 -static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
2201 +static struct sw_flow_actions *nla_alloc_flow_actions(int size)
2202 {
2203 struct sw_flow_actions *sfa;
2204
2205 - if (size > MAX_ACTIONS_BUFSIZE) {
2206 - OVS_NLERR(log, "Flow action size %u bytes exceeds max", size);
2207 - return ERR_PTR(-EINVAL);
2208 - }
2209 + WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
2210
2211 sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
2212 if (!sfa)
2213 @@ -1869,12 +1866,15 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
2214 new_acts_size = ksize(*sfa) * 2;
2215
2216 if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
2217 - if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
2218 + if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
2219 + OVS_NLERR(log, "Flow action size exceeds max %u",
2220 + MAX_ACTIONS_BUFSIZE);
2221 return ERR_PTR(-EMSGSIZE);
2222 + }
2223 new_acts_size = MAX_ACTIONS_BUFSIZE;
2224 }
2225
2226 - acts = nla_alloc_flow_actions(new_acts_size, log);
2227 + acts = nla_alloc_flow_actions(new_acts_size);
2228 if (IS_ERR(acts))
2229 return (void *)acts;
2230
2231 @@ -2500,7 +2500,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
2232 {
2233 int err;
2234
2235 - *sfa = nla_alloc_flow_actions(nla_len(attr), log);
2236 + *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
2237 if (IS_ERR(*sfa))
2238 return PTR_ERR(*sfa);
2239
2240 diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
2241 index e01c825bc683..d24d14ea8ba4 100644
2242 --- a/net/sunrpc/xprtsock.c
2243 +++ b/net/sunrpc/xprtsock.c
2244 @@ -2381,6 +2381,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
2245 case -ECONNREFUSED:
2246 case -ECONNRESET:
2247 case -ENETUNREACH:
2248 + case -EHOSTUNREACH:
2249 case -EADDRINUSE:
2250 case -ENOBUFS:
2251 /* retry with existing socket, after a delay */
2252 diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c
2253 index 1c14c2595158..4b36323ea64b 100644
2254 --- a/tools/gpio/gpio-event-mon.c
2255 +++ b/tools/gpio/gpio-event-mon.c
2256 @@ -23,6 +23,7 @@
2257 #include <getopt.h>
2258 #include <inttypes.h>
2259 #include <sys/ioctl.h>
2260 +#include <sys/types.h>
2261 #include <linux/gpio.h>
2262
2263 int monitor_device(const char *device_name,
2264 diff --git a/tools/power/cpupower/bench/system.c b/tools/power/cpupower/bench/system.c
2265 index c25a74ae51ba..2bb3eef7d5c1 100644
2266 --- a/tools/power/cpupower/bench/system.c
2267 +++ b/tools/power/cpupower/bench/system.c
2268 @@ -61,7 +61,7 @@ int set_cpufreq_governor(char *governor, unsigned int cpu)
2269
2270 dprintf("set %s as cpufreq governor\n", governor);
2271
2272 - if (cpupower_is_cpu_online(cpu) != 0) {
2273 + if (cpupower_is_cpu_online(cpu) != 1) {
2274 perror("cpufreq_cpu_exists");
2275 fprintf(stderr, "error: cpu %u does not exist\n", cpu);
2276 return -1;
2277 diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
2278 index 1b5da0066ebf..5b3205f16217 100644
2279 --- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
2280 +++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
2281 @@ -130,15 +130,18 @@ static struct cpuidle_monitor *cpuidle_register(void)
2282 {
2283 int num;
2284 char *tmp;
2285 + int this_cpu;
2286 +
2287 + this_cpu = sched_getcpu();
2288
2289 /* Assume idle state count is the same for all CPUs */
2290 - cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(0);
2291 + cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(this_cpu);
2292
2293 if (cpuidle_sysfs_monitor.hw_states_num <= 0)
2294 return NULL;
2295
2296 for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) {
2297 - tmp = cpuidle_state_name(0, num);
2298 + tmp = cpuidle_state_name(this_cpu, num);
2299 if (tmp == NULL)
2300 continue;
2301
2302 @@ -146,7 +149,7 @@ static struct cpuidle_monitor *cpuidle_register(void)
2303 strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1);
2304 free(tmp);
2305
2306 - tmp = cpuidle_state_desc(0, num);
2307 + tmp = cpuidle_state_desc(this_cpu, num);
2308 if (tmp == NULL)
2309 continue;
2310 strncpy(cpuidle_cstates[num].desc, tmp, CSTATE_DESC_LEN - 1);
2311 diff --git a/tools/usb/usbip/src/usbip_bind.c b/tools/usb/usbip/src/usbip_bind.c
2312 index fa46141ae68b..e121cfb1746a 100644
2313 --- a/tools/usb/usbip/src/usbip_bind.c
2314 +++ b/tools/usb/usbip/src/usbip_bind.c
2315 @@ -144,6 +144,7 @@ static int bind_device(char *busid)
2316 int rc;
2317 struct udev *udev;
2318 struct udev_device *dev;
2319 + const char *devpath;
2320
2321 /* Check whether the device with this bus ID exists. */
2322 udev = udev_new();
2323 @@ -152,8 +153,16 @@ static int bind_device(char *busid)
2324 err("device with the specified bus ID does not exist");
2325 return -1;
2326 }
2327 + devpath = udev_device_get_devpath(dev);
2328 udev_unref(udev);
2329
2330 + /* If the device is already attached to vhci_hcd - bail out */
2331 + if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
2332 + err("bind loop detected: device: %s is attached to %s\n",
2333 + devpath, USBIP_VHCI_DRV_NAME);
2334 + return -1;
2335 + }
2336 +
2337 rc = unbind_other(busid);
2338 if (rc == UNBIND_ST_FAILED) {
2339 err("could not unbind driver from device on busid %s", busid);
2340 diff --git a/tools/usb/usbip/src/usbip_list.c b/tools/usb/usbip/src/usbip_list.c
2341 index f1b38e866dd7..d65a9f444174 100644
2342 --- a/tools/usb/usbip/src/usbip_list.c
2343 +++ b/tools/usb/usbip/src/usbip_list.c
2344 @@ -187,6 +187,7 @@ static int list_devices(bool parsable)
2345 const char *busid;
2346 char product_name[128];
2347 int ret = -1;
2348 + const char *devpath;
2349
2350 /* Create libudev context. */
2351 udev = udev_new();
2352 @@ -209,6 +210,14 @@ static int list_devices(bool parsable)
2353 path = udev_list_entry_get_name(dev_list_entry);
2354 dev = udev_device_new_from_syspath(udev, path);
2355
2356 + /* Ignore devices attached to vhci_hcd */
2357 + devpath = udev_device_get_devpath(dev);
2358 + if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
2359 + dbg("Skip the device %s already attached to %s\n",
2360 + devpath, USBIP_VHCI_DRV_NAME);
2361 + continue;
2362 + }
2363 +
2364 /* Get device information. */
2365 idVendor = udev_device_get_sysattr_value(dev, "idVendor");
2366 idProduct = udev_device_get_sysattr_value(dev, "idProduct");