Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.13/0101-4.13.2-all-fixes.patch

Revision 2994
Mon Oct 9 08:50:00 2017 UTC by niro
File size: 45657 bytes
-linux-4.13.2
1 diff --git a/Documentation/driver-api/firmware/request_firmware.rst b/Documentation/driver-api/firmware/request_firmware.rst
2 index 1c2c4967cd43..cc0aea880824 100644
3 --- a/Documentation/driver-api/firmware/request_firmware.rst
4 +++ b/Documentation/driver-api/firmware/request_firmware.rst
5 @@ -44,17 +44,6 @@ request_firmware_nowait
6 .. kernel-doc:: drivers/base/firmware_class.c
7 :functions: request_firmware_nowait
8
9 -Considerations for suspend and resume
10 -=====================================
11 -
12 -During suspend and resume only the built-in firmware and the firmware cache
13 -elements of the firmware API can be used. This is managed by fw_pm_notify().
14 -
15 -fw_pm_notify
16 -------------
17 -.. kernel-doc:: drivers/base/firmware_class.c
18 - :functions: fw_pm_notify
19 -
20 request firmware API expected driver use
21 ========================================
22
23 diff --git a/Makefile b/Makefile
24 index 41a976854cad..8aad6bc50d52 100644
25 --- a/Makefile
26 +++ b/Makefile
27 @@ -1,6 +1,6 @@
28 VERSION = 4
29 PATCHLEVEL = 13
30 -SUBLEVEL = 1
31 +SUBLEVEL = 2
32 EXTRAVERSION =
33 NAME = Fearless Coyote
34
35 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
36 index ff8b0aa2dfde..42f585379e19 100644
37 --- a/arch/arm/mm/fault.c
38 +++ b/arch/arm/mm/fault.c
39 @@ -315,8 +315,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
40 * signal first. We do not need to release the mmap_sem because
41 * it would already be released in __lock_page_or_retry in
42 * mm/filemap.c. */
43 - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
44 + if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
45 + if (!user_mode(regs))
46 + goto no_context;
47 return 0;
48 + }
49
50 /*
51 * Major/minor page fault accounting is only done on the
52 diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
53 index 51763d674050..a92ac63addf0 100644
54 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
55 +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
56 @@ -323,6 +323,7 @@
57 interrupt-controller;
58 reg = <0x1d00000 0x10000>, /* GICD */
59 <0x1d40000 0x40000>; /* GICR */
60 + interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
61 };
62 };
63
64 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
65 index 9b1dd114956a..56e68dfac974 100644
66 --- a/arch/x86/kvm/mmu.c
67 +++ b/arch/x86/kvm/mmu.c
68 @@ -4839,7 +4839,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
69 * Note: AMD only (since it supports the PFERR_GUEST_PAGE_MASK used
70 * in PFERR_NEXT_GUEST_PAGE)
71 */
72 - if (error_code == PFERR_NESTED_GUEST_PAGE) {
73 + if (vcpu->arch.mmu.direct_map &&
74 + error_code == PFERR_NESTED_GUEST_PAGE) {
75 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
76 return 1;
77 }
78 diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
79 index bfbe1e154128..19b63d20f5d3 100644
80 --- a/drivers/base/firmware_class.c
81 +++ b/drivers/base/firmware_class.c
82 @@ -256,38 +256,6 @@ static int fw_cache_piggyback_on_request(const char *name);
83 * guarding for corner cases a global lock should be OK */
84 static DEFINE_MUTEX(fw_lock);
85
86 -static bool __enable_firmware = false;
87 -
88 -static void enable_firmware(void)
89 -{
90 - mutex_lock(&fw_lock);
91 - __enable_firmware = true;
92 - mutex_unlock(&fw_lock);
93 -}
94 -
95 -static void disable_firmware(void)
96 -{
97 - mutex_lock(&fw_lock);
98 - __enable_firmware = false;
99 - mutex_unlock(&fw_lock);
100 -}
101 -
102 -/*
103 - * When disabled only the built-in firmware and the firmware cache will be
104 - * used to look for firmware.
105 - */
106 -static bool firmware_enabled(void)
107 -{
108 - bool enabled = false;
109 -
110 - mutex_lock(&fw_lock);
111 - if (__enable_firmware)
112 - enabled = true;
113 - mutex_unlock(&fw_lock);
114 -
115 - return enabled;
116 -}
117 -
118 static struct firmware_cache fw_cache;
119
120 static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
121 @@ -1239,12 +1207,6 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
122 if (ret <= 0) /* error or already assigned */
123 goto out;
124
125 - if (!firmware_enabled()) {
126 - WARN(1, "firmware request while host is not available\n");
127 - ret = -EHOSTDOWN;
128 - goto out;
129 - }
130 -
131 ret = fw_get_filesystem_firmware(device, fw->priv);
132 if (ret) {
133 if (!(opt_flags & FW_OPT_NO_WARN))
134 @@ -1755,62 +1717,6 @@ static void device_uncache_fw_images_delay(unsigned long delay)
135 msecs_to_jiffies(delay));
136 }
137
138 -/**
139 - * fw_pm_notify - notifier for suspend/resume
140 - * @notify_block: unused
141 - * @mode: mode we are switching to
142 - * @unused: unused
143 - *
144 - * Used to modify the firmware_class state as we move in between states.
145 - * The firmware_class implements a firmware cache to enable device driver
146 - * to fetch firmware upon resume before the root filesystem is ready. We
147 - * disable API calls which do not use the built-in firmware or the firmware
148 - * cache when we know these calls will not work.
149 - *
150 - * The inner logic behind all this is a bit complex so it is worth summarizing
151 - * the kernel's own suspend/resume process with context and focus on how this
152 - * can impact the firmware API.
153 - *
154 - * First a review on how we go to suspend::
155 - *
156 - * pm_suspend() --> enter_state() -->
157 - * sys_sync()
158 - * suspend_prepare() -->
159 - * __pm_notifier_call_chain(PM_SUSPEND_PREPARE, ...);
160 - * suspend_freeze_processes() -->
161 - * freeze_processes() -->
162 - * __usermodehelper_set_disable_depth(UMH_DISABLED);
163 - * freeze all tasks ...
164 - * freeze_kernel_threads()
165 - * suspend_devices_and_enter() -->
166 - * dpm_suspend_start() -->
167 - * dpm_prepare()
168 - * dpm_suspend()
169 - * suspend_enter() -->
170 - * platform_suspend_prepare()
171 - * dpm_suspend_late()
172 - * freeze_enter()
173 - * syscore_suspend()
174 - *
175 - * When we resume we bail out of a loop from suspend_devices_and_enter() and
176 - * unwind back out to the caller enter_state() where we were before as follows::
177 - *
178 - * enter_state() -->
179 - * suspend_devices_and_enter() --> (bail from loop)
180 - * dpm_resume_end() -->
181 - * dpm_resume()
182 - * dpm_complete()
183 - * suspend_finish() -->
184 - * suspend_thaw_processes() -->
185 - * thaw_processes() -->
186 - * __usermodehelper_set_disable_depth(UMH_FREEZING);
187 - * thaw_workqueues();
188 - * thaw all processes ...
189 - * usermodehelper_enable();
190 - * pm_notifier_call_chain(PM_POST_SUSPEND);
191 - *
192 - * fw_pm_notify() works through pm_notifier_call_chain().
193 - */
194 static int fw_pm_notify(struct notifier_block *notify_block,
195 unsigned long mode, void *unused)
196 {
197 @@ -1824,7 +1730,6 @@ static int fw_pm_notify(struct notifier_block *notify_block,
198 */
199 kill_pending_fw_fallback_reqs(true);
200 device_cache_fw_images();
201 - disable_firmware();
202 break;
203
204 case PM_POST_SUSPEND:
205 @@ -1837,7 +1742,6 @@ static int fw_pm_notify(struct notifier_block *notify_block,
206 mutex_lock(&fw_lock);
207 fw_cache.state = FW_LOADER_NO_CACHE;
208 mutex_unlock(&fw_lock);
209 - enable_firmware();
210
211 device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
212 break;
213 @@ -1886,7 +1790,6 @@ static void __init fw_cache_init(void)
214 static int fw_shutdown_notify(struct notifier_block *unused1,
215 unsigned long unused2, void *unused3)
216 {
217 - disable_firmware();
218 /*
219 * Kill all pending fallback requests to avoid both stalling shutdown,
220 * and avoid a deadlock with the usermode_lock.
221 @@ -1902,7 +1805,6 @@ static struct notifier_block fw_shutdown_nb = {
222
223 static int __init firmware_class_init(void)
224 {
225 - enable_firmware();
226 fw_cache_init();
227 register_reboot_notifier(&fw_shutdown_nb);
228 #ifdef CONFIG_FW_LOADER_USER_HELPER
229 @@ -1914,7 +1816,6 @@ static int __init firmware_class_init(void)
230
231 static void __exit firmware_class_exit(void)
232 {
233 - disable_firmware();
234 #ifdef CONFIG_PM_SLEEP
235 unregister_syscore_ops(&fw_syscore_ops);
236 unregister_pm_notifier(&fw_cache.pm_notify);
237 diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
238 index a764d5ca7536..5bedf7bc3d88 100644
239 --- a/drivers/mtd/nand/mxc_nand.c
240 +++ b/drivers/mtd/nand/mxc_nand.c
241 @@ -876,6 +876,8 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
242 }
243 }
244
245 +#define MXC_V1_ECCBYTES 5
246 +
247 static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section,
248 struct mtd_oob_region *oobregion)
249 {
250 @@ -885,7 +887,7 @@ static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section,
251 return -ERANGE;
252
253 oobregion->offset = (section * 16) + 6;
254 - oobregion->length = nand_chip->ecc.bytes;
255 + oobregion->length = MXC_V1_ECCBYTES;
256
257 return 0;
258 }
259 @@ -907,8 +909,7 @@ static int mxc_v1_ooblayout_free(struct mtd_info *mtd, int section,
260 oobregion->length = 4;
261 }
262 } else {
263 - oobregion->offset = ((section - 1) * 16) +
264 - nand_chip->ecc.bytes + 6;
265 + oobregion->offset = ((section - 1) * 16) + MXC_V1_ECCBYTES + 6;
266 if (section < nand_chip->ecc.steps)
267 oobregion->length = (section * 16) + 6 -
268 oobregion->offset;
269 diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
270 index c6c18b82f8f4..c05cf874cbb8 100644
271 --- a/drivers/mtd/nand/nand_base.c
272 +++ b/drivers/mtd/nand/nand_base.c
273 @@ -3993,10 +3993,13 @@ static void nand_manufacturer_detect(struct nand_chip *chip)
274 * nand_decode_ext_id() otherwise.
275 */
276 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
277 - chip->manufacturer.desc->ops->detect)
278 + chip->manufacturer.desc->ops->detect) {
279 + /* The 3rd id byte holds MLC / multichip data */
280 + chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
281 chip->manufacturer.desc->ops->detect(chip);
282 - else
283 + } else {
284 nand_decode_ext_id(chip);
285 + }
286 }
287
288 /*
289 diff --git a/drivers/mtd/nand/nand_hynix.c b/drivers/mtd/nand/nand_hynix.c
290 index b12dc7325378..bd9a6e343848 100644
291 --- a/drivers/mtd/nand/nand_hynix.c
292 +++ b/drivers/mtd/nand/nand_hynix.c
293 @@ -477,7 +477,7 @@ static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
294 * The ECC requirements field meaning depends on the
295 * NAND technology.
296 */
297 - u8 nand_tech = chip->id.data[5] & 0x3;
298 + u8 nand_tech = chip->id.data[5] & 0x7;
299
300 if (nand_tech < 3) {
301 /* > 26nm, reference: H27UBG8T2A datasheet */
302 @@ -533,7 +533,7 @@ static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
303 if (nand_tech > 0)
304 chip->options |= NAND_NEED_SCRAMBLING;
305 } else {
306 - nand_tech = chip->id.data[5] & 0x3;
307 + nand_tech = chip->id.data[5] & 0x7;
308
309 /* < 32nm */
310 if (nand_tech > 2)
311 diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
312 index 88af7145a51a..8928500b5bde 100644
313 --- a/drivers/mtd/nand/qcom_nandc.c
314 +++ b/drivers/mtd/nand/qcom_nandc.c
315 @@ -109,7 +109,11 @@
316 #define READ_ADDR 0
317
318 /* NAND_DEV_CMD_VLD bits */
319 -#define READ_START_VLD 0
320 +#define READ_START_VLD BIT(0)
321 +#define READ_STOP_VLD BIT(1)
322 +#define WRITE_START_VLD BIT(2)
323 +#define ERASE_START_VLD BIT(3)
324 +#define SEQ_READ_START_VLD BIT(4)
325
326 /* NAND_EBI2_ECC_BUF_CFG bits */
327 #define NUM_STEPS 0
328 @@ -148,6 +152,10 @@
329 #define FETCH_ID 0xb
330 #define RESET_DEVICE 0xd
331
332 +/* Default Value for NAND_DEV_CMD_VLD */
333 +#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
334 + ERASE_START_VLD | SEQ_READ_START_VLD)
335 +
336 /*
337 * the NAND controller performs reads/writes with ECC in 516 byte chunks.
338 * the driver calls the chunks 'step' or 'codeword' interchangeably
339 @@ -672,8 +680,7 @@ static int nandc_param(struct qcom_nand_host *host)
340
341 /* configure CMD1 and VLD for ONFI param probing */
342 nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
343 - (nandc->vld & ~(1 << READ_START_VLD))
344 - | 0 << READ_START_VLD);
345 + (nandc->vld & ~READ_START_VLD));
346 nandc_set_reg(nandc, NAND_DEV_CMD1,
347 (nandc->cmd1 & ~(0xFF << READ_ADDR))
348 | NAND_CMD_PARAM << READ_ADDR);
349 @@ -1893,7 +1900,7 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host)
350 | wide_bus << WIDE_FLASH
351 | 1 << DEV0_CFG1_ECC_DISABLE;
352
353 - host->ecc_bch_cfg = host->bch_enabled << ECC_CFG_ECC_DISABLE
354 + host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
355 | 0 << ECC_SW_RESET
356 | host->cw_data << ECC_NUM_DATA_BYTES
357 | 1 << ECC_FORCE_CLK_OPEN
358 @@ -1972,13 +1979,14 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
359 {
360 /* kill onenand */
361 nandc_write(nandc, SFLASHC_BURST_CFG, 0);
362 + nandc_write(nandc, NAND_DEV_CMD_VLD, NAND_DEV_CMD_VLD_VAL);
363
364 /* enable ADM DMA */
365 nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
366
367 /* save the original values of these registers */
368 nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
369 - nandc->vld = nandc_read(nandc, NAND_DEV_CMD_VLD);
370 + nandc->vld = NAND_DEV_CMD_VLD_VAL;
371
372 return 0;
373 }
374 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
375 index f1b60740e020..53ae30259989 100644
376 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
377 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
378 @@ -159,7 +159,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
379
380 brcmf_feat_firmware_capabilities(ifp);
381 memset(&gscan_cfg, 0, sizeof(gscan_cfg));
382 - if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID)
383 + if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID &&
384 + drvr->bus_if->chip != BRCM_CC_4345_CHIP_ID)
385 brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN,
386 "pfn_gscan_cfg",
387 &gscan_cfg, sizeof(gscan_cfg));
388 diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
389 index 0b75def39c6c..d2c289446c00 100644
390 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
391 +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
392 @@ -3702,7 +3702,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
393 if (rt2x00_rt(rt2x00dev, RT3572))
394 rt2800_rfcsr_write(rt2x00dev, 8, 0);
395
396 - tx_pin = rt2800_register_read(rt2x00dev, TX_PIN_CFG);
397 + if (rt2x00_rt(rt2x00dev, RT6352))
398 + tx_pin = rt2800_register_read(rt2x00dev, TX_PIN_CFG);
399 + else
400 + tx_pin = 0;
401
402 switch (rt2x00dev->default_ant.tx_chain_num) {
403 case 3:
404 diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
405 index 31965f0ef69d..e8f07573aed9 100644
406 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
407 +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
408 @@ -1183,7 +1183,10 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
409 }
410
411 /* fixed internal switch S1->WiFi, S0->BT */
412 - btcoexist->btc_write_4byte(btcoexist, 0x948, 0x0);
413 + if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
414 + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
415 + else
416 + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
417
418 switch (antpos_type) {
419 case BTC_ANT_WIFI_AT_MAIN:
420 diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
421 index e6024b013ca5..00eea3440290 100644
422 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
423 +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
424 @@ -173,6 +173,16 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
425
426 u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
427 {
428 + struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
429 +
430 + /* override ant_num / ant_path */
431 + if (mod_params->ant_sel) {
432 + rtlpriv->btcoexist.btc_info.ant_num =
433 + (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1);
434 +
435 + rtlpriv->btcoexist.btc_info.single_ant_path =
436 + (mod_params->ant_sel == 1 ? 0 : 1);
437 + }
438 return rtlpriv->btcoexist.btc_info.single_ant_path;
439 }
440
441 @@ -183,6 +193,7 @@ u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
442
443 u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
444 {
445 + struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
446 u8 num;
447
448 if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
449 @@ -190,6 +201,10 @@ u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
450 else
451 num = 1;
452
453 + /* override ant_num / ant_path */
454 + if (mod_params->ant_sel)
455 + num = (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1) + 1;
456 +
457 return num;
458 }
459
460 @@ -861,7 +876,7 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
461 {
462 struct btc_coexist *btcoexist = &gl_bt_coexist;
463 struct rtl_priv *rtlpriv = adapter;
464 - u8 ant_num = 2, chip_type, single_ant_path = 0;
465 + u8 ant_num = 2, chip_type;
466
467 if (btcoexist->binded)
468 return false;
469 @@ -896,12 +911,6 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
470 ant_num = rtl_get_hwpg_ant_num(rtlpriv);
471 exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_PG, ant_num);
472
473 - /* set default antenna position to main port */
474 - btcoexist->board_info.btdm_ant_pos = BTC_ANTENNA_AT_MAIN_PORT;
475 -
476 - single_ant_path = rtl_get_hwpg_single_ant_path(rtlpriv);
477 - exhalbtc_set_single_ant_path(single_ant_path);
478 -
479 if (rtl_get_hwpg_package_type(rtlpriv) == 0)
480 btcoexist->board_info.tfbga_package = false;
481 else if (rtl_get_hwpg_package_type(rtlpriv) == 1)
482 diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
483 index 5f5cd306f76d..ffa7191ddfa5 100644
484 --- a/drivers/nvme/host/fabrics.c
485 +++ b/drivers/nvme/host/fabrics.c
486 @@ -75,7 +75,7 @@ static struct nvmf_host *nvmf_host_default(void)
487
488 kref_init(&host->ref);
489 snprintf(host->nqn, NVMF_NQN_SIZE,
490 - "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
491 + "nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);
492
493 mutex_lock(&nvmf_hosts_mutex);
494 list_add_tail(&host->list, &nvmf_hosts);
495 diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
496 index 12540b6104b5..1618dac7bf74 100644
497 --- a/fs/btrfs/super.c
498 +++ b/fs/btrfs/super.c
499 @@ -1814,6 +1814,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
500 goto restore;
501 }
502
503 + btrfs_qgroup_rescan_resume(fs_info);
504 +
505 if (!fs_info->uuid_root) {
506 btrfs_info(fs_info, "creating UUID tree");
507 ret = btrfs_create_uuid_tree(fs_info);
508 diff --git a/fs/nfs/file.c b/fs/nfs/file.c
509 index af330c31f627..a85d1cf9b4a8 100644
510 --- a/fs/nfs/file.c
511 +++ b/fs/nfs/file.c
512 @@ -631,11 +631,11 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
513 if (result <= 0)
514 goto out;
515
516 - result = generic_write_sync(iocb, result);
517 - if (result < 0)
518 - goto out;
519 written = result;
520 iocb->ki_pos += written;
521 + result = generic_write_sync(iocb, written);
522 + if (result < 0)
523 + goto out;
524
525 /* Return error values */
526 if (nfs_need_check_write(file, inode)) {
527 diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
528 index dc456416d2be..68cc22083639 100644
529 --- a/fs/nfs/internal.h
530 +++ b/fs/nfs/internal.h
531 @@ -251,7 +251,6 @@ int nfs_iocounter_wait(struct nfs_lock_context *l_ctx);
532 extern const struct nfs_pageio_ops nfs_pgio_rw_ops;
533 struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *);
534 void nfs_pgio_header_free(struct nfs_pgio_header *);
535 -void nfs_pgio_data_destroy(struct nfs_pgio_header *);
536 int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
537 int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
538 struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
539 diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
540 index de9066a92c0d..d291e6e72573 100644
541 --- a/fs/nfs/pagelist.c
542 +++ b/fs/nfs/pagelist.c
543 @@ -530,16 +530,6 @@ struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
544 }
545 EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);
546
547 -/*
548 - * nfs_pgio_header_free - Free a read or write header
549 - * @hdr: The header to free
550 - */
551 -void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
552 -{
553 - hdr->rw_ops->rw_free_header(hdr);
554 -}
555 -EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
556 -
557 /**
558 * nfs_pgio_data_destroy - make @hdr suitable for reuse
559 *
560 @@ -548,14 +538,24 @@ EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
561 *
562 * @hdr: A header that has had nfs_generic_pgio called
563 */
564 -void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
565 +static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
566 {
567 if (hdr->args.context)
568 put_nfs_open_context(hdr->args.context);
569 if (hdr->page_array.pagevec != hdr->page_array.page_array)
570 kfree(hdr->page_array.pagevec);
571 }
572 -EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);
573 +
574 +/*
575 + * nfs_pgio_header_free - Free a read or write header
576 + * @hdr: The header to free
577 + */
578 +void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
579 +{
580 + nfs_pgio_data_destroy(hdr);
581 + hdr->rw_ops->rw_free_header(hdr);
582 +}
583 +EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
584
585 /**
586 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
587 @@ -669,7 +669,6 @@ EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
588 static void nfs_pgio_error(struct nfs_pgio_header *hdr)
589 {
590 set_bit(NFS_IOHDR_REDO, &hdr->flags);
591 - nfs_pgio_data_destroy(hdr);
592 hdr->completion_ops->completion(hdr);
593 }
594
595 @@ -680,7 +679,6 @@ static void nfs_pgio_error(struct nfs_pgio_header *hdr)
596 static void nfs_pgio_release(void *calldata)
597 {
598 struct nfs_pgio_header *hdr = calldata;
599 - nfs_pgio_data_destroy(hdr);
600 hdr->completion_ops->completion(hdr);
601 }
602
603 @@ -714,9 +712,6 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
604 int io_flags,
605 gfp_t gfp_flags)
606 {
607 - struct nfs_pgio_mirror *new;
608 - int i;
609 -
610 desc->pg_moreio = 0;
611 desc->pg_inode = inode;
612 desc->pg_ops = pg_ops;
613 @@ -732,21 +727,9 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
614 desc->pg_mirror_count = 1;
615 desc->pg_mirror_idx = 0;
616
617 - if (pg_ops->pg_get_mirror_count) {
618 - /* until we have a request, we don't have an lseg and no
619 - * idea how many mirrors there will be */
620 - new = kcalloc(NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX,
621 - sizeof(struct nfs_pgio_mirror), gfp_flags);
622 - desc->pg_mirrors_dynamic = new;
623 - desc->pg_mirrors = new;
624 -
625 - for (i = 0; i < NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX; i++)
626 - nfs_pageio_mirror_init(&desc->pg_mirrors[i], bsize);
627 - } else {
628 - desc->pg_mirrors_dynamic = NULL;
629 - desc->pg_mirrors = desc->pg_mirrors_static;
630 - nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
631 - }
632 + desc->pg_mirrors_dynamic = NULL;
633 + desc->pg_mirrors = desc->pg_mirrors_static;
634 + nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
635 }
636 EXPORT_SYMBOL_GPL(nfs_pageio_init);
637
638 @@ -865,32 +848,52 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
639 return ret;
640 }
641
642 +static struct nfs_pgio_mirror *
643 +nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
644 + unsigned int mirror_count)
645 +{
646 + struct nfs_pgio_mirror *ret;
647 + unsigned int i;
648 +
649 + kfree(desc->pg_mirrors_dynamic);
650 + desc->pg_mirrors_dynamic = NULL;
651 + if (mirror_count == 1)
652 + return desc->pg_mirrors_static;
653 + ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_NOFS);
654 + if (ret != NULL) {
655 + for (i = 0; i < mirror_count; i++)
656 + nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
657 + desc->pg_mirrors_dynamic = ret;
658 + }
659 + return ret;
660 +}
661 +
662 /*
663 * nfs_pageio_setup_mirroring - determine if mirroring is to be used
664 * by calling the pg_get_mirror_count op
665 */
666 -static int nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
667 +static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
668 struct nfs_page *req)
669 {
670 - int mirror_count = 1;
671 + unsigned int mirror_count = 1;
672
673 - if (!pgio->pg_ops->pg_get_mirror_count)
674 - return 0;
675 -
676 - mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
677 -
678 - if (pgio->pg_error < 0)
679 - return pgio->pg_error;
680 -
681 - if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX)
682 - return -EINVAL;
683 + if (pgio->pg_ops->pg_get_mirror_count)
684 + mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
685 + if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
686 + return;
687
688 - if (WARN_ON_ONCE(!pgio->pg_mirrors_dynamic))
689 - return -EINVAL;
690 + if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
691 + pgio->pg_error = -EINVAL;
692 + return;
693 + }
694
695 + pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
696 + if (pgio->pg_mirrors == NULL) {
697 + pgio->pg_error = -ENOMEM;
698 + pgio->pg_mirrors = pgio->pg_mirrors_static;
699 + mirror_count = 1;
700 + }
701 pgio->pg_mirror_count = mirror_count;
702 -
703 - return 0;
704 }
705
706 /*
707 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
708 index c383d0913b54..64bb20130edf 100644
709 --- a/fs/nfs/pnfs.c
710 +++ b/fs/nfs/pnfs.c
711 @@ -2274,7 +2274,6 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
712 nfs_pageio_reset_write_mds(desc);
713 mirror->pg_recoalesce = 1;
714 }
715 - nfs_pgio_data_destroy(hdr);
716 hdr->release(hdr);
717 }
718
719 @@ -2398,7 +2397,6 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
720 nfs_pageio_reset_read_mds(desc);
721 mirror->pg_recoalesce = 1;
722 }
723 - nfs_pgio_data_destroy(hdr);
724 hdr->release(hdr);
725 }
726
727 diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
728 index 9301c5a6060b..dcd1292664b3 100644
729 --- a/fs/xfs/xfs_linux.h
730 +++ b/fs/xfs/xfs_linux.h
731 @@ -270,7 +270,14 @@ static inline uint64_t howmany_64(uint64_t x, uint32_t y)
732 #endif /* DEBUG */
733
734 #ifdef CONFIG_XFS_RT
735 -#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
736 +
737 +/*
738 + * make sure we ignore the inode flag if the filesystem doesn't have a
739 + * configured realtime device.
740 + */
741 +#define XFS_IS_REALTIME_INODE(ip) \
742 + (((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) && \
743 + (ip)->i_mount->m_rtdev_targp)
744 #else
745 #define XFS_IS_REALTIME_INODE(ip) (0)
746 #endif
747 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
748 index 898e87998417..79a804f1aab9 100644
749 --- a/lib/radix-tree.c
750 +++ b/lib/radix-tree.c
751 @@ -463,7 +463,7 @@ radix_tree_node_free(struct radix_tree_node *node)
752 * To make use of this facility, the radix tree must be initialised without
753 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
754 */
755 -static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
756 +static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
757 {
758 struct radix_tree_preload *rtp;
759 struct radix_tree_node *node;
760 @@ -2103,7 +2103,8 @@ EXPORT_SYMBOL(radix_tree_tagged);
761 */
762 void idr_preload(gfp_t gfp_mask)
763 {
764 - __radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE);
765 + if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
766 + preempt_disable();
767 }
768 EXPORT_SYMBOL(idr_preload);
769
770 @@ -2117,13 +2118,13 @@ EXPORT_SYMBOL(idr_preload);
771 */
772 int ida_pre_get(struct ida *ida, gfp_t gfp)
773 {
774 - __radix_tree_preload(gfp, IDA_PRELOAD_SIZE);
775 /*
776 * The IDA API has no preload_end() equivalent. Instead,
777 * ida_get_new() can return -EAGAIN, prompting the caller
778 * to return to the ida_pre_get() step.
779 */
780 - preempt_enable();
781 + if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
782 + preempt_enable();
783
784 if (!this_cpu_read(ida_bitmap)) {
785 struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
786 diff --git a/mm/memory.c b/mm/memory.c
787 index 56e48e4593cb..274547075486 100644
788 --- a/mm/memory.c
789 +++ b/mm/memory.c
790 @@ -3888,6 +3888,11 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
791 /* do counter updates before entering really critical section. */
792 check_sync_rss_stat(current);
793
794 + if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
795 + flags & FAULT_FLAG_INSTRUCTION,
796 + flags & FAULT_FLAG_REMOTE))
797 + return VM_FAULT_SIGSEGV;
798 +
799 /*
800 * Enable the memcg OOM handling for faults triggered in user
801 * space. Kernel faults are handled more gracefully.
802 @@ -3895,11 +3900,6 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
803 if (flags & FAULT_FLAG_USER)
804 mem_cgroup_oom_enable();
805
806 - if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
807 - flags & FAULT_FLAG_INSTRUCTION,
808 - flags & FAULT_FLAG_REMOTE))
809 - return VM_FAULT_SIGSEGV;
810 -
811 if (unlikely(is_vm_hugetlb_page(vma)))
812 ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
813 else
814 diff --git a/mm/sparse.c b/mm/sparse.c
815 index 7b4be3fd5cac..cdce7a7bb3f3 100644
816 --- a/mm/sparse.c
817 +++ b/mm/sparse.c
818 @@ -630,7 +630,7 @@ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
819 unsigned long pfn;
820
821 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
822 - unsigned long section_nr = pfn_to_section_nr(start_pfn);
823 + unsigned long section_nr = pfn_to_section_nr(pfn);
824 struct mem_section *ms;
825
826 /* onlining code should never touch invalid ranges */
827 diff --git a/mm/swapfile.c b/mm/swapfile.c
828 index 6ba4aab2db0b..a8952b6563c6 100644
829 --- a/mm/swapfile.c
830 +++ b/mm/swapfile.c
831 @@ -3052,7 +3052,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
832 p->flags = 0;
833 spin_unlock(&swap_lock);
834 vfree(swap_map);
835 - vfree(cluster_info);
836 + kvfree(cluster_info);
837 + kvfree(frontswap_map);
838 if (swap_file) {
839 if (inode && S_ISREG(inode->i_mode)) {
840 inode_unlock(inode);
841 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
842 index 303c779bfe38..43ba91c440bc 100644
843 --- a/net/bluetooth/l2cap_core.c
844 +++ b/net/bluetooth/l2cap_core.c
845 @@ -58,7 +58,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
846 u8 code, u8 ident, u16 dlen, void *data);
847 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
848 void *data);
849 -static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
850 +static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
851 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
852
853 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
854 @@ -1473,7 +1473,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
855
856 set_bit(CONF_REQ_SENT, &chan->conf_state);
857 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
858 - l2cap_build_conf_req(chan, buf), buf);
859 + l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
860 chan->num_conf_req++;
861 }
862
863 @@ -2987,12 +2987,15 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
864 return len;
865 }
866
867 -static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
868 +static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
869 {
870 struct l2cap_conf_opt *opt = *ptr;
871
872 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
873
874 + if (size < L2CAP_CONF_OPT_SIZE + len)
875 + return;
876 +
877 opt->type = type;
878 opt->len = len;
879
880 @@ -3017,7 +3020,7 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
881 *ptr += L2CAP_CONF_OPT_SIZE + len;
882 }
883
884 -static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
885 +static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
886 {
887 struct l2cap_conf_efs efs;
888
889 @@ -3045,7 +3048,7 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
890 }
891
892 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
893 - (unsigned long) &efs);
894 + (unsigned long) &efs, size);
895 }
896
897 static void l2cap_ack_timeout(struct work_struct *work)
898 @@ -3191,11 +3194,12 @@ static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
899 chan->ack_win = chan->tx_win;
900 }
901
902 -static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
903 +static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
904 {
905 struct l2cap_conf_req *req = data;
906 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
907 void *ptr = req->data;
908 + void *endptr = data + data_size;
909 u16 size;
910
911 BT_DBG("chan %p", chan);
912 @@ -3220,7 +3224,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
913
914 done:
915 if (chan->imtu != L2CAP_DEFAULT_MTU)
916 - l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
917 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
918
919 switch (chan->mode) {
920 case L2CAP_MODE_BASIC:
921 @@ -3239,7 +3243,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
922 rfc.max_pdu_size = 0;
923
924 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
925 - (unsigned long) &rfc);
926 + (unsigned long) &rfc, endptr - ptr);
927 break;
928
929 case L2CAP_MODE_ERTM:
930 @@ -3259,21 +3263,21 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
931 L2CAP_DEFAULT_TX_WINDOW);
932
933 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
934 - (unsigned long) &rfc);
935 + (unsigned long) &rfc, endptr - ptr);
936
937 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
938 - l2cap_add_opt_efs(&ptr, chan);
939 + l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
940
941 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
942 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
943 - chan->tx_win);
944 + chan->tx_win, endptr - ptr);
945
946 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
947 if (chan->fcs == L2CAP_FCS_NONE ||
948 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
949 chan->fcs = L2CAP_FCS_NONE;
950 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
951 - chan->fcs);
952 + chan->fcs, endptr - ptr);
953 }
954 break;
955
956 @@ -3291,17 +3295,17 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
957 rfc.max_pdu_size = cpu_to_le16(size);
958
959 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
960 - (unsigned long) &rfc);
961 + (unsigned long) &rfc, endptr - ptr);
962
963 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
964 - l2cap_add_opt_efs(&ptr, chan);
965 + l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
966
967 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
968 if (chan->fcs == L2CAP_FCS_NONE ||
969 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
970 chan->fcs = L2CAP_FCS_NONE;
971 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
972 - chan->fcs);
973 + chan->fcs, endptr - ptr);
974 }
975 break;
976 }
977 @@ -3312,10 +3316,11 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
978 return ptr - data;
979 }
980
981 -static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
982 +static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
983 {
984 struct l2cap_conf_rsp *rsp = data;
985 void *ptr = rsp->data;
986 + void *endptr = data + data_size;
987 void *req = chan->conf_req;
988 int len = chan->conf_len;
989 int type, hint, olen;
990 @@ -3417,7 +3422,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
991 return -ECONNREFUSED;
992
993 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
994 - (unsigned long) &rfc);
995 + (unsigned long) &rfc, endptr - ptr);
996 }
997
998 if (result == L2CAP_CONF_SUCCESS) {
999 @@ -3430,7 +3435,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1000 chan->omtu = mtu;
1001 set_bit(CONF_MTU_DONE, &chan->conf_state);
1002 }
1003 - l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
1004 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
1005
1006 if (remote_efs) {
1007 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
1008 @@ -3444,7 +3449,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1009
1010 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
1011 sizeof(efs),
1012 - (unsigned long) &efs);
1013 + (unsigned long) &efs, endptr - ptr);
1014 } else {
1015 /* Send PENDING Conf Rsp */
1016 result = L2CAP_CONF_PENDING;
1017 @@ -3477,7 +3482,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1018 set_bit(CONF_MODE_DONE, &chan->conf_state);
1019
1020 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1021 - sizeof(rfc), (unsigned long) &rfc);
1022 + sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
1023
1024 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
1025 chan->remote_id = efs.id;
1026 @@ -3491,7 +3496,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1027 le32_to_cpu(efs.sdu_itime);
1028 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
1029 sizeof(efs),
1030 - (unsigned long) &efs);
1031 + (unsigned long) &efs, endptr - ptr);
1032 }
1033 break;
1034
1035 @@ -3505,7 +3510,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1036 set_bit(CONF_MODE_DONE, &chan->conf_state);
1037
1038 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1039 - (unsigned long) &rfc);
1040 + (unsigned long) &rfc, endptr - ptr);
1041
1042 break;
1043
1044 @@ -3527,10 +3532,11 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1045 }
1046
1047 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
1048 - void *data, u16 *result)
1049 + void *data, size_t size, u16 *result)
1050 {
1051 struct l2cap_conf_req *req = data;
1052 void *ptr = req->data;
1053 + void *endptr = data + size;
1054 int type, olen;
1055 unsigned long val;
1056 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1057 @@ -3548,13 +3554,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
1058 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
1059 } else
1060 chan->imtu = val;
1061 - l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1062 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
1063 break;
1064
1065 case L2CAP_CONF_FLUSH_TO:
1066 chan->flush_to = val;
1067 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1068 - 2, chan->flush_to);
1069 + 2, chan->flush_to, endptr - ptr);
1070 break;
1071
1072 case L2CAP_CONF_RFC:
1073 @@ -3568,13 +3574,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
1074 chan->fcs = 0;
1075
1076 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1077 - sizeof(rfc), (unsigned long) &rfc);
1078 + sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
1079 break;
1080
1081 case L2CAP_CONF_EWS:
1082 chan->ack_win = min_t(u16, val, chan->ack_win);
1083 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
1084 - chan->tx_win);
1085 + chan->tx_win, endptr - ptr);
1086 break;
1087
1088 case L2CAP_CONF_EFS:
1089 @@ -3587,7 +3593,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
1090 return -ECONNREFUSED;
1091
1092 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
1093 - (unsigned long) &efs);
1094 + (unsigned long) &efs, endptr - ptr);
1095 break;
1096
1097 case L2CAP_CONF_FCS:
1098 @@ -3692,7 +3698,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
1099 return;
1100
1101 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1102 - l2cap_build_conf_req(chan, buf), buf);
1103 + l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1104 chan->num_conf_req++;
1105 }
1106
1107 @@ -3900,7 +3906,7 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
1108 u8 buf[128];
1109 set_bit(CONF_REQ_SENT, &chan->conf_state);
1110 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1111 - l2cap_build_conf_req(chan, buf), buf);
1112 + l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1113 chan->num_conf_req++;
1114 }
1115
1116 @@ -3978,7 +3984,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
1117 break;
1118
1119 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1120 - l2cap_build_conf_req(chan, req), req);
1121 + l2cap_build_conf_req(chan, req, sizeof(req)), req);
1122 chan->num_conf_req++;
1123 break;
1124
1125 @@ -4090,7 +4096,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
1126 }
1127
1128 /* Complete config. */
1129 - len = l2cap_parse_conf_req(chan, rsp);
1130 + len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
1131 if (len < 0) {
1132 l2cap_send_disconn_req(chan, ECONNRESET);
1133 goto unlock;
1134 @@ -4124,7 +4130,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
1135 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
1136 u8 buf[64];
1137 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1138 - l2cap_build_conf_req(chan, buf), buf);
1139 + l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1140 chan->num_conf_req++;
1141 }
1142
1143 @@ -4184,7 +4190,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
1144 char buf[64];
1145
1146 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
1147 - buf, &result);
1148 + buf, sizeof(buf), &result);
1149 if (len < 0) {
1150 l2cap_send_disconn_req(chan, ECONNRESET);
1151 goto done;
1152 @@ -4214,7 +4220,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
1153 /* throw out any old stored conf requests */
1154 result = L2CAP_CONF_SUCCESS;
1155 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
1156 - req, &result);
1157 + req, sizeof(req), &result);
1158 if (len < 0) {
1159 l2cap_send_disconn_req(chan, ECONNRESET);
1160 goto done;
1161 @@ -4791,7 +4797,7 @@ static void l2cap_do_create(struct l2cap_chan *chan, int result,
1162 set_bit(CONF_REQ_SENT, &chan->conf_state);
1163 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
1164 L2CAP_CONF_REQ,
1165 - l2cap_build_conf_req(chan, buf), buf);
1166 + l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1167 chan->num_conf_req++;
1168 }
1169 }
1170 @@ -7465,7 +7471,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
1171 set_bit(CONF_REQ_SENT, &chan->conf_state);
1172 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1173 L2CAP_CONF_REQ,
1174 - l2cap_build_conf_req(chan, buf),
1175 + l2cap_build_conf_req(chan, buf, sizeof(buf)),
1176 buf);
1177 chan->num_conf_req++;
1178 }
1179 diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
1180 index a9b86133b9b3..dfa916e651fb 100644
1181 --- a/tools/testing/selftests/timers/Makefile
1182 +++ b/tools/testing/selftests/timers/Makefile
1183 @@ -14,20 +14,20 @@ TEST_GEN_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex adjtick change_skew
1184
1185 include ../lib.mk
1186
1187 +define RUN_DESTRUCTIVE_TESTS
1188 + @for TEST in $(TEST_GEN_PROGS_EXTENDED); do \
1189 + BASENAME_TEST=`basename $$TEST`; \
1190 + if [ ! -x $$BASENAME_TEST ]; then \
1191 + echo "selftests: Warning: file $$BASENAME_TEST is not executable, correct this.";\
1192 + echo "selftests: $$BASENAME_TEST [FAIL]"; \
1193 + else \
1194 + cd `dirname $$TEST`; (./$$BASENAME_TEST && echo "selftests: $$BASENAME_TEST [PASS]") || echo "selftests: $$BASENAME_TEST [FAIL]"; cd -;\
1195 + fi; \
1196 + done;
1197 +endef
1198 +
1199 # these tests require escalated privileges
1200 # and may modify the system time or trigger
1201 # other behavior like suspend
1202 run_destructive_tests: run_tests
1203 - ./alarmtimer-suspend
1204 - ./valid-adjtimex
1205 - ./adjtick
1206 - ./change_skew
1207 - ./skew_consistency
1208 - ./clocksource-switch
1209 - ./freq-step
1210 - ./leap-a-day -s -i 10
1211 - ./leapcrash
1212 - ./set-tz
1213 - ./set-tai
1214 - ./set-2038
1215 -
1216 + $(RUN_DESTRUCTIVE_TESTS)
1217 diff --git a/tools/testing/selftests/timers/leap-a-day.c b/tools/testing/selftests/timers/leap-a-day.c
1218 index fb46ad6ac92c..067017634057 100644
1219 --- a/tools/testing/selftests/timers/leap-a-day.c
1220 +++ b/tools/testing/selftests/timers/leap-a-day.c
1221 @@ -190,18 +190,18 @@ int main(int argc, char **argv)
1222 struct sigevent se;
1223 struct sigaction act;
1224 int signum = SIGRTMAX;
1225 - int settime = 0;
1226 + int settime = 1;
1227 int tai_time = 0;
1228 int insert = 1;
1229 - int iterations = -1;
1230 + int iterations = 10;
1231 int opt;
1232
1233 /* Process arguments */
1234 while ((opt = getopt(argc, argv, "sti:")) != -1) {
1235 switch (opt) {
1236 - case 's':
1237 - printf("Setting time to speed up testing\n");
1238 - settime = 1;
1239 + case 'w':
1240 + printf("Only setting leap-flag, not changing time. It could take up to a day for leap to trigger.\n");
1241 + settime = 0;
1242 break;
1243 case 'i':
1244 iterations = atoi(optarg);
1245 @@ -210,9 +210,10 @@ int main(int argc, char **argv)
1246 tai_time = 1;
1247 break;
1248 default:
1249 - printf("Usage: %s [-s] [-i <iterations>]\n", argv[0]);
1250 - printf(" -s: Set time to right before leap second each iteration\n");
1251 - printf(" -i: Number of iterations\n");
1252 + printf("Usage: %s [-w] [-i <iterations>]\n", argv[0]);
1253 + printf(" -w: Set flag and wait for leap second each iteration");
1254 + printf(" (default sets time to right before leapsecond)\n");
1255 + printf(" -i: Number of iterations (-1 = infinite, default is 10)\n");
1256 printf(" -t: Print TAI time\n");
1257 exit(-1);
1258 }
1259 diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
1260 index b4967d875236..f249e042b3b5 100644
1261 --- a/tools/testing/selftests/x86/fsgsbase.c
1262 +++ b/tools/testing/selftests/x86/fsgsbase.c
1263 @@ -285,9 +285,12 @@ static void *threadproc(void *ctx)
1264 }
1265 }
1266
1267 -static void set_gs_and_switch_to(unsigned long local, unsigned long remote)
1268 +static void set_gs_and_switch_to(unsigned long local,
1269 + unsigned short force_sel,
1270 + unsigned long remote)
1271 {
1272 unsigned long base;
1273 + unsigned short sel_pre_sched, sel_post_sched;
1274
1275 bool hard_zero = false;
1276 if (local == HARD_ZERO) {
1277 @@ -297,6 +300,8 @@ static void set_gs_and_switch_to(unsigned long local, unsigned long remote)
1278
1279 printf("[RUN]\tARCH_SET_GS(0x%lx)%s, then schedule to 0x%lx\n",
1280 local, hard_zero ? " and clear gs" : "", remote);
1281 + if (force_sel)
1282 + printf("\tBefore schedule, set selector to 0x%hx\n", force_sel);
1283 if (syscall(SYS_arch_prctl, ARCH_SET_GS, local) != 0)
1284 err(1, "ARCH_SET_GS");
1285 if (hard_zero)
1286 @@ -307,18 +312,35 @@ static void set_gs_and_switch_to(unsigned long local, unsigned long remote)
1287 printf("[FAIL]\tGSBASE wasn't set as expected\n");
1288 }
1289
1290 + if (force_sel) {
1291 + asm volatile ("mov %0, %%gs" : : "rm" (force_sel));
1292 + sel_pre_sched = force_sel;
1293 + local = read_base(GS);
1294 +
1295 + /*
1296 + * Signal delivery seems to mess up weird selectors. Put it
1297 + * back.
1298 + */
1299 + asm volatile ("mov %0, %%gs" : : "rm" (force_sel));
1300 + } else {
1301 + asm volatile ("mov %%gs, %0" : "=rm" (sel_pre_sched));
1302 + }
1303 +
1304 remote_base = remote;
1305 ftx = 1;
1306 syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
1307 while (ftx != 0)
1308 syscall(SYS_futex, &ftx, FUTEX_WAIT, 1, NULL, NULL, 0);
1309
1310 + asm volatile ("mov %%gs, %0" : "=rm" (sel_post_sched));
1311 base = read_base(GS);
1312 - if (base == local) {
1313 - printf("[OK]\tGSBASE remained 0x%lx\n", local);
1314 + if (base == local && sel_pre_sched == sel_post_sched) {
1315 + printf("[OK]\tGS/BASE remained 0x%hx/0x%lx\n",
1316 + sel_pre_sched, local);
1317 } else {
1318 nerrs++;
1319 - printf("[FAIL]\tGSBASE changed to 0x%lx\n", base);
1320 + printf("[FAIL]\tGS/BASE changed from 0x%hx/0x%lx to 0x%hx/0x%lx\n",
1321 + sel_pre_sched, local, sel_post_sched, base);
1322 }
1323 }
1324
1325 @@ -381,8 +403,15 @@ int main()
1326
1327 for (int local = 0; local < 4; local++) {
1328 for (int remote = 0; remote < 4; remote++) {
1329 - set_gs_and_switch_to(bases_with_hard_zero[local],
1330 - bases_with_hard_zero[remote]);
1331 + for (unsigned short s = 0; s < 5; s++) {
1332 + unsigned short sel = s;
1333 + if (s == 4)
1334 + asm ("mov %%ss, %0" : "=rm" (sel));
1335 + set_gs_and_switch_to(
1336 + bases_with_hard_zero[local],
1337 + sel,
1338 + bases_with_hard_zero[remote]);
1339 + }
1340 }
1341 }
1342