Contents of /trunk/kernel-alx-legacy/patches-4.9/0299-4.9.200-all-fixes.patch
Parent Directory | Revision Log
Revision 3608 -
(show annotations)
(download)
Fri Aug 14 07:34:29 2020 UTC (4 years, 1 month ago) by niro
File size: 48972 byte(s)
-added kernel-alx-legacy pkg
1 | diff --git a/Makefile b/Makefile |
2 | index b7f6639f4e7a..84410351b27c 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 4 |
7 | PATCHLEVEL = 9 |
8 | -SUBLEVEL = 199 |
9 | +SUBLEVEL = 200 |
10 | EXTRAVERSION = |
11 | NAME = Roaring Lionus |
12 | |
13 | @@ -834,6 +834,18 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=date-time) |
14 | # enforce correct pointer usage |
15 | KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types) |
16 | |
17 | +# Require designated initializers for all marked structures |
18 | +KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init) |
19 | + |
20 | +# change __FILE__ to the relative path from the srctree |
21 | +KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) |
22 | + |
23 | +# ensure -fcf-protection is disabled when using retpoline as it is |
24 | +# incompatible with -mindirect-branch=thunk-extern |
25 | +ifdef CONFIG_RETPOLINE |
26 | +KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) |
27 | +endif |
28 | + |
29 | # use the deterministic mode of AR if available |
30 | KBUILD_ARFLAGS := $(call ar-option,D) |
31 | |
32 | diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi |
33 | index edc5ddeb851a..0a7ea1a765f9 100644 |
34 | --- a/arch/arm/boot/dts/imx7s.dtsi |
35 | +++ b/arch/arm/boot/dts/imx7s.dtsi |
36 | @@ -437,7 +437,7 @@ |
37 | compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt"; |
38 | reg = <0x302d0000 0x10000>; |
39 | interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>; |
40 | - clocks = <&clks IMX7D_CLK_DUMMY>, |
41 | + clocks = <&clks IMX7D_GPT1_ROOT_CLK>, |
42 | <&clks IMX7D_GPT1_ROOT_CLK>; |
43 | clock-names = "ipg", "per"; |
44 | }; |
45 | @@ -446,7 +446,7 @@ |
46 | compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt"; |
47 | reg = <0x302e0000 0x10000>; |
48 | interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>; |
49 | - clocks = <&clks IMX7D_CLK_DUMMY>, |
50 | + clocks = <&clks IMX7D_GPT2_ROOT_CLK>, |
51 | <&clks IMX7D_GPT2_ROOT_CLK>; |
52 | clock-names = "ipg", "per"; |
53 | status = "disabled"; |
54 | @@ -456,7 +456,7 @@ |
55 | compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt"; |
56 | reg = <0x302f0000 0x10000>; |
57 | interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>; |
58 | - clocks = <&clks IMX7D_CLK_DUMMY>, |
59 | + clocks = <&clks IMX7D_GPT3_ROOT_CLK>, |
60 | <&clks IMX7D_GPT3_ROOT_CLK>; |
61 | clock-names = "ipg", "per"; |
62 | status = "disabled"; |
63 | @@ -466,7 +466,7 @@ |
64 | compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt"; |
65 | reg = <0x30300000 0x10000>; |
66 | interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>; |
67 | - clocks = <&clks IMX7D_CLK_DUMMY>, |
68 | + clocks = <&clks IMX7D_GPT4_ROOT_CLK>, |
69 | <&clks IMX7D_GPT4_ROOT_CLK>; |
70 | clock-names = "ipg", "per"; |
71 | status = "disabled"; |
72 | diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi |
73 | index ceb49d15d243..20ee7ca8c653 100644 |
74 | --- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi |
75 | +++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi |
76 | @@ -266,3 +266,7 @@ |
77 | &twl_gpio { |
78 | ti,use-leds; |
79 | }; |
80 | + |
81 | +&twl_keypad { |
82 | + status = "disabled"; |
83 | +}; |
84 | diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c |
85 | index ef3add999263..8db549c56914 100644 |
86 | --- a/arch/arm/mach-davinci/dm365.c |
87 | +++ b/arch/arm/mach-davinci/dm365.c |
88 | @@ -864,8 +864,8 @@ static s8 dm365_queue_priority_mapping[][2] = { |
89 | }; |
90 | |
91 | static const struct dma_slave_map dm365_edma_map[] = { |
92 | - { "davinci-mcbsp.0", "tx", EDMA_FILTER_PARAM(0, 2) }, |
93 | - { "davinci-mcbsp.0", "rx", EDMA_FILTER_PARAM(0, 3) }, |
94 | + { "davinci-mcbsp", "tx", EDMA_FILTER_PARAM(0, 2) }, |
95 | + { "davinci-mcbsp", "rx", EDMA_FILTER_PARAM(0, 3) }, |
96 | { "davinci_voicecodec", "tx", EDMA_FILTER_PARAM(0, 2) }, |
97 | { "davinci_voicecodec", "rx", EDMA_FILTER_PARAM(0, 3) }, |
98 | { "spi_davinci.2", "tx", EDMA_FILTER_PARAM(0, 10) }, |
99 | diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c |
100 | index 7d5f4c736a16..cd18eda014c2 100644 |
101 | --- a/arch/arm/mm/alignment.c |
102 | +++ b/arch/arm/mm/alignment.c |
103 | @@ -767,6 +767,36 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs, |
104 | return NULL; |
105 | } |
106 | |
107 | +static int alignment_get_arm(struct pt_regs *regs, u32 *ip, unsigned long *inst) |
108 | +{ |
109 | + u32 instr = 0; |
110 | + int fault; |
111 | + |
112 | + if (user_mode(regs)) |
113 | + fault = get_user(instr, ip); |
114 | + else |
115 | + fault = probe_kernel_address(ip, instr); |
116 | + |
117 | + *inst = __mem_to_opcode_arm(instr); |
118 | + |
119 | + return fault; |
120 | +} |
121 | + |
122 | +static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst) |
123 | +{ |
124 | + u16 instr = 0; |
125 | + int fault; |
126 | + |
127 | + if (user_mode(regs)) |
128 | + fault = get_user(instr, ip); |
129 | + else |
130 | + fault = probe_kernel_address(ip, instr); |
131 | + |
132 | + *inst = __mem_to_opcode_thumb16(instr); |
133 | + |
134 | + return fault; |
135 | +} |
136 | + |
137 | static int |
138 | do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) |
139 | { |
140 | @@ -774,10 +804,10 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) |
141 | unsigned long instr = 0, instrptr; |
142 | int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs); |
143 | unsigned int type; |
144 | - unsigned int fault; |
145 | u16 tinstr = 0; |
146 | int isize = 4; |
147 | int thumb2_32b = 0; |
148 | + int fault; |
149 | |
150 | if (interrupts_enabled(regs)) |
151 | local_irq_enable(); |
152 | @@ -786,15 +816,14 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) |
153 | |
154 | if (thumb_mode(regs)) { |
155 | u16 *ptr = (u16 *)(instrptr & ~1); |
156 | - fault = probe_kernel_address(ptr, tinstr); |
157 | - tinstr = __mem_to_opcode_thumb16(tinstr); |
158 | + |
159 | + fault = alignment_get_thumb(regs, ptr, &tinstr); |
160 | if (!fault) { |
161 | if (cpu_architecture() >= CPU_ARCH_ARMv7 && |
162 | IS_T32(tinstr)) { |
163 | /* Thumb-2 32-bit */ |
164 | - u16 tinst2 = 0; |
165 | - fault = probe_kernel_address(ptr + 1, tinst2); |
166 | - tinst2 = __mem_to_opcode_thumb16(tinst2); |
167 | + u16 tinst2; |
168 | + fault = alignment_get_thumb(regs, ptr + 1, &tinst2); |
169 | instr = __opcode_thumb32_compose(tinstr, tinst2); |
170 | thumb2_32b = 1; |
171 | } else { |
172 | @@ -803,8 +832,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) |
173 | } |
174 | } |
175 | } else { |
176 | - fault = probe_kernel_address((void *)instrptr, instr); |
177 | - instr = __mem_to_opcode_arm(instr); |
178 | + fault = alignment_get_arm(regs, (void *)instrptr, &instr); |
179 | } |
180 | |
181 | if (fault) { |
182 | diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c |
183 | index 7019e2967009..bbbf8057565b 100644 |
184 | --- a/arch/mips/bcm63xx/prom.c |
185 | +++ b/arch/mips/bcm63xx/prom.c |
186 | @@ -84,7 +84,7 @@ void __init prom_init(void) |
187 | * Here we will start up CPU1 in the background and ask it to |
188 | * reconfigure itself then go back to sleep. |
189 | */ |
190 | - memcpy((void *)0xa0000200, &bmips_smp_movevec, 0x20); |
191 | + memcpy((void *)0xa0000200, bmips_smp_movevec, 0x20); |
192 | __sync(); |
193 | set_c0_cause(C_SW0); |
194 | cpumask_set_cpu(1, &bmips_booted_mask); |
195 | diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h |
196 | index a92aee7b977a..23f55af7d6ba 100644 |
197 | --- a/arch/mips/include/asm/bmips.h |
198 | +++ b/arch/mips/include/asm/bmips.h |
199 | @@ -75,11 +75,11 @@ static inline int register_bmips_smp_ops(void) |
200 | #endif |
201 | } |
202 | |
203 | -extern char bmips_reset_nmi_vec; |
204 | -extern char bmips_reset_nmi_vec_end; |
205 | -extern char bmips_smp_movevec; |
206 | -extern char bmips_smp_int_vec; |
207 | -extern char bmips_smp_int_vec_end; |
208 | +extern char bmips_reset_nmi_vec[]; |
209 | +extern char bmips_reset_nmi_vec_end[]; |
210 | +extern char bmips_smp_movevec[]; |
211 | +extern char bmips_smp_int_vec[]; |
212 | +extern char bmips_smp_int_vec_end[]; |
213 | |
214 | extern int bmips_smp_enabled; |
215 | extern int bmips_cpu_offset; |
216 | diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c |
217 | index d4a293b68249..416d53f587e7 100644 |
218 | --- a/arch/mips/kernel/smp-bmips.c |
219 | +++ b/arch/mips/kernel/smp-bmips.c |
220 | @@ -453,10 +453,10 @@ static void bmips_wr_vec(unsigned long dst, char *start, char *end) |
221 | |
222 | static inline void bmips_nmi_handler_setup(void) |
223 | { |
224 | - bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec, |
225 | - &bmips_reset_nmi_vec_end); |
226 | - bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec, |
227 | - &bmips_smp_int_vec_end); |
228 | + bmips_wr_vec(BMIPS_NMI_RESET_VEC, bmips_reset_nmi_vec, |
229 | + bmips_reset_nmi_vec_end); |
230 | + bmips_wr_vec(BMIPS_WARM_RESTART_VEC, bmips_smp_int_vec, |
231 | + bmips_smp_int_vec_end); |
232 | } |
233 | |
234 | struct reset_vec_info { |
235 | diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c |
236 | index 6497f5283e3b..81acbde13394 100644 |
237 | --- a/drivers/dma/qcom/bam_dma.c |
238 | +++ b/drivers/dma/qcom/bam_dma.c |
239 | @@ -686,7 +686,21 @@ static int bam_dma_terminate_all(struct dma_chan *chan) |
240 | |
241 | /* remove all transactions, including active transaction */ |
242 | spin_lock_irqsave(&bchan->vc.lock, flag); |
243 | + /* |
244 | + * If we have transactions queued, then some might be committed to the |
245 | + * hardware in the desc fifo. The only way to reset the desc fifo is |
246 | + * to do a hardware reset (either by pipe or the entire block). |
247 | + * bam_chan_init_hw() will trigger a pipe reset, and also reinit the |
248 | + * pipe. If the pipe is left disabled (default state after pipe reset) |
249 | + * and is accessed by a connected hardware engine, a fatal error in |
250 | + * the BAM will occur. There is a small window where this could happen |
251 | + * with bam_chan_init_hw(), but it is assumed that the caller has |
252 | + * stopped activity on any attached hardware engine. Make sure to do |
253 | + * this first so that the BAM hardware doesn't cause memory corruption |
254 | + * by accessing freed resources. |
255 | + */ |
256 | if (bchan->curr_txd) { |
257 | + bam_chan_init_hw(bchan, bchan->curr_txd->dir); |
258 | list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued); |
259 | bchan->curr_txd = NULL; |
260 | } |
261 | diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c |
262 | index 1bb923e3a2bc..4a4782b3cc1b 100644 |
263 | --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c |
264 | +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c |
265 | @@ -1914,6 +1914,8 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) |
266 | */ |
267 | if (priv->internal_phy) { |
268 | int0_enable |= UMAC_IRQ_LINK_EVENT; |
269 | + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) |
270 | + int0_enable |= UMAC_IRQ_PHY_DET_R; |
271 | } else if (priv->ext_phy) { |
272 | int0_enable |= UMAC_IRQ_LINK_EVENT; |
273 | } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { |
274 | @@ -2531,6 +2533,10 @@ static void bcmgenet_irq_task(struct work_struct *work) |
275 | bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); |
276 | } |
277 | |
278 | + if (status & UMAC_IRQ_PHY_DET_R && |
279 | + priv->dev->phydev->autoneg != AUTONEG_ENABLE) |
280 | + phy_init_hw(priv->dev->phydev); |
281 | + |
282 | /* Link UP/DOWN event */ |
283 | if (status & UMAC_IRQ_LINK_EVENT) |
284 | phy_mac_interrupt(priv->phydev, |
285 | @@ -2627,8 +2633,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) |
286 | } |
287 | |
288 | /* all other interested interrupts handled in bottom half */ |
289 | - status &= (UMAC_IRQ_LINK_EVENT | |
290 | - UMAC_IRQ_MPD_R); |
291 | + status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_MPD_R | UMAC_IRQ_PHY_DET_R); |
292 | if (status) { |
293 | /* Save irq status for bottom-half processing. */ |
294 | spin_lock_irqsave(&priv->lock, flags); |
295 | diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c |
296 | index f7882c1fde16..407e1177d9d1 100644 |
297 | --- a/drivers/net/ethernet/hisilicon/hip04_eth.c |
298 | +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c |
299 | @@ -174,6 +174,7 @@ struct hip04_priv { |
300 | dma_addr_t rx_phys[RX_DESC_NUM]; |
301 | unsigned int rx_head; |
302 | unsigned int rx_buf_size; |
303 | + unsigned int rx_cnt_remaining; |
304 | |
305 | struct device_node *phy_node; |
306 | struct phy_device *phy; |
307 | @@ -487,7 +488,6 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget) |
308 | struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi); |
309 | struct net_device *ndev = priv->ndev; |
310 | struct net_device_stats *stats = &ndev->stats; |
311 | - unsigned int cnt = hip04_recv_cnt(priv); |
312 | struct rx_desc *desc; |
313 | struct sk_buff *skb; |
314 | unsigned char *buf; |
315 | @@ -500,8 +500,8 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget) |
316 | |
317 | /* clean up tx descriptors */ |
318 | tx_remaining = hip04_tx_reclaim(ndev, false); |
319 | - |
320 | - while (cnt && !last) { |
321 | + priv->rx_cnt_remaining += hip04_recv_cnt(priv); |
322 | + while (priv->rx_cnt_remaining && !last) { |
323 | buf = priv->rx_buf[priv->rx_head]; |
324 | skb = build_skb(buf, priv->rx_buf_size); |
325 | if (unlikely(!skb)) { |
326 | @@ -547,11 +547,13 @@ refill: |
327 | hip04_set_recv_desc(priv, phys); |
328 | |
329 | priv->rx_head = RX_NEXT(priv->rx_head); |
330 | - if (rx >= budget) |
331 | + if (rx >= budget) { |
332 | + --priv->rx_cnt_remaining; |
333 | goto done; |
334 | + } |
335 | |
336 | - if (--cnt == 0) |
337 | - cnt = hip04_recv_cnt(priv); |
338 | + if (--priv->rx_cnt_remaining == 0) |
339 | + priv->rx_cnt_remaining += hip04_recv_cnt(priv); |
340 | } |
341 | |
342 | if (!(priv->reg_inten & RCV_INT)) { |
343 | @@ -636,6 +638,7 @@ static int hip04_mac_open(struct net_device *ndev) |
344 | int i; |
345 | |
346 | priv->rx_head = 0; |
347 | + priv->rx_cnt_remaining = 0; |
348 | priv->tx_head = 0; |
349 | priv->tx_tail = 0; |
350 | hip04_reset_ppe(priv); |
351 | diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c |
352 | index 79944302dd46..7d1e8ab956e6 100644 |
353 | --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c |
354 | +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c |
355 | @@ -470,12 +470,31 @@ void mlx4_init_quotas(struct mlx4_dev *dev) |
356 | priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf]; |
357 | } |
358 | |
359 | -static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev) |
360 | +static int |
361 | +mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev, |
362 | + struct resource_allocator *res_alloc, |
363 | + int vf) |
364 | { |
365 | - /* reduce the sink counter */ |
366 | - return (dev->caps.max_counters - 1 - |
367 | - (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS)) |
368 | - / MLX4_MAX_PORTS; |
369 | + struct mlx4_active_ports actv_ports; |
370 | + int ports, counters_guaranteed; |
371 | + |
372 | + /* For master, only allocate according to the number of phys ports */ |
373 | + if (vf == mlx4_master_func_num(dev)) |
374 | + return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports; |
375 | + |
376 | + /* calculate real number of ports for the VF */ |
377 | + actv_ports = mlx4_get_active_ports(dev, vf); |
378 | + ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports); |
379 | + counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT; |
380 | + |
381 | + /* If we do not have enough counters for this VF, do not |
382 | + * allocate any for it. '-1' to reduce the sink counter. |
383 | + */ |
384 | + if ((res_alloc->res_reserved + counters_guaranteed) > |
385 | + (dev->caps.max_counters - 1)) |
386 | + return 0; |
387 | + |
388 | + return counters_guaranteed; |
389 | } |
390 | |
391 | int mlx4_init_resource_tracker(struct mlx4_dev *dev) |
392 | @@ -483,7 +502,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) |
393 | struct mlx4_priv *priv = mlx4_priv(dev); |
394 | int i, j; |
395 | int t; |
396 | - int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev); |
397 | |
398 | priv->mfunc.master.res_tracker.slave_list = |
399 | kzalloc(dev->num_slaves * sizeof(struct slave_list), |
400 | @@ -600,16 +618,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) |
401 | break; |
402 | case RES_COUNTER: |
403 | res_alloc->quota[t] = dev->caps.max_counters; |
404 | - if (t == mlx4_master_func_num(dev)) |
405 | - res_alloc->guaranteed[t] = |
406 | - MLX4_PF_COUNTERS_PER_PORT * |
407 | - MLX4_MAX_PORTS; |
408 | - else if (t <= max_vfs_guarantee_counter) |
409 | - res_alloc->guaranteed[t] = |
410 | - MLX4_VF_COUNTERS_PER_PORT * |
411 | - MLX4_MAX_PORTS; |
412 | - else |
413 | - res_alloc->guaranteed[t] = 0; |
414 | + res_alloc->guaranteed[t] = |
415 | + mlx4_calc_res_counter_guaranteed(dev, res_alloc, t); |
416 | res_alloc->res_free -= res_alloc->guaranteed[t]; |
417 | break; |
418 | default: |
419 | diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c |
420 | index b6ee0c1690d8..340bd98b8dbd 100644 |
421 | --- a/drivers/net/vxlan.c |
422 | +++ b/drivers/net/vxlan.c |
423 | @@ -2049,8 +2049,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, |
424 | label = info->key.label; |
425 | udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); |
426 | |
427 | - if (info->options_len) |
428 | + if (info->options_len) { |
429 | + if (info->options_len < sizeof(*md)) |
430 | + goto drop; |
431 | md = ip_tunnel_info_opts(info); |
432 | + } |
433 | } else { |
434 | md->gbp = skb->mark; |
435 | } |
436 | diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c |
437 | index 0a1ebbbd3f16..92530525e355 100644 |
438 | --- a/drivers/of/unittest.c |
439 | +++ b/drivers/of/unittest.c |
440 | @@ -933,6 +933,7 @@ static int __init unittest_data_add(void) |
441 | of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node); |
442 | if (!unittest_data_node) { |
443 | pr_warn("%s: No tree to attach; not running tests\n", __func__); |
444 | + kfree(unittest_data); |
445 | return -ENODATA; |
446 | } |
447 | of_node_set_flag(unittest_data_node, OF_DETACHED); |
448 | diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c |
449 | index 13a4c2774157..6adfb379ac7e 100644 |
450 | --- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c |
451 | +++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c |
452 | @@ -640,8 +640,8 @@ static int ns2_pinmux_enable(struct pinctrl_dev *pctrl_dev, |
453 | const struct ns2_pin_function *func; |
454 | const struct ns2_pin_group *grp; |
455 | |
456 | - if (grp_select > pinctrl->num_groups || |
457 | - func_select > pinctrl->num_functions) |
458 | + if (grp_select >= pinctrl->num_groups || |
459 | + func_select >= pinctrl->num_functions) |
460 | return -EINVAL; |
461 | |
462 | func = &pinctrl->functions[func_select]; |
463 | diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c |
464 | index 86b348740fcd..ffb1f61d2c75 100644 |
465 | --- a/drivers/regulator/pfuze100-regulator.c |
466 | +++ b/drivers/regulator/pfuze100-regulator.c |
467 | @@ -608,7 +608,13 @@ static int pfuze100_regulator_probe(struct i2c_client *client, |
468 | |
469 | /* SW2~SW4 high bit check and modify the voltage value table */ |
470 | if (i >= sw_check_start && i <= sw_check_end) { |
471 | - regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val); |
472 | + ret = regmap_read(pfuze_chip->regmap, |
473 | + desc->vsel_reg, &val); |
474 | + if (ret) { |
475 | + dev_err(&client->dev, "Fails to read from the register.\n"); |
476 | + return ret; |
477 | + } |
478 | + |
479 | if (val & sw_hi) { |
480 | if (pfuze_chip->chip_id == PFUZE3000) { |
481 | desc->volt_table = pfuze3000_sw2hi; |
482 | diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c |
483 | index d2f994298753..6d17357b3a24 100644 |
484 | --- a/drivers/regulator/ti-abb-regulator.c |
485 | +++ b/drivers/regulator/ti-abb-regulator.c |
486 | @@ -173,19 +173,14 @@ static int ti_abb_wait_txdone(struct device *dev, struct ti_abb *abb) |
487 | while (timeout++ <= abb->settling_time) { |
488 | status = ti_abb_check_txdone(abb); |
489 | if (status) |
490 | - break; |
491 | + return 0; |
492 | |
493 | udelay(1); |
494 | } |
495 | |
496 | - if (timeout > abb->settling_time) { |
497 | - dev_warn_ratelimited(dev, |
498 | - "%s:TRANXDONE timeout(%duS) int=0x%08x\n", |
499 | - __func__, timeout, readl(abb->int_base)); |
500 | - return -ETIMEDOUT; |
501 | - } |
502 | - |
503 | - return 0; |
504 | + dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n", |
505 | + __func__, timeout, readl(abb->int_base)); |
506 | + return -ETIMEDOUT; |
507 | } |
508 | |
509 | /** |
510 | @@ -205,19 +200,14 @@ static int ti_abb_clear_all_txdone(struct device *dev, const struct ti_abb *abb) |
511 | |
512 | status = ti_abb_check_txdone(abb); |
513 | if (!status) |
514 | - break; |
515 | + return 0; |
516 | |
517 | udelay(1); |
518 | } |
519 | |
520 | - if (timeout > abb->settling_time) { |
521 | - dev_warn_ratelimited(dev, |
522 | - "%s:TRANXDONE timeout(%duS) int=0x%08x\n", |
523 | - __func__, timeout, readl(abb->int_base)); |
524 | - return -ETIMEDOUT; |
525 | - } |
526 | - |
527 | - return 0; |
528 | + dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n", |
529 | + __func__, timeout, readl(abb->int_base)); |
530 | + return -ETIMEDOUT; |
531 | } |
532 | |
533 | /** |
534 | diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig |
535 | index 17b1574920fd..941e3f25b4a9 100644 |
536 | --- a/drivers/scsi/Kconfig |
537 | +++ b/drivers/scsi/Kconfig |
538 | @@ -986,7 +986,7 @@ config SCSI_SNI_53C710 |
539 | |
540 | config 53C700_LE_ON_BE |
541 | bool |
542 | - depends on SCSI_LASI700 |
543 | + depends on SCSI_LASI700 || SCSI_SNI_53C710 |
544 | default y |
545 | |
546 | config SCSI_STEX |
547 | diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c |
548 | index 98787588247b..60c288526355 100644 |
549 | --- a/drivers/scsi/device_handler/scsi_dh_alua.c |
550 | +++ b/drivers/scsi/device_handler/scsi_dh_alua.c |
551 | @@ -527,6 +527,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) |
552 | unsigned int tpg_desc_tbl_off; |
553 | unsigned char orig_transition_tmo; |
554 | unsigned long flags; |
555 | + bool transitioning_sense = false; |
556 | |
557 | if (!pg->expiry) { |
558 | unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ; |
559 | @@ -571,13 +572,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) |
560 | goto retry; |
561 | } |
562 | /* |
563 | - * Retry on ALUA state transition or if any |
564 | - * UNIT ATTENTION occurred. |
565 | + * If the array returns with 'ALUA state transition' |
566 | + * sense code here it cannot return RTPG data during |
567 | + * transition. So set the state to 'transitioning' directly. |
568 | */ |
569 | if (sense_hdr.sense_key == NOT_READY && |
570 | - sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) |
571 | - err = SCSI_DH_RETRY; |
572 | - else if (sense_hdr.sense_key == UNIT_ATTENTION) |
573 | + sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) { |
574 | + transitioning_sense = true; |
575 | + goto skip_rtpg; |
576 | + } |
577 | + /* |
578 | + * Retry on any other UNIT ATTENTION occurred. |
579 | + */ |
580 | + if (sense_hdr.sense_key == UNIT_ATTENTION) |
581 | err = SCSI_DH_RETRY; |
582 | if (err == SCSI_DH_RETRY && |
583 | pg->expiry != 0 && time_before(jiffies, pg->expiry)) { |
584 | @@ -665,7 +672,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) |
585 | off = 8 + (desc[7] * 4); |
586 | } |
587 | |
588 | + skip_rtpg: |
589 | spin_lock_irqsave(&pg->lock, flags); |
590 | + if (transitioning_sense) |
591 | + pg->state = SCSI_ACCESS_STATE_TRANSITIONING; |
592 | + |
593 | sdev_printk(KERN_INFO, sdev, |
594 | "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n", |
595 | ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state), |
596 | diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c |
597 | index 76278072147e..b0f5220ae23a 100644 |
598 | --- a/drivers/scsi/sni_53c710.c |
599 | +++ b/drivers/scsi/sni_53c710.c |
600 | @@ -78,10 +78,8 @@ static int snirm710_probe(struct platform_device *dev) |
601 | |
602 | base = res->start; |
603 | hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); |
604 | - if (!hostdata) { |
605 | - dev_printk(KERN_ERR, dev, "Failed to allocate host data\n"); |
606 | + if (!hostdata) |
607 | return -ENOMEM; |
608 | - } |
609 | |
610 | hostdata->dev = &dev->dev; |
611 | dma_set_mask(&dev->dev, DMA_BIT_MASK(32)); |
612 | diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c |
613 | index cc38a3509f78..c3d576ed6f13 100644 |
614 | --- a/drivers/target/target_core_device.c |
615 | +++ b/drivers/target/target_core_device.c |
616 | @@ -1046,27 +1046,6 @@ passthrough_parse_cdb(struct se_cmd *cmd, |
617 | { |
618 | unsigned char *cdb = cmd->t_task_cdb; |
619 | |
620 | - /* |
621 | - * Clear a lun set in the cdb if the initiator talking to use spoke |
622 | - * and old standards version, as we can't assume the underlying device |
623 | - * won't choke up on it. |
624 | - */ |
625 | - switch (cdb[0]) { |
626 | - case READ_10: /* SBC - RDProtect */ |
627 | - case READ_12: /* SBC - RDProtect */ |
628 | - case READ_16: /* SBC - RDProtect */ |
629 | - case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ |
630 | - case VERIFY: /* SBC - VRProtect */ |
631 | - case VERIFY_16: /* SBC - VRProtect */ |
632 | - case WRITE_VERIFY: /* SBC - VRProtect */ |
633 | - case WRITE_VERIFY_12: /* SBC - VRProtect */ |
634 | - case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ |
635 | - break; |
636 | - default: |
637 | - cdb[1] &= 0x1f; /* clear logical unit number */ |
638 | - break; |
639 | - } |
640 | - |
641 | /* |
642 | * For REPORT LUNS we always need to emulate the response, for everything |
643 | * else, pass it up. |
644 | diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h |
645 | index 5367b684c1f7..7ae21ad420fb 100644 |
646 | --- a/fs/cifs/cifsglob.h |
647 | +++ b/fs/cifs/cifsglob.h |
648 | @@ -1178,6 +1178,11 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file); |
649 | struct cifsInodeInfo { |
650 | bool can_cache_brlcks; |
651 | struct list_head llist; /* locks helb by this inode */ |
652 | + /* |
653 | + * NOTE: Some code paths call down_read(lock_sem) twice, so |
654 | + * we must always use use cifs_down_write() instead of down_write() |
655 | + * for this semaphore to avoid deadlocks. |
656 | + */ |
657 | struct rw_semaphore lock_sem; /* protect the fields above */ |
658 | /* BB add in lists for dirty pages i.e. write caching info for oplock */ |
659 | struct list_head openFileList; |
660 | diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h |
661 | index cd8025a249bb..cdf244df91c2 100644 |
662 | --- a/fs/cifs/cifsproto.h |
663 | +++ b/fs/cifs/cifsproto.h |
664 | @@ -138,6 +138,7 @@ extern int cifs_unlock_range(struct cifsFileInfo *cfile, |
665 | struct file_lock *flock, const unsigned int xid); |
666 | extern int cifs_push_mandatory_locks(struct cifsFileInfo *cfile); |
667 | |
668 | +extern void cifs_down_write(struct rw_semaphore *sem); |
669 | extern struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, |
670 | struct file *file, |
671 | struct tcon_link *tlink, |
672 | diff --git a/fs/cifs/file.c b/fs/cifs/file.c |
673 | index 3504ef015493..1c3f262d9c4d 100644 |
674 | --- a/fs/cifs/file.c |
675 | +++ b/fs/cifs/file.c |
676 | @@ -280,6 +280,13 @@ cifs_has_mand_locks(struct cifsInodeInfo *cinode) |
677 | return has_locks; |
678 | } |
679 | |
680 | +void |
681 | +cifs_down_write(struct rw_semaphore *sem) |
682 | +{ |
683 | + while (!down_write_trylock(sem)) |
684 | + msleep(10); |
685 | +} |
686 | + |
687 | struct cifsFileInfo * |
688 | cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, |
689 | struct tcon_link *tlink, __u32 oplock) |
690 | @@ -305,7 +312,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, |
691 | INIT_LIST_HEAD(&fdlocks->locks); |
692 | fdlocks->cfile = cfile; |
693 | cfile->llist = fdlocks; |
694 | - down_write(&cinode->lock_sem); |
695 | + cifs_down_write(&cinode->lock_sem); |
696 | list_add(&fdlocks->llist, &cinode->llist); |
697 | up_write(&cinode->lock_sem); |
698 | |
699 | @@ -457,7 +464,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler) |
700 | * Delete any outstanding lock records. We'll lose them when the file |
701 | * is closed anyway. |
702 | */ |
703 | - down_write(&cifsi->lock_sem); |
704 | + cifs_down_write(&cifsi->lock_sem); |
705 | list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) { |
706 | list_del(&li->llist); |
707 | cifs_del_lock_waiters(li); |
708 | @@ -1011,7 +1018,7 @@ static void |
709 | cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock) |
710 | { |
711 | struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); |
712 | - down_write(&cinode->lock_sem); |
713 | + cifs_down_write(&cinode->lock_sem); |
714 | list_add_tail(&lock->llist, &cfile->llist->locks); |
715 | up_write(&cinode->lock_sem); |
716 | } |
717 | @@ -1033,7 +1040,7 @@ cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock, |
718 | |
719 | try_again: |
720 | exist = false; |
721 | - down_write(&cinode->lock_sem); |
722 | + cifs_down_write(&cinode->lock_sem); |
723 | |
724 | exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length, |
725 | lock->type, &conf_lock, CIFS_LOCK_OP); |
726 | @@ -1055,7 +1062,7 @@ try_again: |
727 | (lock->blist.next == &lock->blist)); |
728 | if (!rc) |
729 | goto try_again; |
730 | - down_write(&cinode->lock_sem); |
731 | + cifs_down_write(&cinode->lock_sem); |
732 | list_del_init(&lock->blist); |
733 | } |
734 | |
735 | @@ -1108,7 +1115,7 @@ cifs_posix_lock_set(struct file *file, struct file_lock *flock) |
736 | return rc; |
737 | |
738 | try_again: |
739 | - down_write(&cinode->lock_sem); |
740 | + cifs_down_write(&cinode->lock_sem); |
741 | if (!cinode->can_cache_brlcks) { |
742 | up_write(&cinode->lock_sem); |
743 | return rc; |
744 | @@ -1312,7 +1319,7 @@ cifs_push_locks(struct cifsFileInfo *cfile) |
745 | int rc = 0; |
746 | |
747 | /* we are going to update can_cache_brlcks here - need a write access */ |
748 | - down_write(&cinode->lock_sem); |
749 | + cifs_down_write(&cinode->lock_sem); |
750 | if (!cinode->can_cache_brlcks) { |
751 | up_write(&cinode->lock_sem); |
752 | return rc; |
753 | @@ -1501,7 +1508,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, |
754 | if (!buf) |
755 | return -ENOMEM; |
756 | |
757 | - down_write(&cinode->lock_sem); |
758 | + cifs_down_write(&cinode->lock_sem); |
759 | for (i = 0; i < 2; i++) { |
760 | cur = buf; |
761 | num = 0; |
762 | diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c |
763 | index dee5250701de..41f1a5dd33a5 100644 |
764 | --- a/fs/cifs/smb2file.c |
765 | +++ b/fs/cifs/smb2file.c |
766 | @@ -138,7 +138,7 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, |
767 | |
768 | cur = buf; |
769 | |
770 | - down_write(&cinode->lock_sem); |
771 | + cifs_down_write(&cinode->lock_sem); |
772 | list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) { |
773 | if (flock->fl_start > li->offset || |
774 | (flock->fl_start + length) < |
775 | diff --git a/include/linux/gfp.h b/include/linux/gfp.h |
776 | index f8041f9de31e..d11f56bc9c7e 100644 |
777 | --- a/include/linux/gfp.h |
778 | +++ b/include/linux/gfp.h |
779 | @@ -284,6 +284,29 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) |
780 | return !!(gfp_flags & __GFP_DIRECT_RECLAIM); |
781 | } |
782 | |
783 | +/** |
784 | + * gfpflags_normal_context - is gfp_flags a normal sleepable context? |
785 | + * @gfp_flags: gfp_flags to test |
786 | + * |
787 | + * Test whether @gfp_flags indicates that the allocation is from the |
788 | + * %current context and allowed to sleep. |
789 | + * |
790 | + * An allocation being allowed to block doesn't mean it owns the %current |
791 | + * context. When direct reclaim path tries to allocate memory, the |
792 | + * allocation context is nested inside whatever %current was doing at the |
793 | + * time of the original allocation. The nested allocation may be allowed |
794 | + * to block but modifying anything %current owns can corrupt the outer |
795 | + * context's expectations. |
796 | + * |
797 | + * %true result from this function indicates that the allocation context |
798 | + * can sleep and use anything that's associated with %current. |
799 | + */ |
800 | +static inline bool gfpflags_normal_context(const gfp_t gfp_flags) |
801 | +{ |
802 | + return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) == |
803 | + __GFP_DIRECT_RECLAIM; |
804 | +} |
805 | + |
806 | #ifdef CONFIG_HIGHMEM |
807 | #define OPT_ZONE_HIGHMEM ZONE_HIGHMEM |
808 | #else |
809 | diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h |
810 | index f8761774a94f..e37112ac332f 100644 |
811 | --- a/include/linux/skbuff.h |
812 | +++ b/include/linux/skbuff.h |
813 | @@ -1178,7 +1178,8 @@ static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 |
814 | return skb->hash; |
815 | } |
816 | |
817 | -__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb); |
818 | +__u32 skb_get_hash_perturb(const struct sk_buff *skb, |
819 | + const siphash_key_t *perturb); |
820 | |
821 | static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) |
822 | { |
823 | diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h |
824 | index d9534927d93b..1505cf7a4aaf 100644 |
825 | --- a/include/net/flow_dissector.h |
826 | +++ b/include/net/flow_dissector.h |
827 | @@ -3,6 +3,7 @@ |
828 | |
829 | #include <linux/types.h> |
830 | #include <linux/in6.h> |
831 | +#include <linux/siphash.h> |
832 | #include <uapi/linux/if_ether.h> |
833 | |
834 | /** |
835 | @@ -151,7 +152,7 @@ struct flow_dissector { |
836 | struct flow_keys { |
837 | struct flow_dissector_key_control control; |
838 | #define FLOW_KEYS_HASH_START_FIELD basic |
839 | - struct flow_dissector_key_basic basic; |
840 | + struct flow_dissector_key_basic basic __aligned(SIPHASH_ALIGNMENT); |
841 | struct flow_dissector_key_tags tags; |
842 | struct flow_dissector_key_vlan vlan; |
843 | struct flow_dissector_key_keyid keyid; |
844 | diff --git a/include/net/fq.h b/include/net/fq.h |
845 | index 6d8521a30c5c..2c7687902789 100644 |
846 | --- a/include/net/fq.h |
847 | +++ b/include/net/fq.h |
848 | @@ -70,7 +70,7 @@ struct fq { |
849 | struct list_head backlogs; |
850 | spinlock_t lock; |
851 | u32 flows_cnt; |
852 | - u32 perturbation; |
853 | + siphash_key_t perturbation; |
854 | u32 limit; |
855 | u32 memory_limit; |
856 | u32 memory_usage; |
857 | diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h |
858 | index 4e6131cd3f43..45a0d9a006a0 100644 |
859 | --- a/include/net/fq_impl.h |
860 | +++ b/include/net/fq_impl.h |
861 | @@ -105,7 +105,7 @@ static struct fq_flow *fq_flow_classify(struct fq *fq, |
862 | |
863 | lockdep_assert_held(&fq->lock); |
864 | |
865 | - hash = skb_get_hash_perturb(skb, fq->perturbation); |
866 | + hash = skb_get_hash_perturb(skb, &fq->perturbation); |
867 | idx = reciprocal_scale(hash, fq->flows_cnt); |
868 | flow = &fq->flows[idx]; |
869 | |
870 | @@ -252,7 +252,7 @@ static int fq_init(struct fq *fq, int flows_cnt) |
871 | INIT_LIST_HEAD(&fq->backlogs); |
872 | spin_lock_init(&fq->lock); |
873 | fq->flows_cnt = max_t(u32, flows_cnt, 1); |
874 | - fq->perturbation = prandom_u32(); |
875 | + get_random_bytes(&fq->perturbation, sizeof(fq->perturbation)); |
876 | fq->quantum = 300; |
877 | fq->limit = 8192; |
878 | fq->memory_limit = 16 << 20; /* 16 MBytes */ |
879 | diff --git a/include/net/sock.h b/include/net/sock.h |
880 | index 116308632fae..469c012a6d01 100644 |
881 | --- a/include/net/sock.h |
882 | +++ b/include/net/sock.h |
883 | @@ -2045,12 +2045,17 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, |
884 | * sk_page_frag - return an appropriate page_frag |
885 | * @sk: socket |
886 | * |
887 | - * If socket allocation mode allows current thread to sleep, it means its |
888 | - * safe to use the per task page_frag instead of the per socket one. |
889 | + * Use the per task page_frag instead of the per socket one for |
890 | + * optimization when we know that we're in the normal context and owns |
891 | + * everything that's associated with %current. |
892 | + * |
893 | + * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest |
894 | + * inside other socket operations and end up recursing into sk_page_frag() |
895 | + * while it's already in use. |
896 | */ |
897 | static inline struct page_frag *sk_page_frag(struct sock *sk) |
898 | { |
899 | - if (gfpflags_allow_blocking(sk->sk_allocation)) |
900 | + if (gfpflags_normal_context(sk->sk_allocation)) |
901 | return ¤t->task_frag; |
902 | |
903 | return &sk->sk_frag; |
904 | diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c |
905 | index a519d3cab1a2..6aef4a0bed29 100644 |
906 | --- a/kernel/time/alarmtimer.c |
907 | +++ b/kernel/time/alarmtimer.c |
908 | @@ -586,7 +586,7 @@ static void alarm_timer_get(struct k_itimer *timr, |
909 | static int alarm_timer_del(struct k_itimer *timr) |
910 | { |
911 | if (!rtcdev) |
912 | - return -ENOTSUPP; |
913 | + return -EOPNOTSUPP; |
914 | |
915 | if (alarm_try_to_cancel(&timr->it.alarm.alarmtimer) < 0) |
916 | return TIMER_RETRY; |
917 | @@ -610,7 +610,7 @@ static int alarm_timer_set(struct k_itimer *timr, int flags, |
918 | ktime_t exp; |
919 | |
920 | if (!rtcdev) |
921 | - return -ENOTSUPP; |
922 | + return -EOPNOTSUPP; |
923 | |
924 | if (flags & ~TIMER_ABSTIME) |
925 | return -EINVAL; |
926 | diff --git a/net/core/datagram.c b/net/core/datagram.c |
927 | index 146502f310ce..619c63a74594 100644 |
928 | --- a/net/core/datagram.c |
929 | +++ b/net/core/datagram.c |
930 | @@ -96,7 +96,7 @@ int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p, |
931 | if (error) |
932 | goto out_err; |
933 | |
934 | - if (sk->sk_receive_queue.prev != skb) |
935 | + if (READ_ONCE(sk->sk_receive_queue.prev) != skb) |
936 | goto out; |
937 | |
938 | /* Socket shut down? */ |
939 | diff --git a/net/core/ethtool.c b/net/core/ethtool.c |
940 | index ffe7b03c9ab5..454f73fcb3a6 100644 |
941 | --- a/net/core/ethtool.c |
942 | +++ b/net/core/ethtool.c |
943 | @@ -1438,11 +1438,13 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr) |
944 | |
945 | static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) |
946 | { |
947 | - struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; |
948 | + struct ethtool_wolinfo wol; |
949 | |
950 | if (!dev->ethtool_ops->get_wol) |
951 | return -EOPNOTSUPP; |
952 | |
953 | + memset(&wol, 0, sizeof(struct ethtool_wolinfo)); |
954 | + wol.cmd = ETHTOOL_GWOL; |
955 | dev->ethtool_ops->get_wol(dev, &wol); |
956 | |
957 | if (copy_to_user(useraddr, &wol, sizeof(wol))) |
958 | diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c |
959 | index ab7c50026cae..26b0f70d2f1c 100644 |
960 | --- a/net/core/flow_dissector.c |
961 | +++ b/net/core/flow_dissector.c |
962 | @@ -563,45 +563,34 @@ out_bad: |
963 | } |
964 | EXPORT_SYMBOL(__skb_flow_dissect); |
965 | |
966 | -static u32 hashrnd __read_mostly; |
967 | +static siphash_key_t hashrnd __read_mostly; |
968 | static __always_inline void __flow_hash_secret_init(void) |
969 | { |
970 | net_get_random_once(&hashrnd, sizeof(hashrnd)); |
971 | } |
972 | |
973 | -static __always_inline u32 __flow_hash_words(const u32 *words, u32 length, |
974 | - u32 keyval) |
975 | +static const void *flow_keys_hash_start(const struct flow_keys *flow) |
976 | { |
977 | - return jhash2(words, length, keyval); |
978 | -} |
979 | - |
980 | -static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow) |
981 | -{ |
982 | - const void *p = flow; |
983 | - |
984 | - BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32)); |
985 | - return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET); |
986 | + BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT); |
987 | + return &flow->FLOW_KEYS_HASH_START_FIELD; |
988 | } |
989 | |
990 | static inline size_t flow_keys_hash_length(const struct flow_keys *flow) |
991 | { |
992 | - size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs); |
993 | - BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32)); |
994 | - BUILD_BUG_ON(offsetof(typeof(*flow), addrs) != |
995 | - sizeof(*flow) - sizeof(flow->addrs)); |
996 | + size_t len = offsetof(typeof(*flow), addrs) - FLOW_KEYS_HASH_OFFSET; |
997 | |
998 | switch (flow->control.addr_type) { |
999 | case FLOW_DISSECTOR_KEY_IPV4_ADDRS: |
1000 | - diff -= sizeof(flow->addrs.v4addrs); |
1001 | + len += sizeof(flow->addrs.v4addrs); |
1002 | break; |
1003 | case FLOW_DISSECTOR_KEY_IPV6_ADDRS: |
1004 | - diff -= sizeof(flow->addrs.v6addrs); |
1005 | + len += sizeof(flow->addrs.v6addrs); |
1006 | break; |
1007 | case FLOW_DISSECTOR_KEY_TIPC_ADDRS: |
1008 | - diff -= sizeof(flow->addrs.tipcaddrs); |
1009 | + len += sizeof(flow->addrs.tipcaddrs); |
1010 | break; |
1011 | } |
1012 | - return (sizeof(*flow) - diff) / sizeof(u32); |
1013 | + return len; |
1014 | } |
1015 | |
1016 | __be32 flow_get_u32_src(const struct flow_keys *flow) |
1017 | @@ -667,14 +656,15 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys) |
1018 | } |
1019 | } |
1020 | |
1021 | -static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval) |
1022 | +static inline u32 __flow_hash_from_keys(struct flow_keys *keys, |
1023 | + const siphash_key_t *keyval) |
1024 | { |
1025 | u32 hash; |
1026 | |
1027 | __flow_hash_consistentify(keys); |
1028 | |
1029 | - hash = __flow_hash_words(flow_keys_hash_start(keys), |
1030 | - flow_keys_hash_length(keys), keyval); |
1031 | + hash = siphash(flow_keys_hash_start(keys), |
1032 | + flow_keys_hash_length(keys), keyval); |
1033 | if (!hash) |
1034 | hash = 1; |
1035 | |
1036 | @@ -684,12 +674,13 @@ static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval) |
1037 | u32 flow_hash_from_keys(struct flow_keys *keys) |
1038 | { |
1039 | __flow_hash_secret_init(); |
1040 | - return __flow_hash_from_keys(keys, hashrnd); |
1041 | + return __flow_hash_from_keys(keys, &hashrnd); |
1042 | } |
1043 | EXPORT_SYMBOL(flow_hash_from_keys); |
1044 | |
1045 | static inline u32 ___skb_get_hash(const struct sk_buff *skb, |
1046 | - struct flow_keys *keys, u32 keyval) |
1047 | + struct flow_keys *keys, |
1048 | + const siphash_key_t *keyval) |
1049 | { |
1050 | skb_flow_dissect_flow_keys(skb, keys, |
1051 | FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); |
1052 | @@ -737,7 +728,7 @@ u32 __skb_get_hash_symmetric(struct sk_buff *skb) |
1053 | NULL, 0, 0, 0, |
1054 | FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); |
1055 | |
1056 | - return __flow_hash_from_keys(&keys, hashrnd); |
1057 | + return __flow_hash_from_keys(&keys, &hashrnd); |
1058 | } |
1059 | EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric); |
1060 | |
1061 | @@ -757,13 +748,14 @@ void __skb_get_hash(struct sk_buff *skb) |
1062 | |
1063 | __flow_hash_secret_init(); |
1064 | |
1065 | - hash = ___skb_get_hash(skb, &keys, hashrnd); |
1066 | + hash = ___skb_get_hash(skb, &keys, &hashrnd); |
1067 | |
1068 | __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys)); |
1069 | } |
1070 | EXPORT_SYMBOL(__skb_get_hash); |
1071 | |
1072 | -__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb) |
1073 | +__u32 skb_get_hash_perturb(const struct sk_buff *skb, |
1074 | + const siphash_key_t *perturb) |
1075 | { |
1076 | struct flow_keys keys; |
1077 | |
1078 | diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c |
1079 | index 1d6d3aaa8c3d..322268b88fec 100644 |
1080 | --- a/net/dccp/ipv4.c |
1081 | +++ b/net/dccp/ipv4.c |
1082 | @@ -121,7 +121,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
1083 | inet->inet_daddr, |
1084 | inet->inet_sport, |
1085 | inet->inet_dport); |
1086 | - inet->inet_id = dp->dccps_iss ^ jiffies; |
1087 | + inet->inet_id = prandom_u32(); |
1088 | |
1089 | err = dccp_connect(sk); |
1090 | rt = NULL; |
1091 | @@ -417,7 +417,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk, |
1092 | RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt)); |
1093 | newinet->mc_index = inet_iif(skb); |
1094 | newinet->mc_ttl = ip_hdr(skb)->ttl; |
1095 | - newinet->inet_id = jiffies; |
1096 | + newinet->inet_id = prandom_u32(); |
1097 | |
1098 | if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL) |
1099 | goto put_and_exit; |
1100 | diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c |
1101 | index 0f99297b2fb3..f1792a847d0b 100644 |
1102 | --- a/net/dsa/dsa2.c |
1103 | +++ b/net/dsa/dsa2.c |
1104 | @@ -59,7 +59,7 @@ static struct dsa_switch_tree *dsa_add_dst(u32 tree) |
1105 | dst->tree = tree; |
1106 | dst->cpu_switch = -1; |
1107 | INIT_LIST_HEAD(&dst->list); |
1108 | - list_add_tail(&dsa_switch_trees, &dst->list); |
1109 | + list_add_tail(&dst->list, &dsa_switch_trees); |
1110 | kref_init(&dst->refcount); |
1111 | |
1112 | return dst; |
1113 | diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c |
1114 | index f915abff1350..d3eddfd13875 100644 |
1115 | --- a/net/ipv4/datagram.c |
1116 | +++ b/net/ipv4/datagram.c |
1117 | @@ -75,7 +75,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len |
1118 | inet->inet_dport = usin->sin_port; |
1119 | sk->sk_state = TCP_ESTABLISHED; |
1120 | sk_set_txhash(sk); |
1121 | - inet->inet_id = jiffies; |
1122 | + inet->inet_id = prandom_u32(); |
1123 | |
1124 | sk_dst_set(sk, &rt->dst); |
1125 | err = 0; |
1126 | diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c |
1127 | index 848f2c1da8a5..cced424e1176 100644 |
1128 | --- a/net/ipv4/tcp_ipv4.c |
1129 | +++ b/net/ipv4/tcp_ipv4.c |
1130 | @@ -239,7 +239,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
1131 | inet->inet_sport, |
1132 | usin->sin_port); |
1133 | |
1134 | - inet->inet_id = tp->write_seq ^ jiffies; |
1135 | + inet->inet_id = prandom_u32(); |
1136 | |
1137 | err = tcp_connect(sk); |
1138 | |
1139 | @@ -1307,7 +1307,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, |
1140 | inet_csk(newsk)->icsk_ext_hdr_len = 0; |
1141 | if (inet_opt) |
1142 | inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; |
1143 | - newinet->inet_id = newtp->write_seq ^ jiffies; |
1144 | + newinet->inet_id = prandom_u32(); |
1145 | |
1146 | if (!dst) { |
1147 | dst = inet_csk_route_child_sock(sk, newsk, req); |
1148 | diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c |
1149 | index a5ea0e9b6be4..29b7465c9d8a 100644 |
1150 | --- a/net/sched/sch_fq_codel.c |
1151 | +++ b/net/sched/sch_fq_codel.c |
1152 | @@ -57,7 +57,7 @@ struct fq_codel_sched_data { |
1153 | struct fq_codel_flow *flows; /* Flows table [flows_cnt] */ |
1154 | u32 *backlogs; /* backlog table [flows_cnt] */ |
1155 | u32 flows_cnt; /* number of flows */ |
1156 | - u32 perturbation; /* hash perturbation */ |
1157 | + siphash_key_t perturbation; /* hash perturbation */ |
1158 | u32 quantum; /* psched_mtu(qdisc_dev(sch)); */ |
1159 | u32 drop_batch_size; |
1160 | u32 memory_limit; |
1161 | @@ -75,7 +75,7 @@ struct fq_codel_sched_data { |
1162 | static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q, |
1163 | struct sk_buff *skb) |
1164 | { |
1165 | - u32 hash = skb_get_hash_perturb(skb, q->perturbation); |
1166 | + u32 hash = skb_get_hash_perturb(skb, &q->perturbation); |
1167 | |
1168 | return reciprocal_scale(hash, q->flows_cnt); |
1169 | } |
1170 | @@ -482,7 +482,7 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) |
1171 | q->memory_limit = 32 << 20; /* 32 MBytes */ |
1172 | q->drop_batch_size = 64; |
1173 | q->quantum = psched_mtu(qdisc_dev(sch)); |
1174 | - q->perturbation = prandom_u32(); |
1175 | + get_random_bytes(&q->perturbation, sizeof(q->perturbation)); |
1176 | INIT_LIST_HEAD(&q->new_flows); |
1177 | INIT_LIST_HEAD(&q->old_flows); |
1178 | codel_params_init(&q->cparams); |
1179 | diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c |
1180 | index fe32239253a6..1367fe94d630 100644 |
1181 | --- a/net/sched/sch_hhf.c |
1182 | +++ b/net/sched/sch_hhf.c |
1183 | @@ -4,11 +4,11 @@ |
1184 | * Copyright (C) 2013 Nandita Dukkipati <nanditad@google.com> |
1185 | */ |
1186 | |
1187 | -#include <linux/jhash.h> |
1188 | #include <linux/jiffies.h> |
1189 | #include <linux/module.h> |
1190 | #include <linux/skbuff.h> |
1191 | #include <linux/vmalloc.h> |
1192 | +#include <linux/siphash.h> |
1193 | #include <net/pkt_sched.h> |
1194 | #include <net/sock.h> |
1195 | |
1196 | @@ -125,7 +125,7 @@ struct wdrr_bucket { |
1197 | |
1198 | struct hhf_sched_data { |
1199 | struct wdrr_bucket buckets[WDRR_BUCKET_CNT]; |
1200 | - u32 perturbation; /* hash perturbation */ |
1201 | + siphash_key_t perturbation; /* hash perturbation */ |
1202 | u32 quantum; /* psched_mtu(qdisc_dev(sch)); */ |
1203 | u32 drop_overlimit; /* number of times max qdisc packet |
1204 | * limit was hit |
1205 | @@ -263,7 +263,7 @@ static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch) |
1206 | } |
1207 | |
1208 | /* Get hashed flow-id of the skb. */ |
1209 | - hash = skb_get_hash_perturb(skb, q->perturbation); |
1210 | + hash = skb_get_hash_perturb(skb, &q->perturbation); |
1211 | |
1212 | /* Check if this packet belongs to an already established HH flow. */ |
1213 | flow_pos = hash & HHF_BIT_MASK; |
1214 | @@ -593,7 +593,7 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt) |
1215 | |
1216 | sch->limit = 1000; |
1217 | q->quantum = psched_mtu(qdisc_dev(sch)); |
1218 | - q->perturbation = prandom_u32(); |
1219 | + get_random_bytes(&q->perturbation, sizeof(q->perturbation)); |
1220 | INIT_LIST_HEAD(&q->new_buckets); |
1221 | INIT_LIST_HEAD(&q->old_buckets); |
1222 | |
1223 | diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c |
1224 | index 20a350bd1b1d..bc176bd48c02 100644 |
1225 | --- a/net/sched/sch_sfb.c |
1226 | +++ b/net/sched/sch_sfb.c |
1227 | @@ -22,7 +22,7 @@ |
1228 | #include <linux/errno.h> |
1229 | #include <linux/skbuff.h> |
1230 | #include <linux/random.h> |
1231 | -#include <linux/jhash.h> |
1232 | +#include <linux/siphash.h> |
1233 | #include <net/ip.h> |
1234 | #include <net/pkt_sched.h> |
1235 | #include <net/inet_ecn.h> |
1236 | @@ -48,7 +48,7 @@ struct sfb_bucket { |
1237 | * (Section 4.4 of SFB reference : moving hash functions) |
1238 | */ |
1239 | struct sfb_bins { |
1240 | - u32 perturbation; /* jhash perturbation */ |
1241 | + siphash_key_t perturbation; /* siphash key */ |
1242 | struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS]; |
1243 | }; |
1244 | |
1245 | @@ -219,7 +219,8 @@ static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_da |
1246 | |
1247 | static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q) |
1248 | { |
1249 | - q->bins[slot].perturbation = prandom_u32(); |
1250 | + get_random_bytes(&q->bins[slot].perturbation, |
1251 | + sizeof(q->bins[slot].perturbation)); |
1252 | } |
1253 | |
1254 | static void sfb_swap_slot(struct sfb_sched_data *q) |
1255 | @@ -314,9 +315,9 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, |
1256 | /* If using external classifiers, get result and record it. */ |
1257 | if (!sfb_classify(skb, fl, &ret, &salt)) |
1258 | goto other_drop; |
1259 | - sfbhash = jhash_1word(salt, q->bins[slot].perturbation); |
1260 | + sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation); |
1261 | } else { |
1262 | - sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation); |
1263 | + sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation); |
1264 | } |
1265 | |
1266 | |
1267 | @@ -352,7 +353,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, |
1268 | /* Inelastic flow */ |
1269 | if (q->double_buffering) { |
1270 | sfbhash = skb_get_hash_perturb(skb, |
1271 | - q->bins[slot].perturbation); |
1272 | + &q->bins[slot].perturbation); |
1273 | if (!sfbhash) |
1274 | sfbhash = 1; |
1275 | sfb_skb_cb(skb)->hashes[slot] = sfbhash; |
1276 | diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c |
1277 | index d8c2b6baaad2..a8d82cb7f073 100644 |
1278 | --- a/net/sched/sch_sfq.c |
1279 | +++ b/net/sched/sch_sfq.c |
1280 | @@ -18,7 +18,7 @@ |
1281 | #include <linux/errno.h> |
1282 | #include <linux/init.h> |
1283 | #include <linux/skbuff.h> |
1284 | -#include <linux/jhash.h> |
1285 | +#include <linux/siphash.h> |
1286 | #include <linux/slab.h> |
1287 | #include <linux/vmalloc.h> |
1288 | #include <net/netlink.h> |
1289 | @@ -120,7 +120,7 @@ struct sfq_sched_data { |
1290 | u8 headdrop; |
1291 | u8 maxdepth; /* limit of packets per flow */ |
1292 | |
1293 | - u32 perturbation; |
1294 | + siphash_key_t perturbation; |
1295 | u8 cur_depth; /* depth of longest slot */ |
1296 | u8 flags; |
1297 | unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */ |
1298 | @@ -158,7 +158,7 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index |
1299 | static unsigned int sfq_hash(const struct sfq_sched_data *q, |
1300 | const struct sk_buff *skb) |
1301 | { |
1302 | - return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1); |
1303 | + return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1); |
1304 | } |
1305 | |
1306 | static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, |
1307 | @@ -607,9 +607,11 @@ static void sfq_perturbation(unsigned long arg) |
1308 | struct Qdisc *sch = (struct Qdisc *)arg; |
1309 | struct sfq_sched_data *q = qdisc_priv(sch); |
1310 | spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); |
1311 | + siphash_key_t nkey; |
1312 | |
1313 | + get_random_bytes(&nkey, sizeof(nkey)); |
1314 | spin_lock(root_lock); |
1315 | - q->perturbation = prandom_u32(); |
1316 | + q->perturbation = nkey; |
1317 | if (!q->filter_list && q->tail) |
1318 | sfq_rehash(sch); |
1319 | spin_unlock(root_lock); |
1320 | @@ -681,7 +683,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) |
1321 | del_timer(&q->perturb_timer); |
1322 | if (q->perturb_period) { |
1323 | mod_timer(&q->perturb_timer, jiffies + q->perturb_period); |
1324 | - q->perturbation = prandom_u32(); |
1325 | + get_random_bytes(&q->perturbation, sizeof(q->perturbation)); |
1326 | } |
1327 | sch_tree_unlock(sch); |
1328 | kfree(p); |
1329 | @@ -737,7 +739,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt) |
1330 | q->quantum = psched_mtu(qdisc_dev(sch)); |
1331 | q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum); |
1332 | q->perturb_period = 0; |
1333 | - q->perturbation = prandom_u32(); |
1334 | + get_random_bytes(&q->perturbation, sizeof(q->perturbation)); |
1335 | |
1336 | if (opt) { |
1337 | int err = sfq_change(sch, opt); |
1338 | diff --git a/net/sctp/socket.c b/net/sctp/socket.c |
1339 | index c952abf22535..21ec92011585 100644 |
1340 | --- a/net/sctp/socket.c |
1341 | +++ b/net/sctp/socket.c |
1342 | @@ -7734,7 +7734,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, |
1343 | newinet->inet_rcv_saddr = inet->inet_rcv_saddr; |
1344 | newinet->inet_dport = htons(asoc->peer.port); |
1345 | newinet->pmtudisc = inet->pmtudisc; |
1346 | - newinet->inet_id = asoc->next_tsn ^ jiffies; |
1347 | + newinet->inet_id = prandom_u32(); |
1348 | |
1349 | newinet->uc_ttl = inet->uc_ttl; |
1350 | newinet->mc_loop = 1; |
1351 | diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c |
1352 | index c03c9da076c2..28eb55bc4663 100644 |
1353 | --- a/sound/soc/codecs/wm_adsp.c |
1354 | +++ b/sound/soc/codecs/wm_adsp.c |
1355 | @@ -948,8 +948,7 @@ static unsigned int wmfw_convert_flags(unsigned int in, unsigned int len) |
1356 | } |
1357 | |
1358 | if (in) { |
1359 | - if (in & WMFW_CTL_FLAG_READABLE) |
1360 | - out |= rd; |
1361 | + out |= rd; |
1362 | if (in & WMFW_CTL_FLAG_WRITEABLE) |
1363 | out |= wr; |
1364 | if (in & WMFW_CTL_FLAG_VOLATILE) |
1365 | diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c |
1366 | index 08bfee447a36..94b6f9c7dd6b 100644 |
1367 | --- a/sound/soc/rockchip/rockchip_i2s.c |
1368 | +++ b/sound/soc/rockchip/rockchip_i2s.c |
1369 | @@ -649,7 +649,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev) |
1370 | ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0); |
1371 | if (ret) { |
1372 | dev_err(&pdev->dev, "Could not register PCM\n"); |
1373 | - return ret; |
1374 | + goto err_suspend; |
1375 | } |
1376 | |
1377 | return 0; |
1378 | diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c |
1379 | index d426dcb18ce9..496a4ca11667 100644 |
1380 | --- a/tools/perf/builtin-kmem.c |
1381 | +++ b/tools/perf/builtin-kmem.c |
1382 | @@ -674,6 +674,7 @@ static char *compact_gfp_flags(char *gfp_flags) |
1383 | new = realloc(new_flags, len + strlen(cpt) + 2); |
1384 | if (new == NULL) { |
1385 | free(new_flags); |
1386 | + free(orig_flags); |
1387 | return NULL; |
1388 | } |
1389 | |
1390 | diff --git a/tools/testing/selftests/net/reuseport_dualstack.c b/tools/testing/selftests/net/reuseport_dualstack.c |
1391 | index 90958aaaafb9..2737d6a595f4 100644 |
1392 | --- a/tools/testing/selftests/net/reuseport_dualstack.c |
1393 | +++ b/tools/testing/selftests/net/reuseport_dualstack.c |
1394 | @@ -128,7 +128,7 @@ static void test(int *rcv_fds, int count, int proto) |
1395 | { |
1396 | struct epoll_event ev; |
1397 | int epfd, i, test_fd; |
1398 | - uint16_t test_family; |
1399 | + int test_family; |
1400 | socklen_t len; |
1401 | |
1402 | epfd = epoll_create(1); |
1403 | @@ -145,6 +145,7 @@ static void test(int *rcv_fds, int count, int proto) |
1404 | send_from_v4(proto); |
1405 | |
1406 | test_fd = receive_once(epfd, proto); |
1407 | + len = sizeof(test_family); |
1408 | if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len)) |
1409 | error(1, errno, "failed to read socket domain"); |
1410 | if (test_family != AF_INET) |