Contents of /trunk/kernel26-alx/patches-3.10/0143-3.10.44-all-fixes.patch
Parent Directory | Revision Log
Revision 2672 -
(show annotations)
(download)
Tue Jul 21 16:46:35 2015 UTC (9 years, 2 months ago) by niro
File size: 26517 byte(s)
-3.10.84-alx-r1
1 | diff --git a/Makefile b/Makefile |
2 | index 9cf513828341..e55476c4aef0 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 3 |
7 | PATCHLEVEL = 10 |
8 | -SUBLEVEL = 43 |
9 | +SUBLEVEL = 44 |
10 | EXTRAVERSION = |
11 | NAME = TOSSUG Baby Fish |
12 | |
13 | diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts |
14 | index 76db557adbe7..f97550420fcc 100644 |
15 | --- a/arch/arm/boot/dts/armada-xp-gp.dts |
16 | +++ b/arch/arm/boot/dts/armada-xp-gp.dts |
17 | @@ -124,7 +124,7 @@ |
18 | /* Device Bus parameters are required */ |
19 | |
20 | /* Read parameters */ |
21 | - devbus,bus-width = <8>; |
22 | + devbus,bus-width = <16>; |
23 | devbus,turn-off-ps = <60000>; |
24 | devbus,badr-skew-ps = <0>; |
25 | devbus,acc-first-ps = <124000>; |
26 | diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts |
27 | index fdea75c73411..9746d0e7fcb4 100644 |
28 | --- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts |
29 | +++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts |
30 | @@ -152,7 +152,7 @@ |
31 | /* Device Bus parameters are required */ |
32 | |
33 | /* Read parameters */ |
34 | - devbus,bus-width = <8>; |
35 | + devbus,bus-width = <16>; |
36 | devbus,turn-off-ps = <60000>; |
37 | devbus,badr-skew-ps = <0>; |
38 | devbus,acc-first-ps = <124000>; |
39 | diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c |
40 | index 4942058402a4..b0d33d9533aa 100644 |
41 | --- a/drivers/ata/ahci.c |
42 | +++ b/drivers/ata/ahci.c |
43 | @@ -444,10 +444,14 @@ static const struct pci_device_id ahci_pci_tbl[] = { |
44 | .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ |
45 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192), |
46 | .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */ |
47 | + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0), |
48 | + .driver_data = board_ahci_yes_fbs }, |
49 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3), |
50 | .driver_data = board_ahci_yes_fbs }, |
51 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230), |
52 | .driver_data = board_ahci_yes_fbs }, |
53 | + { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), |
54 | + .driver_data = board_ahci_yes_fbs }, |
55 | |
56 | /* Promise */ |
57 | { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ |
58 | diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c |
59 | index bae20f8bb034..144999918022 100644 |
60 | --- a/drivers/infiniband/ulp/isert/ib_isert.c |
61 | +++ b/drivers/infiniband/ulp/isert/ib_isert.c |
62 | @@ -382,6 +382,14 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) |
63 | struct ib_device *ib_dev = cma_id->device; |
64 | int ret = 0; |
65 | |
66 | + spin_lock_bh(&np->np_thread_lock); |
67 | + if (!np->enabled) { |
68 | + spin_unlock_bh(&np->np_thread_lock); |
69 | + pr_debug("iscsi_np is not enabled, reject connect request\n"); |
70 | + return rdma_reject(cma_id, NULL, 0); |
71 | + } |
72 | + spin_unlock_bh(&np->np_thread_lock); |
73 | + |
74 | pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n", |
75 | cma_id, cma_id->context); |
76 | |
77 | diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c |
78 | index 1bf3f8b5ce3a..06311c5ada36 100644 |
79 | --- a/drivers/misc/mei/hw-me.c |
80 | +++ b/drivers/misc/mei/hw-me.c |
81 | @@ -183,6 +183,7 @@ static void mei_me_hw_reset(struct mei_device *dev, bool intr_enable) |
82 | else |
83 | hcsr &= ~H_IE; |
84 | |
85 | + dev->recvd_hw_ready = false; |
86 | mei_me_reg_write(hw, H_CSR, hcsr); |
87 | |
88 | if (dev->dev_state == MEI_DEV_POWER_DOWN) |
89 | @@ -233,10 +234,7 @@ static bool mei_me_hw_is_ready(struct mei_device *dev) |
90 | static int mei_me_hw_ready_wait(struct mei_device *dev) |
91 | { |
92 | int err; |
93 | - if (mei_me_hw_is_ready(dev)) |
94 | - return 0; |
95 | |
96 | - dev->recvd_hw_ready = false; |
97 | mutex_unlock(&dev->device_lock); |
98 | err = wait_event_interruptible_timeout(dev->wait_hw_ready, |
99 | dev->recvd_hw_ready, |
100 | diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c |
101 | index 1e6c594d6d04..58c18d3a4880 100644 |
102 | --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c |
103 | +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c |
104 | @@ -55,7 +55,6 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv, |
105 | |
106 | cq->ring = ring; |
107 | cq->is_tx = mode; |
108 | - spin_lock_init(&cq->lock); |
109 | |
110 | err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres, |
111 | cq->buf_size, 2 * PAGE_SIZE); |
112 | diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c |
113 | index 89c47ea84b50..063f3f4d4867 100644 |
114 | --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c |
115 | +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c |
116 | @@ -1190,15 +1190,11 @@ static void mlx4_en_netpoll(struct net_device *dev) |
117 | { |
118 | struct mlx4_en_priv *priv = netdev_priv(dev); |
119 | struct mlx4_en_cq *cq; |
120 | - unsigned long flags; |
121 | int i; |
122 | |
123 | for (i = 0; i < priv->rx_ring_num; i++) { |
124 | cq = &priv->rx_cq[i]; |
125 | - spin_lock_irqsave(&cq->lock, flags); |
126 | - napi_synchronize(&cq->napi); |
127 | - mlx4_en_process_rx_cq(dev, cq, 0); |
128 | - spin_unlock_irqrestore(&cq->lock, flags); |
129 | + napi_schedule(&cq->napi); |
130 | } |
131 | } |
132 | #endif |
133 | diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h |
134 | index b1d7657b2bf5..628e1f9355a8 100644 |
135 | --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h |
136 | +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h |
137 | @@ -299,7 +299,6 @@ struct mlx4_en_cq { |
138 | struct mlx4_cq mcq; |
139 | struct mlx4_hwq_resources wqres; |
140 | int ring; |
141 | - spinlock_t lock; |
142 | struct net_device *dev; |
143 | struct napi_struct napi; |
144 | int size; |
145 | diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h |
146 | index 684cc343cf09..b52121358385 100644 |
147 | --- a/drivers/scsi/megaraid/megaraid_sas.h |
148 | +++ b/drivers/scsi/megaraid/megaraid_sas.h |
149 | @@ -1295,7 +1295,6 @@ struct megasas_instance { |
150 | u32 *reply_queue; |
151 | dma_addr_t reply_queue_h; |
152 | |
153 | - unsigned long base_addr; |
154 | struct megasas_register_set __iomem *reg_set; |
155 | |
156 | struct megasas_pd_list pd_list[MEGASAS_MAX_PD]; |
157 | diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c |
158 | index b3e5c1787876..4956c99ed90e 100644 |
159 | --- a/drivers/scsi/megaraid/megaraid_sas_base.c |
160 | +++ b/drivers/scsi/megaraid/megaraid_sas_base.c |
161 | @@ -3461,6 +3461,7 @@ static int megasas_init_fw(struct megasas_instance *instance) |
162 | u32 max_sectors_1; |
163 | u32 max_sectors_2; |
164 | u32 tmp_sectors, msix_enable; |
165 | + resource_size_t base_addr; |
166 | struct megasas_register_set __iomem *reg_set; |
167 | struct megasas_ctrl_info *ctrl_info; |
168 | unsigned long bar_list; |
169 | @@ -3469,14 +3470,14 @@ static int megasas_init_fw(struct megasas_instance *instance) |
170 | /* Find first memory bar */ |
171 | bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); |
172 | instance->bar = find_first_bit(&bar_list, sizeof(unsigned long)); |
173 | - instance->base_addr = pci_resource_start(instance->pdev, instance->bar); |
174 | if (pci_request_selected_regions(instance->pdev, instance->bar, |
175 | "megasas: LSI")) { |
176 | printk(KERN_DEBUG "megasas: IO memory region busy!\n"); |
177 | return -EBUSY; |
178 | } |
179 | |
180 | - instance->reg_set = ioremap_nocache(instance->base_addr, 8192); |
181 | + base_addr = pci_resource_start(instance->pdev, instance->bar); |
182 | + instance->reg_set = ioremap_nocache(base_addr, 8192); |
183 | |
184 | if (!instance->reg_set) { |
185 | printk(KERN_DEBUG "megasas: Failed to map IO mem\n"); |
186 | diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c |
187 | index 58c479d13b57..68dbd88babbd 100644 |
188 | --- a/drivers/target/iscsi/iscsi_target.c |
189 | +++ b/drivers/target/iscsi/iscsi_target.c |
190 | @@ -460,6 +460,7 @@ int iscsit_del_np(struct iscsi_np *np) |
191 | spin_lock_bh(&np->np_thread_lock); |
192 | np->np_exports--; |
193 | if (np->np_exports) { |
194 | + np->enabled = true; |
195 | spin_unlock_bh(&np->np_thread_lock); |
196 | return 0; |
197 | } |
198 | diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h |
199 | index 8907dcdc0db9..e117870eb445 100644 |
200 | --- a/drivers/target/iscsi/iscsi_target_core.h |
201 | +++ b/drivers/target/iscsi/iscsi_target_core.h |
202 | @@ -760,6 +760,7 @@ struct iscsi_np { |
203 | int np_ip_proto; |
204 | int np_sock_type; |
205 | enum np_thread_state_table np_thread_state; |
206 | + bool enabled; |
207 | enum iscsi_timer_flags_table np_login_timer_flags; |
208 | u32 np_exports; |
209 | enum np_flags_table np_flags; |
210 | diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c |
211 | index bc788c52b6cc..0d6c3dd25679 100644 |
212 | --- a/drivers/target/iscsi/iscsi_target_login.c |
213 | +++ b/drivers/target/iscsi/iscsi_target_login.c |
214 | @@ -250,6 +250,28 @@ static void iscsi_login_set_conn_values( |
215 | mutex_unlock(&auth_id_lock); |
216 | } |
217 | |
218 | +static __printf(2, 3) int iscsi_change_param_sprintf( |
219 | + struct iscsi_conn *conn, |
220 | + const char *fmt, ...) |
221 | +{ |
222 | + va_list args; |
223 | + unsigned char buf[64]; |
224 | + |
225 | + memset(buf, 0, sizeof buf); |
226 | + |
227 | + va_start(args, fmt); |
228 | + vsnprintf(buf, sizeof buf, fmt, args); |
229 | + va_end(args); |
230 | + |
231 | + if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { |
232 | + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, |
233 | + ISCSI_LOGIN_STATUS_NO_RESOURCES); |
234 | + return -1; |
235 | + } |
236 | + |
237 | + return 0; |
238 | +} |
239 | + |
240 | /* |
241 | * This is the leading connection of a new session, |
242 | * or session reinstatement. |
243 | @@ -339,7 +361,6 @@ static int iscsi_login_zero_tsih_s2( |
244 | { |
245 | struct iscsi_node_attrib *na; |
246 | struct iscsi_session *sess = conn->sess; |
247 | - unsigned char buf[32]; |
248 | bool iser = false; |
249 | |
250 | sess->tpg = conn->tpg; |
251 | @@ -380,26 +401,16 @@ static int iscsi_login_zero_tsih_s2( |
252 | * |
253 | * In our case, we have already located the struct iscsi_tiqn at this point. |
254 | */ |
255 | - memset(buf, 0, 32); |
256 | - sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt); |
257 | - if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { |
258 | - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, |
259 | - ISCSI_LOGIN_STATUS_NO_RESOURCES); |
260 | + if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt)) |
261 | return -1; |
262 | - } |
263 | |
264 | /* |
265 | * Workaround for Initiators that have broken connection recovery logic. |
266 | * |
267 | * "We would really like to get rid of this." Linux-iSCSI.org team |
268 | */ |
269 | - memset(buf, 0, 32); |
270 | - sprintf(buf, "ErrorRecoveryLevel=%d", na->default_erl); |
271 | - if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { |
272 | - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, |
273 | - ISCSI_LOGIN_STATUS_NO_RESOURCES); |
274 | + if (iscsi_change_param_sprintf(conn, "ErrorRecoveryLevel=%d", na->default_erl)) |
275 | return -1; |
276 | - } |
277 | |
278 | if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0) |
279 | return -1; |
280 | @@ -411,12 +422,9 @@ static int iscsi_login_zero_tsih_s2( |
281 | unsigned long mrdsl, off; |
282 | int rc; |
283 | |
284 | - sprintf(buf, "RDMAExtensions=Yes"); |
285 | - if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { |
286 | - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, |
287 | - ISCSI_LOGIN_STATUS_NO_RESOURCES); |
288 | + if (iscsi_change_param_sprintf(conn, "RDMAExtensions=Yes")) |
289 | return -1; |
290 | - } |
291 | + |
292 | /* |
293 | * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for |
294 | * Immediate Data + Unsolicitied Data-OUT if necessary.. |
295 | @@ -446,12 +454,8 @@ static int iscsi_login_zero_tsih_s2( |
296 | pr_warn("Aligning ISER MaxRecvDataSegmentLength: %lu down" |
297 | " to PAGE_SIZE\n", mrdsl); |
298 | |
299 | - sprintf(buf, "MaxRecvDataSegmentLength=%lu\n", mrdsl); |
300 | - if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { |
301 | - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, |
302 | - ISCSI_LOGIN_STATUS_NO_RESOURCES); |
303 | + if (iscsi_change_param_sprintf(conn, "MaxRecvDataSegmentLength=%lu\n", mrdsl)) |
304 | return -1; |
305 | - } |
306 | } |
307 | |
308 | return 0; |
309 | @@ -984,6 +988,7 @@ int iscsi_target_setup_login_socket( |
310 | } |
311 | |
312 | np->np_transport = t; |
313 | + np->enabled = true; |
314 | return 0; |
315 | } |
316 | |
317 | diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c |
318 | index f31b4c5cdf3f..75a4e83842c2 100644 |
319 | --- a/drivers/target/iscsi/iscsi_target_tpg.c |
320 | +++ b/drivers/target/iscsi/iscsi_target_tpg.c |
321 | @@ -175,13 +175,16 @@ void iscsit_put_tpg(struct iscsi_portal_group *tpg) |
322 | |
323 | static void iscsit_clear_tpg_np_login_thread( |
324 | struct iscsi_tpg_np *tpg_np, |
325 | - struct iscsi_portal_group *tpg) |
326 | + struct iscsi_portal_group *tpg, |
327 | + bool shutdown) |
328 | { |
329 | if (!tpg_np->tpg_np) { |
330 | pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n"); |
331 | return; |
332 | } |
333 | |
334 | + if (shutdown) |
335 | + tpg_np->tpg_np->enabled = false; |
336 | iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg); |
337 | } |
338 | |
339 | @@ -197,7 +200,7 @@ void iscsit_clear_tpg_np_login_threads( |
340 | continue; |
341 | } |
342 | spin_unlock(&tpg->tpg_np_lock); |
343 | - iscsit_clear_tpg_np_login_thread(tpg_np, tpg); |
344 | + iscsit_clear_tpg_np_login_thread(tpg_np, tpg, false); |
345 | spin_lock(&tpg->tpg_np_lock); |
346 | } |
347 | spin_unlock(&tpg->tpg_np_lock); |
348 | @@ -520,7 +523,7 @@ static int iscsit_tpg_release_np( |
349 | struct iscsi_portal_group *tpg, |
350 | struct iscsi_np *np) |
351 | { |
352 | - iscsit_clear_tpg_np_login_thread(tpg_np, tpg); |
353 | + iscsit_clear_tpg_np_login_thread(tpg_np, tpg, true); |
354 | |
355 | pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n", |
356 | tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt, |
357 | diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c |
358 | index f608fbc14a27..df58a67f81e0 100644 |
359 | --- a/drivers/target/target_core_alua.c |
360 | +++ b/drivers/target/target_core_alua.c |
361 | @@ -409,7 +409,16 @@ static inline int core_alua_state_standby( |
362 | case REPORT_LUNS: |
363 | case RECEIVE_DIAGNOSTIC: |
364 | case SEND_DIAGNOSTIC: |
365 | + case READ_CAPACITY: |
366 | return 0; |
367 | + case SERVICE_ACTION_IN: |
368 | + switch (cdb[1] & 0x1f) { |
369 | + case SAI_READ_CAPACITY_16: |
370 | + return 0; |
371 | + default: |
372 | + *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; |
373 | + return 1; |
374 | + } |
375 | case MAINTENANCE_IN: |
376 | switch (cdb[1] & 0x1f) { |
377 | case MI_REPORT_TARGET_PGS: |
378 | diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c |
379 | index 4a8bd36d3958..8cda4080b597 100644 |
380 | --- a/drivers/target/target_core_configfs.c |
381 | +++ b/drivers/target/target_core_configfs.c |
382 | @@ -2034,6 +2034,11 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state( |
383 | " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); |
384 | return -EINVAL; |
385 | } |
386 | + if (!(dev->dev_flags & DF_CONFIGURED)) { |
387 | + pr_err("Unable to set alua_access_state while device is" |
388 | + " not configured\n"); |
389 | + return -ENODEV; |
390 | + } |
391 | |
392 | ret = strict_strtoul(page, 0, &tmp); |
393 | if (ret < 0) { |
394 | diff --git a/fs/attr.c b/fs/attr.c |
395 | index 8dd5825ec708..66fa6251c398 100644 |
396 | --- a/fs/attr.c |
397 | +++ b/fs/attr.c |
398 | @@ -50,14 +50,14 @@ int inode_change_ok(const struct inode *inode, struct iattr *attr) |
399 | if ((ia_valid & ATTR_UID) && |
400 | (!uid_eq(current_fsuid(), inode->i_uid) || |
401 | !uid_eq(attr->ia_uid, inode->i_uid)) && |
402 | - !inode_capable(inode, CAP_CHOWN)) |
403 | + !capable_wrt_inode_uidgid(inode, CAP_CHOWN)) |
404 | return -EPERM; |
405 | |
406 | /* Make sure caller can chgrp. */ |
407 | if ((ia_valid & ATTR_GID) && |
408 | (!uid_eq(current_fsuid(), inode->i_uid) || |
409 | (!in_group_p(attr->ia_gid) && !gid_eq(attr->ia_gid, inode->i_gid))) && |
410 | - !inode_capable(inode, CAP_CHOWN)) |
411 | + !capable_wrt_inode_uidgid(inode, CAP_CHOWN)) |
412 | return -EPERM; |
413 | |
414 | /* Make sure a caller can chmod. */ |
415 | @@ -67,7 +67,7 @@ int inode_change_ok(const struct inode *inode, struct iattr *attr) |
416 | /* Also check the setgid bit! */ |
417 | if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid : |
418 | inode->i_gid) && |
419 | - !inode_capable(inode, CAP_FSETID)) |
420 | + !capable_wrt_inode_uidgid(inode, CAP_FSETID)) |
421 | attr->ia_mode &= ~S_ISGID; |
422 | } |
423 | |
424 | @@ -160,7 +160,7 @@ void setattr_copy(struct inode *inode, const struct iattr *attr) |
425 | umode_t mode = attr->ia_mode; |
426 | |
427 | if (!in_group_p(inode->i_gid) && |
428 | - !inode_capable(inode, CAP_FSETID)) |
429 | + !capable_wrt_inode_uidgid(inode, CAP_FSETID)) |
430 | mode &= ~S_ISGID; |
431 | inode->i_mode = mode; |
432 | } |
433 | diff --git a/fs/inode.c b/fs/inode.c |
434 | index 00d5fc3b86e1..1b300a06b8be 100644 |
435 | --- a/fs/inode.c |
436 | +++ b/fs/inode.c |
437 | @@ -1837,14 +1837,18 @@ EXPORT_SYMBOL(inode_init_owner); |
438 | * inode_owner_or_capable - check current task permissions to inode |
439 | * @inode: inode being checked |
440 | * |
441 | - * Return true if current either has CAP_FOWNER to the inode, or |
442 | - * owns the file. |
443 | + * Return true if current either has CAP_FOWNER in a namespace with the |
444 | + * inode owner uid mapped, or owns the file. |
445 | */ |
446 | bool inode_owner_or_capable(const struct inode *inode) |
447 | { |
448 | + struct user_namespace *ns; |
449 | + |
450 | if (uid_eq(current_fsuid(), inode->i_uid)) |
451 | return true; |
452 | - if (inode_capable(inode, CAP_FOWNER)) |
453 | + |
454 | + ns = current_user_ns(); |
455 | + if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid)) |
456 | return true; |
457 | return false; |
458 | } |
459 | diff --git a/fs/namei.c b/fs/namei.c |
460 | index 1211ee5a1cb3..6ac16a37ded2 100644 |
461 | --- a/fs/namei.c |
462 | +++ b/fs/namei.c |
463 | @@ -321,10 +321,11 @@ int generic_permission(struct inode *inode, int mask) |
464 | |
465 | if (S_ISDIR(inode->i_mode)) { |
466 | /* DACs are overridable for directories */ |
467 | - if (inode_capable(inode, CAP_DAC_OVERRIDE)) |
468 | + if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE)) |
469 | return 0; |
470 | if (!(mask & MAY_WRITE)) |
471 | - if (inode_capable(inode, CAP_DAC_READ_SEARCH)) |
472 | + if (capable_wrt_inode_uidgid(inode, |
473 | + CAP_DAC_READ_SEARCH)) |
474 | return 0; |
475 | return -EACCES; |
476 | } |
477 | @@ -334,7 +335,7 @@ int generic_permission(struct inode *inode, int mask) |
478 | * at least one exec bit set. |
479 | */ |
480 | if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO)) |
481 | - if (inode_capable(inode, CAP_DAC_OVERRIDE)) |
482 | + if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE)) |
483 | return 0; |
484 | |
485 | /* |
486 | @@ -342,7 +343,7 @@ int generic_permission(struct inode *inode, int mask) |
487 | */ |
488 | mask &= MAY_READ | MAY_WRITE | MAY_EXEC; |
489 | if (mask == MAY_READ) |
490 | - if (inode_capable(inode, CAP_DAC_READ_SEARCH)) |
491 | + if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH)) |
492 | return 0; |
493 | |
494 | return -EACCES; |
495 | @@ -2199,7 +2200,7 @@ static inline int check_sticky(struct inode *dir, struct inode *inode) |
496 | return 0; |
497 | if (uid_eq(dir->i_uid, fsuid)) |
498 | return 0; |
499 | - return !inode_capable(inode, CAP_FOWNER); |
500 | + return !capable_wrt_inode_uidgid(inode, CAP_FOWNER); |
501 | } |
502 | |
503 | /* |
504 | diff --git a/include/linux/capability.h b/include/linux/capability.h |
505 | index d9a4f7f40f32..15f90929fb51 100644 |
506 | --- a/include/linux/capability.h |
507 | +++ b/include/linux/capability.h |
508 | @@ -211,7 +211,7 @@ extern bool has_ns_capability_noaudit(struct task_struct *t, |
509 | extern bool capable(int cap); |
510 | extern bool ns_capable(struct user_namespace *ns, int cap); |
511 | extern bool nsown_capable(int cap); |
512 | -extern bool inode_capable(const struct inode *inode, int cap); |
513 | +extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); |
514 | extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); |
515 | |
516 | /* audit system wants to get cap info from files as well */ |
517 | diff --git a/kernel/auditsc.c b/kernel/auditsc.c |
518 | index 9845cb32b60a..03a3af8538bd 100644 |
519 | --- a/kernel/auditsc.c |
520 | +++ b/kernel/auditsc.c |
521 | @@ -733,6 +733,22 @@ static enum audit_state audit_filter_task(struct task_struct *tsk, char **key) |
522 | return AUDIT_BUILD_CONTEXT; |
523 | } |
524 | |
525 | +static int audit_in_mask(const struct audit_krule *rule, unsigned long val) |
526 | +{ |
527 | + int word, bit; |
528 | + |
529 | + if (val > 0xffffffff) |
530 | + return false; |
531 | + |
532 | + word = AUDIT_WORD(val); |
533 | + if (word >= AUDIT_BITMASK_SIZE) |
534 | + return false; |
535 | + |
536 | + bit = AUDIT_BIT(val); |
537 | + |
538 | + return rule->mask[word] & bit; |
539 | +} |
540 | + |
541 | /* At syscall entry and exit time, this filter is called if the |
542 | * audit_state is not low enough that auditing cannot take place, but is |
543 | * also not high enough that we already know we have to write an audit |
544 | @@ -750,11 +766,8 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk, |
545 | |
546 | rcu_read_lock(); |
547 | if (!list_empty(list)) { |
548 | - int word = AUDIT_WORD(ctx->major); |
549 | - int bit = AUDIT_BIT(ctx->major); |
550 | - |
551 | list_for_each_entry_rcu(e, list, list) { |
552 | - if ((e->rule.mask[word] & bit) == bit && |
553 | + if (audit_in_mask(&e->rule, ctx->major) && |
554 | audit_filter_rules(tsk, &e->rule, ctx, NULL, |
555 | &state, false)) { |
556 | rcu_read_unlock(); |
557 | @@ -774,20 +787,16 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk, |
558 | static int audit_filter_inode_name(struct task_struct *tsk, |
559 | struct audit_names *n, |
560 | struct audit_context *ctx) { |
561 | - int word, bit; |
562 | int h = audit_hash_ino((u32)n->ino); |
563 | struct list_head *list = &audit_inode_hash[h]; |
564 | struct audit_entry *e; |
565 | enum audit_state state; |
566 | |
567 | - word = AUDIT_WORD(ctx->major); |
568 | - bit = AUDIT_BIT(ctx->major); |
569 | - |
570 | if (list_empty(list)) |
571 | return 0; |
572 | |
573 | list_for_each_entry_rcu(e, list, list) { |
574 | - if ((e->rule.mask[word] & bit) == bit && |
575 | + if (audit_in_mask(&e->rule, ctx->major) && |
576 | audit_filter_rules(tsk, &e->rule, ctx, n, &state, false)) { |
577 | ctx->current_state = state; |
578 | return 1; |
579 | diff --git a/kernel/capability.c b/kernel/capability.c |
580 | index f6c2ce5701e1..d52eecc0942b 100644 |
581 | --- a/kernel/capability.c |
582 | +++ b/kernel/capability.c |
583 | @@ -445,22 +445,18 @@ bool nsown_capable(int cap) |
584 | } |
585 | |
586 | /** |
587 | - * inode_capable - Check superior capability over inode |
588 | + * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped |
589 | * @inode: The inode in question |
590 | * @cap: The capability in question |
591 | * |
592 | - * Return true if the current task has the given superior capability |
593 | - * targeted at it's own user namespace and that the given inode is owned |
594 | - * by the current user namespace or a child namespace. |
595 | - * |
596 | - * Currently we check to see if an inode is owned by the current |
597 | - * user namespace by seeing if the inode's owner maps into the |
598 | - * current user namespace. |
599 | - * |
600 | + * Return true if the current task has the given capability targeted at |
601 | + * its own user namespace and that the given inode's uid and gid are |
602 | + * mapped into the current user namespace. |
603 | */ |
604 | -bool inode_capable(const struct inode *inode, int cap) |
605 | +bool capable_wrt_inode_uidgid(const struct inode *inode, int cap) |
606 | { |
607 | struct user_namespace *ns = current_user_ns(); |
608 | |
609 | - return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid); |
610 | + return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid) && |
611 | + kgid_has_mapping(ns, inode->i_gid); |
612 | } |
613 | diff --git a/mm/compaction.c b/mm/compaction.c |
614 | index 18a90b4d0bfc..fb797a32362f 100644 |
615 | --- a/mm/compaction.c |
616 | +++ b/mm/compaction.c |
617 | @@ -657,17 +657,21 @@ static void isolate_freepages(struct zone *zone, |
618 | struct compact_control *cc) |
619 | { |
620 | struct page *page; |
621 | - unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn; |
622 | + unsigned long high_pfn, low_pfn, pfn, z_end_pfn; |
623 | int nr_freepages = cc->nr_freepages; |
624 | struct list_head *freelist = &cc->freepages; |
625 | |
626 | /* |
627 | * Initialise the free scanner. The starting point is where we last |
628 | - * scanned from (or the end of the zone if starting). The low point |
629 | - * is the end of the pageblock the migration scanner is using. |
630 | + * successfully isolated from, zone-cached value, or the end of the |
631 | + * zone when isolating for the first time. We need this aligned to |
632 | + * the pageblock boundary, because we do pfn -= pageblock_nr_pages |
633 | + * in the for loop. |
634 | + * The low boundary is the end of the pageblock the migration scanner |
635 | + * is using. |
636 | */ |
637 | - pfn = cc->free_pfn; |
638 | - low_pfn = cc->migrate_pfn + pageblock_nr_pages; |
639 | + pfn = cc->free_pfn & ~(pageblock_nr_pages-1); |
640 | + low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages); |
641 | |
642 | /* |
643 | * Take care that if the migration scanner is at the end of the zone |
644 | @@ -683,9 +687,10 @@ static void isolate_freepages(struct zone *zone, |
645 | * pages on cc->migratepages. We stop searching if the migrate |
646 | * and free page scanners meet or enough free pages are isolated. |
647 | */ |
648 | - for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages; |
649 | + for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages; |
650 | pfn -= pageblock_nr_pages) { |
651 | unsigned long isolated; |
652 | + unsigned long end_pfn; |
653 | |
654 | if (!pfn_valid(pfn)) |
655 | continue; |
656 | @@ -713,13 +718,10 @@ static void isolate_freepages(struct zone *zone, |
657 | isolated = 0; |
658 | |
659 | /* |
660 | - * As pfn may not start aligned, pfn+pageblock_nr_page |
661 | - * may cross a MAX_ORDER_NR_PAGES boundary and miss |
662 | - * a pfn_valid check. Ensure isolate_freepages_block() |
663 | - * only scans within a pageblock |
664 | + * Take care when isolating in last pageblock of a zone which |
665 | + * ends in the middle of a pageblock. |
666 | */ |
667 | - end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); |
668 | - end_pfn = min(end_pfn, z_end_pfn); |
669 | + end_pfn = min(pfn + pageblock_nr_pages, z_end_pfn); |
670 | isolated = isolate_freepages_block(cc, pfn, end_pfn, |
671 | freelist, false); |
672 | nr_freepages += isolated; |
673 | @@ -738,7 +740,14 @@ static void isolate_freepages(struct zone *zone, |
674 | /* split_free_page does not map the pages */ |
675 | map_pages(freelist); |
676 | |
677 | - cc->free_pfn = high_pfn; |
678 | + /* |
679 | + * If we crossed the migrate scanner, we want to keep it that way |
680 | + * so that compact_finished() may detect this |
681 | + */ |
682 | + if (pfn < low_pfn) |
683 | + cc->free_pfn = max(pfn, zone->zone_start_pfn); |
684 | + else |
685 | + cc->free_pfn = high_pfn; |
686 | cc->nr_freepages = nr_freepages; |
687 | } |
688 | |
689 | @@ -947,6 +956,14 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) |
690 | } |
691 | |
692 | /* |
693 | + * Clear pageblock skip if there were failures recently and compaction |
694 | + * is about to be retried after being deferred. kswapd does not do |
695 | + * this reset as it'll reset the cached information when going to sleep. |
696 | + */ |
697 | + if (compaction_restarting(zone, cc->order) && !current_is_kswapd()) |
698 | + __reset_isolation_suitable(zone); |
699 | + |
700 | + /* |
701 | * Setup to move all movable pages to the end of the zone. Used cached |
702 | * information on where the scanners should start but check that it |
703 | * is initialised by ensuring the values are within zone boundaries. |
704 | @@ -962,14 +979,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) |
705 | zone->compact_cached_migrate_pfn = cc->migrate_pfn; |
706 | } |
707 | |
708 | - /* |
709 | - * Clear pageblock skip if there were failures recently and compaction |
710 | - * is about to be retried after being deferred. kswapd does not do |
711 | - * this reset as it'll reset the cached information when going to sleep. |
712 | - */ |
713 | - if (compaction_restarting(zone, cc->order) && !current_is_kswapd()) |
714 | - __reset_isolation_suitable(zone); |
715 | - |
716 | migrate_prep_local(); |
717 | |
718 | while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) { |
719 | @@ -1003,7 +1012,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) |
720 | if (err) { |
721 | putback_movable_pages(&cc->migratepages); |
722 | cc->nr_migratepages = 0; |
723 | - if (err == -ENOMEM) { |
724 | + /* |
725 | + * migrate_pages() may return -ENOMEM when scanners meet |
726 | + * and we want compact_finished() to detect it |
727 | + */ |
728 | + if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) { |
729 | ret = COMPACT_PARTIAL; |
730 | goto out; |
731 | } |
732 | diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c |
733 | index 742815518b0f..4cfb3bd1677c 100644 |
734 | --- a/net/ipv4/netfilter/nf_defrag_ipv4.c |
735 | +++ b/net/ipv4/netfilter/nf_defrag_ipv4.c |
736 | @@ -22,7 +22,6 @@ |
737 | #endif |
738 | #include <net/netfilter/nf_conntrack_zones.h> |
739 | |
740 | -/* Returns new sk_buff, or NULL */ |
741 | static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) |
742 | { |
743 | int err; |
744 | @@ -33,8 +32,10 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) |
745 | err = ip_defrag(skb, user); |
746 | local_bh_enable(); |
747 | |
748 | - if (!err) |
749 | + if (!err) { |
750 | ip_send_check(ip_hdr(skb)); |
751 | + skb->local_df = 1; |
752 | + } |
753 | |
754 | return err; |
755 | } |