Magellan Linux

Contents of /trunk/kernel-alx/patches-3.10/0181-3.10.82-all-fixes.patch



Revision 2669
Tue Jul 21 16:20:24 2015 UTC (8 years, 9 months ago) by niro
File size: 7862 byte(s)
-linux-3.10.82
diff --git a/Makefile b/Makefile
index 6d19e37d36d5..5e3e665a10b7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 10
-SUBLEVEL = 81
+SUBLEVEL = 82
 EXTRAVERSION =
 NAME = TOSSUG Baby Fish
 
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index d1939a9539c0..04aefffb4dd9 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -56,7 +56,7 @@
 
 /* Buffer, its dma address and lock */
 struct buf_data {
-        u8 buf[RN_BUF_SIZE];
+        u8 buf[RN_BUF_SIZE] ____cacheline_aligned;
         dma_addr_t addr;
         struct completion filled;
         u32 hw_desc[DESC_JOB_O_LEN];
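The single change above forces the RNG output buffer onto its own cache line, so DMA into buf can no longer share a line with the bookkeeping fields packed next to it. A minimal userspace sketch of the same layout idea, assuming a 64-byte cache line and a hypothetical buf_data look-alike (the RN_BUF_SIZE value here is arbitrary, not the driver's):

#include <stdalign.h>
#include <stdint.h>
#include <string.h>

#define RN_BUF_SIZE 4096        /* arbitrary size for the sketch */
#define CACHELINE   64          /* assumed cache-line size */

/* Hypothetical analogue of the driver's struct buf_data: the DMA target
 * starts on its own cache line, so cache maintenance on buf cannot touch
 * the fields that follow it. */
struct buf_data {
        alignas(CACHELINE) uint8_t buf[RN_BUF_SIZE];
        uint64_t addr;          /* stands in for dma_addr_t */
        int filled;             /* stands in for struct completion */
};

int main(void)
{
        static struct buf_data bd;

        memset(bd.buf, 0, sizeof(bd.buf));      /* a driver would DMA into bd.buf here */
        return ((uintptr_t)bd.buf % CACHELINE) == 0 ? 0 : 1;
}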
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index f6341e8622ee..7bd2acce9f81 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1487,6 +1487,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
                 return MODE_BANDWIDTH;
         }
 
+        if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 ||
+            (mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) {
+                return MODE_H_ILLEGAL;
+        }
+
         if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
             mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
             mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
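The added test rejects any mode whose horizontal timing values are not multiples of 8, presumably because the CRTC programs horizontal timings in 8-pixel character-clock units. Purely as an illustration, the same check in stand-alone form, with a hypothetical mode_timings struct in place of struct drm_display_mode:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the few drm_display_mode fields used here. */
struct mode_timings {
        int hdisplay, hsync_start, hsync_end, htotal;
};

/* Mirrors the added check: every horizontal value must be a multiple of 8,
 * otherwise the mode is rejected. */
static bool h_timings_ok(const struct mode_timings *m)
{
        return (m->hdisplay % 8) == 0 && (m->hsync_start % 8) == 0 &&
               (m->hsync_end % 8) == 0 && (m->htotal % 8) == 0;
}

int main(void)
{
        struct mode_timings good = { 1280, 1328, 1440, 1688 };
        struct mode_timings bad  = { 1366, 1436, 1579, 1792 };

        printf("1280-wide mode: %s\n", h_timings_ok(&good) ? "ok" : "rejected");
        printf("1366-wide mode: %s\n", h_timings_ok(&bad)  ? "ok" : "rejected");
        return 0;
}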
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 572579f87de4..90861416b9e9 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -263,6 +263,16 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
                 return NULL;
 
         q->hba_index = idx;
+
+        /*
+         * insert barrier for instruction interlock : data from the hardware
+         * must have the valid bit checked before it can be copied and acted
+         * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
+         * instructions allowing action on content before valid bit checked,
+         * add barrier here as well. May not be needed as "content" is a
+         * single 32-bit entity here (vs multi word structure for cq's).
+         */
+        mb();
         return eqe;
 }
 
@@ -368,6 +378,17 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
 
         cqe = q->qe[q->hba_index].cqe;
         q->hba_index = idx;
+
+        /*
+         * insert barrier for instruction interlock : data from the hardware
+         * must have the valid bit checked before it can be copied and acted
+         * upon. Speculative instructions were allowing a bcopy at the start
+         * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
+         * after our return, to copy data before the valid bit check above
+         * was done. As such, some of the copied data was stale. The barrier
+         * ensures the check is before any data is copied.
+         */
+        mb();
         return cqe;
 }
 
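The two comments above spell out the ordering requirement: the queue entry's valid bit must be observed before any of the entry's contents are read, and on a weakly ordered CPU that takes an explicit barrier between the check and the copy. A minimal userspace sketch of that check-then-fence-then-copy pattern; read_entry and the layout of struct entry are invented for the example, and __sync_synchronize() stands in for the kernel's mb():

#include <stdint.h>
#include <string.h>

struct entry {
        uint32_t payload[15];
        uint32_t flags;         /* bit 0: valid, written last by the "hardware" */
};

/* Consumer side of the pattern: test the valid bit first, then fence, then
 * copy the payload, so the copy cannot be satisfied with data that was read
 * before the valid bit became visible. */
static int read_entry(volatile struct entry *e, uint32_t *out)
{
        if (!(e->flags & 1))
                return -1;                      /* not valid yet, try again later */

        __sync_synchronize();                   /* stands in for mb() */

        memcpy(out, (const void *)e->payload, sizeof(e->payload));
        return 0;
}

int main(void)
{
        static struct entry e = { .payload = { 42 }, .flags = 1 };
        uint32_t buf[15];

        return (read_entry(&e, buf) == 0 && buf[0] == 42) ? 0 : 1;
}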
diff --git a/fs/pipe.c b/fs/pipe.c
index 0e0752ef2715..3e7ab278bb0c 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -117,25 +117,27 @@ void pipe_wait(struct pipe_inode_info *pipe)
 }
 
 static int
-pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
-                        int atomic)
+pipe_iov_copy_from_user(void *addr, int *offset, struct iovec *iov,
+                        size_t *remaining, int atomic)
 {
         unsigned long copy;
 
-        while (len > 0) {
+        while (*remaining > 0) {
                 while (!iov->iov_len)
                         iov++;
-                copy = min_t(unsigned long, len, iov->iov_len);
+                copy = min_t(unsigned long, *remaining, iov->iov_len);
 
                 if (atomic) {
-                        if (__copy_from_user_inatomic(to, iov->iov_base, copy))
+                        if (__copy_from_user_inatomic(addr + *offset,
+                                        iov->iov_base, copy))
                                 return -EFAULT;
                 } else {
-                        if (copy_from_user(to, iov->iov_base, copy))
+                        if (copy_from_user(addr + *offset,
+                                        iov->iov_base, copy))
                                 return -EFAULT;
                 }
-                to += copy;
-                len -= copy;
+                *offset += copy;
+                *remaining -= copy;
                 iov->iov_base += copy;
                 iov->iov_len -= copy;
         }
@@ -143,25 +145,27 @@ pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
 }
 
 static int
-pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
-                      int atomic)
+pipe_iov_copy_to_user(struct iovec *iov, void *addr, int *offset,
+                      size_t *remaining, int atomic)
 {
         unsigned long copy;
 
-        while (len > 0) {
+        while (*remaining > 0) {
                 while (!iov->iov_len)
                         iov++;
-                copy = min_t(unsigned long, len, iov->iov_len);
+                copy = min_t(unsigned long, *remaining, iov->iov_len);
 
                 if (atomic) {
-                        if (__copy_to_user_inatomic(iov->iov_base, from, copy))
+                        if (__copy_to_user_inatomic(iov->iov_base,
+                                        addr + *offset, copy))
                                 return -EFAULT;
                 } else {
-                        if (copy_to_user(iov->iov_base, from, copy))
+                        if (copy_to_user(iov->iov_base,
+                                        addr + *offset, copy))
                                 return -EFAULT;
                 }
-                from += copy;
-                len -= copy;
+                *offset += copy;
+                *remaining -= copy;
                 iov->iov_base += copy;
                 iov->iov_len -= copy;
         }
@@ -395,7 +399,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
                 struct pipe_buffer *buf = pipe->bufs + curbuf;
                 const struct pipe_buf_operations *ops = buf->ops;
                 void *addr;
-                size_t chars = buf->len;
+                size_t chars = buf->len, remaining;
                 int error, atomic;
 
                 if (chars > total_len)
@@ -409,9 +413,11 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
                 }
 
                 atomic = !iov_fault_in_pages_write(iov, chars);
+                remaining = chars;
 redo:
                 addr = ops->map(pipe, buf, atomic);
-                error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
+                error = pipe_iov_copy_to_user(iov, addr, &buf->offset,
+                                        &remaining, atomic);
                 ops->unmap(pipe, buf, addr);
                 if (unlikely(error)) {
                         /*
@@ -426,7 +432,6 @@ redo:
                         break;
                 }
                 ret += chars;
-                buf->offset += chars;
                 buf->len -= chars;
 
                 /* Was it a packet buffer? Clean up and exit */
@@ -531,6 +536,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
         if (ops->can_merge && offset + chars <= PAGE_SIZE) {
                 int error, atomic = 1;
                 void *addr;
+                size_t remaining = chars;
 
                 error = ops->confirm(pipe, buf);
                 if (error)
@@ -539,8 +545,8 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
                 iov_fault_in_pages_read(iov, chars);
 redo1:
                 addr = ops->map(pipe, buf, atomic);
-                error = pipe_iov_copy_from_user(offset + addr, iov,
-                                        chars, atomic);
+                error = pipe_iov_copy_from_user(addr, &offset, iov,
+                                        &remaining, atomic);
                 ops->unmap(pipe, buf, addr);
                 ret = error;
                 do_wakeup = 1;
@@ -575,6 +581,8 @@ redo1:
                 struct page *page = pipe->tmp_page;
                 char *src;
                 int error, atomic = 1;
+                int offset = 0;
+                size_t remaining;
 
                 if (!page) {
                         page = alloc_page(GFP_HIGHUSER);
@@ -595,14 +603,15 @@ redo1:
                         chars = total_len;
 
                 iov_fault_in_pages_read(iov, chars);
+                remaining = chars;
redo2:
                 if (atomic)
                         src = kmap_atomic(page);
                 else
                         src = kmap(page);
 
-                error = pipe_iov_copy_from_user(src, iov, chars,
-                                        atomic);
+                error = pipe_iov_copy_from_user(src, &offset, iov,
+                                        &remaining, atomic);
                 if (atomic)
                         kunmap_atomic(src);
                 else
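The pipe.c rework changes both copy helpers to take a base pointer plus caller-owned *offset and *remaining counters, so the counters only ever advance by what was actually copied; after a failed or partial atomic copy, the non-atomic retry resumes from the current offset instead of reusing a stale length. A stand-alone sketch of that calling convention; copy_chunk is invented for the example, memcpy stands in for copy_to_user(), and a flat buffer replaces the iovec array:

#include <stddef.h>
#include <string.h>

/* Copy one bounded chunk from addr + *offset into dst + *offset, advancing
 * *offset and *remaining by exactly the amount copied -- the bookkeeping
 * shape the patched pipe_iov_copy_to_user() keeps for its caller. */
static void copy_chunk(char *dst, const char *addr, size_t *offset,
                       size_t *remaining, size_t seg)
{
        size_t copy = *remaining < seg ? *remaining : seg;

        memcpy(dst + *offset, addr + *offset, copy);
        *offset += copy;
        *remaining -= copy;
}

int main(void)
{
        const char src[] = "pipe buffer contents";
        char dst[sizeof(src)] = { 0 };
        size_t offset = 0, remaining = sizeof(src);

        /* A short first copy (as after a partially completed atomic attempt)... */
        copy_chunk(dst, src, &offset, &remaining, 5);
        /* ...and a retry that resumes at offset rather than at byte 0. */
        copy_chunk(dst, src, &offset, &remaining, remaining);

        return memcmp(src, dst, sizeof(src)) != 0;
}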
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 0a1edc694d67..fe3e086d38e9 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1328,19 +1328,24 @@ static int check_preds(struct filter_parse_state *ps)
 {
         int n_normal_preds = 0, n_logical_preds = 0;
         struct postfix_elt *elt;
+        int cnt = 0;
 
         list_for_each_entry(elt, &ps->postfix, list) {
-                if (elt->op == OP_NONE)
+                if (elt->op == OP_NONE) {
+                        cnt++;
                         continue;
+                }
 
+                cnt--;
                 if (elt->op == OP_AND || elt->op == OP_OR) {
                         n_logical_preds++;
                         continue;
                 }
                 n_normal_preds++;
+                WARN_ON_ONCE(cnt < 0);
         }
 
-        if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
+        if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
                 parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
                 return -EINVAL;
         }
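In check_preds() the new cnt acts as a stack-depth counter over the filter's postfix list: each operand element (OP_NONE) pushes one value, and every binary operator consumes two and produces one, so a well-formed expression must end with exactly one value and never go negative along the way. The same counting argument in stand-alone form, over a hypothetical token array; postfix_is_valid is invented for the example:

#include <stdbool.h>
#include <stdio.h>

enum op { OP_NONE, OP_AND, OP_OR };     /* OP_NONE marks an operand */

/* Operands push one value; every binary operator pops two and pushes one
 * (net -1). A valid postfix expression never underflows and finishes with
 * exactly one value left. */
static bool postfix_is_valid(const enum op *toks, int n)
{
        int cnt = 0;

        for (int i = 0; i < n; i++) {
                if (toks[i] == OP_NONE) {
                        cnt++;
                        continue;
                }
                cnt--;
                if (cnt < 1)            /* operator without two operands */
                        return false;
        }
        return cnt == 1;
}

int main(void)
{
        /* "a b && c ||" -- valid */
        enum op good[] = { OP_NONE, OP_NONE, OP_AND, OP_NONE, OP_OR };
        /* "a b" -- two operands with nothing combining them: invalid */
        enum op bad[]  = { OP_NONE, OP_NONE };

        printf("good: %d\n", postfix_is_valid(good, 5));
        printf("bad:  %d\n", postfix_is_valid(bad, 2));
        return 0;
}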