1 /*
2  *   fs/cifs/cifsacl.c
3  *
4  *   Copyright (C) International Business Machines  Corp., 2007,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Contains the routines for mapping CIFS/NTFS ACLs
8  *
9  *   This library is free software; you can redistribute it and/or modify
10  *   it under the terms of the GNU Lesser General Public License as published
11  *   by the Free Software Foundation; either version 2.1 of the License, or
12  *   (at your option) any later version.
13  *
14  *   This library is distributed in the hope that it will be useful,
15  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
17  *   the GNU Lesser General Public License for more details.
18  *
19  *   You should have received a copy of the GNU Lesser General Public License
20  *   along with this library; if not, write to the Free Software
21  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22  */
23 
24 #include <linux/fs.h>
25 #include <linux/slab.h>
26 #include <linux/string.h>
27 #include <linux/keyctl.h>
28 #include <linux/key-type.h>
29 #include <keys/user-type.h>
30 #include "cifspdu.h"
31 #include "cifsglob.h"
32 #include "cifsacl.h"
33 #include "cifsproto.h"
34 #include "cifs_debug.h"
35 
/* security id for everyone/world system group (S-1-1-0) */
static const struct cifs_sid sid_everyone = {
	1, 1, {0, 0, 0, 0, 0, 1}, {0} };
/* security id for Authenticated Users system group (S-1-5-11) */
static const struct cifs_sid sid_authusers = {
	1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11)} };
/* group users */
static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };

/* override credentials used for idmap upcalls; set up in init_cifs_idmap() */
const struct cred *root_cred;
46 
/*
 * Walk one idmap rb-tree, unlinking entries that are expired and
 * unreferenced.  With nr_to_scan == 0 (or once the deletion quota has
 * been met) entries are only counted.  *nr_rem accumulates surviving
 * entries, *nr_del accumulates deletions.  Caller holds the tree lock.
 */
static void
shrink_idmap_tree(struct rb_root *root, int nr_to_scan, int *nr_rem,
			int *nr_del)
{
	struct rb_node *node;
	struct rb_node *tmp;
	struct cifs_sid_id *psidid;

	node = rb_first(root);
	while (node) {
		tmp = node;
		node = rb_next(tmp);	/* advance before possible erase */
		psidid = rb_entry(tmp, struct cifs_sid_id, rbnode);
		if (nr_to_scan == 0 || *nr_del == nr_to_scan)
			++(*nr_rem);
		else {
			if (time_after(jiffies, psidid->time + SID_MAP_EXPIRE)
						&& psidid->refcount == 0) {
				/* NOTE(review): the node is only unlinked;
				 * the cifs_sid_id (and any remaining sidstr)
				 * allocated in id_to_sid()/sid_to_id() is
				 * never kfree()d here, which looks like a
				 * leak.  Confirm ownership before freeing:
				 * sidstr is already freed once an entry is
				 * successfully mapped. */
				rb_erase(tmp, root);
				++(*nr_del);
			} else
				++(*nr_rem);
		}
	}
}
72 
73 /*
74  * Run idmap cache shrinker.
75  */
76 static int
cifs_idmap_shrinker(struct shrinker * shrink,struct shrink_control * sc)77 cifs_idmap_shrinker(struct shrinker *shrink, struct shrink_control *sc)
78 {
79 	int nr_to_scan = sc->nr_to_scan;
80 	int nr_del = 0;
81 	int nr_rem = 0;
82 	struct rb_root *root;
83 
84 	root = &uidtree;
85 	spin_lock(&siduidlock);
86 	shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
87 	spin_unlock(&siduidlock);
88 
89 	root = &gidtree;
90 	spin_lock(&sidgidlock);
91 	shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
92 	spin_unlock(&sidgidlock);
93 
94 	root = &siduidtree;
95 	spin_lock(&uidsidlock);
96 	shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
97 	spin_unlock(&uidsidlock);
98 
99 	root = &sidgidtree;
100 	spin_lock(&gidsidlock);
101 	shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
102 	spin_unlock(&gidsidlock);
103 
104 	return nr_rem;
105 }
106 
107 static void
sid_rb_insert(struct rb_root * root,unsigned long cid,struct cifs_sid_id ** psidid,char * typestr)108 sid_rb_insert(struct rb_root *root, unsigned long cid,
109 		struct cifs_sid_id **psidid, char *typestr)
110 {
111 	char *strptr;
112 	struct rb_node *node = root->rb_node;
113 	struct rb_node *parent = NULL;
114 	struct rb_node **linkto = &(root->rb_node);
115 	struct cifs_sid_id *lsidid;
116 
117 	while (node) {
118 		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
119 		parent = node;
120 		if (cid > lsidid->id) {
121 			linkto = &(node->rb_left);
122 			node = node->rb_left;
123 		}
124 		if (cid < lsidid->id) {
125 			linkto = &(node->rb_right);
126 			node = node->rb_right;
127 		}
128 	}
129 
130 	(*psidid)->id = cid;
131 	(*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
132 	(*psidid)->refcount = 0;
133 
134 	sprintf((*psidid)->sidstr, "%s", typestr);
135 	strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
136 	sprintf(strptr, "%ld", cid);
137 
138 	clear_bit(SID_ID_PENDING, &(*psidid)->state);
139 	clear_bit(SID_ID_MAPPED, &(*psidid)->state);
140 
141 	rb_link_node(&(*psidid)->rbnode, parent, linkto);
142 	rb_insert_color(&(*psidid)->rbnode, root);
143 }
144 
145 static struct cifs_sid_id *
sid_rb_search(struct rb_root * root,unsigned long cid)146 sid_rb_search(struct rb_root *root, unsigned long cid)
147 {
148 	struct rb_node *node = root->rb_node;
149 	struct cifs_sid_id *lsidid;
150 
151 	while (node) {
152 		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
153 		if (cid > lsidid->id)
154 			node = node->rb_left;
155 		else if (cid < lsidid->id)
156 			node = node->rb_right;
157 		else /* node found */
158 			return lsidid;
159 	}
160 
161 	return NULL;
162 }
163 
/* memory-pressure hook: lets the VM trim the idmap caches */
static struct shrinker cifs_shrinker = {
	.shrink = cifs_idmap_shrinker,
	.seeks = DEFAULT_SEEKS,
};
168 
169 static int
cifs_idmap_key_instantiate(struct key * key,const void * data,size_t datalen)170 cifs_idmap_key_instantiate(struct key *key, const void *data, size_t datalen)
171 {
172 	char *payload;
173 
174 	payload = kmalloc(datalen, GFP_KERNEL);
175 	if (!payload)
176 		return -ENOMEM;
177 
178 	memcpy(payload, data, datalen);
179 	key->payload.data = payload;
180 	key->datalen = datalen;
181 	return 0;
182 }
183 
/* free the payload copy made in cifs_idmap_key_instantiate() */
static inline void
cifs_idmap_key_destroy(struct key *key)
{
	kfree(key->payload.data);
}
189 
/* key type used to upcall to userspace (cifs.idmap) for SID<->id mapping */
struct key_type cifs_idmap_key_type = {
	.name        = "cifs.idmap",
	.instantiate = cifs_idmap_key_instantiate,
	.destroy     = cifs_idmap_key_destroy,
	.describe    = user_describe,
	.match       = user_match,
};
197 
198 static void
sid_to_str(struct cifs_sid * sidptr,char * sidstr)199 sid_to_str(struct cifs_sid *sidptr, char *sidstr)
200 {
201 	int i;
202 	unsigned long saval;
203 	char *strptr;
204 
205 	strptr = sidstr;
206 
207 	sprintf(strptr, "%s", "S");
208 	strptr = sidstr + strlen(sidstr);
209 
210 	sprintf(strptr, "-%d", sidptr->revision);
211 	strptr = sidstr + strlen(sidstr);
212 
213 	for (i = 0; i < 6; ++i) {
214 		if (sidptr->authority[i]) {
215 			sprintf(strptr, "-%d", sidptr->authority[i]);
216 			strptr = sidstr + strlen(sidstr);
217 		}
218 	}
219 
220 	for (i = 0; i < sidptr->num_subauth; ++i) {
221 		saval = le32_to_cpu(sidptr->sub_auth[i]);
222 		sprintf(strptr, "-%ld", saval);
223 		strptr = sidstr + strlen(sidstr);
224 	}
225 }
226 
227 static void
id_rb_insert(struct rb_root * root,struct cifs_sid * sidptr,struct cifs_sid_id ** psidid,char * typestr)228 id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
229 		struct cifs_sid_id **psidid, char *typestr)
230 {
231 	int rc;
232 	char *strptr;
233 	struct rb_node *node = root->rb_node;
234 	struct rb_node *parent = NULL;
235 	struct rb_node **linkto = &(root->rb_node);
236 	struct cifs_sid_id *lsidid;
237 
238 	while (node) {
239 		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
240 		parent = node;
241 		rc = compare_sids(sidptr, &((lsidid)->sid));
242 		if (rc > 0) {
243 			linkto = &(node->rb_left);
244 			node = node->rb_left;
245 		} else if (rc < 0) {
246 			linkto = &(node->rb_right);
247 			node = node->rb_right;
248 		}
249 	}
250 
251 	memcpy(&(*psidid)->sid, sidptr, sizeof(struct cifs_sid));
252 	(*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
253 	(*psidid)->refcount = 0;
254 
255 	sprintf((*psidid)->sidstr, "%s", typestr);
256 	strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
257 	sid_to_str(&(*psidid)->sid, strptr);
258 
259 	clear_bit(SID_ID_PENDING, &(*psidid)->state);
260 	clear_bit(SID_ID_MAPPED, &(*psidid)->state);
261 
262 	rb_link_node(&(*psidid)->rbnode, parent, linkto);
263 	rb_insert_color(&(*psidid)->rbnode, root);
264 }
265 
266 static struct cifs_sid_id *
id_rb_search(struct rb_root * root,struct cifs_sid * sidptr)267 id_rb_search(struct rb_root *root, struct cifs_sid *sidptr)
268 {
269 	int rc;
270 	struct rb_node *node = root->rb_node;
271 	struct cifs_sid_id *lsidid;
272 
273 	while (node) {
274 		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
275 		rc = compare_sids(sidptr, &((lsidid)->sid));
276 		if (rc > 0) {
277 			node = node->rb_left;
278 		} else if (rc < 0) {
279 			node = node->rb_right;
280 		} else /* node found */
281 			return lsidid;
282 	}
283 
284 	return NULL;
285 }
286 
/*
 * wait_on_bit() action: sleep until woken, then report -ERESTARTSYS if
 * a signal interrupted the wait (callers treat that as "give up").
 */
static int
sidid_pending_wait(void *unused)
{
	schedule();
	return signal_pending(current) ? -ERESTARTSYS : 0;
}
293 
294 static int
id_to_sid(unsigned long cid,uint sidtype,struct cifs_sid * ssid)295 id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
296 {
297 	int rc = 0;
298 	struct key *sidkey;
299 	const struct cred *saved_cred;
300 	struct cifs_sid *lsid;
301 	struct cifs_sid_id *psidid, *npsidid;
302 	struct rb_root *cidtree;
303 	spinlock_t *cidlock;
304 
305 	if (sidtype == SIDOWNER) {
306 		cidlock = &siduidlock;
307 		cidtree = &uidtree;
308 	} else if (sidtype == SIDGROUP) {
309 		cidlock = &sidgidlock;
310 		cidtree = &gidtree;
311 	} else
312 		return -EINVAL;
313 
314 	spin_lock(cidlock);
315 	psidid = sid_rb_search(cidtree, cid);
316 
317 	if (!psidid) { /* node does not exist, allocate one & attempt adding */
318 		spin_unlock(cidlock);
319 		npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
320 		if (!npsidid)
321 			return -ENOMEM;
322 
323 		npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
324 		if (!npsidid->sidstr) {
325 			kfree(npsidid);
326 			return -ENOMEM;
327 		}
328 
329 		spin_lock(cidlock);
330 		psidid = sid_rb_search(cidtree, cid);
331 		if (psidid) { /* node happened to get inserted meanwhile */
332 			++psidid->refcount;
333 			spin_unlock(cidlock);
334 			kfree(npsidid->sidstr);
335 			kfree(npsidid);
336 		} else {
337 			psidid = npsidid;
338 			sid_rb_insert(cidtree, cid, &psidid,
339 					sidtype == SIDOWNER ? "oi:" : "gi:");
340 			++psidid->refcount;
341 			spin_unlock(cidlock);
342 		}
343 	} else {
344 		++psidid->refcount;
345 		spin_unlock(cidlock);
346 	}
347 
348 	/*
349 	 * If we are here, it is safe to access psidid and its fields
350 	 * since a reference was taken earlier while holding the spinlock.
351 	 * A reference on the node is put without holding the spinlock
352 	 * and it is OK to do so in this case, shrinker will not erase
353 	 * this node until all references are put and we do not access
354 	 * any fields of the node after a reference is put .
355 	 */
356 	if (test_bit(SID_ID_MAPPED, &psidid->state)) {
357 		memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
358 		psidid->time = jiffies; /* update ts for accessing */
359 		goto id_sid_out;
360 	}
361 
362 	if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
363 		rc = -EINVAL;
364 		goto id_sid_out;
365 	}
366 
367 	if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
368 		saved_cred = override_creds(root_cred);
369 		sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
370 		if (IS_ERR(sidkey)) {
371 			rc = -EINVAL;
372 			cFYI(1, "%s: Can't map and id to a SID", __func__);
373 		} else {
374 			lsid = (struct cifs_sid *)sidkey->payload.data;
375 			memcpy(&psidid->sid, lsid,
376 				sidkey->datalen < sizeof(struct cifs_sid) ?
377 				sidkey->datalen : sizeof(struct cifs_sid));
378 			memcpy(ssid, &psidid->sid,
379 				sidkey->datalen < sizeof(struct cifs_sid) ?
380 				sidkey->datalen : sizeof(struct cifs_sid));
381 			set_bit(SID_ID_MAPPED, &psidid->state);
382 			key_put(sidkey);
383 			kfree(psidid->sidstr);
384 		}
385 		psidid->time = jiffies; /* update ts for accessing */
386 		revert_creds(saved_cred);
387 		clear_bit(SID_ID_PENDING, &psidid->state);
388 		wake_up_bit(&psidid->state, SID_ID_PENDING);
389 	} else {
390 		rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
391 				sidid_pending_wait, TASK_INTERRUPTIBLE);
392 		if (rc) {
393 			cFYI(1, "%s: sidid_pending_wait interrupted %d",
394 					__func__, rc);
395 			--psidid->refcount;
396 			return rc;
397 		}
398 		if (test_bit(SID_ID_MAPPED, &psidid->state))
399 			memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
400 		else
401 			rc = -EINVAL;
402 	}
403 id_sid_out:
404 	--psidid->refcount;
405 	return rc;
406 }
407 
408 static int
sid_to_id(struct cifs_sb_info * cifs_sb,struct cifs_sid * psid,struct cifs_fattr * fattr,uint sidtype)409 sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
410 		struct cifs_fattr *fattr, uint sidtype)
411 {
412 	int rc;
413 	unsigned long cid;
414 	struct key *idkey;
415 	const struct cred *saved_cred;
416 	struct cifs_sid_id *psidid, *npsidid;
417 	struct rb_root *cidtree;
418 	spinlock_t *cidlock;
419 
420 	if (sidtype == SIDOWNER) {
421 		cid = cifs_sb->mnt_uid; /* default uid, in case upcall fails */
422 		cidlock = &siduidlock;
423 		cidtree = &uidtree;
424 	} else if (sidtype == SIDGROUP) {
425 		cid = cifs_sb->mnt_gid; /* default gid, in case upcall fails */
426 		cidlock = &sidgidlock;
427 		cidtree = &gidtree;
428 	} else
429 		return -ENOENT;
430 
431 	spin_lock(cidlock);
432 	psidid = id_rb_search(cidtree, psid);
433 
434 	if (!psidid) { /* node does not exist, allocate one & attempt adding */
435 		spin_unlock(cidlock);
436 		npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
437 		if (!npsidid)
438 			return -ENOMEM;
439 
440 		npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
441 		if (!npsidid->sidstr) {
442 			kfree(npsidid);
443 			return -ENOMEM;
444 		}
445 
446 		spin_lock(cidlock);
447 		psidid = id_rb_search(cidtree, psid);
448 		if (psidid) { /* node happened to get inserted meanwhile */
449 			++psidid->refcount;
450 			spin_unlock(cidlock);
451 			kfree(npsidid->sidstr);
452 			kfree(npsidid);
453 		} else {
454 			psidid = npsidid;
455 			id_rb_insert(cidtree, psid, &psidid,
456 					sidtype == SIDOWNER ? "os:" : "gs:");
457 			++psidid->refcount;
458 			spin_unlock(cidlock);
459 		}
460 	} else {
461 		++psidid->refcount;
462 		spin_unlock(cidlock);
463 	}
464 
465 	/*
466 	 * If we are here, it is safe to access psidid and its fields
467 	 * since a reference was taken earlier while holding the spinlock.
468 	 * A reference on the node is put without holding the spinlock
469 	 * and it is OK to do so in this case, shrinker will not erase
470 	 * this node until all references are put and we do not access
471 	 * any fields of the node after a reference is put .
472 	 */
473 	if (test_bit(SID_ID_MAPPED, &psidid->state)) {
474 		cid = psidid->id;
475 		psidid->time = jiffies; /* update ts for accessing */
476 		goto sid_to_id_out;
477 	}
478 
479 	if (time_after(psidid->time + SID_MAP_RETRY, jiffies))
480 		goto sid_to_id_out;
481 
482 	if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
483 		saved_cred = override_creds(root_cred);
484 		idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
485 		if (IS_ERR(idkey))
486 			cFYI(1, "%s: Can't map SID to an id", __func__);
487 		else {
488 			cid = *(unsigned long *)idkey->payload.value;
489 			psidid->id = cid;
490 			set_bit(SID_ID_MAPPED, &psidid->state);
491 			key_put(idkey);
492 			kfree(psidid->sidstr);
493 		}
494 		revert_creds(saved_cred);
495 		psidid->time = jiffies; /* update ts for accessing */
496 		clear_bit(SID_ID_PENDING, &psidid->state);
497 		wake_up_bit(&psidid->state, SID_ID_PENDING);
498 	} else {
499 		rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
500 				sidid_pending_wait, TASK_INTERRUPTIBLE);
501 		if (rc) {
502 			cFYI(1, "%s: sidid_pending_wait interrupted %d",
503 					__func__, rc);
504 			--psidid->refcount; /* decremented without spinlock */
505 			return rc;
506 		}
507 		if (test_bit(SID_ID_MAPPED, &psidid->state))
508 			cid = psidid->id;
509 	}
510 
511 sid_to_id_out:
512 	--psidid->refcount; /* decremented without spinlock */
513 	if (sidtype == SIDOWNER)
514 		fattr->cf_uid = cid;
515 	else
516 		fattr->cf_gid = cid;
517 
518 	return 0;
519 }
520 
/*
 * Register the cifs.idmap key type and set up the override credentials
 * and idmap caches used for SID<->id translation.
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released.
 */
int
init_cifs_idmap(void)
{
	struct cred *cred;
	struct key *keyring;
	int ret;

	cFYI(1, "Registering the %s key type\n", cifs_idmap_key_type.name);

	/* create an override credential set with a special thread keyring in
	 * which requests are cached
	 *
	 * this is used to prevent malicious redirections from being installed
	 * with add_key().
	 */
	cred = prepare_kernel_cred(NULL);
	if (!cred)
		return -ENOMEM;

	keyring = key_alloc(&key_type_keyring, ".cifs_idmap", 0, 0, cred,
			    (KEY_POS_ALL & ~KEY_POS_SETATTR) |
			    KEY_USR_VIEW | KEY_USR_READ,
			    KEY_ALLOC_NOT_IN_QUOTA);
	if (IS_ERR(keyring)) {
		ret = PTR_ERR(keyring);
		goto failed_put_cred;
	}

	/* an empty keyring must still be instantiated before it is linked */
	ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
	if (ret < 0)
		goto failed_put_key;

	ret = register_key_type(&cifs_idmap_key_type);
	if (ret < 0)
		goto failed_put_key;

	/* instruct request_key() to use this special keyring as a cache for
	 * the results it looks up */
	cred->thread_keyring = keyring;
	cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
	root_cred = cred;

	/* initialize the four idmap cache trees and their locks */
	spin_lock_init(&siduidlock);
	uidtree = RB_ROOT;
	spin_lock_init(&sidgidlock);
	gidtree = RB_ROOT;

	spin_lock_init(&uidsidlock);
	siduidtree = RB_ROOT;
	spin_lock_init(&gidsidlock);
	sidgidtree = RB_ROOT;
	register_shrinker(&cifs_shrinker);

	cFYI(1, "cifs idmap keyring: %d\n", key_serial(keyring));
	return 0;

failed_put_key:
	key_put(keyring);
failed_put_cred:
	put_cred(cred);
	return ret;
}
583 
/* tear down everything set up in init_cifs_idmap() */
void
exit_cifs_idmap(void)
{
	key_revoke(root_cred->thread_keyring);
	unregister_key_type(&cifs_idmap_key_type);
	put_cred(root_cred);
	unregister_shrinker(&cifs_shrinker);
	cFYI(1, "Unregistered %s key type\n", cifs_idmap_key_type.name);
}
593 
/*
 * Empty all four idmap rb-trees (module teardown).
 * NOTE(review): entries are only unlinked with rb_erase(); the
 * cifs_sid_id structs (and any remaining sidstr buffers) allocated in
 * id_to_sid()/sid_to_id() are never kfree()d here, which looks like a
 * leak.  Confirm ownership before freeing: sidstr is already freed once
 * an entry is successfully mapped, so freeing it again here would
 * double-free.
 */
void
cifs_destroy_idmaptrees(void)
{
	struct rb_root *root;
	struct rb_node *node;

	root = &uidtree;
	spin_lock(&siduidlock);
	while ((node = rb_first(root)))
		rb_erase(node, root);
	spin_unlock(&siduidlock);

	root = &gidtree;
	spin_lock(&sidgidlock);
	while ((node = rb_first(root)))
		rb_erase(node, root);
	spin_unlock(&sidgidlock);

	root = &siduidtree;
	spin_lock(&uidsidlock);
	while ((node = rb_first(root)))
		rb_erase(node, root);
	spin_unlock(&uidsidlock);

	root = &sidgidtree;
	spin_lock(&gidsidlock);
	while ((node = rb_first(root)))
		rb_erase(node, root);
	spin_unlock(&gidsidlock);
}
624 
/* Compare two SIDs (roughly equivalent to a UUID for a user or group).
   Returns 0 when the SIDs match, and a positive or negative value
   indicating which SID ordered higher otherwise (a NULL argument also
   yields 1).  Note that the sub-authority *counts* themselves are not
   compared: SIDs whose common-prefix sub-auths match compare equal. */
int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
{
	int i;
	int num_subauth, num_sat, num_saw;

	if ((!ctsid) || (!cwsid))
		return 1;

	/* compare the revision */
	if (ctsid->revision != cwsid->revision) {
		if (ctsid->revision > cwsid->revision)
			return 1;
		else
			return -1;
	}

	/* compare all of the six auth values */
	for (i = 0; i < 6; ++i) {
		if (ctsid->authority[i] != cwsid->authority[i]) {
			if (ctsid->authority[i] > cwsid->authority[i])
				return 1;
			else
				return -1;
		}
	}

	/* compare all of the subauth values if any */
	num_sat = ctsid->num_subauth;
	num_saw = cwsid->num_subauth;
	num_subauth = num_sat < num_saw ? num_sat : num_saw;
	if (num_subauth) {
		for (i = 0; i < num_subauth; ++i) {
			if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
				if (le32_to_cpu(ctsid->sub_auth[i]) >
					le32_to_cpu(cwsid->sub_auth[i]))
					return 1;
				else
					return -1;
			}
		}
	}

	return 0; /* sids compare/match */
}
671 
672 
673 /* copy ntsd, owner sid, and group sid from a security descriptor to another */
copy_sec_desc(const struct cifs_ntsd * pntsd,struct cifs_ntsd * pnntsd,__u32 sidsoffset)674 static void copy_sec_desc(const struct cifs_ntsd *pntsd,
675 				struct cifs_ntsd *pnntsd, __u32 sidsoffset)
676 {
677 	int i;
678 
679 	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
680 	struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
681 
682 	/* copy security descriptor control portion */
683 	pnntsd->revision = pntsd->revision;
684 	pnntsd->type = pntsd->type;
685 	pnntsd->dacloffset = cpu_to_le32(sizeof(struct cifs_ntsd));
686 	pnntsd->sacloffset = 0;
687 	pnntsd->osidoffset = cpu_to_le32(sidsoffset);
688 	pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid));
689 
690 	/* copy owner sid */
691 	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
692 				le32_to_cpu(pntsd->osidoffset));
693 	nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);
694 
695 	nowner_sid_ptr->revision = owner_sid_ptr->revision;
696 	nowner_sid_ptr->num_subauth = owner_sid_ptr->num_subauth;
697 	for (i = 0; i < 6; i++)
698 		nowner_sid_ptr->authority[i] = owner_sid_ptr->authority[i];
699 	for (i = 0; i < 5; i++)
700 		nowner_sid_ptr->sub_auth[i] = owner_sid_ptr->sub_auth[i];
701 
702 	/* copy group sid */
703 	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
704 				le32_to_cpu(pntsd->gsidoffset));
705 	ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
706 					sizeof(struct cifs_sid));
707 
708 	ngroup_sid_ptr->revision = group_sid_ptr->revision;
709 	ngroup_sid_ptr->num_subauth = group_sid_ptr->num_subauth;
710 	for (i = 0; i < 6; i++)
711 		ngroup_sid_ptr->authority[i] = group_sid_ptr->authority[i];
712 	for (i = 0; i < 5; i++)
713 		ngroup_sid_ptr->sub_auth[i] = group_sid_ptr->sub_auth[i];
714 
715 	return;
716 }
717 
718 
719 /*
720    change posix mode to reflect permissions
721    pmode is the existing mode (we only want to overwrite part of this
722    bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO ie 00700 or 00070 or 00007
723 */
access_flags_to_mode(__le32 ace_flags,int type,umode_t * pmode,umode_t * pbits_to_set)724 static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
725 				 umode_t *pbits_to_set)
726 {
727 	__u32 flags = le32_to_cpu(ace_flags);
728 	/* the order of ACEs is important.  The canonical order is to begin with
729 	   DENY entries followed by ALLOW, otherwise an allow entry could be
730 	   encountered first, making the subsequent deny entry like "dead code"
731 	   which would be superflous since Windows stops when a match is made
732 	   for the operation you are trying to perform for your user */
733 
734 	/* For deny ACEs we change the mask so that subsequent allow access
735 	   control entries do not turn on the bits we are denying */
736 	if (type == ACCESS_DENIED) {
737 		if (flags & GENERIC_ALL)
738 			*pbits_to_set &= ~S_IRWXUGO;
739 
740 		if ((flags & GENERIC_WRITE) ||
741 			((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
742 			*pbits_to_set &= ~S_IWUGO;
743 		if ((flags & GENERIC_READ) ||
744 			((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
745 			*pbits_to_set &= ~S_IRUGO;
746 		if ((flags & GENERIC_EXECUTE) ||
747 			((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
748 			*pbits_to_set &= ~S_IXUGO;
749 		return;
750 	} else if (type != ACCESS_ALLOWED) {
751 		cERROR(1, "unknown access control type %d", type);
752 		return;
753 	}
754 	/* else ACCESS_ALLOWED type */
755 
756 	if (flags & GENERIC_ALL) {
757 		*pmode |= (S_IRWXUGO & (*pbits_to_set));
758 		cFYI(DBG2, "all perms");
759 		return;
760 	}
761 	if ((flags & GENERIC_WRITE) ||
762 			((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
763 		*pmode |= (S_IWUGO & (*pbits_to_set));
764 	if ((flags & GENERIC_READ) ||
765 			((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
766 		*pmode |= (S_IRUGO & (*pbits_to_set));
767 	if ((flags & GENERIC_EXECUTE) ||
768 			((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
769 		*pmode |= (S_IXUGO & (*pbits_to_set));
770 
771 	cFYI(DBG2, "access flags 0x%x mode now 0x%x", flags, *pmode);
772 	return;
773 }
774 
775 /*
776    Generate access flags to reflect permissions mode is the existing mode.
777    This function is called for every ACE in the DACL whose SID matches
778    with either owner or group or everyone.
779 */
780 
mode_to_access_flags(umode_t mode,umode_t bits_to_use,__u32 * pace_flags)781 static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
782 				__u32 *pace_flags)
783 {
784 	/* reset access mask */
785 	*pace_flags = 0x0;
786 
787 	/* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */
788 	mode &= bits_to_use;
789 
790 	/* check for R/W/X UGO since we do not know whose flags
791 	   is this but we have cleared all the bits sans RWX for
792 	   either user or group or other as per bits_to_use */
793 	if (mode & S_IRUGO)
794 		*pace_flags |= SET_FILE_READ_RIGHTS;
795 	if (mode & S_IWUGO)
796 		*pace_flags |= SET_FILE_WRITE_RIGHTS;
797 	if (mode & S_IXUGO)
798 		*pace_flags |= SET_FILE_EXEC_RIGHTS;
799 
800 	cFYI(DBG2, "mode: 0x%x, access flags now 0x%x", mode, *pace_flags);
801 	return;
802 }
803 
fill_ace_for_sid(struct cifs_ace * pntace,const struct cifs_sid * psid,__u64 nmode,umode_t bits)804 static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
805 			const struct cifs_sid *psid, __u64 nmode, umode_t bits)
806 {
807 	int i;
808 	__u16 size = 0;
809 	__u32 access_req = 0;
810 
811 	pntace->type = ACCESS_ALLOWED;
812 	pntace->flags = 0x0;
813 	mode_to_access_flags(nmode, bits, &access_req);
814 	if (!access_req)
815 		access_req = SET_MINIMUM_RIGHTS;
816 	pntace->access_req = cpu_to_le32(access_req);
817 
818 	pntace->sid.revision = psid->revision;
819 	pntace->sid.num_subauth = psid->num_subauth;
820 	for (i = 0; i < 6; i++)
821 		pntace->sid.authority[i] = psid->authority[i];
822 	for (i = 0; i < psid->num_subauth; i++)
823 		pntace->sid.sub_auth[i] = psid->sub_auth[i];
824 
825 	size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4);
826 	pntace->size = cpu_to_le16(size);
827 
828 	return size;
829 }
830 
831 
832 #ifdef CONFIG_CIFS_DEBUG2
/* Log one ACE's fields (debug builds only) after verifying that the ACE
   lies entirely within the ACL buffer ending at @end_of_acl. */
static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
{
	int num_subauth;

	/* validate that we do not go past end of acl */

	/* 16 bytes is the smallest well-formed ACE (header + minimal SID) */
	if (le16_to_cpu(pace->size) < 16) {
		cERROR(1, "ACE too small %d", le16_to_cpu(pace->size));
		return;
	}

	if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) {
		cERROR(1, "ACL too small to parse ACE");
		return;
	}

	num_subauth = pace->sid.num_subauth;
	if (num_subauth) {
		int i;
		cFYI(1, "ACE revision %d num_auth %d type %d flags %d size %d",
			pace->sid.revision, pace->sid.num_subauth, pace->type,
			pace->flags, le16_to_cpu(pace->size));
		for (i = 0; i < num_subauth; ++i) {
			cFYI(1, "ACE sub_auth[%d]: 0x%x", i,
				le32_to_cpu(pace->sid.sub_auth[i]));
		}

		/* BB add length check to make sure that we do not have huge
			num auths and therefore go off the end */
	}

	return;
}
866 #endif
867 
868 
/*
 * Translate a DACL into posix rwx mode bits on @fattr: each ACE whose
 * SID matches the owner, group, everyone, or authenticated-users SID
 * contributes to (or, for deny ACEs, masks off) the corresponding
 * permission class.  A missing DACL grants everything; an empty one
 * grants nothing.
 */
static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
		       struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
		       struct cifs_fattr *fattr)
{
	int i;
	int num_aces = 0;
	int acl_size;
	char *acl_base;
	struct cifs_ace **ppace;

	/* BB need to add parm so we can store the SID BB */

	if (!pdacl) {
		/* no DACL in the security descriptor, set
		   all the permissions for user/group/other */
		fattr->cf_mode |= S_IRWXUGO;
		return;
	}

	/* validate that we do not go past end of acl */
	if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
		cERROR(1, "ACL too small to parse DACL");
		return;
	}

	cFYI(DBG2, "DACL revision %d size %d num aces %d",
		le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
		le32_to_cpu(pdacl->num_aces));

	/* reset rwx permissions for user/group/other.
	   Also, if num_aces is 0 i.e. DACL has no ACEs,
	   user/group/other have no permissions */
	fattr->cf_mode &= ~(S_IRWXUGO);

	acl_base = (char *)pdacl;
	acl_size = sizeof(struct cifs_acl);

	num_aces = le32_to_cpu(pdacl->num_aces);
	if (num_aces > 0) {
		umode_t user_mask = S_IRWXU;
		umode_t group_mask = S_IRWXG;
		/* "everyone"/"authenticated users" ACEs can contribute to
		   all three permission classes */
		umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;

		/* overflow guard for the pointer-array allocation below */
		if (num_aces > ULONG_MAX / sizeof(struct cifs_ace *))
			return;
		/* NOTE(review): ppace[i] is only used within iteration i
		 * (aside from the commented-out memcpy below), so the array
		 * looks unnecessary; also the per-ACE walk never re-checks
		 * end_of_acl, so a large num_aces with short ACEs could walk
		 * past the buffer -- worth revisiting. */
		ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
				GFP_KERNEL);
		if (!ppace) {
			cERROR(1, "DACL memory allocation error");
			return;
		}

		for (i = 0; i < num_aces; ++i) {
			ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
#ifdef CONFIG_CIFS_DEBUG2
			dump_ace(ppace[i], end_of_acl);
#endif
			/* fold this ACE into every class whose SID matches */
			if (compare_sids(&(ppace[i]->sid), pownersid) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &user_mask);
			if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &group_mask);
			if (compare_sids(&(ppace[i]->sid), &sid_everyone) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &other_mask);
			if (compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &other_mask);


/*			memcpy((void *)(&(cifscred->aces[i])),
				(void *)ppace[i],
				sizeof(struct cifs_ace)); */

			/* next ACE starts immediately after this one */
			acl_base = (char *)ppace[i];
			acl_size = le16_to_cpu(ppace[i]->size);
		}

		kfree(ppace);
	}

	return;
}
961 
962 
/* Build a three-ACE DACL (owner, group, everyone) expressing @nmode.
   The ACEs are written immediately after the DACL header; the header's
   size and ace count are filled in afterwards.  Always returns 0. */
static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid,
			struct cifs_sid *pgrpsid, __u64 nmode)
{
	char *ace_start = (char *)pndacl + sizeof(struct cifs_acl);
	u16 size = 0;

	size += fill_ace_for_sid((struct cifs_ace *)(ace_start + size),
				 pownersid, nmode, S_IRWXU);
	size += fill_ace_for_sid((struct cifs_ace *)(ace_start + size),
				 pgrpsid, nmode, S_IRWXG);
	size += fill_ace_for_sid((struct cifs_ace *)(ace_start + size),
				 &sid_everyone, nmode, S_IRWXO);

	pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl));
	pndacl->num_aces = cpu_to_le32(3);

	return 0;
}
983 
984 
/*
 * Sanity-check that a SID lies within the ACL buffer and, in debug
 * builds, log its contents.  Returns 0 if the SID fits, -EINVAL if the
 * buffer cannot hold even a minimal (no sub-authority) SID.
 */
static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
{
	/* BB need to add parm so we can store the SID BB */

	/* validate that we do not go past end of ACL - sid must be at least 8
	   bytes long (assuming no sub-auths - e.g. the null SID */
	if (end_of_acl < (char *)psid + 8) {
		cERROR(1, "ACL too small to parse SID %p", psid);
		return -EINVAL;
	}

	if (psid->num_subauth) {
#ifdef CONFIG_CIFS_DEBUG2
		int i;
		cFYI(1, "SID revision %d num_auth %d",
			psid->revision, psid->num_subauth);

		for (i = 0; i < psid->num_subauth; i++) {
			cFYI(1, "SID sub_auth[%d]: 0x%x ", i,
				le32_to_cpu(psid->sub_auth[i]));
		}

		/* BB add length check to make sure that we do not have huge
			num auths and therefore go off the end */
		/* the last sub-authority is the RID (user/group number) */
		cFYI(1, "RID 0x%x",
			le32_to_cpu(psid->sub_auth[psid->num_subauth-1]));
#endif
	}

	return 0;
}
1016 
1017 
1018 /* Convert CIFS ACL to POSIX form */
parse_sec_desc(struct cifs_sb_info * cifs_sb,struct cifs_ntsd * pntsd,int acl_len,struct cifs_fattr * fattr)1019 static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
1020 		struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr)
1021 {
1022 	int rc = 0;
1023 	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
1024 	struct cifs_acl *dacl_ptr; /* no need for SACL ptr */
1025 	char *end_of_acl = ((char *)pntsd) + acl_len;
1026 	__u32 dacloffset;
1027 
1028 	if (pntsd == NULL)
1029 		return -EIO;
1030 
1031 	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1032 				le32_to_cpu(pntsd->osidoffset));
1033 	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1034 				le32_to_cpu(pntsd->gsidoffset));
1035 	dacloffset = le32_to_cpu(pntsd->dacloffset);
1036 	dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
1037 	cFYI(DBG2, "revision %d type 0x%x ooffset 0x%x goffset 0x%x "
1038 		 "sacloffset 0x%x dacloffset 0x%x",
1039 		 pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
1040 		 le32_to_cpu(pntsd->gsidoffset),
1041 		 le32_to_cpu(pntsd->sacloffset), dacloffset);
1042 /*	cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
1043 	rc = parse_sid(owner_sid_ptr, end_of_acl);
1044 	if (rc) {
1045 		cFYI(1, "%s: Error %d parsing Owner SID", __func__, rc);
1046 		return rc;
1047 	}
1048 	rc = sid_to_id(cifs_sb, owner_sid_ptr, fattr, SIDOWNER);
1049 	if (rc) {
1050 		cFYI(1, "%s: Error %d mapping Owner SID to uid", __func__, rc);
1051 		return rc;
1052 	}
1053 
1054 	rc = parse_sid(group_sid_ptr, end_of_acl);
1055 	if (rc) {
1056 		cFYI(1, "%s: Error %d mapping Owner SID to gid", __func__, rc);
1057 		return rc;
1058 	}
1059 	rc = sid_to_id(cifs_sb, group_sid_ptr, fattr, SIDGROUP);
1060 	if (rc) {
1061 		cFYI(1, "%s: Error %d mapping Group SID to gid", __func__, rc);
1062 		return rc;
1063 	}
1064 
1065 	if (dacloffset)
1066 		parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
1067 			   group_sid_ptr, fattr);
1068 	else
1069 		cFYI(1, "no ACL"); /* BB grant all or default perms? */
1070 
1071 	return rc;
1072 }
1073 
1074 /* Convert permission bits from mode to equivalent CIFS ACL */
/*
 * Build a modified security descriptor in @pnntsd from the server copy in
 * @pntsd.  Exactly one of three operations is performed, reported via
 * *@aclflag:
 *   - chmod (nmode != NO_CHANGE_64): a fresh 3-ACE DACL is built from nmode
 *     and the control/owner/group portions are copied over;
 *   - chown (uid != NO_CHANGE_32): the owner SID in the copied descriptor
 *     is overwritten with the SID mapped from uid;
 *   - chgrp (gid != NO_CHANGE_32): likewise for the group SID.
 * Returns 0 on success or a negative errno (-ENOMEM, or an id_to_sid
 * mapping failure).
 */
static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
	__u32 secdesclen, __u64 nmode, uid_t uid, gid_t gid, int *aclflag)
{
	int rc = 0;
	__u32 dacloffset;
	__u32 ndacloffset;
	__u32 sidsoffset;
	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
	struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
	struct cifs_acl *dacl_ptr = NULL;  /* no need for SACL ptr */
	struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */

	if (nmode != NO_CHANGE_64) { /* chmod */
		/* locate the existing owner/group SIDs and DACL in @pntsd */
		owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->osidoffset));
		group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->gsidoffset));
		dacloffset = le32_to_cpu(pntsd->dacloffset);
		/* new DACL goes immediately after the fixed ntsd header */
		dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
		ndacloffset = sizeof(struct cifs_ntsd);
		ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
		ndacl_ptr->revision = dacl_ptr->revision;
		ndacl_ptr->size = 0;
		ndacl_ptr->num_aces = 0;

		/* set_chmod_dacl fills in ndacl_ptr->size/num_aces */
		rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr,
					nmode);
		sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
		/* copy sec desc control portion & owner and group sids */
		copy_sec_desc(pntsd, pnntsd, sidsoffset);
		*aclflag = CIFS_ACL_DACL;
	} else {
		/*
		 * NOTE(review): secdesclen may have been rounded up to
		 * DEFSECDESCLEN by the caller, while @pntsd was allocated
		 * with the (possibly smaller) length returned by the
		 * server - this memcpy can then read past the end of
		 * @pntsd.  TODO confirm against id_mode_to_cifs_acl.
		 */
		memcpy(pnntsd, pntsd, secdesclen);
		if (uid != NO_CHANGE_32) { /* chown */
			owner_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
					le32_to_cpu(pnntsd->osidoffset));
			nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid),
								GFP_KERNEL);
			if (!nowner_sid_ptr)
				return -ENOMEM;
			rc = id_to_sid(uid, SIDOWNER, nowner_sid_ptr);
			if (rc) {
				cFYI(1, "%s: Mapping error %d for owner id %d",
						__func__, rc, uid);
				kfree(nowner_sid_ptr);
				return rc;
			}
			/* overwrite owner SID in place in the copy */
			memcpy(owner_sid_ptr, nowner_sid_ptr,
					sizeof(struct cifs_sid));
			kfree(nowner_sid_ptr);
			*aclflag = CIFS_ACL_OWNER;
		}
		if (gid != NO_CHANGE_32) { /* chgrp */
			group_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
					le32_to_cpu(pnntsd->gsidoffset));
			ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid),
								GFP_KERNEL);
			if (!ngroup_sid_ptr)
				return -ENOMEM;
			rc = id_to_sid(gid, SIDGROUP, ngroup_sid_ptr);
			if (rc) {
				cFYI(1, "%s: Mapping error %d for group id %d",
						__func__, rc, gid);
				kfree(ngroup_sid_ptr);
				return rc;
			}
			/* overwrite group SID in place in the copy */
			memcpy(group_sid_ptr, ngroup_sid_ptr,
					sizeof(struct cifs_sid));
			kfree(ngroup_sid_ptr);
			*aclflag = CIFS_ACL_GROUP;
		}
	}

	return rc;
}
1150 
/*
 * Fetch the security descriptor for an already-open file via its netfid.
 * On success returns a kmalloc'd descriptor (caller frees) and sets
 * *pacllen to its length; on failure returns an ERR_PTR.
 */
static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
		__u16 fid, u32 *pacllen)
{
	int rc;
	int xid;
	struct cifs_ntsd *acl = NULL;
	struct tcon_link *tlink;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return ERR_CAST(tlink);

	xid = GetXid();
	rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &acl, pacllen);
	FreeXid(xid);

	cifs_put_tlink(tlink);

	cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
	if (rc)
		return ERR_PTR(rc);
	return acl;
}
1172 
/*
 * Fetch the security descriptor for @path by opening it with READ_CONTROL,
 * querying the ACL, and closing it again.  On success returns a kmalloc'd
 * descriptor (caller frees) and sets *pacllen; on failure an ERR_PTR.
 */
static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
		const char *path, u32 *pacllen)
{
	int rc;
	int xid;
	int oplock = 0;
	int create_options = 0;
	__u16 fid;
	struct cifs_ntsd *acl = NULL;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return ERR_CAST(tlink);

	tcon = tlink_tcon(tlink);
	xid = GetXid();

	/* allow reading ACLs the user lacks permission for, if mounted so */
	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL,
			create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc == 0) {
		rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &acl, pacllen);
		CIFSSMBClose(xid, tcon, fid);
	}

	cifs_put_tlink(tlink);
	FreeXid(xid);

	cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
	if (rc)
		return ERR_PTR(rc);
	return acl;
}
1208 
1209 /* Retrieve an ACL from the server */
/* Retrieve an ACL from the server, preferring an existing open handle
 * over a fresh path-based open. */
struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
				      struct inode *inode, const char *path,
				      u32 *pacllen)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_ntsd *ntsd;

	if (inode)
		open_file = find_readable_file(CIFS_I(inode), true);

	if (open_file) {
		/* reuse the already-open handle, then drop our reference */
		ntsd = get_cifs_acl_by_fid(cifs_sb, open_file->netfid, pacllen);
		cifsFileInfo_put(open_file);
		return ntsd;
	}

	return get_cifs_acl_by_path(cifs_sb, path, pacllen);
}
1226 
1227  /* Set an ACL on the server */
/*
 * Push a security descriptor to the server for @path.  @aclflag selects
 * which portion is being set (DACL vs owner/group), which also determines
 * the access rights requested on open.  Returns 0 or a negative errno.
 */
int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
			struct inode *inode, const char *path, int aclflag)
{
	int rc;
	int xid;
	int oplock = 0;
	int create_options = 0;
	int access_flags;
	__u16 fid;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_tcon *tcon;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

	if (IS_ERR(tlink))
		return PTR_ERR(tlink);

	tcon = tlink_tcon(tlink);
	xid = GetXid();

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* changing ownership needs WRITE_OWNER; changing the DACL, WRITE_DAC */
	access_flags = (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
			? WRITE_OWNER : WRITE_DAC;

	rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, access_flags,
			create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		cERROR(1, "Unable to open file to set ACL");
		goto out;
	}

	rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen, aclflag);
	cFYI(DBG2, "SetCIFSACL rc = %d", rc);

	CIFSSMBClose(xid, tcon, fid);
out:
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
1269 
/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
1271 int
cifs_acl_to_fattr(struct cifs_sb_info * cifs_sb,struct cifs_fattr * fattr,struct inode * inode,const char * path,const __u16 * pfid)1272 cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
1273 		  struct inode *inode, const char *path, const __u16 *pfid)
1274 {
1275 	struct cifs_ntsd *pntsd = NULL;
1276 	u32 acllen = 0;
1277 	int rc = 0;
1278 
1279 	cFYI(DBG2, "converting ACL to mode for %s", path);
1280 
1281 	if (pfid)
1282 		pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen);
1283 	else
1284 		pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);
1285 
1286 	/* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
1287 	if (IS_ERR(pntsd)) {
1288 		rc = PTR_ERR(pntsd);
1289 		cERROR(1, "%s: error %d getting sec desc", __func__, rc);
1290 	} else {
1291 		rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr);
1292 		kfree(pntsd);
1293 		if (rc)
1294 			cERROR(1, "parse sec desc failed rc = %d", rc);
1295 	}
1296 
1297 	return rc;
1298 }
1299 
1300 /* Convert mode bits to an ACL so we can update the ACL on the server */
/*
 * Translate a chmod/chown/chgrp request into an updated security
 * descriptor and push it to the server.  Exactly one change is applied:
 * mode bits when nmode != NO_CHANGE_64, otherwise owner and/or group.
 * Returns 0 or a negative errno.
 */
int
id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
			uid_t uid, gid_t gid)
{
	int rc = 0;
	int aclflag = CIFS_ACL_DACL; /* default flag to set */
	__u32 secdesclen = 0;
	struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
	struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */

	cFYI(DBG2, "set ACL from mode for %s", path);

	/* Get the security descriptor */
	pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen);

	/* Add three ACEs for owner, group, everyone getting rid of
	   other ACEs as chmod disables ACEs and set the security descriptor */

	if (IS_ERR(pntsd)) {
		rc = PTR_ERR(pntsd);
		cERROR(1, "%s: error %d getting sec desc", __func__, rc);
	} else {
		/* allocate memory for the smb header,
		   set security descriptor request security descriptor
		   parameters, and security descriptor itself */

		/*
		 * NOTE(review): rounding secdesclen up to DEFSECDESCLEN
		 * sizes the NEW buffer, but build_sec_desc's chown/chgrp
		 * path memcpy's that many bytes out of @pntsd, which may
		 * be shorter - possible over-read; TODO confirm.
		 */
		secdesclen = secdesclen < DEFSECDESCLEN ?
					DEFSECDESCLEN : secdesclen;
		pnntsd = kmalloc(secdesclen, GFP_KERNEL);
		if (!pnntsd) {
			cERROR(1, "Unable to allocate security descriptor");
			kfree(pntsd);
			return -ENOMEM;
		}

		rc = build_sec_desc(pntsd, pnntsd, secdesclen, nmode, uid, gid,
					&aclflag);

		cFYI(DBG2, "build_sec_desc rc: %d", rc);

		if (!rc) {
			/* Set the security descriptor */
			rc = set_cifs_acl(pnntsd, secdesclen, inode,
						path, aclflag);
			cFYI(DBG2, "set_cifs_acl rc: %d", rc);
		}

		kfree(pnntsd);
		kfree(pntsd);
	}

	return rc;
}
1354