/*
 * Copyright (C) 2002,2003 by Andreas Gruenbacher <a.gruenbacher@computer.org>
 *
 * Fixes from William Schumacher incorporated on 15 March 2001.
 *    (Reported by Charles Bertsch, <CBertsch@microtest.com>).
 *
 * This file contains generic functions for manipulating
 * POSIX 1003.1e draft standard 17 ACLs.
 */
13 #include <linux/kernel.h>
14 #include <linux/slab.h>
15 #include <linux/atomic.h>
17 #include <linux/sched.h>
18 #include <linux/posix_acl.h>
19 #include <linux/posix_acl_xattr.h>
20 #include <linux/export.h>
21 #include <linux/user_namespace.h>
/* Export the generic ACL helpers for use by filesystem modules. */
EXPORT_SYMBOL(posix_acl_init);
EXPORT_SYMBOL(posix_acl_alloc);
EXPORT_SYMBOL(posix_acl_valid);
EXPORT_SYMBOL(posix_acl_equiv_mode);
EXPORT_SYMBOL(posix_acl_from_mode);
29 struct posix_acl *get_acl(struct inode *inode, int type)
31 struct posix_acl *acl;
33 acl = get_cached_acl(inode, type);
34 if (acl != ACL_NOT_CACHED)
37 if (!IS_POSIXACL(inode))
41 * A filesystem can force a ACL callback by just never filling the
42 * ACL cache. But normally you'd fill the cache either at inode
43 * instantiation time, or on the first ->get_acl call.
45 * If the filesystem doesn't have a get_acl() function at all, we'll
46 * just create the negative cache entry.
48 if (!inode->i_op->get_acl) {
49 set_cached_acl(inode, type, NULL);
52 return inode->i_op->get_acl(inode, type);
54 EXPORT_SYMBOL(get_acl);
57 * Init a fresh posix_acl
60 posix_acl_init(struct posix_acl *acl, int count)
62 atomic_set(&acl->a_refcount, 1);
67 * Allocate a new ACL with the specified number of entries.
70 posix_acl_alloc(int count, gfp_t flags)
72 const size_t size = sizeof(struct posix_acl) +
73 count * sizeof(struct posix_acl_entry);
74 struct posix_acl *acl = kmalloc(size, flags);
76 posix_acl_init(acl, count);
83 static struct posix_acl *
84 posix_acl_clone(const struct posix_acl *acl, gfp_t flags)
86 struct posix_acl *clone = NULL;
89 int size = sizeof(struct posix_acl) + acl->a_count *
90 sizeof(struct posix_acl_entry);
91 clone = kmemdup(acl, size, flags);
93 atomic_set(&clone->a_refcount, 1);
99 * Check if an acl is valid. Returns 0 if it is, or -E... otherwise.
102 posix_acl_valid(const struct posix_acl *acl)
104 const struct posix_acl_entry *pa, *pe;
105 int state = ACL_USER_OBJ;
106 kuid_t prev_uid = INVALID_UID;
107 kgid_t prev_gid = INVALID_GID;
110 FOREACH_ACL_ENTRY(pa, acl, pe) {
111 if (pa->e_perm & ~(ACL_READ|ACL_WRITE|ACL_EXECUTE))
115 if (state == ACL_USER_OBJ) {
122 if (state != ACL_USER)
124 if (!uid_valid(pa->e_uid))
126 if (uid_valid(prev_uid) &&
127 uid_lte(pa->e_uid, prev_uid))
129 prev_uid = pa->e_uid;
134 if (state == ACL_USER) {
141 if (state != ACL_GROUP)
143 if (!gid_valid(pa->e_gid))
145 if (gid_valid(prev_gid) &&
146 gid_lte(pa->e_gid, prev_gid))
148 prev_gid = pa->e_gid;
153 if (state != ACL_GROUP)
159 if (state == ACL_OTHER ||
160 (state == ACL_GROUP && !needs_mask)) {
176 * Returns 0 if the acl can be exactly represented in the traditional
177 * file mode permission bits, or else 1. Returns -E... on error.
180 posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
182 const struct posix_acl_entry *pa, *pe;
186 FOREACH_ACL_ENTRY(pa, acl, pe) {
189 mode |= (pa->e_perm & S_IRWXO) << 6;
192 mode |= (pa->e_perm & S_IRWXO) << 3;
195 mode |= pa->e_perm & S_IRWXO;
198 mode = (mode & ~S_IRWXG) |
199 ((pa->e_perm & S_IRWXO) << 3);
211 *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
216 * Create an ACL representing the file mode permission bits of an inode.
219 posix_acl_from_mode(umode_t mode, gfp_t flags)
221 struct posix_acl *acl = posix_acl_alloc(3, flags);
223 return ERR_PTR(-ENOMEM);
225 acl->a_entries[0].e_tag = ACL_USER_OBJ;
226 acl->a_entries[0].e_perm = (mode & S_IRWXU) >> 6;
228 acl->a_entries[1].e_tag = ACL_GROUP_OBJ;
229 acl->a_entries[1].e_perm = (mode & S_IRWXG) >> 3;
231 acl->a_entries[2].e_tag = ACL_OTHER;
232 acl->a_entries[2].e_perm = (mode & S_IRWXO);
237 * Return 0 if current is granted want access to the inode
238 * by the acl. Returns -E... otherwise.
241 posix_acl_permission(struct inode *inode, const struct posix_acl *acl, int want)
243 const struct posix_acl_entry *pa, *pe, *mask_obj;
246 want &= MAY_READ | MAY_WRITE | MAY_EXEC | MAY_NOT_BLOCK;
248 FOREACH_ACL_ENTRY(pa, acl, pe) {
251 /* (May have been checked already) */
252 if (uid_eq(inode->i_uid, current_fsuid()))
256 if (uid_eq(pa->e_uid, current_fsuid()))
260 if (in_group_p(inode->i_gid)) {
262 if ((pa->e_perm & want) == want)
267 if (in_group_p(pa->e_gid)) {
269 if ((pa->e_perm & want) == want)
287 for (mask_obj = pa+1; mask_obj != pe; mask_obj++) {
288 if (mask_obj->e_tag == ACL_MASK) {
289 if ((pa->e_perm & mask_obj->e_perm & want) == want)
296 if ((pa->e_perm & want) == want)
302 * Modify acl when creating a new inode. The caller must ensure the acl is
303 * only referenced once.
305 * mode_p initially must contain the mode parameter to the open() / creat()
306 * system calls. All permissions that are not granted by the acl are removed.
307 * The permissions in the acl are changed to reflect the mode_p parameter.
309 static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
311 struct posix_acl_entry *pa, *pe;
312 struct posix_acl_entry *group_obj = NULL, *mask_obj = NULL;
313 umode_t mode = *mode_p;
316 /* assert(atomic_read(acl->a_refcount) == 1); */
318 FOREACH_ACL_ENTRY(pa, acl, pe) {
321 pa->e_perm &= (mode >> 6) | ~S_IRWXO;
322 mode &= (pa->e_perm << 6) | ~S_IRWXU;
335 pa->e_perm &= mode | ~S_IRWXO;
336 mode &= pa->e_perm | ~S_IRWXO;
350 mask_obj->e_perm &= (mode >> 3) | ~S_IRWXO;
351 mode &= (mask_obj->e_perm << 3) | ~S_IRWXG;
355 group_obj->e_perm &= (mode >> 3) | ~S_IRWXO;
356 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
359 *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
364 * Modify the ACL for the chmod syscall.
366 static int posix_acl_chmod_masq(struct posix_acl *acl, umode_t mode)
368 struct posix_acl_entry *group_obj = NULL, *mask_obj = NULL;
369 struct posix_acl_entry *pa, *pe;
371 /* assert(atomic_read(acl->a_refcount) == 1); */
373 FOREACH_ACL_ENTRY(pa, acl, pe) {
376 pa->e_perm = (mode & S_IRWXU) >> 6;
392 pa->e_perm = (mode & S_IRWXO);
401 mask_obj->e_perm = (mode & S_IRWXG) >> 3;
405 group_obj->e_perm = (mode & S_IRWXG) >> 3;
412 posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
414 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
417 err = posix_acl_create_masq(clone, mode_p);
419 posix_acl_release(clone);
423 posix_acl_release(*acl);
427 EXPORT_SYMBOL(posix_acl_create);
430 posix_acl_chmod(struct posix_acl **acl, gfp_t gfp, umode_t mode)
432 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
435 err = posix_acl_chmod_masq(clone, mode);
437 posix_acl_release(clone);
441 posix_acl_release(*acl);
445 EXPORT_SYMBOL(posix_acl_chmod);
448 * Fix up the uids and gids in posix acl extended attributes in place.
450 static void posix_acl_fix_xattr_userns(
451 struct user_namespace *to, struct user_namespace *from,
452 void *value, size_t size)
454 posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
455 posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
462 if (size < sizeof(posix_acl_xattr_header))
464 if (header->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
467 count = posix_acl_xattr_count(size);
473 for (end = entry + count; entry != end; entry++) {
474 switch(le16_to_cpu(entry->e_tag)) {
476 uid = make_kuid(from, le32_to_cpu(entry->e_id));
477 entry->e_id = cpu_to_le32(from_kuid(to, uid));
480 gid = make_kgid(from, le32_to_cpu(entry->e_id));
481 entry->e_id = cpu_to_le32(from_kgid(to, gid));
489 void posix_acl_fix_xattr_from_user(void *value, size_t size)
491 struct user_namespace *user_ns = current_user_ns();
492 if (user_ns == &init_user_ns)
494 posix_acl_fix_xattr_userns(&init_user_ns, user_ns, value, size);
497 void posix_acl_fix_xattr_to_user(void *value, size_t size)
499 struct user_namespace *user_ns = current_user_ns();
500 if (user_ns == &init_user_ns)
502 posix_acl_fix_xattr_userns(user_ns, &init_user_ns, value, size);
506 * Convert from extended attribute to in-memory representation.
509 posix_acl_from_xattr(struct user_namespace *user_ns,
510 const void *value, size_t size)
512 posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
513 posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
515 struct posix_acl *acl;
516 struct posix_acl_entry *acl_e;
520 if (size < sizeof(posix_acl_xattr_header))
521 return ERR_PTR(-EINVAL);
522 if (header->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
523 return ERR_PTR(-EOPNOTSUPP);
525 count = posix_acl_xattr_count(size);
527 return ERR_PTR(-EINVAL);
531 acl = posix_acl_alloc(count, GFP_NOFS);
533 return ERR_PTR(-ENOMEM);
534 acl_e = acl->a_entries;
536 for (end = entry + count; entry != end; acl_e++, entry++) {
537 acl_e->e_tag = le16_to_cpu(entry->e_tag);
538 acl_e->e_perm = le16_to_cpu(entry->e_perm);
540 switch(acl_e->e_tag) {
550 le32_to_cpu(entry->e_id));
551 if (!uid_valid(acl_e->e_uid))
557 le32_to_cpu(entry->e_id));
558 if (!gid_valid(acl_e->e_gid))
569 posix_acl_release(acl);
570 return ERR_PTR(-EINVAL);
572 EXPORT_SYMBOL (posix_acl_from_xattr);
575 * Convert from in-memory to extended attribute representation.
578 posix_acl_to_xattr(struct user_namespace *user_ns, const struct posix_acl *acl,
579 void *buffer, size_t size)
581 posix_acl_xattr_header *ext_acl = (posix_acl_xattr_header *)buffer;
582 posix_acl_xattr_entry *ext_entry = ext_acl->a_entries;
585 real_size = posix_acl_xattr_size(acl->a_count);
588 if (real_size > size)
591 ext_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
593 for (n=0; n < acl->a_count; n++, ext_entry++) {
594 const struct posix_acl_entry *acl_e = &acl->a_entries[n];
595 ext_entry->e_tag = cpu_to_le16(acl_e->e_tag);
596 ext_entry->e_perm = cpu_to_le16(acl_e->e_perm);
597 switch(acl_e->e_tag) {
600 cpu_to_le32(from_kuid(user_ns, acl_e->e_uid));
604 cpu_to_le32(from_kgid(user_ns, acl_e->e_gid));
607 ext_entry->e_id = cpu_to_le32(ACL_UNDEFINED_ID);
613 EXPORT_SYMBOL (posix_acl_to_xattr);