Diffstat (limited to 'kernel/user.c')
-rw-r--r--	kernel/user.c	200
1 file changed, 200 insertions(+), 0 deletions(-)
diff --git a/kernel/user.c b/kernel/user.c
new file mode 100644
index 00000000..9e03e9c1
--- /dev/null
+++ b/kernel/user.c
@@ -0,0 +1,200 @@
+/*
+ * The "user cache".
+ *
+ * (C) Copyright 1991-2000 Linus Torvalds
+ *
+ * We have a per-user structure to keep track of how many
+ * processes, files etc the user has claimed, in order to be
+ * able to have per-user limits for system resources.
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/key.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/user_namespace.h>
+
+/*
+ * userns count is 1 for root user, 1 for init_uts_ns,
+ * and 1 for... ?
+ */
+struct user_namespace init_user_ns = {
+ .kref = {
+ .refcount = ATOMIC_INIT(3),
+ },
+ .creator = &root_user,
+};
+EXPORT_SYMBOL_GPL(init_user_ns);
+
+/*
+ * UID task count cache, to get fast user lookup in "alloc_uid"
+ * when changing user ID's (ie setuid() and friends).
+ */
+
+#define UIDHASH_MASK (UIDHASH_SZ - 1)
+#define __uidhashfn(uid) (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
+#define uidhashentry(ns, uid) ((ns)->uidhash_table + __uidhashfn((uid)))
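+
+/*
+ * Worked example (a sketch; the real UIDHASH_BITS is configuration-
+ * dependent): assuming UIDHASH_BITS is 7, UIDHASH_SZ is 128 and uid
+ * 1000 hashes to ((1000 >> 7) + 1000) & 127 = 1007 & 127 = bucket 111.
+ * Folding the high bits into the low bits spreads out uids that differ
+ * only above the mask.
+ */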
+
+static struct kmem_cache *uid_cachep;
+
+/*
+ * The uidhash_lock is mostly taken from process context, but it is
+ * occasionally also taken from softirq/tasklet context, when
+ * task-structs get RCU-freed. Hence all locking must be softirq-safe.
+ * But free_uid() is also called with local interrupts disabled, and running
+ * local_bh_enable() with local interrupts disabled is an error: it would run
+ * softirq callbacks, which can unconditionally enable interrupts, something
+ * the caller of free_uid() does not expect.
+ */
+static DEFINE_SPINLOCK(uidhash_lock);
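+
+/*
+ * Illustration of the hazard above (a sketch of what NOT to do, not
+ * code from this file):
+ *
+ *	local_irq_save(flags);		- in the caller
+ *	...
+ *	spin_lock_bh(&uidhash_lock);
+ *	...
+ *	spin_unlock_bh(&uidhash_lock);	- runs local_bh_enable(), which
+ *					  may run softirq callbacks and
+ *					  re-enable interrupts behind the
+ *					  caller's back
+ *	local_irq_restore(flags);
+ *
+ * That is why free_uid() below pairs local_irq_save() with
+ * atomic_dec_and_lock() instead of using a _bh lock variant.
+ */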
+
+/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
+struct user_struct root_user = {
+ .__count = ATOMIC_INIT(2),
+ .processes = ATOMIC_INIT(1),
+ .files = ATOMIC_INIT(0),
+ .sigpending = ATOMIC_INIT(0),
+ .locked_shm = 0,
+ .user_ns = &init_user_ns,
+};
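+
+/*
+ * Boot-time reference picture (a sketch tying the two comments above
+ * together): init_user_ns.kref is 3 (root_user->user_ns, init_uts_ns,
+ * and the unidentified third holder noted above); root_user.__count is
+ * 2 (the init task's cred, plus init_user_ns.creator pointing at it).
+ */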
+
+/*
+ * These routines must be called with the uidhash spinlock held!
+ */
+static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
+{
+ hlist_add_head(&up->uidhash_node, hashent);
+}
+
+static void uid_hash_remove(struct user_struct *up)
+{
+ hlist_del_init(&up->uidhash_node);
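+	/* Drop the namespace reference that alloc_uid() took. */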
+ put_user_ns(up->user_ns);
+}
+
+static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+{
+ struct user_struct *user;
+ struct hlist_node *h;
+
+ hlist_for_each_entry(user, h, hashent, uidhash_node) {
+ if (user->uid == uid) {
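+			/* Found: pin the user_struct before returning it. */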
+ atomic_inc(&user->__count);
+ return user;
+ }
+ }
+
+ return NULL;
+}
+
+/* IRQs are disabled and uidhash_lock is held upon function entry.
+ * IRQ state (as stored in flags) is restored and uidhash_lock released
+ * upon function exit.
+ */
+static void free_user(struct user_struct *up, unsigned long flags)
+ __releases(&uidhash_lock)
+{
+ uid_hash_remove(up);
+ spin_unlock_irqrestore(&uidhash_lock, flags);
+ key_put(up->uid_keyring);
+ key_put(up->session_keyring);
+ kmem_cache_free(uid_cachep, up);
+}
+
+/*
+ * Locate the user_struct for the passed UID. If found, take a ref on it. The
+ * caller must undo that ref with free_uid().
+ *
+ * If the user_struct could not be found, return NULL.
+ */
+struct user_struct *find_user(uid_t uid)
+{
+ struct user_struct *ret;
+ unsigned long flags;
+ struct user_namespace *ns = current_user_ns();
+
+ spin_lock_irqsave(&uidhash_lock, flags);
+ ret = uid_hash_find(uid, uidhashentry(ns, uid));
+ spin_unlock_irqrestore(&uidhash_lock, flags);
+ return ret;
+}
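+
+/*
+ * Example use of find_user() (a hedged sketch, not a caller in this
+ * file): look the uid up, inspect it, then drop the reference the
+ * lookup took:
+ *
+ *	struct user_struct *u = find_user(uid);
+ *
+ *	if (u) {
+ *		pr_info("uid %u has %d processes\n",
+ *			uid, atomic_read(&u->processes));
+ *		free_uid(u);	- drops the reference find_user() took
+ *	}
+ */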
+
+void free_uid(struct user_struct *up)
+{
+ unsigned long flags;
+
+ if (!up)
+ return;
+
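+	/*
+	 * atomic_dec_and_lock() decrements __count and takes uidhash_lock
+	 * only if the count hit zero; free_user() then drops the lock and
+	 * restores the IRQ state saved here.
+	 */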
+ local_irq_save(flags);
+ if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
+ free_user(up, flags);
+ else
+ local_irq_restore(flags);
+}
+
+struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
+{
+ struct hlist_head *hashent = uidhashentry(ns, uid);
+ struct user_struct *up, *new;
+
+ spin_lock_irq(&uidhash_lock);
+ up = uid_hash_find(uid, hashent);
+ spin_unlock_irq(&uidhash_lock);
+
+ if (!up) {
+ new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
+ if (!new)
+			return NULL;
+
+ new->uid = uid;
+ atomic_set(&new->__count, 1);
+
+ new->user_ns = get_user_ns(ns);
+
+ /*
+ * Before adding this, check whether we raced
+ * on adding the same user already..
+ */
+ spin_lock_irq(&uidhash_lock);
+ up = uid_hash_find(uid, hashent);
+ if (up) {
+ put_user_ns(ns);
+ key_put(new->uid_keyring);
+ key_put(new->session_keyring);
+ kmem_cache_free(uid_cachep, new);
+ } else {
+ uid_hash_insert(new, hashent);
+ up = new;
+ }
+ spin_unlock_irq(&uidhash_lock);
+ }
+
+ return up;
+}
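+
+/*
+ * Typical caller pattern for alloc_uid() (a hedged sketch, loosely
+ * modelled on setuid()-style paths rather than copied from one):
+ *
+ *	struct user_struct *new_user;
+ *
+ *	new_user = alloc_uid(current_user_ns(), new_uid);
+ *	if (!new_user)
+ *		return -EAGAIN;
+ *	... point the new credentials at new_user ...
+ *	free_uid(old_user);	- drop the previous user's reference
+ */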
+
+static int __init uid_cache_init(void)
+{
+ int n;
+
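+	/* SLAB_PANIC: failure to create the cache panics at boot, so the
+	 * result needs no NULL check. */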
+ uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
+ 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+
+	for (n = 0; n < UIDHASH_SZ; ++n)
+ INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);
+
+ /* Insert the root user immediately (init already runs as root) */
+ spin_lock_irq(&uidhash_lock);
+ uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
+ spin_unlock_irq(&uidhash_lock);
+
+ return 0;
+}
+
+module_init(uid_cache_init);