From: Miklos Szeredi
Date: Mon, 13 Mar 2006 17:39:56 +0000 (+0000)
Subject: fix
X-Git-Tag: fuse_2_6_0_pre2~4
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=67d26d4afe1a3729d912c41fe361c9c579262233;p=qemu-gpiodev%2Flibfuse.git

fix
---

diff --git a/ChangeLog b/ChangeLog
index 28cdb7d..3da7948 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,7 @@
+2006-03-13  Miklos Szeredi
+
+	* kernel: replace global spinlock with a per-connection spinlock
+
 2006-03-10  Miklos Szeredi
 
 	* Fix source compatibility breakage for fuse_unmount().  Report
diff --git a/kernel/dev.c b/kernel/dev.c
index f9ac1ff..c968941 100644
--- a/kernel/dev.c
+++ b/kernel/dev.c
@@ -25,13 +25,11 @@ static kmem_cache_t *fuse_req_cachep;
 
 static struct fuse_conn *fuse_get_conn(struct file *file)
 {
-	struct fuse_conn *fc;
-	spin_lock(&fuse_lock);
-	fc = file->private_data;
-	if (fc && !fc->connected)
-		fc = NULL;
-	spin_unlock(&fuse_lock);
-	return fc;
+	/*
+	 * Lockless access is OK, because file->private data is set
+	 * once during mount and is valid until the file is released.
+	 */
+	return file->private_data;
 }
 
 static void fuse_request_init(struct fuse_req *req)
@@ -136,11 +134,11 @@ static struct fuse_req *do_get_request(struct fuse_conn *fc)
 {
 	struct fuse_req *req;
 
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	BUG_ON(list_empty(&fc->unused_list));
 	req = list_entry(fc->unused_list.next, struct fuse_req, list);
 	list_del_init(&req->list);
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 	fuse_request_init(req);
 	req->preallocated = 1;
 	req->in.h.uid = current->fsuid;
@@ -166,7 +164,7 @@ struct fuse_req *fuse_get_request(struct fuse_conn *fc)
 	return do_get_request(fc);
 }
 
-/* Must be called with fuse_lock held */
+/* Must be called with fc->lock held */
 static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	if (req->preallocated) {
@@ -185,9 +183,9 @@ static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	if (atomic_dec_and_test(&req->count)) {
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 		fuse_putback_request(fc, req);
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 	}
 }
 
@@ -197,15 +195,15 @@ static void fuse_put_request_locked(struct fuse_conn *fc, struct fuse_req *req)
 		fuse_putback_request(fc, req);
 }
 
-void fuse_release_background(struct fuse_req *req)
+void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req)
 {
 	iput(req->inode);
 	iput(req->inode2);
 	if (req->file)
 		fput(req->file);
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	list_del(&req->bg_entry);
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 }
 
 /*
@@ -224,7 +222,7 @@ void fuse_release_background(struct fuse_req *req)
  * interrupted and put in the background, it will return with an error
  * and hence never be reset and reused.
  *
- * Called with fuse_lock, unlocks it
+ * Called with fc->lock, unlocks it
  */
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 {
@@ -233,14 +231,14 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 	if (!req->background) {
 		wake_up(&req->waitq);
 		fuse_put_request_locked(fc, req);
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 	} else {
 		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
 		req->end = NULL;
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 		down_read(&fc->sbput_sem);
 		if (fc->mounted)
-			fuse_release_background(req);
+			fuse_release_background(fc, req);
 		up_read(&fc->sbput_sem);
 		if (end)
 			end(fc, req);
@@ -290,16 +288,16 @@ static void background_request(struct fuse_conn *fc, struct fuse_req *req)
 		get_file(req->file);
 }
 
-/* Called with fuse_lock held. Releases, and then reacquires it. */
+/* Called with fc->lock held. Releases, and then reacquires it. */
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 {
 	sigset_t oldset;
 
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 	block_sigs(&oldset);
 	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
 	restore_sigs(&oldset);
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
 		return;
 
@@ -313,9 +311,9 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 		   locked state, there mustn't be any filesystem
 		   operation (e.g. page fault), since that could lead
 		   to deadlock */
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 		wait_event(req->waitq, !req->locked);
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 	}
 	if (req->state == FUSE_REQ_PENDING) {
 		list_del(&req->list);
@@ -366,7 +364,7 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
 void request_send(struct fuse_conn *fc, struct fuse_req *req)
 {
 	req->isreply = 1;
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	if (!fc->connected)
 		req->out.h.error = -ENOTCONN;
 	else if (fc->conn_error)
@@ -379,15 +377,15 @@ void request_send(struct fuse_conn *fc, struct fuse_req *req)
 
 		request_wait_answer(fc, req);
 	}
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 }
 
 static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
 {
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	if (fc->connected) {
 		queue_request(fc, req);
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 	} else {
 		req->out.h.error = -ENOTCONN;
 		request_end(fc, req);
@@ -403,9 +401,9 @@ void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
 void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
 {
 	req->isreply = 1;
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	background_request(fc, req);
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 	request_send_nowait(fc, req);
 }
 
@@ -414,16 +412,16 @@ void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
  * anything that could cause a page-fault. If the request was already
  * interrupted bail out.
  */
-static int lock_request(struct fuse_req *req)
+static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	int err = 0;
 	if (req) {
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 		if (req->interrupted)
 			err = -ENOENT;
 		else
 			req->locked = 1;
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 	}
 	return err;
 }
@@ -433,18 +431,19 @@ static int lock_request(struct fuse_req *req)
  * requester thread is currently waiting for it to be unlocked, so
  * wake it up.
  */
-static void unlock_request(struct fuse_req *req)
+static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	if (req) {
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 		req->locked = 0;
 		if (req->interrupted)
 			wake_up(&req->waitq);
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 	}
 }
 
 struct fuse_copy_state {
+	struct fuse_conn *fc;
 	int write;
 	struct fuse_req *req;
 	const struct iovec *iov;
@@ -457,11 +456,12 @@ struct fuse_copy_state {
 	unsigned len;
 };
 
-static void fuse_copy_init(struct fuse_copy_state *cs, int write,
-			   struct fuse_req *req, const struct iovec *iov,
-			   unsigned long nr_segs)
+static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
+			   int write, struct fuse_req *req,
+			   const struct iovec *iov, unsigned long nr_segs)
 {
 	memset(cs, 0, sizeof(*cs));
+	cs->fc = fc;
 	cs->write = write;
 	cs->req = req;
 	cs->iov = iov;
@@ -491,7 +491,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 	unsigned long offset;
 	int err;
 
-	unlock_request(cs->req);
+	unlock_request(cs->fc, cs->req);
 	fuse_copy_finish(cs);
 	if (!cs->seglen) {
 		BUG_ON(!cs->nr_segs);
@@ -514,7 +514,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 	cs->seglen -= cs->len;
 	cs->addr += cs->len;
 
-	return lock_request(cs->req);
+	return lock_request(cs->fc, cs->req);
 }
 
 /* Do as much copy to/from userspace buffer as we can */
@@ -626,9 +626,9 @@ static void request_wait(struct fuse_conn *fc)
 		if (signal_pending(current))
 			break;
 
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 		schedule();
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 	}
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(&fc->waitq, &wait);
@@ -658,19 +658,16 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
 			      unsigned long nr_segs, loff_t *off)
 {
 	int err;
-	struct fuse_conn *fc;
 	struct fuse_req *req;
 	struct fuse_in *in;
 	struct fuse_copy_state cs;
 	unsigned reqsize;
-
- restart:
-	spin_lock(&fuse_lock);
-	fc = file->private_data;
-	err = -EPERM;
+	struct fuse_conn *fc = fuse_get_conn(file);
 	if (!fc)
-		goto err_unlock;
+		return -EPERM;
 
+ restart:
+	spin_lock(&fc->lock);
 	err = -EAGAIN;
 	if((file->f_flags & O_NONBLOCK) && fc->connected &&
 	   list_empty(&fc->pending))
@@ -699,14 +696,14 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
 		request_end(fc, req);
 		goto restart;
 	}
-	spin_unlock(&fuse_lock);
-	fuse_copy_init(&cs, 1, req, iov, nr_segs);
+	spin_unlock(&fc->lock);
+	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
 	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
 	if (!err)
 		err = fuse_copy_args(&cs, in->numargs, in->argpages,
 				     (struct fuse_arg *) in->args, 0);
 	fuse_copy_finish(&cs);
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	req->locked = 0;
 	if (!err && req->interrupted)
 		err = -ENOENT;
@@ -721,12 +718,12 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
 	else {
 		req->state = FUSE_REQ_SENT;
 		list_move_tail(&req->list, &fc->processing);
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 	}
 	return reqsize;
 
  err_unlock:
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 	return err;
 }
 
@@ -793,9 +790,9 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 	struct fuse_copy_state cs;
 	struct fuse_conn *fc = fuse_get_conn(file);
 	if (!fc)
-		return -ENODEV;
+		return -EPERM;
 
-	fuse_copy_init(&cs, 0, NULL, iov, nr_segs);
+	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
 	if (nbytes < sizeof(struct fuse_out_header))
 		return -EINVAL;
 
@@ -807,7 +804,7 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 	    oh.len != nbytes)
 		goto err_finish;
 
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	err = -ENOENT;
 	if (!fc->connected)
 		goto err_unlock;
@@ -818,9 +815,9 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 		goto err_unlock;
 
 	if (req->interrupted) {
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 		fuse_copy_finish(&cs);
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 		request_end(fc, req);
 		return -ENOENT;
 	}
@@ -828,12 +825,12 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 	req->out.h = oh;
 	req->locked = 1;
 	cs.req = req;
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 
 	err = copy_out_args(&cs, &req->out, nbytes);
 	fuse_copy_finish(&cs);
 
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	req->locked = 0;
 	if (!err) {
 		if (req->interrupted)
@@ -845,7 +842,7 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 	return err ? err : nbytes;
 
  err_unlock:
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
  err_finish:
 	fuse_copy_finish(&cs);
 	return err;
@@ -862,18 +859,19 @@ static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
 
 static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 {
-	struct fuse_conn *fc = fuse_get_conn(file);
 	unsigned mask = POLLOUT | POLLWRNORM;
-
+	struct fuse_conn *fc = fuse_get_conn(file);
 	if (!fc)
-		return -ENODEV;
+		return POLLERR;
 
 	poll_wait(file, &fc->waitq, wait);
 
-	spin_lock(&fuse_lock);
-	if (!list_empty(&fc->pending))
-		mask |= POLLIN | POLLRDNORM;
-	spin_unlock(&fuse_lock);
+	spin_lock(&fc->lock);
+	if (!fc->connected)
+		mask = POLLERR;
+	else if (!list_empty(&fc->pending))
+		mask |= POLLIN | POLLRDNORM;
+	spin_unlock(&fc->lock);
 
 	return mask;
 }
@@ -881,7 +879,7 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 /*
  * Abort all requests on the given list (pending or processing)
  *
- * This function releases and reacquires fuse_lock
+ * This function releases and reacquires fc->lock
  */
 static void end_requests(struct fuse_conn *fc, struct list_head *head)
 {
@@ -890,7 +888,7 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
 		req = list_entry(head->next, struct fuse_req, list);
 		req->out.h.error = -ECONNABORTED;
 		request_end(fc, req);
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 	}
 }
 
@@ -921,10 +919,10 @@ static void end_io_requests(struct fuse_conn *fc)
 			req->end = NULL;
 			/* The end function will consume this reference */
 			__fuse_get_request(req);
-			spin_unlock(&fuse_lock);
+			spin_unlock(&fc->lock);
 			wait_event(req->waitq, !req->locked);
 			end(fc, req);
-			spin_lock(&fuse_lock);
+			spin_lock(&fc->lock);
 		}
 	}
 }
@@ -951,7 +949,7 @@ static void end_io_requests(struct fuse_conn *fc)
  */
 void fuse_abort_conn(struct fuse_conn *fc)
 {
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	if (fc->connected) {
 		fc->connected = 0;
 		end_io_requests(fc);
@@ -960,22 +958,18 @@ void fuse_abort_conn(struct fuse_conn *fc)
 		wake_up_all(&fc->waitq);
 		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
 	}
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 }
 
 static int fuse_dev_release(struct inode *inode, struct file *file)
 {
-	struct fuse_conn *fc;
-
-	spin_lock(&fuse_lock);
-	fc = file->private_data;
+	struct fuse_conn *fc = fuse_get_conn(file);
 	if (fc) {
+		spin_lock(&fc->lock);
 		fc->connected = 0;
 		end_requests(fc, &fc->pending);
 		end_requests(fc, &fc->processing);
-	}
-	spin_unlock(&fuse_lock);
-	if (fc) {
+		spin_unlock(&fc->lock);
 		fasync_helper(-1, file, 0, &fc->fasync);
 		kobject_put(&fc->kobj);
 	}
@@ -986,9 +980,8 @@ static int fuse_dev_release(struct inode *inode, struct file *file)
 static int fuse_dev_fasync(int fd, struct file *file, int on)
 {
 	struct fuse_conn *fc = fuse_get_conn(file);
-
 	if (!fc)
-		return -ENODEV;
+		return -EPERM;
 
 	/* No locking - fasync_helper does its own locking */
 	return fasync_helper(fd, file, on, &fc->fasync);
diff --git a/kernel/fuse_i.h b/kernel/fuse_i.h
index 11f556d..d1f3e73 100644
--- a/kernel/fuse_i.h
+++ b/kernel/fuse_i.h
@@ -241,7 +241,7 @@ struct fuse_req {
 	/*
 	 * The following bitfields are either set once before the
 	 * request is queued or setting/clearing them is protected by
-	 * fuse_lock
+	 * fuse_conn->lock
 	 */
 
 	/** True if the request has reply */
@@ -310,6 +310,9 @@ struct fuse_req {
  * unmounted.
  */
 struct fuse_conn {
+	/** Lock protecting accessess to members of this structure */
+	spinlock_t lock;
+
 	/** The user id for this mount */
 	uid_t user_id;
 
@@ -450,21 +453,6 @@ static inline u64 get_node_id(struct inode *inode)
 /** Device operations */
 extern struct file_operations fuse_dev_operations;
 
-/**
- * This is the single global spinlock which protects FUSE's structures
- *
- * The following data is protected by this lock:
- *
- *  - the private_data field of the device file
- *  - the s_fs_info field of the super block
- *  - unused_list, pending, processing lists in fuse_conn
- *  - background list in fuse_conn
- *  - the unique request ID counter reqctr in fuse_conn
- *  - the sb (super_block) field in fuse_conn
- *  - the file (device file) field in fuse_conn
- */
-extern spinlock_t fuse_lock;
-
 /**
  * Get a filled in inode
  */
@@ -589,7 +577,7 @@ void request_send_background(struct fuse_conn *fc, struct fuse_req *req);
 /**
  * Release inodes and file associated with background request
  */
-void fuse_release_background(struct fuse_req *req);
+void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req);
 
 /* Abort all requests */
 void fuse_abort_conn(struct fuse_conn *fc);
diff --git a/kernel/inode.c b/kernel/inode.c
index f65dd5e..cc63189 100644
--- a/kernel/inode.c
+++ b/kernel/inode.c
@@ -21,7 +21,6 @@
 #else
 #include "compat/parser.h"
 #endif
-#include 
 
 MODULE_AUTHOR("Miklos Szeredi ");
 MODULE_DESCRIPTION("Filesystem in Userspace");
@@ -29,7 +28,6 @@ MODULE_DESCRIPTION("Filesystem in Userspace");
 MODULE_LICENSE("GPL");
 #endif
 
-spinlock_t fuse_lock;
 static kmem_cache_t *fuse_inode_cachep;
 #ifdef KERNEL_2_6
 static struct subsystem connections_subsys;
@@ -282,13 +280,14 @@ static void fuse_put_super(struct super_block *sb)
 
 	down_write(&fc->sbput_sem);
 	while (!list_empty(&fc->background))
-		fuse_release_background(list_entry(fc->background.next,
+		fuse_release_background(fc,
+					list_entry(fc->background.next,
 					struct fuse_req, bg_entry));
 
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	fc->mounted = 0;
 	fc->connected = 0;
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 	up_write(&fc->sbput_sem);
 	/* Flush all readers on this fs */
 	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
@@ -491,6 +490,7 @@ static struct fuse_conn *new_conn(void)
 	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
 	if (fc) {
 		int i;
+		spin_lock_init(&fc->lock);
 		init_waitqueue_head(&fc->waitq);
 		INIT_LIST_HEAD(&fc->pending);
 		INIT_LIST_HEAD(&fc->processing);
@@ -520,42 +520,10 @@ static struct fuse_conn *new_conn(void)
 		fc->bdi.unplug_io_fn = default_unplug_io_fn;
 #endif
 		fc->reqctr = 0;
-		fc->fasync = NULL;
 	}
 	return fc;
 }
 
-static struct fuse_conn *get_conn(struct file *file, struct super_block *sb)
-{
-	struct fuse_conn *fc;
-	int err;
-
-	err = -EINVAL;
-	if (file->f_op != &fuse_dev_operations)
-		goto out_err;
-
-	err = -ENOMEM;
-	fc = new_conn();
-	if (!fc)
-		goto out_err;
-
-	spin_lock(&fuse_lock);
-	err = -EINVAL;
-	if (file->private_data)
-		goto out_unlock;
-
-	kobject_get(&fc->kobj);
-	file->private_data = fc;
-	spin_unlock(&fuse_lock);
-	return fc;
-
- out_unlock:
-	spin_unlock(&fuse_lock);
-	kobject_put(&fc->kobj);
- out_err:
-	return ERR_PTR(err);
-}
-
 static struct inode *get_root_inode(struct super_block *sb, unsigned mode)
 {
 	struct fuse_attr attr;
@@ -706,12 +674,9 @@ static void fuse_send_init(struct fuse_conn *fc)
 #ifdef KERNEL_2_6
 static unsigned long long conn_id(void)
 {
+	/* BKL is held for ->get_sb() */
 	static unsigned long long ctr = 1;
-	unsigned long long val;
-	spin_lock(&fuse_lock);
-	val = ctr++;
-	spin_unlock(&fuse_lock);
-	return val;
+	return ctr++;
 }
 #endif
 
@@ -742,10 +707,17 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
 	if (!file)
 		return -EINVAL;
 
-	fc = get_conn(file, sb);
-	fput(file);
-	if (IS_ERR(fc))
-		return PTR_ERR(fc);
+	if (file->f_op != &fuse_dev_operations)
+		return -EINVAL;
+
+	/* Setting file->private_data can't race with other mount()
+	   instances, since BKL is held for ->get_sb() */
+	if (file->private_data)
+		return -EINVAL;
+
+	fc = new_conn();
+	if (!fc)
+		return -ENOMEM;
 
 	fc->flags = d.flags;
 	fc->user_id = d.user_id;
@@ -777,10 +749,16 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
 #endif
 	sb->s_root = root_dentry;
 
-	spin_lock(&fuse_lock);
 	fc->mounted = 1;
 	fc->connected = 1;
-	spin_unlock(&fuse_lock);
+	kobject_get(&fc->kobj);
+	file->private_data = fc;
+	/*
+	 * atomic_dec_and_test() in fput() provides the necessary
+	 * memory barrier for file->private_data to be visible on all
+	 * CPUs after this
+	 */
+	fput(file);
 
 	fuse_send_init(fc);
 
@@ -791,6 +769,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
 	dput(root_dentry);
 #endif
  err:
+	fput(file);
 	kobject_put(&fc->kobj);
 	return err;
 }
@@ -994,7 +973,6 @@ static int __init fuse_init(void)
 	printk("fuse distribution version: %s\n", FUSE_VERSION);
 #endif
 
-	spin_lock_init(&fuse_lock);
 	res = fuse_fs_init();
 	if (res)
 		goto err;
diff --git a/lib/fuse_lowlevel.c b/lib/fuse_lowlevel.c
index 8437eb4..421aedf 100644
--- a/lib/fuse_lowlevel.c
+++ b/lib/fuse_lowlevel.c
@@ -1146,6 +1146,7 @@ int fuse_sync_compat_args(struct fuse_args *args)
 {
     struct fuse_ll_compat_conf conf;
 
+    memset(&conf, 0, sizeof(conf));
     if (fuse_opt_parse(args, &conf, fuse_ll_opts_compat, NULL) == -1)
         return -1;
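
The patch above applies a common kernel locking pattern: state that used to be guarded by one global spinlock (fuse_lock) is instead guarded by a spinlock embedded in the object that owns the state (struct fuse_conn), initialized once when the connection is created and taken only around that connection's own lists. The following minimal sketch illustrates the pattern only; it uses hypothetical names (example_conn, example_new_conn, example_queue) and is not code from this commit.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical per-connection object: the lock lives inside it. */
struct example_conn {
	spinlock_t lock;		/* protects the two lists below */
	struct list_head pending;
	struct list_head processing;
};

static struct example_conn *example_new_conn(void)
{
	struct example_conn *ec = kzalloc(sizeof(*ec), GFP_KERNEL);
	if (ec) {
		spin_lock_init(&ec->lock);	/* per object, not global */
		INIT_LIST_HEAD(&ec->pending);
		INIT_LIST_HEAD(&ec->processing);
	}
	return ec;
}

static void example_queue(struct example_conn *ec, struct list_head *item)
{
	spin_lock(&ec->lock);		/* serializes only this connection */
	list_add_tail(item, &ec->pending);
	spin_unlock(&ec->lock);
}

Compared with the old global fuse_lock, contention on one busy mount no longer serializes device I/O on every other mount; the trade-off is that anything genuinely shared between connections needs some other protection (in this patch the connection ID counter simply relies on the BKL being held during ->get_sb()).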