author     KaiGai Kohei <kaigai@ak.jp.nec.com>    2006-05-19 00:43:53 +0900
committer  KaiGai Kohei <kaigai@ak.jp.nec.com>    2006-05-19 00:43:53 +0900
commit     20a92fc74c5c91c7bc5693d51acc2b99aceb0465 (patch)
tree       41bf535f38ff1a29c560bcf622e9b4ef03c2c106 /fs
parent     21b9879bf2817aca343cdda11ade6a87f5373e74 (diff)
parent     f6a673b3f4f93c1c50e1b18f29254b0531b722a8 (diff)
Merge git://git.infradead.org/mtd-2.6
Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/fcall.c          |  21
-rw-r--r--  fs/9p/mux.c            | 222
-rw-r--r--  fs/9p/mux.h            |   4
-rw-r--r--  fs/9p/vfs_file.c       |  13
-rw-r--r--  fs/9p/vfs_inode.c      |  19
-rw-r--r--  fs/autofs4/autofs_i.h  |   5
-rw-r--r--  fs/autofs4/root.c      |  10
-rw-r--r--  fs/autofs4/waitq.c     |  77
-rw-r--r--  fs/compat.c            |   4
-rw-r--r--  fs/ext3/inode.c        |  13
-rw-r--r--  fs/jffs2/compr.c       |   4
-rw-r--r--  fs/jffs2/compr_zlib.c  |   4
-rw-r--r--  fs/jffs2/file.c        |  20
-rw-r--r--  fs/jffs2/nodelist.c    |   4
-rw-r--r--  fs/jffs2/scan.c        |   6
-rw-r--r--  fs/jffs2/summary.c     |   2
-rw-r--r--  fs/jffs2/super.c       |  14
-rw-r--r--  fs/locks.c             |  21
-rw-r--r--  fs/namespace.c         |   7
-rw-r--r--  fs/open.c              |   1
-rw-r--r--  fs/partitions/check.c  |   3
-rw-r--r--  fs/smbfs/dir.c         |   5
-rw-r--r--  fs/smbfs/request.c     |   4
-rw-r--r--  fs/splice.c            |  74
-rw-r--r--  fs/xfs/xfs_alloc.c     |   5
-rw-r--r--  fs/xfs/xfs_rename.c    |  12
-rw-r--r--  fs/xfs/xfs_vfsops.c    |  27
-rw-r--r--  fs/xfs/xfs_vnodeops.c  |   2
28 files changed, 361 insertions(+), 242 deletions(-)
diff --git a/fs/9p/fcall.c b/fs/9p/fcall.c
index 71742ba150c..6f2617820a4 100644
--- a/fs/9p/fcall.c
+++ b/fs/9p/fcall.c
@@ -98,23 +98,20 @@ v9fs_t_attach(struct v9fs_session_info *v9ses, char *uname, char *aname,
static void v9fs_t_clunk_cb(void *a, struct v9fs_fcall *tc,
struct v9fs_fcall *rc, int err)
{
- int fid;
+ int fid, id;
struct v9fs_session_info *v9ses;
- if (err)
- return;
-
+ id = 0;
fid = tc->params.tclunk.fid;
- kfree(tc);
-
- if (!rc)
- return;
-
- v9ses = a;
- if (rc->id == RCLUNK)
- v9fs_put_idpool(fid, &v9ses->fidpool);
+ if (rc)
+ id = rc->id;
+ kfree(tc);
kfree(rc);
+ if (id == RCLUNK) {
+ v9ses = a;
+ v9fs_put_idpool(fid, &v9ses->fidpool);
+ }
}
/**
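
The reworked v9fs_t_clunk_cb() above copies rc->id into a local before freeing either fcall, so the fid is returned to the pool even on paths that previously leaked it. A minimal userspace sketch of the capture-then-free pattern, with illustrative names rather than the kernel's:

    /* capture-then-free: pull every field still needed out of the heap
     * objects, free them, and only then act on the saved values. */
    #include <stdio.h>
    #include <stdlib.h>

    enum { RCLUNK = 121 };

    struct reply { int id; };

    static void clunk_cb(struct reply *rc, int fid)
    {
        int id = 0;

        if (rc)
            id = rc->id;        /* capture before the memory goes away */
        free(rc);

        if (id == RCLUNK)
            printf("fid %d returned to pool\n", fid);
    }

    int main(void)
    {
        struct reply *rc = malloc(sizeof(*rc));

        if (!rc)
            return 1;
        rc->id = RCLUNK;
        clunk_cb(rc, 42);       /* frees rc, then releases the fid */
        return 0;
    }
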
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index 3e5b124a721..f4407eb276c 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -50,15 +50,23 @@ enum {
Wpending = 8, /* can write */
};
+enum {
+ None,
+ Flushing,
+ Flushed,
+};
+
struct v9fs_mux_poll_task;
struct v9fs_req {
+ spinlock_t lock;
int tag;
struct v9fs_fcall *tcall;
struct v9fs_fcall *rcall;
int err;
v9fs_mux_req_callback cb;
void *cba;
+ int flush;
struct list_head req_list;
};
@@ -96,8 +104,8 @@ struct v9fs_mux_poll_task {
struct v9fs_mux_rpc {
struct v9fs_mux_data *m;
- struct v9fs_req *req;
int err;
+ struct v9fs_fcall *tcall;
struct v9fs_fcall *rcall;
wait_queue_head_t wqueue;
};
@@ -524,10 +532,9 @@ again:
static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
- int ecode, tag;
+ int ecode;
struct v9fs_str *ename;
- tag = req->tag;
if (!req->err && req->rcall->id == RERROR) {
ecode = req->rcall->params.rerror.errno;
ename = &req->rcall->params.rerror.error;
@@ -553,23 +560,6 @@ static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
if (!req->err)
req->err = -EIO;
}
-
- if (req->err == ERREQFLUSH)
- return;
-
- if (req->cb) {
- dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n",
- req->tcall, req->rcall);
-
- (*req->cb) (req->cba, req->tcall, req->rcall, req->err);
- req->cb = NULL;
- } else
- kfree(req->rcall);
-
- v9fs_mux_put_tag(m, tag);
-
- wake_up(&m->equeue);
- kfree(req);
}
/**
@@ -669,17 +659,26 @@ static void v9fs_read_work(void *a)
list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
if (rreq->tag == rcall->tag) {
req = rreq;
- req->rcall = rcall;
- list_del(&req->req_list);
- spin_unlock(&m->lock);
- process_request(m, req);
+ if (req->flush != Flushing)
+ list_del(&req->req_list);
break;
}
-
}
+ spin_unlock(&m->lock);
- if (!req) {
- spin_unlock(&m->lock);
+ if (req) {
+ req->rcall = rcall;
+ process_request(m, req);
+
+ if (req->flush != Flushing) {
+ if (req->cb)
+ (*req->cb) (req, req->cba);
+ else
+ kfree(req->rcall);
+
+ wake_up(&m->equeue);
+ }
+ } else {
if (err >= 0 && rcall->id != RFLUSH)
dprintk(DEBUG_ERROR,
"unexpected response mux %p id %d tag %d\n",
@@ -746,7 +745,6 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
return ERR_PTR(-ENOMEM);
v9fs_set_tag(tc, n);
-
if ((v9fs_debug_level&DEBUG_FCALL) == DEBUG_FCALL) {
char buf[150];
@@ -754,12 +752,14 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
printk(KERN_NOTICE "<<< %p %s\n", m, buf);
}
+ spin_lock_init(&req->lock);
req->tag = n;
req->tcall = tc;
req->rcall = NULL;
req->err = 0;
req->cb = cb;
req->cba = cba;
+ req->flush = None;
spin_lock(&m->lock);
list_add_tail(&req->req_list, &m->unsent_req_list);
@@ -776,72 +776,108 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
return req;
}
-static void v9fs_mux_flush_cb(void *a, struct v9fs_fcall *tc,
- struct v9fs_fcall *rc, int err)
+static void v9fs_mux_free_request(struct v9fs_mux_data *m, struct v9fs_req *req)
+{
+ v9fs_mux_put_tag(m, req->tag);
+ kfree(req);
+}
+
+static void v9fs_mux_flush_cb(struct v9fs_req *freq, void *a)
{
v9fs_mux_req_callback cb;
int tag;
struct v9fs_mux_data *m;
- struct v9fs_req *req, *rptr;
+ struct v9fs_req *req, *rreq, *rptr;
m = a;
- dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, tc,
- rc, err, tc->params.tflush.oldtag);
+ dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
+ freq->tcall, freq->rcall, freq->err,
+ freq->tcall->params.tflush.oldtag);
spin_lock(&m->lock);
cb = NULL;
- tag = tc->params.tflush.oldtag;
- list_for_each_entry_safe(req, rptr, &m->req_list, req_list) {
- if (req->tag == tag) {
+ tag = freq->tcall->params.tflush.oldtag;
+ req = NULL;
+ list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
+ if (rreq->tag == tag) {
+ req = rreq;
list_del(&req->req_list);
- if (req->cb) {
- cb = req->cb;
- req->cb = NULL;
- spin_unlock(&m->lock);
- (*cb) (req->cba, req->tcall, req->rcall,
- req->err);
- }
- kfree(req);
- wake_up(&m->equeue);
break;
}
}
+ spin_unlock(&m->lock);
- if (!cb)
- spin_unlock(&m->lock);
+ if (req) {
+ spin_lock(&req->lock);
+ req->flush = Flushed;
+ spin_unlock(&req->lock);
+
+ if (req->cb)
+ (*req->cb) (req, req->cba);
+ else
+ kfree(req->rcall);
+
+ wake_up(&m->equeue);
+ }
- v9fs_mux_put_tag(m, tag);
- kfree(tc);
- kfree(rc);
+ kfree(freq->tcall);
+ kfree(freq->rcall);
+ v9fs_mux_free_request(m, freq);
}
-static void
+static int
v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
struct v9fs_fcall *fc;
+ struct v9fs_req *rreq, *rptr;
dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);
+ /* if a response was received for a request, do nothing */
+ spin_lock(&req->lock);
+ if (req->rcall || req->err) {
+ spin_unlock(&req->lock);
+ dprintk(DEBUG_MUX, "mux %p req %p response already received\n", m, req);
+ return 0;
+ }
+
+ req->flush = Flushing;
+ spin_unlock(&req->lock);
+
+ spin_lock(&m->lock);
+ /* if the request is not sent yet, just remove it from the list */
+ list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
+ if (rreq->tag == req->tag) {
+ dprintk(DEBUG_MUX, "mux %p req %p request is not sent yet\n", m, req);
+ list_del(&rreq->req_list);
+ req->flush = Flushed;
+ spin_unlock(&m->lock);
+ if (req->cb)
+ (*req->cb) (req, req->cba);
+ return 0;
+ }
+ }
+ spin_unlock(&m->lock);
+
+ clear_thread_flag(TIF_SIGPENDING);
fc = v9fs_create_tflush(req->tag);
v9fs_send_request(m, fc, v9fs_mux_flush_cb, m);
+ return 1;
}
static void
-v9fs_mux_rpc_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc, int err)
+v9fs_mux_rpc_cb(struct v9fs_req *req, void *a)
{
struct v9fs_mux_rpc *r;
- if (err == ERREQFLUSH) {
- kfree(rc);
- dprintk(DEBUG_MUX, "err req flush\n");
- return;
- }
-
+ dprintk(DEBUG_MUX, "req %p r %p\n", req, a);
r = a;
- dprintk(DEBUG_MUX, "mux %p req %p tc %p rc %p err %d\n", r->m, r->req,
- tc, rc, err);
- r->rcall = rc;
- r->err = err;
+ r->rcall = req->rcall;
+ r->err = req->err;
+
+ if (req->flush!=None && !req->err)
+ r->err = -ERESTARTSYS;
+
wake_up(&r->wqueue);
}
@@ -856,12 +892,13 @@ int
v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
struct v9fs_fcall **rc)
{
- int err;
+ int err, sigpending;
unsigned long flags;
struct v9fs_req *req;
struct v9fs_mux_rpc r;
r.err = 0;
+ r.tcall = tc;
r.rcall = NULL;
r.m = m;
init_waitqueue_head(&r.wqueue);
@@ -869,48 +906,50 @@ v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
if (rc)
*rc = NULL;
+ sigpending = 0;
+ if (signal_pending(current)) {
+ sigpending = 1;
+ clear_thread_flag(TIF_SIGPENDING);
+ }
+
req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r);
if (IS_ERR(req)) {
err = PTR_ERR(req);
dprintk(DEBUG_MUX, "error %d\n", err);
- return PTR_ERR(req);
+ return err;
}
- r.req = req;
- dprintk(DEBUG_MUX, "mux %p tc %p tag %d rpc %p req %p\n", m, tc,
- req->tag, &r, req);
err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
if (r.err < 0)
err = r.err;
if (err == -ERESTARTSYS && m->trans->status == Connected && m->err == 0) {
- spin_lock(&m->lock);
- req->tcall = NULL;
- req->err = ERREQFLUSH;
- spin_unlock(&m->lock);
+ if (v9fs_mux_flush_request(m, req)) {
+ /* wait until we get response of the flush message */
+ do {
+ clear_thread_flag(TIF_SIGPENDING);
+ err = wait_event_interruptible(r.wqueue,
+ r.rcall || r.err);
+ } while (!r.rcall && !r.err && err==-ERESTARTSYS &&
+ m->trans->status==Connected && !m->err);
+ }
+ sigpending = 1;
+ }
- clear_thread_flag(TIF_SIGPENDING);
- v9fs_mux_flush_request(m, req);
+ if (sigpending) {
spin_lock_irqsave(&current->sighand->siglock, flags);
recalc_sigpending();
spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
- if (!err) {
- if (r.rcall)
- dprintk(DEBUG_MUX, "got response id %d tag %d\n",
- r.rcall->id, r.rcall->tag);
-
- if (rc)
- *rc = r.rcall;
- else
- kfree(r.rcall);
- } else {
+ if (rc)
+ *rc = r.rcall;
+ else
kfree(r.rcall);
- dprintk(DEBUG_MUX, "got error %d\n", err);
- if (err > 0)
- err = -EIO;
- }
+
+ v9fs_mux_free_request(m, req);
+ if (err > 0)
+ err = -EIO;
return err;
}
@@ -951,12 +990,15 @@ void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
struct v9fs_req *req, *rtmp;
LIST_HEAD(cancel_list);
- dprintk(DEBUG_MUX, "mux %p err %d\n", m, err);
+ dprintk(DEBUG_ERROR, "mux %p err %d\n", m, err);
m->err = err;
spin_lock(&m->lock);
list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
list_move(&req->req_list, &cancel_list);
}
+ list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
+ list_move(&req->req_list, &cancel_list);
+ }
spin_unlock(&m->lock);
list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
@@ -965,11 +1007,9 @@ void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
req->err = err;
if (req->cb)
- (*req->cb) (req->cba, req->tcall, req->rcall, req->err);
+ (*req->cb) (req, req->cba);
else
kfree(req->rcall);
-
- kfree(req);
}
wake_up(&m->equeue);
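
The mux.c changes above revolve around the new per-request flush state (None/Flushing/Flushed): the read path leaves a request that is mid-flush on the list so the flush callback can finish it. A single-threaded userspace model of that rule, with illustrative names (the kernel guards the state with req->lock):

    #include <stdio.h>

    enum { None, Flushing, Flushed };

    struct req {
        int flush;
        int completed;
    };

    /* mirrors v9fs_read_work(): a request being flushed is left alone
     * here; v9fs_mux_flush_cb() owns its completion */
    static void response_arrived(struct req *r)
    {
        if (r->flush != Flushing)
            r->completed = 1;
    }

    int main(void)
    {
        struct req a = { None, 0 }, b = { Flushing, 0 };

        response_arrived(&a);
        response_arrived(&b);
        printf("a completed=%d, b completed=%d\n", a.completed, b.completed);
        return 0;       /* prints: a completed=1, b completed=0 */
    }
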
diff --git a/fs/9p/mux.h b/fs/9p/mux.h
index e90bfd32ea4..fb10c50186a 100644
--- a/fs/9p/mux.h
+++ b/fs/9p/mux.h
@@ -24,6 +24,7 @@
*/
struct v9fs_mux_data;
+struct v9fs_req;
/**
* v9fs_mux_req_callback - callback function that is called when the
@@ -36,8 +37,7 @@ struct v9fs_mux_data;
* @rc - response call
* @err - error code (non-zero if error occured)
*/
-typedef void (*v9fs_mux_req_callback)(void *a, struct v9fs_fcall *tc,
- struct v9fs_fcall *rc, int err);
+typedef void (*v9fs_mux_req_callback)(struct v9fs_req *req, void *a);
int v9fs_mux_global_init(void);
void v9fs_mux_global_exit(void);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 083dcfcd158..1a8e46084f0 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -72,11 +72,17 @@ int v9fs_file_open(struct inode *inode, struct file *file)
return -ENOSPC;
}
- err = v9fs_t_walk(v9ses, vfid->fid, fid, NULL, NULL);
+ err = v9fs_t_walk(v9ses, vfid->fid, fid, NULL, &fcall);
if (err < 0) {
dprintk(DEBUG_ERROR, "rewalk didn't work\n");
- goto put_fid;
+ if (fcall && fcall->id == RWALK)
+ goto clunk_fid;
+ else {
+ v9fs_put_idpool(fid, &v9ses->fidpool);
+ goto free_fcall;
+ }
}
+ kfree(fcall);
/* TODO: do special things for O_EXCL, O_NOFOLLOW, O_SYNC */
/* translate open mode appropriately */
@@ -109,8 +115,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
clunk_fid:
v9fs_t_clunk(v9ses, fid);
-put_fid:
- v9fs_put_idpool(fid, &v9ses->fidpool);
+free_fcall:
kfree(fcall);
return err;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 133db366d30..2cb87ba4b1c 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -270,7 +270,10 @@ v9fs_create(struct v9fs_session_info *v9ses, u32 pfid, char *name, u32 perm,
err = v9fs_t_walk(v9ses, pfid, fid, NULL, &fcall);
if (err < 0) {
PRINT_FCALL_ERROR("clone error", fcall);
- goto put_fid;
+ if (fcall && fcall->id == RWALK)
+ goto clunk_fid;
+ else
+ goto put_fid;
}
kfree(fcall);
@@ -322,6 +325,9 @@ v9fs_clone_walk(struct v9fs_session_info *v9ses, u32 fid, struct dentry *dentry)
&fcall);
if (err < 0) {
+ if (fcall && fcall->id == RWALK)
+ goto clunk_fid;
+
PRINT_FCALL_ERROR("walk error", fcall);
v9fs_put_idpool(nfid, &v9ses->fidpool);
goto error;
@@ -640,19 +646,26 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
}
result = v9fs_t_walk(v9ses, dirfidnum, newfid,
- (char *)dentry->d_name.name, NULL);
+ (char *)dentry->d_name.name, &fcall);
+
if (result < 0) {
- v9fs_put_idpool(newfid, &v9ses->fidpool);
+ if (fcall && fcall->id == RWALK)
+ v9fs_t_clunk(v9ses, newfid);
+ else
+ v9fs_put_idpool(newfid, &v9ses->fidpool);
+
if (result == -ENOENT) {
d_add(dentry, NULL);
dprintk(DEBUG_VFS,
"Return negative dentry %p count %d\n",
dentry, atomic_read(&dentry->d_count));
+ kfree(fcall);
return NULL;
}
dprintk(DEBUG_ERROR, "walk error:%d\n", result);
goto FreeFcall;
}
+ kfree(fcall);
result = v9fs_t_stat(v9ses, newfid, &fcall);
if (result < 0) {
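
The vfs_file.c and vfs_inode.c hunks above all apply one error-path rule: if a failed walk still came back with an RWALK response, the server has allocated the new fid and it must be clunked; otherwise the fid number only ever existed locally and goes straight back to the id pool. A hedged sketch, with stubs standing in for the real 9p calls:

    #include <stdio.h>

    enum { RWALK = 111 };

    struct fcall { int id; };

    static void clunk(int fid)      { printf("Tclunk fid %d\n", fid); }
    static void put_idpool(int fid) { printf("fid %d back to pool\n", fid); }

    static void walk_failed(struct fcall *rc, int fid)
    {
        if (rc && rc->id == RWALK)
            clunk(fid);         /* server knows the fid: ask it to drop it */
        else
            put_idpool(fid);    /* server never saw it: reuse locally */
    }

    int main(void)
    {
        struct fcall rc = { RWALK };

        walk_failed(&rc, 7);    /* Tclunk fid 7 */
        walk_failed(NULL, 8);   /* fid 8 back to pool */
        return 0;
    }
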
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 57c4903614e..d6603d02304 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -74,8 +74,8 @@ struct autofs_wait_queue {
struct autofs_wait_queue *next;
autofs_wqt_t wait_queue_token;
/* We use the following to see what we are waiting for */
- int hash;
- int len;
+ unsigned int hash;
+ unsigned int len;
char *name;
u32 dev;
u64 ino;
@@ -85,7 +85,6 @@ struct autofs_wait_queue {
pid_t tgid;
/* This is for status reporting upon return */
int status;
- atomic_t notify;
atomic_t wait_ctr;
};
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 84e030c8ddd..5100f984783 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -327,6 +327,7 @@ static int try_to_fill_dentry(struct dentry *dentry, int flags)
static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs4_dentry_ino(dentry);
int oz_mode = autofs4_oz_mode(sbi);
unsigned int lookup_type;
int status;
@@ -340,13 +341,8 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
if (oz_mode || !lookup_type)
goto done;
- /*
- * If a request is pending wait for it.
- * If it's a mount then it won't be expired till at least
- * a liitle later and if it's an expire then we might need
- * to mount it again.
- */
- if (autofs4_ispending(dentry)) {
+ /* If an expire request is pending wait for it. */
+ if (ino && (ino->flags & AUTOFS_INF_EXPIRING)) {
DPRINTK("waiting for active request %p name=%.*s",
dentry, dentry->d_name.len, dentry->d_name.name);
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 142ab6aa2aa..ce103e7b0bc 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -189,14 +189,30 @@ static int autofs4_getpath(struct autofs_sb_info *sbi,
return len;
}
+static struct autofs_wait_queue *
+autofs4_find_wait(struct autofs_sb_info *sbi,
+ char *name, unsigned int hash, unsigned int len)
+{
+ struct autofs_wait_queue *wq;
+
+ for (wq = sbi->queues; wq; wq = wq->next) {
+ if (wq->hash == hash &&
+ wq->len == len &&
+ wq->name && !memcmp(wq->name, name, len))
+ break;
+ }
+ return wq;
+}
+
int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
enum autofs_notify notify)
{
+ struct autofs_info *ino;
struct autofs_wait_queue *wq;
char *name;
unsigned int len = 0;
unsigned int hash = 0;
- int status;
+ int status, type;
/* In catatonic mode, we don't wait for nobody */
if (sbi->catatonic)
@@ -223,21 +239,41 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
return -EINTR;
}
- for (wq = sbi->queues ; wq ; wq = wq->next) {
- if (wq->hash == dentry->d_name.hash &&
- wq->len == len &&
- wq->name && !memcmp(wq->name, name, len))
- break;
- }
+ wq = autofs4_find_wait(sbi, name, hash, len);
+ ino = autofs4_dentry_ino(dentry);
+ if (!wq && ino && notify == NFY_NONE) {
+ /*
+ * Either we've beaten the pending expire to post its
+ * wait or it finished while we waited on the mutex.
+ * So we need to wait till either, the wait appears
+ * or the expire finishes.
+ */
+
+ while (ino->flags & AUTOFS_INF_EXPIRING) {
+ mutex_unlock(&sbi->wq_mutex);
+ schedule_timeout_interruptible(HZ/10);
+ if (mutex_lock_interruptible(&sbi->wq_mutex)) {
+ kfree(name);
+ return -EINTR;
+ }
+ wq = autofs4_find_wait(sbi, name, hash, len);
+ if (wq)
+ break;
+ }
- if (!wq) {
- /* Can't wait for an expire if there's no mount */
- if (notify == NFY_NONE && !d_mountpoint(dentry)) {
+ /*
+ * Not ideal but the status has already gone. Of the two
+ * cases where we wait on NFY_NONE neither depend on the
+ * return status of the wait.
+ */
+ if (!wq) {
kfree(name);
mutex_unlock(&sbi->wq_mutex);
- return -ENOENT;
+ return 0;
}
+ }
+ if (!wq) {
/* Create a new wait queue */
wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL);
if (!wq) {
@@ -263,20 +299,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
wq->tgid = current->tgid;
wq->status = -EINTR; /* Status return if interrupted */
atomic_set(&wq->wait_ctr, 2);
- atomic_set(&wq->notify, 1);
- mutex_unlock(&sbi->wq_mutex);
- } else {
- atomic_inc(&wq->wait_ctr);
mutex_unlock(&sbi->wq_mutex);
- kfree(name);
- DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
- (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify);
- }
-
- if (notify != NFY_NONE && atomic_read(&wq->notify)) {
- int type;
-
- atomic_dec(&wq->notify);
if (sbi->version < 5) {
if (notify == NFY_MOUNT)
@@ -299,6 +322,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
/* autofs4_notify_daemon() may block */
autofs4_notify_daemon(sbi, wq, type);
+ } else {
+ atomic_inc(&wq->wait_ctr);
+ mutex_unlock(&sbi->wq_mutex);
+ kfree(name);
+ DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
+ (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify);
}
/* wq->name is NULL if and only if the lock is already released */
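
The NFY_NONE branch added above polls for the expire's wait to appear: drop wq_mutex, sleep about HZ/10, retake the mutex and re-run the lookup, until either the wait shows up or AUTOFS_INF_EXPIRING clears. A rough userspace model, assuming a pthread mutex in place of wq_mutex and usleep() in place of schedule_timeout_interruptible():

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t wq_mutex = PTHREAD_MUTEX_INITIALIZER;
    static volatile int expiring = 1;
    static volatile int wait_posted;

    static int find_wait(void) { return wait_posted; }

    static int wait_for_expire(void)
    {
        int wq = find_wait();

        while (!wq && expiring) {
            pthread_mutex_unlock(&wq_mutex);
            usleep(100 * 1000);             /* roughly HZ/10 */
            pthread_mutex_lock(&wq_mutex);
            wq = find_wait();
        }
        return wq;      /* 0 means the expire finished; nothing to wait on */
    }

    int main(void)
    {
        pthread_mutex_lock(&wq_mutex);
        expiring = 0;                       /* pretend the expire completed */
        printf("wq found: %d\n", wait_for_expire());
        pthread_mutex_unlock(&wq_mutex);
        return 0;
    }
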
diff --git a/fs/compat.c b/fs/compat.c
index 3f3e8f4d43d..01f39f87f37 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1323,7 +1323,7 @@ compat_sys_vmsplice(int fd, const struct compat_iovec __user *iov32,
{
unsigned i;
struct iovec *iov;
- if (nr_segs >= UIO_MAXIOV)
+ if (nr_segs > UIO_MAXIOV)
return -EINVAL;
iov = compat_alloc_user_space(nr_segs * sizeof(struct iovec));
for (i = 0; i < nr_segs; i++) {
@@ -1913,7 +1913,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
}
if (sigmask) {
- if (sigsetsize |= sizeof(compat_sigset_t))
+ if (sigsetsize != sizeof(compat_sigset_t))
return -EINVAL;
if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
return -EFAULT;
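
Both compat.c fixes above are one-character comparison bugs; the second is the classic assignment-in-condition: 'sigsetsize |= sizeof(compat_sigset_t)' OR-assigns and then tests the always-nonzero result instead of comparing with '!='. A small demonstration of why the two forms differ (gcc's -Wparentheses flags a plain '=' here but typically not the compound form):

    #include <stdio.h>

    int main(void)
    {
        unsigned long size = 8;

        if (size |= 16)         /* BUG: size becomes 24, branch always taken */
            printf("bug: size now %lu, branch taken\n", size);

        size = 8;
        if (size != 16)         /* FIX: plain comparison, size untouched */
            printf("fix: size still %lu\n", size);

        return 0;
    }
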
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 48ae0339af1..2edd7eec88f 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -711,7 +711,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode,
* direct blocks blocks
*/
if (num == 0 && blks > 1) {
- current_block = le32_to_cpu(where->key + 1);
+ current_block = le32_to_cpu(where->key) + 1;
for (i = 1; i < blks; i++)
*(where->p + i ) = cpu_to_le32(current_block++);
}
@@ -724,7 +724,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode,
if (block_i) {
block_i->last_alloc_logical_block = block + blks - 1;
block_i->last_alloc_physical_block =
- le32_to_cpu(where[num].key + blks - 1);
+ le32_to_cpu(where[num].key) + blks - 1;
}
/* We are done with atomic stuff, now do the rest of housekeeping */
@@ -814,11 +814,13 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
/* Simplest case - block found, no allocation needed */
if (!partial) {
- first_block = chain[depth - 1].key;
+ first_block = le32_to_cpu(chain[depth - 1].key);
clear_buffer_new(bh_result);
count++;
/*map more blocks*/
while (count < maxblocks && count <= blocks_to_boundary) {
+ unsigned long blk;
+
if (!verify_chain(chain, partial)) {
/*
* Indirect block might be removed by
@@ -831,8 +833,9 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
count = 0;
break;
}
- if (le32_to_cpu(*(chain[depth-1].p+count) ==
- (first_block + count)))
+ blk = le32_to_cpu(*(chain[depth-1].p + count));
+
+ if (blk == first_block + count)
count++;
else
break;
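
All three ext3 hunks move arithmetic and comparisons outside le32_to_cpu(): converting (key + 1) byte-swaps the wrong value on big-endian hosts, whereas le32_to_cpu(key) + 1 is correct everywhere. A userspace demo with a hand-rolled swap so the difference shows on any machine:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t swap32(uint32_t x)
    {
        return (x >> 24) | ((x >> 8) & 0xff00) |
               ((x << 8) & 0xff0000) | (x << 24);
    }

    int main(void)
    {
        /* block 100 stored little-endian, as seen from a big-endian CPU */
        uint32_t key_le = swap32(100);

        printf("wrong: %u\n", (unsigned)swap32(key_le + 1));   /* 16777316 */
        printf("right: %u\n", (unsigned)(swap32(key_le) + 1)); /* 101 */
        return 0;
    }
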
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index e7944e665b9..5f45e01d71e 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -412,7 +412,7 @@ void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig)
kfree(comprbuf);
}
-int jffs2_compressors_init(void)
+int __init jffs2_compressors_init(void)
{
/* Registering compressors */
#ifdef CONFIG_JFFS2_ZLIB
@@ -440,7 +440,7 @@ int jffs2_compressors_init(void)
return 0;
}
-int jffs2_compressors_exit(void)
+int __exit jffs2_compressors_exit(void)
{
/* Unregistering compressors */
#ifdef CONFIG_JFFS2_RUBIN
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c
index 5c63e0cdcf4..d43cbac4fb9 100644
--- a/fs/jffs2/compr_zlib.c
+++ b/fs/jffs2/compr_zlib.c
@@ -60,7 +60,7 @@ static int __init alloc_workspaces(void)
return 0;
}
-static void free_workspaces(void)
+static void __exit free_workspaces(void)
{
vfree(def_strm.workspace);
vfree(inf_strm.workspace);
@@ -216,7 +216,7 @@ int __init jffs2_zlib_init(void)
return ret;
}
-void jffs2_zlib_exit(void)
+void __exit jffs2_zlib_exit(void)
{
jffs2_unregister_compressor(&jffs2_zlib_comp);
free_workspaces();
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index e92187f34d5..e18c9437d58 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -220,12 +220,20 @@ static int jffs2_commit_write (struct file *filp, struct page *pg,
D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags));
- if (!start && end == PAGE_CACHE_SIZE) {
- /* We need to avoid deadlock with page_cache_read() in
- jffs2_garbage_collect_pass(). So we have to mark the
- page up to date, to prevent page_cache_read() from
- trying to re-lock it. */
- SetPageUptodate(pg);
+ if (end == PAGE_CACHE_SIZE) {
+ if (!start) {
+ /* We need to avoid deadlock with page_cache_read() in
+ jffs2_garbage_collect_pass(). So we have to mark the
+ page up to date, to prevent page_cache_read() from
+ trying to re-lock it. */
+ SetPageUptodate(pg);
+ } else {
+ /* When writing out the end of a page, write out the
+ _whole_ page. This helps to reduce the number of
+ nodes in files which have many short writes, like
+ syslog files. */
+ start = aligned_start = 0;
+ }
}
ri = jffs2_alloc_raw_inode();
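
The jffs2_commit_write() hunk above widens any write that ends on the page boundary to cover the whole page, so append-heavy files such as syslogs produce one node per page instead of one per short write. The widening rule in miniature, with an illustrative page size:

    #include <stdio.h>

    #define PAGE_SIZE 4096

    /* a tail write that reaches the page boundary is widened to the
     * whole page */
    static void widen(unsigned int *start, unsigned int end)
    {
        if (end == PAGE_SIZE && *start != 0)
            *start = 0;
    }

    int main(void)
    {
        unsigned int start = 4000, end = PAGE_SIZE;

        widen(&start, end);
        printf("write range now %u-%u\n", start, end);  /* 0-4096 */
        return 0;
    }
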
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 9c575733659..4973cd648ba 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -438,7 +438,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
if (c->mtd->point) {
err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer);
if (!err && retlen < tn->csize) {
- JFFS2_WARNING("MTD point returned len too short: %u instead of %u.\n", retlen, tn->csize);
+ JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
c->mtd->unpoint(c->mtd, buffer, ofs, len);
} else if (err)
JFFS2_WARNING("MTD point failed: error code %d.\n", err);
@@ -461,7 +461,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
}
if (retlen != len) {
- JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ofs, retlen, len);
+ JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", ofs, retlen, len);
err = -EIO;
goto free_out;
}
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 0a79fc921e9..5847e76ce16 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -222,9 +222,6 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
}
}
- if (jffs2_sum_active() && s)
- kfree(s);
-
/* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
if (c->nextblock && (c->nextblock->dirty_size)) {
c->nextblock->wasted_size += c->nextblock->dirty_size;
@@ -266,6 +263,9 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
else
c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
#endif
+ if (s)
+ kfree(s);
+
return ret;
}
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index 9763d73c0da..439b9f6d583 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -853,7 +853,7 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
if (ret || (retlen != infosize)) {
- JFFS2_WARNING("Write of %d bytes at 0x%08x failed. returned %d, retlen %zu\n",
+ JFFS2_WARNING("Write of %u bytes at 0x%08x failed. returned %d, retlen %zd\n",
infosize, jeb->offset + c->sector_size - jeb->free_size, ret, retlen);
c->summary->sum_size = JFFS2_SUMMARY_NOSUM_SIZE;
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index c8b539ee7d8..9d0521451f5 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -324,6 +324,18 @@ static int __init init_jffs2_fs(void)
{
int ret;
+ /* Paranoia checks for on-medium structures. If we ask GCC
+ to pack them with __attribute__((packed)) then it _also_
+ assumes that they're not aligned -- so it emits crappy
+ code on some architectures. Ideally we want an attribute
+ which means just 'no padding', without the alignment
+ thing. But GCC doesn't have that -- we have to just
+ hope the structs are the right sizes, instead. */
+ BUG_ON(sizeof(struct jffs2_unknown_node) != 12);
+ BUG_ON(sizeof(struct jffs2_raw_dirent) != 40);
+ BUG_ON(sizeof(struct jffs2_raw_inode) != 68);
+ BUG_ON(sizeof(struct jffs2_raw_summary) != 32);
+
printk(KERN_INFO "JFFS2 version 2.2."
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
" (NAND)"
@@ -331,7 +343,7 @@ static int __init init_jffs2_fs(void)
#ifdef CONFIG_JFFS2_SUMMARY
" (SUMMARY) "
#endif
- " (C) 2001-2003 Red Hat, Inc.\n");
+ " (C) 2001-2006 Red Hat, Inc.\n");
jffs2_inode_cachep = kmem_cache_create("jffs2_i",
sizeof(struct jffs2_inode_info),
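
The BUG_ON() checks added to init_jffs2_fs() above pin the on-medium struct sizes at boot, since marking the structs __attribute__((packed)) would make GCC emit worse code. In plain C the same invariant can also be checked at build time; a sketch using _Static_assert with an illustrative header layout matching the 12-byte jffs2_unknown_node figure above:

    #include <stdint.h>
    #include <stdio.h>

    struct unknown_node {
        uint16_t magic;
        uint16_t nodetype;
        uint32_t totlen;
        uint32_t hdr_crc;
    };

    /* fails the build, not the boot, if padding ever creeps in */
    _Static_assert(sizeof(struct unknown_node) == 12,
                   "on-medium header must be exactly 12 bytes");

    int main(void)
    {
        printf("sizeof = %zu\n", sizeof(struct unknown_node));
        return 0;
    }
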
diff --git a/fs/locks.c b/fs/locks.c
index efad798824d..6f99c0a6f83 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -446,15 +446,14 @@ static struct lock_manager_operations lease_manager_ops = {
*/
static int lease_init(struct file *filp, int type, struct file_lock *fl)
{
+ if (assign_type(fl, type) != 0)
+ return -EINVAL;
+
fl->fl_owner = current->files;
fl->fl_pid = current->tgid;
fl->fl_file = filp;
fl->fl_flags = FL_LEASE;
- if (assign_type(fl, type) != 0) {
- locks_free_lock(fl);
- return -EINVAL;
- }
fl->fl_start = 0;
fl->fl_end = OFFSET_MAX;
fl->fl_ops = NULL;
@@ -466,16 +465,19 @@ static int lease_init(struct file *filp, int type, struct file_lock *fl)
static int lease_alloc(struct file *filp, int type, struct file_lock **flp)
{
struct file_lock *fl = locks_alloc_lock();
- int error;
+ int error = -ENOMEM;
if (fl == NULL)
- return -ENOMEM;
+ goto out;
error = lease_init(filp, type, fl);
- if (error)
- return error;
+ if (error) {
+ locks_free_lock(fl);
+ fl = NULL;
+ }
+out:
*flp = fl;
- return 0;
+ return error;
}
/* Check if two locks overlap each other.
@@ -1372,6 +1374,7 @@ static int __setlease(struct file *filp, long arg, struct file_lock **flp)
goto out;
if (my_before != NULL) {
+ *flp = *my_before;
error = lease->fl_lmops->fl_change(my_before, arg);
goto out;
}
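
The lease_alloc() rework above moves cleanup into the allocator: when lease_init() fails it frees the lock itself, leaves *flp NULL, and returns the real error instead of handing a half-built lock back to the caller. The same ownership rule in miniature:

    #include <stdlib.h>

    struct lock { int type; };

    static int lock_init(struct lock *l, int type)
    {
        if (type < 0)
            return -1;          /* stands in for -EINVAL */
        l->type = type;
        return 0;
    }

    static int lock_alloc(int type, struct lock **out)
    {
        struct lock *l = malloc(sizeof(*l));
        int err = -2;           /* stands in for -ENOMEM */

        if (!l)
            goto out;
        err = lock_init(l, type);
        if (err) {
            free(l);            /* the allocator cleans up its own failure */
            l = NULL;
        }
    out:
        *out = l;               /* NULL on any failure */
        return err;
    }

    int main(void)
    {
        struct lock *l;
        int err = lock_alloc(1, &l);

        if (!err)
            free(l);
        return err ? 1 : 0;
    }
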
diff --git a/fs/namespace.c b/fs/namespace.c
index 2c5f1f80bdc..bf478addb85 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -899,13 +899,11 @@ static int do_change_type(struct nameidata *nd, int flag)
/*
* do loopback mount.
*/
-static int do_loopback(struct nameidata *nd, char *old_name, unsigned long flags, int mnt_flags)
+static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
{
struct nameidata old_nd;
struct vfsmount *mnt = NULL;
- int recurse = flags & MS_REC;
int err = mount_is_safe(nd);
-
if (err)
return err;
if (!old_name || !*old_name)
@@ -939,7 +937,6 @@ static int do_loopback(struct nameidata *nd, char *old_name, unsigned long flags
spin_unlock(&vfsmount_lock);
release_mounts(&umount_list);
}
- mnt->mnt_flags = mnt_flags;
out:
up_write(&namespace_sem);
@@ -1353,7 +1350,7 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
data_page);
else if (flags & MS_BIND)
- retval = do_loopback(&nd, dev_name, flags, mnt_flags);
+ retval = do_loopback(&nd, dev_name, flags & MS_REC);
else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
retval = do_change_type(&nd, flags);
else if (flags & MS_MOVE)
diff --git a/fs/open.c b/fs/open.c
index 53ec28c3677..317b7c7f38a 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -1124,7 +1124,6 @@ asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
prevent_tail_call(ret);
return ret;
}
-EXPORT_SYMBOL_GPL(sys_openat);
#ifndef __alpha__
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 45ae7dd3c65..7ef1f094de9 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -533,6 +533,7 @@ void del_gendisk(struct gendisk *disk)
devfs_remove_disk(disk);
+ kobject_uevent(&disk->kobj, KOBJ_REMOVE);
if (disk->holder_dir)
kobject_unregister(disk->holder_dir);
if (disk->slave_dir)
@@ -545,7 +546,7 @@ void del_gendisk(struct gendisk *disk)
kfree(disk_name);
}
put_device(disk->driverfs_dev);
+ disk->driverfs_dev = NULL;
}
- kobject_uevent(&disk->kobj, KOBJ_REMOVE);
kobject_del(&disk->kobj);
}
diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c
index 34c7a11d91f..70d9c5a37f5 100644
--- a/fs/smbfs/dir.c
+++ b/fs/smbfs/dir.c
@@ -434,6 +434,11 @@ smb_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
if (dentry->d_name.len > SMB_MAXNAMELEN)
goto out;
+ /* Do not allow lookup of names with backslashes in */
+ error = -EINVAL;
+ if (memchr(dentry->d_name.name, '\\', dentry->d_name.len))
+ goto out;
+
lock_kernel();
error = smb_proc_getattr(dentry, &finfo);
#ifdef SMBFS_PARANOIA
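
smb_lookup() now rejects names containing a backslash before going to the server, since '\' is the SMB path separator and a name carrying one could address outside the intended directory. The check is a one-line memchr():

    #include <stdio.h>
    #include <string.h>

    /* a lookup name is valid only if it contains no SMB path separator */
    static int name_ok(const char *name, size_t len)
    {
        return memchr(name, '\\', len) == NULL;
    }

    int main(void)
    {
        printf("%d %d\n", name_ok("notes.txt", 9), name_ok("a\\b", 3));
        return 0;       /* prints: 1 0 */
    }
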
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c
index c71c375863c..c71dd2760d3 100644
--- a/fs/smbfs/request.c
+++ b/fs/smbfs/request.c
@@ -339,9 +339,11 @@ int smb_add_request(struct smb_request *req)
/*
* On timeout or on interrupt we want to try and remove the
* request from the recvq/xmitq.
+ * First check if the request is still part of a queue. (May
+ * have been removed by some error condition)
*/
smb_lock_server(server);
- if (!(req->rq_flags & SMB_REQ_RECEIVED)) {
+ if (!list_empty(&req->rq_queue)) {
list_del_init(&req->rq_queue);
smb_rput(req);
}
diff --git a/fs/splice.c b/fs/splice.c
index 7fb04970c72..a285fd746dc 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -51,7 +51,7 @@ struct splice_pipe_desc {
* addition of remove_mapping(). If success is returned, the caller may
* attempt to reuse this page for another destination.
*/
-static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
+static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct page *page = buf->page;
@@ -78,16 +78,18 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
return 1;
}
+ buf->flags |= PIPE_BUF_FLAG_LRU;
return 0;
}
-static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
+static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
page_cache_release(buf->page);
+ buf->flags &= ~PIPE_BUF_FLAG_LRU;
}
-static int page_cache_pipe_buf_pin(struct pipe_inode_info *info,
+static int page_cache_pipe_buf_pin(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct page *page = buf->page;
@@ -141,6 +143,7 @@ static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
return 1;
+ buf->flags |= PIPE_BUF_FLAG_LRU;
return generic_pipe_buf_steal(pipe, buf);
}
@@ -321,6 +324,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
mapping_gfp_mask(mapping));
if (unlikely(error)) {
page_cache_release(page);
+ if (error == -EEXIST)
+ continue;
break;
}
/*
@@ -497,14 +502,14 @@ EXPORT_SYMBOL(generic_file_splice_read);
* Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
* using sendpage(). Return the number of bytes sent.
*/
-static int pipe_to_sendpage(struct pipe_inode_info *info,
+static int pipe_to_sendpage(struct pipe_inode_info *pipe,
struct pipe_buffer *buf, struct splice_desc *sd)
{
struct file *file = sd->file;
loff_t pos = sd->pos;
int ret, more;
- ret = buf->ops->pin(info, buf);
+ ret = buf->ops->pin(pipe, buf);
if (!ret) {
more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
@@ -535,7 +540,7 @@ static int pipe_to_sendpage(struct pipe_inode_info *info,
* SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
* a new page in the output file page cache and fill/dirty that.
*/
-static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
+static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
struct splice_desc *sd)
{
struct file *file = sd->file;
@@ -549,7 +554,7 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
/*
* make sure the data in this buffer is uptodate
*/
- ret = buf->ops->pin(info, buf);
+ ret = buf->ops->pin(pipe, buf);
if (unlikely(ret))
return ret;
@@ -566,37 +571,23 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
*/
if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
/*
- * If steal succeeds, buf->page is now pruned from the vm
- * side (page cache) and we can reuse it. The page will also
- * be locked on successful return.
+ * If steal succeeds, buf->page is now pruned from the
+ * pagecache and we can reuse it. The page will also be
+ * locked on successful return.
*/
- if (buf->ops->steal(info, buf))
+ if (buf->ops->steal(pipe, buf))
goto find_page;
page = buf->page;
- page_cache_get(page);
-
- /*
- * page must be on the LRU for adding to the pagecache.
- * Check this without grabbing the zone lock, if it isn't
- * the do grab the zone lock, recheck, and add if necessary.
- */
- if (!PageLRU(page)) {
- struct zone *zone = page_zone(page);
-
- spin_lock_irq(&zone->lru_lock);
- if (!PageLRU(page)) {
- SetPageLRU(page);
- add_page_to_inactive_list(zone, page);
- }
- spin_unlock_irq(&zone->lru_lock);
- }
-
if (add_to_page_cache(page, mapping, index, gfp_mask)) {
- page_cache_release(page);
unlock_page(page);
goto find_page;
}
+
+ page_cache_get(page);
+
+ if (!(buf->flags & PIPE_BUF_FLAG_LRU))
+ lru_cache_add(page);
} else {
find_page:
page = find_lock_page(mapping, index);
@@ -647,23 +638,36 @@ find_page:
}
ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
- if (ret == AOP_TRUNCATED_PAGE) {
+ if (unlikely(ret)) {
+ loff_t isize = i_size_read(mapping->host);
+
+ if (ret != AOP_TRUNCATED_PAGE)
+ unlock_page(page);
page_cache_release(page);
- goto find_page;
- } else if (ret)
+ if (ret == AOP_TRUNCATED_PAGE)
+ goto find_page;
+
+ /*
+ * prepare_write() may have instantiated a few blocks
+ * outside i_size. Trim these off again.
+ */
+ if (sd->pos + this_len > isize)
+ vmtruncate(mapping->host, isize);
+
goto out;
+ }
if (buf->page != page) {
/*
* Careful, ->map() uses KM_USER0!
*/
- char *src = buf->ops->map(info, buf, 1);
+ char *src = buf->ops->map(pipe, buf, 1);
char *dst = kmap_atomic(page, KM_USER1);
memcpy(dst + offset, src + buf->offset, this_len);
flush_dcache_page(page);
kunmap_atomic(dst, KM_USER1);
- buf->ops->unmap(info, buf, src);
+ buf->ops->unmap(pipe, buf, src);
}
ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 64ee07db0d5..8558226281c 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -1942,8 +1942,10 @@ xfs_alloc_fix_freelist(
/*
* Allocate as many blocks as possible at once.
*/
- if ((error = xfs_alloc_ag_vextent(&targs)))
+ if ((error = xfs_alloc_ag_vextent(&targs))) {
+ xfs_trans_brelse(tp, agflbp);
return error;
+ }
/*
* Stop if we run out. Won't happen if callers are obeying
* the restrictions correctly. Can happen for free calls
@@ -1960,6 +1962,7 @@ xfs_alloc_fix_freelist(
return error;
}
}
+ xfs_trans_brelse(tp, agflbp);
args->agbp = agbp;
return 0;
}
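
The xfs_alloc_fix_freelist() hunks above plug a buffer leak: agflbp must be released on the early error return as well as on the success path. The kernel fix releases at each return; the goto-cleanup form below is the usual C idiom for the same guarantee (a sketch, with malloc standing in for the AGFL buffer):

    #include <stdio.h>
    #include <stdlib.h>

    static int do_work(int fail)
    {
        char *buf = malloc(64);     /* stands in for the AGFL buffer */
        int err = 0;

        if (!buf)
            return -1;

        if (fail) {                 /* stands in for xfs_alloc_ag_vextent() */
            err = -1;
            goto out;               /* the error path still reaches free() */
        }
        puts("work done");
    out:
        free(buf);                  /* single release point, all paths */
        return err;
    }

    int main(void)
    {
        do_work(0);
        do_work(1);
        return 0;
    }
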
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index 81a05cfd77d..1f148762eb2 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -316,6 +316,18 @@ xfs_rename(
}
}
+ /*
+ * If we are using project inheritance, we only allow renames
+ * into our tree when the project IDs are the same; else the
+ * tree quota mechanism would be circumvented.
+ */
+ if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
+ (target_dp->i_d.di_projid != src_ip->i_d.di_projid))) {
+ error = XFS_ERROR(EXDEV);
+ xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED);
+ goto rele_return;
+ }
+
new_parent = (src_dp != target_dp);
src_is_directory = ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR);
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index f0e09ca1413..36ea1b2094f 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -669,31 +669,22 @@ xfs_mntupdate(
xfs_mount_t *mp = XFS_BHVTOM(bdp);
int error;
- if (args->flags & XFSMNT_BARRIER)
- mp->m_flags |= XFS_MOUNT_BARRIER;
- else
- mp->m_flags &= ~XFS_MOUNT_BARRIER;
-
- if ((vfsp->vfs_flag & VFS_RDONLY) &&
- !(*flags & MS_RDONLY)) {
- vfsp->vfs_flag &= ~VFS_RDONLY;
-
- if (args->flags & XFSMNT_BARRIER)
+ if (!(*flags & MS_RDONLY)) { /* rw/ro -> rw */
+ if (vfsp->vfs_flag & VFS_RDONLY)
+ vfsp->vfs_flag &= ~VFS_RDONLY;
+ if (args->flags & XFSMNT_BARRIER) {
+ mp->m_flags |= XFS_MOUNT_BARRIER;
xfs_mountfs_check_barriers(mp);
- }
-
- if (!(vfsp->vfs_flag & VFS_RDONLY) &&
- (*flags & MS_RDONLY)) {
+ } else {
+ mp->m_flags &= ~XFS_MOUNT_BARRIER;
+ }
+ } else if (!(vfsp->vfs_flag & VFS_RDONLY)) { /* rw -> ro */
VFS_SYNC(vfsp, SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR, NULL, error);
-
xfs_quiesce_fs(mp);
-
- /* Ok now write out an unmount record */
xfs_log_unmount_write(mp);
xfs_unmountfs_writesb(mp);
vfsp->vfs_flag |= VFS_RDONLY;
}
-
return 0;
}
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index fa71b305ba5..7027ae68ee3 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -2663,7 +2663,7 @@ xfs_link(
*/
if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
(tdp->i_d.di_projid != sip->i_d.di_projid))) {
- error = XFS_ERROR(EPERM);
+ error = XFS_ERROR(EXDEV);
goto error_return;
}
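
Both XFS changes report EXDEV rather than EPERM when a link or rename would cross a project-quota boundary: EXDEV is the errno userspace already knows how to handle, falling back to copy-and-unlink the way mv(1) does across filesystems. A minimal illustration of that fallback, with a stubbed rename:

    #include <errno.h>
    #include <stdio.h>

    static int try_rename(void)
    {
        errno = EXDEV;      /* what the kernel now reports */
        return -1;
    }

    int main(void)
    {
        if (try_rename() < 0 && errno == EXDEV)
            printf("rename crosses a boundary: fall back to copy+unlink\n");
        return 0;
    }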