author    Anton Arapov <anton@redhat.com>    2012-09-12 09:18:33 +0200
committer Anton Arapov <anton@redhat.com>    2012-09-12 09:19:26 +0200
commit    985ef6b2108ed28ffd5f6630e1e0fce2e2a775f2 (patch)
tree      eb9091ebd60e14eea65a9e6b5140f98d88e8a590 /fs
parent    1d44b6f3fcf6058fb7c960b7558766967e8028f7 (diff)
download  kernel-uprobes-985ef6b2108ed28ffd5f6630e1e0fce2e2a775f2.tar.gz
          kernel-uprobes-985ef6b2108ed28ffd5f6630e1e0fce2e2a775f2.tar.xz
          kernel-uprobes-985ef6b2108ed28ffd5f6630e1e0fce2e2a775f2.zip
fedora kernel: 021ce7bee3cfdcbf16da1256b2c9f40f7e9bbd9e (v3.5.3-1)
Signed-off-by: Anton Arapov <anton@redhat.com>
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/async-thread.c  |   9
-rw-r--r--  fs/cifs/cifsglob.h       |   6
-rw-r--r--  fs/cifs/connect.c        |   9
-rw-r--r--  fs/cifs/file.c           |  17
-rw-r--r--  fs/exec.c                |   2
-rw-r--r--  fs/exofs/ore.c           |  14
-rw-r--r--  fs/ext4/balloc.c         |  65
-rw-r--r--  fs/ext4/bitmap.c         |  13
-rw-r--r--  fs/ext4/ext4.h           |   6
-rw-r--r--  fs/ext4/extents.c        |  47
-rw-r--r--  fs/ext4/ialloc.c         |   3
-rw-r--r--  fs/ext4/inode.c          |  41
-rw-r--r--  fs/ext4/namei.c          |  11
-rw-r--r--  fs/ext4/resize.c         |   7
-rw-r--r--  fs/ext4/super.c          | 180
-rw-r--r--  fs/ext4/xattr.c          |  11
-rw-r--r--  fs/fuse/file.c           |   2
-rw-r--r--  fs/jbd2/journal.c        |   3
-rw-r--r--  fs/nfs/file.c            |   7
-rw-r--r--  fs/nfs/idmap.c           |  26
-rw-r--r--  fs/nfsd/nfs4state.c      |   4
-rw-r--r--  fs/nfsd/nfs4xdr.c        |   2
-rw-r--r--  fs/nilfs2/ioctl.c        |   4
-rw-r--r--  fs/nilfs2/super.c        |   3
-rw-r--r--  fs/nilfs2/the_nilfs.c    |   1
-rw-r--r--  fs/nilfs2/the_nilfs.h    |   2
-rw-r--r--  fs/select.c              |  10
27 files changed, 336 insertions(+), 169 deletions(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 42704149b72..58b7d14b08e 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -206,10 +206,17 @@ static noinline void run_ordered_completions(struct btrfs_workers *workers,
work->ordered_func(work);
- /* now take the lock again and call the freeing code */
+ /* now take the lock again and drop our item from the list */
spin_lock(&workers->order_lock);
list_del(&work->order_list);
+ spin_unlock(&workers->order_lock);
+
+ /*
+ * we don't want to call the ordered free functions
+ * with the lock held though
+ */
work->ordered_free(work);
+ spin_lock(&workers->order_lock);
}
spin_unlock(&workers->order_lock);
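The btrfs hunk above unlinks the item while holding order_lock, drops the lock before
calling the ordered_free() callback, and retakes it to continue walking the list. A
minimal userspace sketch of that drop-lock-around-the-callback pattern (pthread names
and types are illustrative, not the kernel API):

#include <pthread.h>
#include <stddef.h>

struct work {
    struct work *next;
    void (*free_fn)(struct work *);   /* may sleep or take other locks */
};

static pthread_mutex_t order_lock = PTHREAD_MUTEX_INITIALIZER;
static struct work *order_list;

/* illustrative sketch only, not the btrfs implementation */
static void run_ordered_completions(void)
{
    pthread_mutex_lock(&order_lock);
    while (order_list) {
        struct work *w = order_list;

        /* unlink while holding the lock ... */
        order_list = w->next;
        pthread_mutex_unlock(&order_lock);

        /* ... but never invoke the free callback with the lock held */
        w->free_fn(w);

        pthread_mutex_lock(&order_lock);
    }
    pthread_mutex_unlock(&order_lock);
}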
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 6df0cbe1cbc..d86ba9f2519 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -657,13 +657,13 @@ struct cifs_io_parms {
* Take a reference on the file private data. Must be called with
* cifs_file_list_lock held.
*/
-static inline
-struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file)
+static inline void
+cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
{
++cifs_file->count;
- return cifs_file;
}
+struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
/*
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 94b7788c318..f9670054e90 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -238,8 +238,8 @@ static const match_table_t cifs_mount_option_tokens = {
enum {
Opt_sec_krb5, Opt_sec_krb5i, Opt_sec_krb5p,
Opt_sec_ntlmsspi, Opt_sec_ntlmssp,
- Opt_ntlm, Opt_sec_ntlmi, Opt_sec_ntlmv2i,
- Opt_sec_nontlm, Opt_sec_lanman,
+ Opt_ntlm, Opt_sec_ntlmi, Opt_sec_ntlmv2,
+ Opt_sec_ntlmv2i, Opt_sec_lanman,
Opt_sec_none,
Opt_sec_err
@@ -253,8 +253,9 @@ static const match_table_t cifs_secflavor_tokens = {
{ Opt_sec_ntlmssp, "ntlmssp" },
{ Opt_ntlm, "ntlm" },
{ Opt_sec_ntlmi, "ntlmi" },
+ { Opt_sec_ntlmv2, "nontlm" },
+ { Opt_sec_ntlmv2, "ntlmv2" },
{ Opt_sec_ntlmv2i, "ntlmv2i" },
- { Opt_sec_nontlm, "nontlm" },
{ Opt_sec_lanman, "lanman" },
{ Opt_sec_none, "none" },
@@ -1167,7 +1168,7 @@ static int cifs_parse_security_flavors(char *value,
case Opt_sec_ntlmi:
vol->secFlg |= CIFSSEC_MAY_NTLM | CIFSSEC_MUST_SIGN;
break;
- case Opt_sec_nontlm:
+ case Opt_sec_ntlmv2:
vol->secFlg |= CIFSSEC_MAY_NTLMV2;
break;
case Opt_sec_ntlmv2i:
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 513adbc211d..2cbda4eb9e1 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -284,6 +284,15 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
static void cifs_del_lock_waiters(struct cifsLockInfo *lock);
+struct cifsFileInfo *
+cifsFileInfo_get(struct cifsFileInfo *cifs_file)
+{
+ spin_lock(&cifs_file_list_lock);
+ cifsFileInfo_get_locked(cifs_file);
+ spin_unlock(&cifs_file_list_lock);
+ return cifs_file;
+}
+
/*
* Release a reference on the file private data. This may involve closing
* the filehandle out on the server. Must be called without holding
@@ -1563,7 +1572,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
if (!open_file->invalidHandle) {
/* found a good file */
/* lock it so it will not be closed on us */
- cifsFileInfo_get(open_file);
+ cifsFileInfo_get_locked(open_file);
spin_unlock(&cifs_file_list_lock);
return open_file;
} /* else might as well continue, and look for
@@ -1615,7 +1624,7 @@ refind_writable:
if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
if (!open_file->invalidHandle) {
/* found a good writable file */
- cifsFileInfo_get(open_file);
+ cifsFileInfo_get_locked(open_file);
spin_unlock(&cifs_file_list_lock);
return open_file;
} else {
@@ -1632,7 +1641,7 @@ refind_writable:
if (inv_file) {
any_available = false;
- cifsFileInfo_get(inv_file);
+ cifsFileInfo_get_locked(inv_file);
}
spin_unlock(&cifs_file_list_lock);
@@ -3082,8 +3091,6 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
break;
}
- spin_lock(&cifs_file_list_lock);
- spin_unlock(&cifs_file_list_lock);
rdata->cfile = cifsFileInfo_get(open_file);
rdata->mapping = mapping;
rdata->offset = offset;
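The cifs changes above split reference-taking into a _locked variant, which assumes the
caller already holds cifs_file_list_lock, and an unlocked wrapper that takes the lock
itself. A rough standalone sketch of that locked/unlocked getter convention (illustrative
names, not the cifs API):

#include <pthread.h>

struct file_info {
    int count;                        /* reference count */
};

static pthread_mutex_t file_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* illustrative sketch: caller must already hold file_list_lock */
static void file_info_get_locked(struct file_info *f)
{
    ++f->count;
}

/* safe to call without the lock; wraps the locked variant */
static struct file_info *file_info_get(struct file_info *f)
{
    pthread_mutex_lock(&file_list_lock);
    file_info_get_locked(f);
    pthread_mutex_unlock(&file_list_lock);
    return f;
}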
diff --git a/fs/exec.c b/fs/exec.c
index da27b91ff1e..e95aeeddd25 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1020,7 +1020,7 @@ static void flush_old_files(struct files_struct * files)
unsigned long set, i;
j++;
- i = j * __NFDBITS;
+ i = j * BITS_PER_LONG;
fdt = files_fdtable(files);
if (i >= fdt->max_fds)
break;
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
index 24a49d47e93..1585db1aa36 100644
--- a/fs/exofs/ore.c
+++ b/fs/exofs/ore.c
@@ -837,11 +837,11 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
bio->bi_rw |= REQ_WRITE;
}
- osd_req_write(or, _ios_obj(ios, dev), per_dev->offset,
- bio, per_dev->length);
+ osd_req_write(or, _ios_obj(ios, cur_comp),
+ per_dev->offset, bio, per_dev->length);
ORE_DBGMSG("write(0x%llx) offset=0x%llx "
"length=0x%llx dev=%d\n",
- _LLU(_ios_obj(ios, dev)->id),
+ _LLU(_ios_obj(ios, cur_comp)->id),
_LLU(per_dev->offset),
_LLU(per_dev->length), dev);
} else if (ios->kern_buff) {
@@ -853,20 +853,20 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
(ios->si.unit_off + ios->length >
ios->layout->stripe_unit));
- ret = osd_req_write_kern(or, _ios_obj(ios, per_dev->dev),
+ ret = osd_req_write_kern(or, _ios_obj(ios, cur_comp),
per_dev->offset,
ios->kern_buff, ios->length);
if (unlikely(ret))
goto out;
ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
"length=0x%llx dev=%d\n",
- _LLU(_ios_obj(ios, dev)->id),
+ _LLU(_ios_obj(ios, cur_comp)->id),
_LLU(per_dev->offset),
_LLU(ios->length), per_dev->dev);
} else {
- osd_req_set_attributes(or, _ios_obj(ios, dev));
+ osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
- _LLU(_ios_obj(ios, dev)->id),
+ _LLU(_ios_obj(ios, cur_comp)->id),
ios->out_attr_len, dev);
}
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index cee7812cc3c..1b5089067d0 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -280,14 +280,18 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
return desc;
}
-static int ext4_valid_block_bitmap(struct super_block *sb,
- struct ext4_group_desc *desc,
- unsigned int block_group,
- struct buffer_head *bh)
+/*
+ * Return the block number which was discovered to be invalid, or 0 if
+ * the block bitmap is valid.
+ */
+static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
+ struct ext4_group_desc *desc,
+ unsigned int block_group,
+ struct buffer_head *bh)
{
ext4_grpblk_t offset;
ext4_grpblk_t next_zero_bit;
- ext4_fsblk_t bitmap_blk;
+ ext4_fsblk_t blk;
ext4_fsblk_t group_first_block;
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
@@ -297,37 +301,33 @@ static int ext4_valid_block_bitmap(struct super_block *sb,
* or it has to also read the block group where the bitmaps
* are located to verify they are set.
*/
- return 1;
+ return 0;
}
group_first_block = ext4_group_first_block_no(sb, block_group);
/* check whether block bitmap block number is set */
- bitmap_blk = ext4_block_bitmap(sb, desc);
- offset = bitmap_blk - group_first_block;
+ blk = ext4_block_bitmap(sb, desc);
+ offset = blk - group_first_block;
if (!ext4_test_bit(offset, bh->b_data))
/* bad block bitmap */
- goto err_out;
+ return blk;
/* check whether the inode bitmap block number is set */
- bitmap_blk = ext4_inode_bitmap(sb, desc);
- offset = bitmap_blk - group_first_block;
+ blk = ext4_inode_bitmap(sb, desc);
+ offset = blk - group_first_block;
if (!ext4_test_bit(offset, bh->b_data))
/* bad block bitmap */
- goto err_out;
+ return blk;
/* check whether the inode table block number is set */
- bitmap_blk = ext4_inode_table(sb, desc);
- offset = bitmap_blk - group_first_block;
+ blk = ext4_inode_table(sb, desc);
+ offset = blk - group_first_block;
next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
offset + EXT4_SB(sb)->s_itb_per_group,
offset);
- if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
- /* good bitmap for inode tables */
- return 1;
-
-err_out:
- ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu",
- block_group, bitmap_blk);
+ if (next_zero_bit < offset + EXT4_SB(sb)->s_itb_per_group)
+ /* bad bitmap for inode tables */
+ return blk;
return 0;
}
@@ -336,14 +336,26 @@ void ext4_validate_block_bitmap(struct super_block *sb,
unsigned int block_group,
struct buffer_head *bh)
{
+ ext4_fsblk_t blk;
+
if (buffer_verified(bh))
return;
ext4_lock_group(sb, block_group);
- if (ext4_valid_block_bitmap(sb, desc, block_group, bh) &&
- ext4_block_bitmap_csum_verify(sb, block_group, desc, bh,
- EXT4_BLOCKS_PER_GROUP(sb) / 8))
- set_buffer_verified(bh);
+ blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
+ if (unlikely(blk != 0)) {
+ ext4_unlock_group(sb, block_group);
+ ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
+ block_group, blk);
+ return;
+ }
+ if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
+ desc, bh, EXT4_BLOCKS_PER_GROUP(sb) / 8))) {
+ ext4_unlock_group(sb, block_group);
+ ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
+ return;
+ }
+ set_buffer_verified(bh);
ext4_unlock_group(sb, block_group);
}
@@ -609,7 +621,8 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
if (bitmap_bh == NULL)
continue;
- x = ext4_count_free(bitmap_bh, sb->s_blocksize);
+ x = ext4_count_free(bitmap_bh->b_data,
+ EXT4_BLOCKS_PER_GROUP(sb) / 8);
printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
i, ext4_free_group_clusters(sb, gdp), x);
bitmap_count += x;
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index b319721da26..ad9d96e2370 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -11,24 +11,18 @@
#include <linux/jbd2.h>
#include "ext4.h"
-#ifdef EXT4FS_DEBUG
-
static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
-unsigned int ext4_count_free(struct buffer_head *map, unsigned int numchars)
+unsigned int ext4_count_free(char *bitmap, unsigned int numchars)
{
unsigned int i, sum = 0;
- if (!map)
- return 0;
for (i = 0; i < numchars; i++)
- sum += nibblemap[map->b_data[i] & 0xf] +
- nibblemap[(map->b_data[i] >> 4) & 0xf];
+ sum += nibblemap[bitmap[i] & 0xf] +
+ nibblemap[(bitmap[i] >> 4) & 0xf];
return sum;
}
-#endif /* EXT4FS_DEBUG */
-
int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
struct ext4_group_desc *gdp,
struct buffer_head *bh, int sz)
@@ -92,7 +86,6 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
if (provided == calculated)
return 1;
- ext4_error(sb, "Bad block bitmap checksum: block_group = %u", group);
return 0;
}
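ext4_count_free() above counts free (zero) bits with a 4-bit lookup table: each table
entry is the number of zero bits in that nibble, so every byte costs two table lookups.
A self-contained sketch of the same idea, operating on a plain byte array rather than a
buffer_head:

#include <stdio.h>

/* number of zero bits in each possible 4-bit value */
static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1,
                                3, 2, 2, 1, 2, 1, 1, 0};

static unsigned int count_free(const unsigned char *bitmap, unsigned int numchars)
{
    unsigned int i, sum = 0;

    for (i = 0; i < numchars; i++)
        sum += nibblemap[bitmap[i] & 0xf] +
               nibblemap[(bitmap[i] >> 4) & 0xf];
    return sum;
}

int main(void)
{
    unsigned char map[2] = { 0xff, 0x0f };          /* 12 bits set, 4 bits free */

    printf("%u free bits\n", count_free(map, sizeof(map)));   /* prints 4 */
    return 0;
}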
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index cfc4e01b3c8..01434f25917 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1161,8 +1161,7 @@ struct ext4_sb_info {
unsigned long s_desc_per_block; /* Number of group descriptors per block */
ext4_group_t s_groups_count; /* Number of groups in the fs */
ext4_group_t s_blockfile_groups;/* Groups acceptable for non-extent files */
- unsigned long s_overhead_last; /* Last calculated overhead */
- unsigned long s_blocks_last; /* Last seen block count */
+ unsigned long s_overhead; /* # of fs overhead clusters */
unsigned int s_cluster_ratio; /* Number of blocks per cluster */
unsigned int s_cluster_bits; /* log2 of s_cluster_ratio */
loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */
@@ -1852,7 +1851,7 @@ struct mmpd_data {
# define NORET_AND noreturn,
/* bitmap.c */
-extern unsigned int ext4_count_free(struct buffer_head *, unsigned);
+extern unsigned int ext4_count_free(char *bitmap, unsigned numchars);
void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
struct ext4_group_desc *gdp,
struct buffer_head *bh, int sz);
@@ -2037,6 +2036,7 @@ extern int ext4_group_extend(struct super_block *sb,
extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
/* super.c */
+extern int ext4_calculate_overhead(struct super_block *sb);
extern int ext4_superblock_csum_verify(struct super_block *sb,
struct ext4_super_block *es);
extern void ext4_superblock_csum_set(struct super_block *sb,
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 91341ec6e06..9752106662d 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2570,10 +2570,10 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
{
struct super_block *sb = inode->i_sb;
int depth = ext_depth(inode);
- struct ext4_ext_path *path;
+ struct ext4_ext_path *path = NULL;
ext4_fsblk_t partial_cluster = 0;
handle_t *handle;
- int i, err;
+ int i = 0, err;
ext_debug("truncate since %u to %u\n", start, end);
@@ -2606,8 +2606,12 @@ again:
}
depth = ext_depth(inode);
ex = path[depth].p_ext;
- if (!ex)
+ if (!ex) {
+ ext4_ext_drop_refs(path);
+ kfree(path);
+ path = NULL;
goto cont;
+ }
ee_block = le32_to_cpu(ex->ee_block);
@@ -2637,8 +2641,6 @@ again:
if (err < 0)
goto out;
}
- ext4_ext_drop_refs(path);
- kfree(path);
}
cont:
@@ -2647,19 +2649,28 @@ cont:
* after i_size and walking into the tree depth-wise.
*/
depth = ext_depth(inode);
- path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
- if (path == NULL) {
- ext4_journal_stop(handle);
- return -ENOMEM;
- }
- path[0].p_depth = depth;
- path[0].p_hdr = ext_inode_hdr(inode);
+ if (path) {
+ int k = i = depth;
+ while (--k > 0)
+ path[k].p_block =
+ le16_to_cpu(path[k].p_hdr->eh_entries)+1;
+ } else {
+ path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
+ GFP_NOFS);
+ if (path == NULL) {
+ ext4_journal_stop(handle);
+ return -ENOMEM;
+ }
+ path[0].p_depth = depth;
+ path[0].p_hdr = ext_inode_hdr(inode);
+ i = 0;
- if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
- err = -EIO;
- goto out;
+ if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
+ err = -EIO;
+ goto out;
+ }
}
- i = err = 0;
+ err = 0;
while (i >= 0 && err == 0) {
if (i == depth) {
@@ -2773,8 +2784,10 @@ cont:
out:
ext4_ext_drop_refs(path);
kfree(path);
- if (err == -EAGAIN)
+ if (err == -EAGAIN) {
+ path = NULL;
goto again;
+ }
ext4_journal_stop(handle);
return err;
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index d48e8b14928..6866bc233e9 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1054,7 +1054,8 @@ unsigned long ext4_count_free_inodes(struct super_block *sb)
if (!bitmap_bh)
continue;
- x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
+ x = ext4_count_free(bitmap_bh->b_data,
+ EXT4_INODES_PER_GROUP(sb) / 8);
printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
(unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
bitmap_count += x;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 02bc8cbe728..47a3e00e02e 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -346,6 +346,15 @@ void ext4_da_update_reserve_space(struct inode *inode,
used = ei->i_reserved_data_blocks;
}
+ if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
+ ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
+ "with only %d reserved metadata blocks\n", __func__,
+ inode->i_ino, ei->i_allocated_meta_blocks,
+ ei->i_reserved_meta_blocks);
+ WARN_ON(1);
+ ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
+ }
+
/* Update per-inode reservations */
ei->i_reserved_data_blocks -= used;
ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
@@ -1171,6 +1180,17 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned int md_needed;
int ret;
+ ext4_lblk_t save_last_lblock;
+ int save_len;
+
+ /*
+ * We will charge metadata quota at writeout time; this saves
+ * us from metadata over-estimation, though we may go over by
+ * a small amount in the end. Here we just reserve for data.
+ */
+ ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
+ if (ret)
+ return ret;
/*
* recalculate the amount of metadata blocks to reserve
@@ -1179,32 +1199,31 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
*/
repeat:
spin_lock(&ei->i_block_reservation_lock);
+ /*
+ * ext4_calc_metadata_amount() has side effects, which we have
+ * to be prepared undo if we fail to claim space.
+ */
+ save_len = ei->i_da_metadata_calc_len;
+ save_last_lblock = ei->i_da_metadata_calc_last_lblock;
md_needed = EXT4_NUM_B2C(sbi,
ext4_calc_metadata_amount(inode, lblock));
trace_ext4_da_reserve_space(inode, md_needed);
- spin_unlock(&ei->i_block_reservation_lock);
/*
- * We will charge metadata quota at writeout time; this saves
- * us from metadata over-estimation, though we may go over by
- * a small amount in the end. Here we just reserve for data.
- */
- ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
- if (ret)
- return ret;
- /*
* We do still charge estimated metadata to the sb though;
* we cannot afford to run out of free blocks.
*/
if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
- dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
+ ei->i_da_metadata_calc_len = save_len;
+ ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+ spin_unlock(&ei->i_block_reservation_lock);
if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
yield();
goto repeat;
}
+ dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
return -ENOSPC;
}
- spin_lock(&ei->i_block_reservation_lock);
ei->i_reserved_data_blocks++;
ei->i_reserved_meta_blocks += md_needed;
spin_unlock(&ei->i_block_reservation_lock);
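The reordering in ext4_da_reserve_space() above snapshots the metadata-estimator state
under i_block_reservation_lock and restores it when the cluster claim fails, because
ext4_calc_metadata_amount() has side effects. A condensed sketch of that save-and-roll-back
pattern (illustrative names and stand-in helpers, not the ext4 API):

#include <pthread.h>
#include <stdbool.h>

struct rsv {
    pthread_mutex_t lock;
    int calc_len;                     /* estimator state with side effects */
    long calc_last_lblock;
    long reserved_data;
    long reserved_meta;
};

/* stand-ins for ext4_calc_metadata_amount() / ext4_claim_free_clusters() */
static long estimate_meta(struct rsv *r, long lblock)
{
    r->calc_last_lblock = lblock;
    return ++r->calc_len;
}
static bool claim_clusters(long n) { return n <= 4; }

/* illustrative sketch only */
static int reserve_block(struct rsv *r, long lblock)
{
    pthread_mutex_lock(&r->lock);

    int save_len = r->calc_len;                /* snapshot before the estimate */
    long save_lblock = r->calc_last_lblock;
    long md_needed = estimate_meta(r, lblock);

    if (!claim_clusters(md_needed + 1)) {
        r->calc_len = save_len;                /* undo the estimator's side effects */
        r->calc_last_lblock = save_lblock;
        pthread_mutex_unlock(&r->lock);
        return -1;                             /* -ENOSPC in the real code */
    }

    r->reserved_data++;
    r->reserved_meta += md_needed;
    pthread_mutex_unlock(&r->lock);
    return 0;
}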
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 5845cd97bf8..0edaf18d843 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2918,8 +2918,15 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
cpu_to_le32(new_dir->i_ino);
BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
- retval = ext4_handle_dirty_dirent_node(handle, old_inode,
- dir_bh);
+ if (is_dx(old_inode)) {
+ retval = ext4_handle_dirty_dx_node(handle,
+ old_inode,
+ dir_bh);
+ } else {
+ retval = ext4_handle_dirty_dirent_node(handle,
+ old_inode,
+ dir_bh);
+ }
if (retval) {
ext4_std_error(old_dir->i_sb, retval);
goto end_rename;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 7ea6cbb4412..17d38de4068 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1197,7 +1197,7 @@ static void ext4_update_super(struct super_block *sb,
struct ext4_new_group_data *group_data = flex_gd->groups;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
- int i;
+ int i, ret;
BUG_ON(flex_gd->count == 0 || group_data == NULL);
/*
@@ -1272,6 +1272,11 @@ static void ext4_update_super(struct super_block *sb,
&sbi->s_flex_groups[flex_group].free_inodes);
}
+ /*
+ * Update the fs overhead information
+ */
+ ext4_calculate_overhead(sb);
+
if (test_opt(sb, DEBUG))
printk(KERN_DEBUG "EXT4-fs: added group %u:"
"%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index eb7aa3e4ef0..41598ee2e85 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -976,6 +976,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
ei->i_reserved_meta_blocks = 0;
ei->i_allocated_meta_blocks = 0;
ei->i_da_metadata_calc_len = 0;
+ ei->i_da_metadata_calc_last_lblock = 0;
spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
ei->i_reserved_quota = 0;
@@ -3085,6 +3086,118 @@ static int set_journal_csum_feature_set(struct super_block *sb)
return ret;
}
+/*
+ * Note: calculating the overhead so we can be compatible with
+ * historical BSD practice is quite difficult in the face of
+ * clusters/bigalloc. This is because multiple metadata blocks from
+ * different block group can end up in the same allocation cluster.
+ * Calculating the exact overhead in the face of clustered allocation
+ * requires either O(all block bitmaps) in memory or O(number of block
+ * groups**2) in time. We will still calculate the superblock for
+ * older file systems --- and if we come across with a bigalloc file
+ * system with zero in s_overhead_clusters the estimate will be close to
+ * correct especially for very large cluster sizes --- but for newer
+ * file systems, it's better to calculate this figure once at mkfs
+ * time, and store it in the superblock. If the superblock value is
+ * present (even for non-bigalloc file systems), we will use it.
+ */
+static int count_overhead(struct super_block *sb, ext4_group_t grp,
+ char *buf)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_group_desc *gdp;
+ ext4_fsblk_t first_block, last_block, b;
+ ext4_group_t i, ngroups = ext4_get_groups_count(sb);
+ int s, j, count = 0;
+
+ if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC))
+ return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
+ sbi->s_itb_per_group + 2);
+
+ first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
+ (grp * EXT4_BLOCKS_PER_GROUP(sb));
+ last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
+ for (i = 0; i < ngroups; i++) {
+ gdp = ext4_get_group_desc(sb, i, NULL);
+ b = ext4_block_bitmap(sb, gdp);
+ if (b >= first_block && b <= last_block) {
+ ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
+ count++;
+ }
+ b = ext4_inode_bitmap(sb, gdp);
+ if (b >= first_block && b <= last_block) {
+ ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
+ count++;
+ }
+ b = ext4_inode_table(sb, gdp);
+ if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
+ for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
+ int c = EXT4_B2C(sbi, b - first_block);
+ ext4_set_bit(c, buf);
+ count++;
+ }
+ if (i != grp)
+ continue;
+ s = 0;
+ if (ext4_bg_has_super(sb, grp)) {
+ ext4_set_bit(s++, buf);
+ count++;
+ }
+ for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
+ ext4_set_bit(EXT4_B2C(sbi, s++), buf);
+ count++;
+ }
+ }
+ if (!count)
+ return 0;
+ return EXT4_CLUSTERS_PER_GROUP(sb) -
+ ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
+}
+
+/*
+ * Compute the overhead and stash it in sbi->s_overhead
+ */
+int ext4_calculate_overhead(struct super_block *sb)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+ ext4_group_t i, ngroups = ext4_get_groups_count(sb);
+ ext4_fsblk_t overhead = 0;
+ char *buf = (char *) get_zeroed_page(GFP_KERNEL);
+
+ memset(buf, 0, PAGE_SIZE);
+ if (!buf)
+ return -ENOMEM;
+
+ /*
+ * Compute the overhead (FS structures). This is constant
+ * for a given filesystem unless the number of block groups
+ * changes so we cache the previous value until it does.
+ */
+
+ /*
+ * All of the blocks before first_data_block are overhead
+ */
+ overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
+
+ /*
+ * Add the overhead found in each block group
+ */
+ for (i = 0; i < ngroups; i++) {
+ int blks;
+
+ blks = count_overhead(sb, i, buf);
+ overhead += blks;
+ if (blks)
+ memset(buf, 0, PAGE_SIZE);
+ cond_resched();
+ }
+ sbi->s_overhead = overhead;
+ smp_wmb();
+ free_page((unsigned long) buf);
+ return 0;
+}
+
static int ext4_fill_super(struct super_block *sb, void *data, int silent)
{
char *orig_data = kstrdup(data, GFP_KERNEL);
@@ -3735,6 +3848,18 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
no_journal:
/*
+ * Get the # of file system overhead blocks from the
+ * superblock if present.
+ */
+ if (es->s_overhead_clusters)
+ sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
+ else {
+ ret = ext4_calculate_overhead(sb);
+ if (ret)
+ goto failed_mount_wq;
+ }
+
+ /*
* The maximum number of concurrent works can be high and
* concurrency isn't really necessary. Limit it to 1.
*/
@@ -4286,6 +4411,7 @@ static void ext4_clear_journal_err(struct super_block *sb,
ext4_commit_super(sb, 1);
jbd2_journal_clear_err(journal);
+ jbd2_journal_update_sb_errno(journal);
}
}
@@ -4600,67 +4726,21 @@ restore_opts:
return err;
}
-/*
- * Note: calculating the overhead so we can be compatible with
- * historical BSD practice is quite difficult in the face of
- * clusters/bigalloc. This is because multiple metadata blocks from
- * different block group can end up in the same allocation cluster.
- * Calculating the exact overhead in the face of clustered allocation
- * requires either O(all block bitmaps) in memory or O(number of block
- * groups**2) in time. We will still calculate the superblock for
- * older file systems --- and if we come across with a bigalloc file
- * system with zero in s_overhead_clusters the estimate will be close to
- * correct especially for very large cluster sizes --- but for newer
- * file systems, it's better to calculate this figure once at mkfs
- * time, and store it in the superblock. If the superblock value is
- * present (even for non-bigalloc file systems), we will use it.
- */
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
- struct ext4_group_desc *gdp;
+ ext4_fsblk_t overhead = 0;
u64 fsid;
s64 bfree;
- if (test_opt(sb, MINIX_DF)) {
- sbi->s_overhead_last = 0;
- } else if (es->s_overhead_clusters) {
- sbi->s_overhead_last = le32_to_cpu(es->s_overhead_clusters);
- } else if (sbi->s_blocks_last != ext4_blocks_count(es)) {
- ext4_group_t i, ngroups = ext4_get_groups_count(sb);
- ext4_fsblk_t overhead = 0;
-
- /*
- * Compute the overhead (FS structures). This is constant
- * for a given filesystem unless the number of block groups
- * changes so we cache the previous value until it does.
- */
-
- /*
- * All of the blocks before first_data_block are
- * overhead
- */
- overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
-
- /*
- * Add the overhead found in each block group
- */
- for (i = 0; i < ngroups; i++) {
- gdp = ext4_get_group_desc(sb, i, NULL);
- overhead += ext4_num_overhead_clusters(sb, i, gdp);
- cond_resched();
- }
- sbi->s_overhead_last = overhead;
- smp_wmb();
- sbi->s_blocks_last = ext4_blocks_count(es);
- }
+ if (!test_opt(sb, MINIX_DF))
+ overhead = sbi->s_overhead;
buf->f_type = EXT4_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
- buf->f_blocks = (ext4_blocks_count(es) -
- EXT4_C2B(sbi, sbi->s_overhead_last));
+ buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, sbi->s_overhead);
bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
/* prevent underflow in case that few free space is available */
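With the overhead cached once in sbi->s_overhead, ext4_statfs() only has to subtract it
(converted from clusters to blocks) from the raw block count. A toy illustration of that
bookkeeping with made-up numbers, not taken from a real filesystem:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* illustrative values only */
    uint64_t total_blocks      = 26214400;   /* e.g. 100 GiB of 4 KiB blocks */
    uint64_t overhead_clusters = 429496;     /* cached at mount/resize time */
    unsigned cluster_ratio     = 1;          /* blocks per cluster (no bigalloc) */

    /* what statfs would report as f_blocks */
    uint64_t f_blocks = total_blocks - overhead_clusters * cluster_ratio;

    printf("f_blocks = %llu\n", (unsigned long long)f_blocks);
    return 0;
}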
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index e56c9ed7d6e..2cdb98d6298 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -127,19 +127,16 @@ static __le32 ext4_xattr_block_csum(struct inode *inode,
struct ext4_xattr_header *hdr)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- struct ext4_inode_info *ei = EXT4_I(inode);
__u32 csum, old;
old = hdr->h_checksum;
hdr->h_checksum = 0;
- if (le32_to_cpu(hdr->h_refcount) != 1) {
- block_nr = cpu_to_le64(block_nr);
- csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&block_nr,
- sizeof(block_nr));
- } else
- csum = ei->i_csum_seed;
+ block_nr = cpu_to_le64(block_nr);
+ csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&block_nr,
+ sizeof(block_nr));
csum = ext4_chksum(sbi, csum, (__u8 *)hdr,
EXT4_BLOCK_SIZE(inode->i_sb));
+
hdr->h_checksum = old;
return cpu_to_le32(csum);
}
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index b321a688cde..514f12a5c77 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1700,7 +1700,7 @@ static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
size_t n;
u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
- for (n = 0; n < count; n++) {
+ for (n = 0; n < count; n++, iov++) {
if (iov->iov_len > (size_t) max)
return -ENOMEM;
max -= iov->iov_len;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index e9a3c4c8559..bd23f2ebaa6 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1377,7 +1377,7 @@ static void jbd2_mark_journal_empty(journal_t *journal)
* Update a journal's errno. Write updated superblock to disk waiting for IO
* to complete.
*/
-static void jbd2_journal_update_sb_errno(journal_t *journal)
+void jbd2_journal_update_sb_errno(journal_t *journal)
{
journal_superblock_t *sb = journal->j_superblock;
@@ -1390,6 +1390,7 @@ static void jbd2_journal_update_sb_errno(journal_t *journal)
jbd2_write_superblock(journal, WRITE_SYNC);
}
+EXPORT_SYMBOL(jbd2_journal_update_sb_errno);
/*
* Read the superblock for a given journal, performing initial
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index a6708e6b438..90757691ec8 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -459,8 +459,11 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
- /* Only do I/O if gfp is a superset of GFP_KERNEL */
- if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
+ /* Only do I/O if gfp is a superset of GFP_KERNEL, and we're not
+ * doing this memory reclaim for a fs-related allocation.
+ */
+ if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL &&
+ !(current->flags & PF_FSTRANS)) {
int how = FLUSH_SYNC;
/* Don't let kswapd deadlock waiting for OOM RPC calls */
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 864c51e4b40..1b5058b4043 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -205,12 +205,18 @@ static int nfs_idmap_init_keyring(void)
if (ret < 0)
goto failed_put_key;
+ ret = register_key_type(&key_type_id_resolver_legacy);
+ if (ret < 0)
+ goto failed_reg_legacy;
+
set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
cred->thread_keyring = keyring;
cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
id_resolver_cache = cred;
return 0;
+failed_reg_legacy:
+ unregister_key_type(&key_type_id_resolver);
failed_put_key:
key_put(keyring);
failed_put_cred:
@@ -222,6 +228,7 @@ static void nfs_idmap_quit_keyring(void)
{
key_revoke(id_resolver_cache->thread_keyring);
unregister_key_type(&key_type_id_resolver);
+ unregister_key_type(&key_type_id_resolver_legacy);
put_cred(id_resolver_cache);
}
@@ -385,7 +392,7 @@ static const struct rpc_pipe_ops idmap_upcall_ops = {
};
static struct key_type key_type_id_resolver_legacy = {
- .name = "id_resolver",
+ .name = "id_legacy",
.instantiate = user_instantiate,
.match = user_match,
.revoke = user_revoke,
@@ -674,6 +681,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
if (ret < 0)
goto out2;
+ BUG_ON(idmap->idmap_key_cons != NULL);
idmap->idmap_key_cons = cons;
ret = rpc_queue_upcall(idmap->idmap_pipe, msg);
@@ -687,8 +695,7 @@ out2:
out1:
kfree(msg);
out0:
- key_revoke(cons->key);
- key_revoke(cons->authkey);
+ complete_request_key(cons, ret);
return ret;
}
@@ -722,11 +729,18 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
struct idmap *idmap = (struct idmap *)rpci->private;
- struct key_construction *cons = idmap->idmap_key_cons;
+ struct key_construction *cons;
struct idmap_msg im;
size_t namelen_in;
int ret;
+ /* If instantiation is successful, anyone waiting for key construction
+ * will have been woken up and someone else may now have used
+ * idmap_key_cons - so after this point we may no longer touch it.
+ */
+ cons = ACCESS_ONCE(idmap->idmap_key_cons);
+ idmap->idmap_key_cons = NULL;
+
if (mlen != sizeof(im)) {
ret = -ENOSPC;
goto out;
@@ -739,7 +753,7 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
if (!(im.im_status & IDMAP_STATUS_SUCCESS)) {
ret = mlen;
- complete_request_key(idmap->idmap_key_cons, -ENOKEY);
+ complete_request_key(cons, -ENOKEY);
goto out_incomplete;
}
@@ -756,7 +770,7 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
}
out:
- complete_request_key(idmap->idmap_key_cons, ret);
+ complete_request_key(cons, ret);
out_incomplete:
return ret;
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 94effd5bc4a..e8ead04221b 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1215,7 +1215,7 @@ static bool groups_equal(struct group_info *g1, struct group_info *g2)
return true;
}
-static int
+static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
if ((cr1->cr_flavor != cr2->cr_flavor)
@@ -1227,7 +1227,7 @@ same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
return true;
if (!cr1->cr_principal || !cr2->cr_principal)
return false;
- return 0 == strcmp(cr1->cr_principal, cr1->cr_principal);
+ return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}
static void gen_clid(struct nfs4_client *clp)
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 4949667c84e..6322df36031 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2259,7 +2259,7 @@ out_acl:
if (bmval0 & FATTR4_WORD0_CASE_INSENSITIVE) {
if ((buflen -= 4) < 0)
goto out_resource;
- WRITE32(1);
+ WRITE32(0);
}
if (bmval0 & FATTR4_WORD0_CASE_PRESERVING) {
if ((buflen -= 4) < 0)
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 06658caa18b..0b6387c67e6 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -182,7 +182,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
goto out;
- down_read(&inode->i_sb->s_umount);
+ mutex_lock(&nilfs->ns_snapshot_mount_mutex);
nilfs_transaction_begin(inode->i_sb, &ti, 0);
ret = nilfs_cpfile_change_cpmode(
@@ -192,7 +192,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
else
nilfs_transaction_commit(inode->i_sb); /* never fails */
- up_read(&inode->i_sb->s_umount);
+ mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
out:
mnt_drop_write_file(filp);
return ret;
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 1099a76cee5..496904b96b2 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -948,6 +948,8 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
struct nilfs_root *root;
int ret;
+ mutex_lock(&nilfs->ns_snapshot_mount_mutex);
+
down_read(&nilfs->ns_segctor_sem);
ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno);
up_read(&nilfs->ns_segctor_sem);
@@ -972,6 +974,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
ret = nilfs_get_root_dentry(s, root, root_dentry);
nilfs_put_root(root);
out:
+ mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
return ret;
}
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 501b7f8b739..41e6a04a561 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -76,6 +76,7 @@ struct the_nilfs *alloc_nilfs(struct block_device *bdev)
nilfs->ns_bdev = bdev;
atomic_set(&nilfs->ns_ndirtyblks, 0);
init_rwsem(&nilfs->ns_sem);
+ mutex_init(&nilfs->ns_snapshot_mount_mutex);
INIT_LIST_HEAD(&nilfs->ns_dirty_files);
INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
spin_lock_init(&nilfs->ns_inode_lock);
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 9992b11312f..de7435f0ef5 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -47,6 +47,7 @@ enum {
* @ns_flags: flags
* @ns_bdev: block device
* @ns_sem: semaphore for shared states
+ * @ns_snapshot_mount_mutex: mutex to protect snapshot mounts
* @ns_sbh: buffer heads of on-disk super blocks
* @ns_sbp: pointers to super block data
* @ns_sbwtime: previous write time of super block
@@ -99,6 +100,7 @@ struct the_nilfs {
struct block_device *ns_bdev;
struct rw_semaphore ns_sem;
+ struct mutex ns_snapshot_mount_mutex;
/*
* used for
diff --git a/fs/select.c b/fs/select.c
index bae321569df..db14c781335 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -345,8 +345,8 @@ static int max_select_fd(unsigned long n, fd_set_bits *fds)
struct fdtable *fdt;
/* handle last in-complete long-word first */
- set = ~(~0UL << (n & (__NFDBITS-1)));
- n /= __NFDBITS;
+ set = ~(~0UL << (n & (BITS_PER_LONG-1)));
+ n /= BITS_PER_LONG;
fdt = files_fdtable(current->files);
open_fds = fdt->open_fds + n;
max = 0;
@@ -373,7 +373,7 @@ get_max:
max++;
set >>= 1;
} while (set);
- max += n * __NFDBITS;
+ max += n * BITS_PER_LONG;
}
return max;
@@ -435,11 +435,11 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
in = *inp++; out = *outp++; ex = *exp++;
all_bits = in | out | ex;
if (all_bits == 0) {
- i += __NFDBITS;
+ i += BITS_PER_LONG;
continue;
}
- for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) {
+ for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
int fput_needed;
if (i >= n)
break;
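The select.c and exec.c hunks replace __NFDBITS with BITS_PER_LONG when stepping through
the fd bitmap one unsigned long at a time. A small sketch of that word-at-a-time walk,
using a plain array rather than an fdtable (illustrative only):

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

/* return the highest set bit index in the bitmap, or -1 if none set */
static long max_set_bit(const unsigned long *bits, unsigned long nwords)
{
    for (unsigned long w = nwords; w-- > 0; ) {
        unsigned long set = bits[w];
        long max = -1;

        if (!set)
            continue;                 /* whole word empty: skip BITS_PER_LONG fds */
        while (set) {
            max++;
            set >>= 1;
        }
        return max + (long)(w * BITS_PER_LONG);
    }
    return -1;
}

int main(void)
{
    unsigned long fds[2] = { 0x0UL, 0x12UL };    /* with 64-bit longs: bits 65 and 68 set */

    printf("max fd = %ld\n", max_set_bit(fds, 2));   /* prints 68 on a 64-bit platform */
    return 0;
}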