author		Josef Bacik <jbacik@redhat.com>		2008-09-23 13:14:11 -0400
committer	Chris Mason <chris.mason@oracle.com>	2008-09-25 11:04:07 -0400
commit		0f9dd46cda36b8de3b9f48bc42bd09d20b9c3b52 (patch)
tree		2dcba11fd2fb5a4227fd8f8d2d076641f115a7b4 /fs/btrfs/ctree.h
parent		ef8bbdfe7e12dc9b4e80756f6d606c4639c65851 (diff)
Btrfs: free space accounting redo
1) Replace the per-fs_info extent_io_tree that tracked free space with two
rb-trees per block group, tracking free space areas by offset and by size.
The reason to do this is that most allocations come with a hint byte for
where to start, so we can usually find a chunk of free space at that hint
byte to satisfy the allocation and get good space packing. If we cannot find
free space at or after the given offset, we fall back on looking for a chunk
of the given size as close to that given offset as possible. When we fall
back on the size search we also try to find a slot as close to the size we
want as possible, to avoid breaking small chunks off of huge areas if
possible. (A sketch of this two-pass lookup follows the message.)

2) Remove the extent_io_tree that tracked the block group cache from fs_info
and replace it with an rb-tree that tracks block group cache by offset. Also
add a per-space_info list that tracks the block group cache for the
particular space, so we can look up related block groups easily.

3) Clean up the allocation code to make it a little easier to read and a
little less complicated. Basically there are three steps: first, look from
our provided hint. If we couldn't find anything from that hint, start back
at our original search start and look for space from there. If that fails,
try to allocate space if we can and start looking again. If not, we're
screwed and need to start over again.

4) Small fixes. There were some issues in volumes.c where we wouldn't
allocate the rest of the disk. Fixed cow_file_range to actually pass the
alloc_hint, which has helped a good bit in making the fs_mark test I run
have semi-normal results as we run out of space. Generally with data
allocations we don't track where we last allocated from, so every time we
did a data allocation we'd search through every block group that we have
looking for free space. While searching a block group with no free space
isn't terribly time consuming, it was causing a slight degradation as we got
more data block groups. The alloc_hint has fixed this slight degradation and
made things semi-normal.

There is still one nagging problem I'm working on, where we will get ENOSPC
when there is definitely plenty of space. This only happens with metadata
allocations, and only when we are almost full. So you generally hit the 85%
mark first, but sometimes you'll hit the BUG before you hit the 85% wall.
I'm still tracking it down, but until then this seems to be pretty stable
and makes a significant performance gain.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
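As an illustration of point 1, here is a minimal sketch of the two-pass
lookup, using the btrfs_free_space fields and the free_space_offset /
free_space_bytes roots added by the diff below. The helper name and the
linear rb-tree walks are hypothetical simplifications, not the patch's
actual implementation; locking and entry merging are omitted.

#include <linux/rbtree.h>

/*
 * Hypothetical helper (not the patch's code): walk the offset-indexed
 * tree for the first area that can satisfy the allocation at or after
 * the hint, then fall back to the size-indexed tree for the smallest
 * area that is still big enough, so huge areas aren't chipped apart.
 */
static struct btrfs_free_space *
sketch_find_free_space(struct btrfs_block_group_cache *bg,
		       u64 offset, u64 bytes)
{
	struct btrfs_free_space *entry;
	struct rb_node *n;

	/* pass 1: honor the hint byte to get good space packing */
	for (n = rb_first(&bg->free_space_offset); n; n = rb_next(n)) {
		u64 start;

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		start = entry->offset > offset ? entry->offset : offset;
		/* enough room at or after the hint inside this area? */
		if (entry->offset + entry->bytes >= start + bytes)
			return entry;
	}

	/* pass 2: smallest chunk that is still big enough */
	for (n = rb_first(&bg->free_space_bytes); n; n = rb_next(n)) {
		entry = rb_entry(n, struct btrfs_free_space, bytes_index);
		if (entry->bytes >= bytes)
			return entry;
	}
	return NULL;
}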
Diffstat (limited to 'fs/btrfs/ctree.h')
-rw-r--r--	fs/btrfs/ctree.h | 46
1 file changed, 42 insertions(+), 4 deletions(-)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index eb65fd80888..730aae3bc18 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -483,7 +483,6 @@ struct btrfs_csum_item {
 #define BTRFS_BLOCK_GROUP_RAID1    (1 << 4)
 #define BTRFS_BLOCK_GROUP_DUP      (1 << 5)
 #define BTRFS_BLOCK_GROUP_RAID10   (1 << 6)
-
 struct btrfs_block_group_item {
 	__le64 used;
 	__le64 chunk_objectid;
@@ -498,17 +497,40 @@ struct btrfs_space_info {
 	int full;
 	int force_alloc;
 	struct list_head list;
+
+	/* for block groups in our same type */
+	struct list_head block_groups;
+	spinlock_t lock;
+};
+
+struct btrfs_free_space {
+	struct rb_node bytes_index;
+	struct rb_node offset_index;
+	u64 offset;
+	u64 bytes;
 };
 
 struct btrfs_block_group_cache {
 	struct btrfs_key key;
 	struct btrfs_block_group_item item;
-	struct btrfs_space_info *space_info;
 	spinlock_t lock;
 	u64 pinned;
 	u64 flags;
 	int cached;
 	int ro;
+	int dirty;
+
+	struct btrfs_space_info *space_info;
+
+	/* free space cache stuff */
+	struct rb_root free_space_bytes;
+	struct rb_root free_space_offset;
+
+	/* block group cache stuff */
+	struct rb_node cache_node;
+
+	/* for block groups in the same raid type */
+	struct list_head list;
 };
 
 struct btrfs_device;
@@ -525,8 +547,10 @@ struct btrfs_fs_info {
 	struct btrfs_root *log_root_tree;
 	struct radix_tree_root fs_roots_radix;
 
-	struct extent_io_tree free_space_cache;
-	struct extent_io_tree block_group_cache;
+	/* block group cache stuff */
+	spinlock_t block_group_cache_lock;
+	struct rb_root block_group_cache_tree;
+
 	struct extent_io_tree pinned_extents;
 	struct extent_io_tree pending_del;
 	struct extent_io_tree extent_ins;
@@ -1814,4 +1838,18 @@ int btrfs_sync_fs(struct super_block *sb, int wait);
 int btrfs_check_acl(struct inode *inode, int mask);
 int btrfs_init_acl(struct inode *inode, struct inode *dir);
 int btrfs_acl_chmod(struct inode *inode);
+
+/* free-space-cache.c */
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+			 u64 bytenr, u64 size);
+int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+			    u64 bytenr, u64 size);
+void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
+				   *block_group);
+struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
+					       *block_group, u64 offset,
+					       u64 bytes);
+void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
+			   u64 bytes);
+u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group);
 #endif
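For context, a hypothetical caller of the free-space-cache interface
declared above might carve an allocation out of a found entry and hand the
unused tail back to the cache. Only the btrfs_find_free_space,
btrfs_remove_free_space, and btrfs_add_free_space prototypes come from the
patch; the wrapper, its split logic, and its error handling are illustrative
sketches only.

#include <linux/errno.h>

/* Hypothetical caller of the free-space-cache API declared above. */
static int sketch_alloc_from_group(struct btrfs_block_group_cache *bg,
				   u64 hint, u64 num_bytes, u64 *start)
{
	struct btrfs_free_space *entry;
	u64 found_offset, found_bytes;

	entry = btrfs_find_free_space(bg, hint, num_bytes);
	if (!entry)
		return -ENOSPC;

	/* copy out before removal; the entry may be freed below */
	found_offset = entry->offset;
	found_bytes = entry->bytes;

	/* take the whole area, then return the unused tail */
	btrfs_remove_free_space(bg, found_offset, found_bytes);
	if (found_bytes > num_bytes)
		btrfs_add_free_space(bg, found_offset + num_bytes,
				     found_bytes - num_bytes);

	*start = found_offset;
	return 0;
}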