author     mohit84 <moagrawa@redhat.com>    2020-10-11 10:56:57 +0530
committer  GitHub <noreply@github.com>      2020-10-11 10:56:57 +0530
commit     ecdc77ceb9a5864be1fd0b3d7f919fa9ce60132e (patch)
tree       43604064dee2495ebbd32d777778508fa90eb07c /xlators/cluster
parent     8d54899724a31f29848e1461f68ce2cf40585056 (diff)
core: configure optimum inode table hash_size for shd (#1576)
In a brick_mux environment the shd process consumes a lot of memory. After
printing a statedump I found that it allocates 1M per afr xlator for all
bricks. With 4k volumes configured it consumes almost 6G of RSS in total,
of which about 4G is consumed by inode tables:
[cluster/replicate.test1-replicate-0 - usage-type gf_common_mt_list_head memusage]
size=1273488
num_allocs=2
max_size=1273488
max_num_allocs=2
total_allocs=2
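
The statedump figure is consistent with the bucket math: each hash bucket is
a two-pointer list head (16 bytes on 64-bit), and the table makes two
allocations (num_allocs=2), one for the inode hash and one for the dentry
hash. A minimal sketch that reproduces the number, assuming pre-patch
defaults of 65536 inode buckets and 14057 dentry buckets (an assumption
about the libglusterfs defaults, not stated in this commit):

/* Sketch: reproduce the 1273488-byte figure from the statedump above.
 * The 65536/14057 bucket counts are assumed pre-patch libglusterfs
 * defaults, not taken from this commit. */
#include <stdio.h>

struct list_head {          /* kernel-style list head: two pointers, */
    struct list_head *next; /* 16 bytes on a 64-bit platform         */
    struct list_head *prev;
};

int main(void)
{
    size_t inode_buckets = 65536;  /* assumed default inode hash size  */
    size_t dentry_buckets = 14057; /* assumed default dentry hash size */

    size_t inode_hash = inode_buckets * sizeof(struct list_head);
    size_t dentry_hash = dentry_buckets * sizeof(struct list_head);

    /* Two allocations (num_allocs=2): 1048576 + 224912 = 1273488,
     * matching size=1273488 in the gf_common_mt_list_head entry. */
    printf("inode hash:  %zu\n", inode_hash);
    printf("dentry hash: %zu\n", dentry_hash);
    printf("total:       %zu\n", inode_hash + dentry_hash);
    return 0;
}

Multiplied across thousands of afr xlators in a single brick-mux shd
process, this fixed ~1.27M cost per table is what adds up to the ~4G
attributed to inode tables.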
The inode_table_new function allocates 1M of memory for the inode and dentry
hash lists. For shd the lru_limit size is 1, so we don't need a big hash
table; to reduce the RSS size of the shd process, pass an optimum bucket
count at the time of creating the inode_table.
Change-Id: I039716d42321a232fdee1ee8fd50295e638715bb
Fixes: #1538
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
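
For the non-shd callers the patch passes 0 for both bucket counts, which
reads as "keep the old default size". A hedged sketch of that fallback
convention; the helper name and default values here are illustrative
assumptions, not the actual libglusterfs code:

/* Sketch of the 0-means-default convention the new calls rely on;
 * names and defaults are assumptions, not libglusterfs source. */
#include <stdint.h>

static uint32_t
pick_hashsize(uint32_t requested, uint32_t def)
{
    /* Callers that don't care (dht, ec, non-shd afr) pass 0 and get
     * the old, large default; shd passes small explicit counts. */
    return requested ? requested : def;
}

/* e.g. pick_hashsize(0, 65536)   -> 65536 (default inode buckets)
 *      pick_hashsize(128, 65536) -> 128   (shd inode buckets)
 *      pick_hashsize(131, 14057) -> 131   (shd dentry buckets)  */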
Diffstat (limited to 'xlators/cluster')
-rw-r--r--  xlators/cluster/afr/src/afr.c            10
-rw-r--r--  xlators/cluster/dht/src/dht-rebalance.c   2
-rw-r--r--  xlators/cluster/ec/src/ec.c               2
3 files changed, 11 insertions, 3 deletions
diff --git a/xlators/cluster/afr/src/afr.c b/xlators/cluster/afr/src/afr.c
index df7366f0a6..b60b3ed9b9 100644
--- a/xlators/cluster/afr/src/afr.c
+++ b/xlators/cluster/afr/src/afr.c
@@ -633,7 +633,15 @@ init(xlator_t *this)
         goto out;
     }
 
-    this->itable = inode_table_new(SHD_INODE_LRU_LIMIT, this);
+    if (priv->shd.iamshd) {
+        /* Number of hash bucket should be prime number so declare 131
+           total dentry hash buckets
+        */
+        this->itable = inode_table_new(SHD_INODE_LRU_LIMIT, this, 131, 128);
+    } else {
+        this->itable = inode_table_new(SHD_INODE_LRU_LIMIT, this, 0, 0);
+    }
+
     if (!this->itable) {
         ret = -ENOMEM;
         goto out;
diff --git a/xlators/cluster/dht/src/dht-rebalance.c b/xlators/cluster/dht/src/dht-rebalance.c
index 8ba8082bd8..1293fe1f86 100644
--- a/xlators/cluster/dht/src/dht-rebalance.c
+++ b/xlators/cluster/dht/src/dht-rebalance.c
@@ -2405,7 +2405,7 @@ dht_build_root_inode(xlator_t *this, inode_t **inode)
     inode_table_t *itable = NULL;
     static uuid_t root_gfid = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1};
 
-    itable = inode_table_new(0, this);
+    itable = inode_table_new(0, this, 0, 0);
     if (!itable)
         return;
 
diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
index 7344be4968..ee11535cfe 100644
--- a/xlators/cluster/ec/src/ec.c
+++ b/xlators/cluster/ec/src/ec.c
@@ -875,7 +875,7 @@ init(xlator_t *this)
     if (ec_assign_read_mask(ec, read_mask_str))
         goto failed;
 
-    this->itable = inode_table_new(EC_SHD_INODE_LRU_LIMIT, this);
+    this->itable = inode_table_new(EC_SHD_INODE_LRU_LIMIT, this, 0, 0);
     if (!this->itable)
         goto failed;
 
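
A note on the 131 in the afr hunk: the in-diff comment asks for a prime
bucket count because key sequences with a common stride collapse onto a few
buckets when the table size shares a factor with the stride. A
self-contained toy demo of that effect (unrelated to glusterfs's actual
hash functions):

/* Toy demo: strided keys spread over a prime-sized table but pile up
 * in a power-of-two-sized one. Illustration only. */
#include <stdio.h>

static void
count_used_buckets(unsigned nbuckets, unsigned stride)
{
    unsigned char hit[4096] = {0}; /* large enough for both table sizes */
    unsigned used = 0;

    for (unsigned k = 0; k < 1000; k++) {
        unsigned b = (k * stride) % nbuckets;
        if (!hit[b]) {
            hit[b] = 1;
            used++;
        }
    }
    printf("%u buckets, stride %u -> %u distinct buckets used\n",
           nbuckets, stride, used);
}

int main(void)
{
    count_used_buckets(128, 32); /* power of two: only 4 buckets hit */
    count_used_buckets(131, 32); /* prime: all 131 buckets hit       */
    return 0;
}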