From a5dda683328f99c781f92c66cc52ffc0639bef58 Mon Sep 17 00:00:00 2001
From: Eric Paris
Date: Thu, 12 Feb 2009 14:50:11 -0500
Subject: SELinux: check seqno when updating an avc_node

The avc update node callbacks do not check the seqno of the caller with
the seqno of the node found. It is possible that a policy change could
happen (although almost impossibly unlikely) in which a permissive or
permissive_domain decision is not valid for the entry found. Simply pass
and check that the seqno of the caller and the seqno of the node found
match.

Signed-off-by: Eric Paris
Acked-by: Stephen Smalley
Signed-off-by: James Morris
---
 security/selinux/avc.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'security/selinux/avc.c')

diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index eb41f43e277..0d00f4874f3 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -742,13 +742,15 @@ static inline int avc_sidcmp(u32 x, u32 y)
  * @event : Updating event
  * @perms : Permission mask bits
  * @ssid,@tsid,@tclass : identifier of an AVC entry
+ * @seqno : sequence number when decision was made
  *
  * if a valid AVC entry doesn't exist,this function returns -ENOENT.
  * if kmalloc() called internal returns NULL, this function returns -ENOMEM.
  * otherwise, this function update the AVC entry. The original AVC-entry object
  * will release later by RCU.
  */
-static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass)
+static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
+			   u32 seqno)
 {
 	int hvalue, rc = 0;
 	unsigned long flag;
@@ -767,7 +769,8 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass)
 	list_for_each_entry(pos, &avc_cache.slots[hvalue], list) {
 		if (ssid == pos->ae.ssid &&
 		    tsid == pos->ae.tsid &&
-		    tclass == pos->ae.tclass){
+		    tclass == pos->ae.tclass &&
+		    seqno == pos->ae.avd.seqno){
 			orig = pos;
 			break;
 		}
@@ -908,7 +911,7 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
 			rc = -EACCES;
 		else if (!selinux_enforcing || security_permissive_sid(ssid))
 			avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
-					tsid, tclass);
+					tsid, tclass, p_ae->avd.seqno);
 		else
 			rc = -EACCES;
 	}
-- cgit
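The window the added seqno comparison closes is easiest to see in miniature. Below is a small userspace sketch of the idea; the struct, function and values are illustrative stand-ins, not the kernel's types. An update is applied only when the cached entry was computed under the same policy generation (seqno) the caller saw; otherwise the caller is told the entry is gone.

/* Illustrative userspace sketch only -- not kernel code. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct cache_entry {
	uint32_t allowed;	/* cached access vector */
	uint32_t seqno;		/* policy generation that produced it */
};

/*
 * Grant extra permissions in a cached entry, but only if the entry was
 * computed under the same policy generation the caller observed.  A
 * stale entry (different seqno) is left untouched and the caller gets
 * -ENOENT, mirroring the check avc_update_node() now performs.
 */
static int update_entry(struct cache_entry *e, uint32_t perms, uint32_t caller_seqno)
{
	if (e->seqno != caller_seqno)
		return -ENOENT;		/* decision belongs to another policy */
	e->allowed |= perms;
	return 0;
}

int main(void)
{
	struct cache_entry e = { .allowed = 0x1, .seqno = 41 };

	printf("same policy:  %d\n", update_entry(&e, 0x2, 41));	/* 0 */
	printf("stale policy: %d\n", update_entry(&e, 0x4, 42));	/* -ENOENT (-2) */
	return 0;
}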
From 906d27d9d28fd50fb40026e56842d8f6806a7a04 Mon Sep 17 00:00:00 2001
From: Eric Paris
Date: Thu, 12 Feb 2009 14:50:43 -0500
Subject: SELinux: remove the unused ae.used

Currently SELinux code has an atomic which was intended to track how many
times an avc entry was used and to evict entries when they haven't been
used recently. Instead we never let this atomic get above 1 and evict when
it is first checked for eviction since it hits zero. This is a total waste
of time so I'm completely dropping ae.used. This change resulted in about
a 3% faster avc_has_perm_noaudit when running oprofile against a tbench
benchmark.

Signed-off-by: Eric Paris
Reviewed-by: Paul Moore
Acked-by: Stephen Smalley
Signed-off-by: James Morris
---
 security/selinux/avc.c | 28 +++++++---------------------
 1 file changed, 7 insertions(+), 21 deletions(-)

(limited to 'security/selinux/avc.c')

diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 0d00f4874f3..0afb990fdfa 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -88,7 +88,6 @@ struct avc_entry {
 	u32	tsid;
 	u16	tclass;
 	struct av_decision	avd;
-	atomic_t	used;	/* used recently */
 };
 
 struct avc_node {
@@ -316,16 +315,13 @@ static inline int avc_reclaim_node(void)
 
 		rcu_read_lock();
 		list_for_each_entry(node, &avc_cache.slots[hvalue], list) {
-			if (atomic_dec_and_test(&node->ae.used)) {
-				/* Recently Unused */
-				avc_node_delete(node);
-				avc_cache_stats_incr(reclaims);
-				ecx++;
-				if (ecx >= AVC_CACHE_RECLAIM) {
-					rcu_read_unlock();
-					spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
-					goto out;
-				}
+			avc_node_delete(node);
+			avc_cache_stats_incr(reclaims);
+			ecx++;
+			if (ecx >= AVC_CACHE_RECLAIM) {
+				rcu_read_unlock();
+				spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
+				goto out;
 			}
 		}
 		rcu_read_unlock();
@@ -345,7 +341,6 @@ static struct avc_node *avc_alloc_node(void)
 
 	INIT_RCU_HEAD(&node->rhead);
 	INIT_LIST_HEAD(&node->list);
-	atomic_set(&node->ae.used, 1);
 	avc_cache_stats_incr(allocations);
 
 	if (atomic_inc_return(&avc_cache.active_nodes) > avc_cache_threshold)
@@ -378,15 +373,6 @@ static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
 		}
 	}
 
-	if (ret == NULL) {
-		/* cache miss */
-		goto out;
-	}
-
-	/* cache hit */
-	if (atomic_read(&ret->ae.used) != 1)
-		atomic_set(&ret->ae.used, 1);
-out:
 	return ret;
 }
 
-- cgit
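Why the counter was dead weight can be shown in a few lines. The sketch below is an illustrative userspace model (C11 atomics standing in for the kernel's atomic_t, names made up): because nothing ever incremented the counter past its initial value of 1, the reclaim path's decrement-and-test always reached zero, so every node it visited was evicted regardless and the counter decided nothing.

/* Illustrative userspace model only -- not the kernel's atomic_t API. */
#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int used;

	atomic_init(&used, 1);		/* what avc_alloc_node() used to do */

	/*
	 * No code path ever incremented 'used' again, so on the first
	 * reclaim scan the dec-and-test below is always true.
	 */
	if (atomic_fetch_sub(&used, 1) - 1 == 0)	/* dec-and-test */
		puts("evict");		/* always taken */
	else
		puts("keep");		/* dead branch */
	return 0;
}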
From 21193dcd1f3570ddfd8a04f4465e484c1f94252f Mon Sep 17 00:00:00 2001
From: Eric Paris
Date: Thu, 12 Feb 2009 14:50:49 -0500
Subject: SELinux: more careful use of avd in avc_has_perm_noaudit

We are often needlessly jumping through hoops with avd entries in
avc_has_perm_noaudit, and we have extra initialization and memcpy calls
which just waste performance. Try to clean the function up a bit. This
patch resulted in a 13% drop in time spent in avc_has_perm_noaudit in my
oprofile sampling of a tbench benchmark.

Signed-off-by: Eric Paris
Reviewed-by: Paul Moore
Acked-by: Stephen Smalley
Signed-off-by: James Morris
---
 security/selinux/avc.c | 43 ++++++++++++++++++++++++-------------------
 1 file changed, 24 insertions(+), 19 deletions(-)

(limited to 'security/selinux/avc.c')

diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 0afb990fdfa..2a84dec4adf 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -350,12 +350,12 @@ out:
 	return node;
 }
 
-static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct avc_entry *ae)
+static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
 {
 	node->ae.ssid = ssid;
 	node->ae.tsid = tsid;
 	node->ae.tclass = tclass;
-	memcpy(&node->ae.avd, &ae->avd, sizeof(node->ae.avd));
+	memcpy(&node->ae.avd, avd, sizeof(node->ae.avd));
 }
 
 static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
@@ -435,31 +435,31 @@ static int avc_latest_notif_update(int seqno, int is_insert)
  * @ssid: source security identifier
  * @tsid: target security identifier
  * @tclass: target security class
- * @ae: AVC entry
+ * @avd: resulting av decision
  *
  * Insert an AVC entry for the SID pair
  * (@ssid, @tsid) and class @tclass.
  * The access vectors and the sequence number are
  * normally provided by the security server in
  * response to a security_compute_av() call. If the
- * sequence number @ae->avd.seqno is not less than the latest
+ * sequence number @avd->seqno is not less than the latest
  * revocation notification, then the function copies
  * the access vectors into a cache entry, returns
  * avc_node inserted. Otherwise, this function returns NULL.
  */
-static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct avc_entry *ae)
+static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
 {
 	struct avc_node *pos, *node = NULL;
 	int hvalue;
 	unsigned long flag;
 
-	if (avc_latest_notif_update(ae->avd.seqno, 1))
+	if (avc_latest_notif_update(avd->seqno, 1))
 		goto out;
 
 	node = avc_alloc_node();
 	if (node) {
 		hvalue = avc_hash(ssid, tsid, tclass);
-		avc_node_populate(node, ssid, tsid, tclass, ae);
+		avc_node_populate(node, ssid, tsid, tclass, avd);
 
 		spin_lock_irqsave(&avc_cache.slots_lock[hvalue], flag);
 		list_for_each_entry(pos, &avc_cache.slots[hvalue], list) {
@@ -772,7 +772,7 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
 	 * Copy and replace original node.
 	 */
 
-	avc_node_populate(node, ssid, tsid, tclass, &orig->ae);
+	avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd);
 
 	switch (event) {
 	case AVC_CALLBACK_GRANT:
@@ -864,10 +864,10 @@ int avc_ss_reset(u32 seqno)
 int avc_has_perm_noaudit(u32 ssid, u32 tsid,
 			 u16 tclass, u32 requested,
 			 unsigned flags,
-			 struct av_decision *avd)
+			 struct av_decision *in_avd)
 {
 	struct avc_node *node;
-	struct avc_entry entry, *p_ae;
+	struct av_decision avd_entry, *avd;
 	int rc = 0;
 	u32 denied;
 
@@ -878,26 +878,31 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
 	node = avc_lookup(ssid, tsid, tclass, requested);
 	if (!node) {
 		rcu_read_unlock();
-		rc = security_compute_av(ssid, tsid, tclass, requested, &entry.avd);
+
+		if (in_avd)
+			avd = in_avd;
+		else
+			avd = &avd_entry;
+
+		rc = security_compute_av(ssid, tsid, tclass, requested, avd);
 		if (rc)
 			goto out;
 		rcu_read_lock();
-		node = avc_insert(ssid, tsid, tclass, &entry);
+		node = avc_insert(ssid, tsid, tclass, avd);
+	} else {
+		if (in_avd)
+			memcpy(in_avd, &node->ae.avd, sizeof(*in_avd));
+		avd = &node->ae.avd;
 	}
 
-	p_ae = node ? &node->ae : &entry;
-
-	if (avd)
-		memcpy(avd, &p_ae->avd, sizeof(*avd));
-
-	denied = requested & ~(p_ae->avd.allowed);
+	denied = requested & ~(avd->allowed);
 
 	if (denied) {
 		if (flags & AVC_STRICT)
 			rc = -EACCES;
 		else if (!selinux_enforcing || security_permissive_sid(ssid))
 			avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
-					tsid, tclass, p_ae->avd.seqno);
+					tsid, tclass, avd->seqno);
 		else
 			rc = -EACCES;
 	}
-- cgit
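The shape of the cleanup is the usual "fill the caller's buffer directly" pattern. Here is a small userspace sketch of that pattern with stand-in types and names (not the kernel functions): when the caller supplies a result buffer it becomes the working copy, otherwise a local one does, so the miss path never needs a second struct plus a memcpy().

/* Illustrative userspace sketch only -- not the kernel implementation. */
#include <stdint.h>
#include <stdio.h>

struct decision {
	uint32_t allowed;
	uint32_t seqno;
};

static void compute_decision(struct decision *d)
{
	d->allowed = 0x0ff;	/* stand-in for a real policy computation */
	d->seqno = 7;
}

/*
 * On a "cache miss", compute straight into the caller's buffer when one
 * was passed (in_d != NULL); otherwise fall back to a local.  Either way
 * the data is written exactly once and never copied a second time.
 */
static uint32_t check_access(struct decision *in_d, uint32_t requested)
{
	struct decision local, *d;

	d = in_d ? in_d : &local;
	compute_decision(d);
	return requested & ~d->allowed;		/* denied bits */
}

int main(void)
{
	struct decision out;

	printf("denied: 0x%x\n", (unsigned)check_access(&out, 0x100));	/* 0x100 */
	printf("denied: 0x%x\n", (unsigned)check_access(NULL, 0x003));	/* 0x0 */
	printf("caller's copy has seqno %u\n", (unsigned)out.seqno);
	return 0;
}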
From f1c6381a6e337adcecf84be2a838bd9e610e2365 Mon Sep 17 00:00:00 2001
From: Eric Paris
Date: Thu, 12 Feb 2009 14:50:54 -0500
Subject: SELinux: remove unused av.decided field

It appears there was an intention to have the security server only decide
certain permissions and leave others for later as some sort of potential
performance win. We are currently always deciding all 32 bits of
permissions, so this is a useless couple of branches and wasted space.
This patch completely drops the av.decided concept. This resulted in a 17%
reduction in the time spent in avc_has_perm_noaudit based on oprofile
sampling of a tbench benchmark.

Signed-off-by: Eric Paris
Reviewed-by: Paul Moore
Acked-by: Stephen Smalley
Signed-off-by: James Morris
---
 security/selinux/avc.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

(limited to 'security/selinux/avc.c')

diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 2a84dec4adf..326aa78bd42 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -381,30 +381,25 @@ static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
  * @ssid: source security identifier
  * @tsid: target security identifier
  * @tclass: target security class
- * @requested: requested permissions, interpreted based on @tclass
  *
  * Look up an AVC entry that is valid for the
- * @requested permissions between the SID pair
  * (@ssid, @tsid), interpreting the permissions
  * based on @tclass. If a valid AVC entry exists,
 * then this function return the avc_node.
 * Otherwise, this function returns NULL.
 */
-static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass, u32 requested)
+static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass)
 {
 	struct avc_node *node;
 
 	avc_cache_stats_incr(lookups);
 	node = avc_search_node(ssid, tsid, tclass);
 
-	if (node && ((node->ae.avd.decided & requested) == requested)) {
+	if (node)
 		avc_cache_stats_incr(hits);
-		goto out;
-	}
+	else
+		avc_cache_stats_incr(misses);
 
-	node = NULL;
-	avc_cache_stats_incr(misses);
-out:
 	return node;
 }
 
@@ -875,7 +870,7 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
 
 	rcu_read_lock();
 
-	node = avc_lookup(ssid, tsid, tclass, requested);
+	node = avc_lookup(ssid, tsid, tclass);
 	if (!node) {
 		rcu_read_unlock();
 
-- cgit
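The branch that disappears is the mask test on av.decided. The sketch below (illustrative values only, not kernel code) shows why it could never fail once all 32 permission bits are always decided: decided is effectively ~0u, so (decided & requested) == requested holds for any request, and both the field and the test are pure overhead.

/* Illustrative sketch only -- values are made up. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t decided = ~0u;			/* all 32 bits always decided */
	uint32_t requested = 0x00440002;	/* any request whatsoever */

	if ((decided & requested) == requested)
		puts("cache hit");		/* always taken */
	else
		puts("treat as miss");		/* unreachable once decided == ~0u */
	return 0;
}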
From edf3d1aecd0d608acbd561b0c527e1d41abcb657 Mon Sep 17 00:00:00 2001
From: Eric Paris
Date: Thu, 12 Feb 2009 14:50:59 -0500
Subject: SELinux: code readability with avc_cache

The code making use of struct avc_cache was not easy to read thanks to
liberal use of &avc_cache.{slots_lock,slots}[hvalue] throughout. This
patch simply creates local pointers and uses those instead of the long
global names.

Signed-off-by: Eric Paris
Signed-off-by: James Morris
---
 security/selinux/avc.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 44 insertions(+), 19 deletions(-)

(limited to 'security/selinux/avc.c')

diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 326aa78bd42..9dd5c506a82 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -92,12 +92,12 @@ struct avc_entry {
 
 struct avc_node {
 	struct avc_entry	ae;
-	struct list_head	list;
+	struct list_head	list; /* anchored in avc_cache->slots[i] */
 	struct rcu_head		rhead;
 };
 
 struct avc_cache {
-	struct list_head	slots[AVC_CACHE_SLOTS];
+	struct list_head	slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
 	spinlock_t		slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
 	atomic_t		lru_hint;	/* LRU hint for reclaim scan */
 	atomic_t		active_nodes;
@@ -249,16 +249,18 @@ int avc_get_hash_stats(char *page)
 {
 	int i, chain_len, max_chain_len, slots_used;
 	struct avc_node *node;
+	struct list_head *head;
 
 	rcu_read_lock();
 
 	slots_used = 0;
 	max_chain_len = 0;
 	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
-		if (!list_empty(&avc_cache.slots[i])) {
+		head = &avc_cache.slots[i];
+		if (!list_empty(head)) {
 			slots_used++;
 			chain_len = 0;
-			list_for_each_entry_rcu(node, &avc_cache.slots[i], list)
+			list_for_each_entry_rcu(node, head, list)
 				chain_len++;
 			if (chain_len > max_chain_len)
 				max_chain_len = chain_len;
@@ -306,26 +308,30 @@ static inline int avc_reclaim_node(void)
 	struct avc_node *node;
 	int hvalue, try, ecx;
 	unsigned long flags;
+	struct list_head *head;
+	spinlock_t *lock;
 
 	for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
 		hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
+		head = &avc_cache.slots[hvalue];
+		lock = &avc_cache.slots_lock[hvalue];
 
-		if (!spin_trylock_irqsave(&avc_cache.slots_lock[hvalue], flags))
+		if (!spin_trylock_irqsave(lock, flags))
 			continue;
 
 		rcu_read_lock();
-		list_for_each_entry(node, &avc_cache.slots[hvalue], list) {
+		list_for_each_entry(node, head, list) {
 			avc_node_delete(node);
 			avc_cache_stats_incr(reclaims);
 			ecx++;
 			if (ecx >= AVC_CACHE_RECLAIM) {
 				rcu_read_unlock();
-				spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
+				spin_unlock_irqrestore(lock, flags);
 				goto out;
 			}
 		}
 		rcu_read_unlock();
-		spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
+		spin_unlock_irqrestore(lock, flags);
 	}
 out:
 	return ecx;
@@ -362,9 +368,11 @@ static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
 {
 	struct avc_node *node, *ret = NULL;
 	int hvalue;
+	struct list_head *head;
 
 	hvalue = avc_hash(ssid, tsid, tclass);
-	list_for_each_entry_rcu(node, &avc_cache.slots[hvalue], list) {
+	head = &avc_cache.slots[hvalue];
+	list_for_each_entry_rcu(node, head, list) {
 		if (ssid == node->ae.ssid &&
 		    tclass == node->ae.tclass &&
 		    tsid == node->ae.tsid) {
@@ -453,11 +461,17 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
 
 	node = avc_alloc_node();
 	if (node) {
+		struct list_head *head;
+		spinlock_t *lock;
+
 		hvalue = avc_hash(ssid, tsid, tclass);
 		avc_node_populate(node, ssid, tsid, tclass, avd);
 
-		spin_lock_irqsave(&avc_cache.slots_lock[hvalue], flag);
-		list_for_each_entry(pos, &avc_cache.slots[hvalue], list) {
+		head = &avc_cache.slots[hvalue];
+		lock = &avc_cache.slots_lock[hvalue];
+
+		spin_lock_irqsave(lock, flag);
+		list_for_each_entry(pos, head, list) {
 			if (pos->ae.ssid == ssid &&
 			    pos->ae.tsid == tsid &&
 			    pos->ae.tclass == tclass) {
 				goto found;
 			}
 		}
-		list_add_rcu(&node->list, &avc_cache.slots[hvalue]);
+		list_add_rcu(&node->list, head);
 found:
-		spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flag);
+		spin_unlock_irqrestore(lock, flag);
 	}
 out:
 	return node;
 }
@@ -750,6 +764,8 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
 	int hvalue, rc = 0;
 	unsigned long flag;
 	struct avc_node *pos, *node, *orig = NULL;
+	struct list_head *head;
+	spinlock_t *lock;
 
 	node = avc_alloc_node();
 	if (!node) {
@@ -745,9 +761,13 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
 
 	/* Lock the target slot */
 	hvalue = avc_hash(ssid, tsid, tclass);
-	spin_lock_irqsave(&avc_cache.slots_lock[hvalue], flag);
 
-	list_for_each_entry(pos, &avc_cache.slots[hvalue], list) {
+	head = &avc_cache.slots[hvalue];
+	lock = &avc_cache.slots_lock[hvalue];
+
+	spin_lock_irqsave(lock, flag);
+
+	list_for_each_entry(pos, head, list) {
 		if (ssid == pos->ae.ssid &&
 		    tsid == pos->ae.tsid &&
 		    tclass == pos->ae.tclass &&
@@ -792,7 +812,7 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
 	}
 	avc_node_replace(node, orig);
 out_unlock:
-	spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flag);
+	spin_unlock_irqrestore(lock, flag);
 out:
 	return rc;
 }
@@ -807,18 +827,23 @@ int avc_ss_reset(u32 seqno)
 	int i, rc = 0, tmprc;
 	unsigned long flag;
 	struct avc_node *node;
+	struct list_head *head;
+	spinlock_t *lock;
 
 	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
-		spin_lock_irqsave(&avc_cache.slots_lock[i], flag);
+		head = &avc_cache.slots[i];
+		lock = &avc_cache.slots_lock[i];
+
+		spin_lock_irqsave(lock, flag);
 		/*
 		 * With preemptable RCU, the outer spinlock does not
 		 * prevent RCU grace periods from ending.
 		 */
 		rcu_read_lock();
-		list_for_each_entry(node, &avc_cache.slots[i], list)
+		list_for_each_entry(node, head, list)
 			avc_node_delete(node);
 		rcu_read_unlock();
-		spin_unlock_irqrestore(&avc_cache.slots_lock[i], flag);
+		spin_unlock_irqrestore(lock, flag);
 	}
 
 	for (c = avc_callbacks; c; c = c->next) {
-- cgit
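The pattern being applied is simply hoisting the long, repeated index expressions into local pointers once per iteration. A toy userspace version of the same idiom is shown below, with stand-in types and pthread mutexes in place of the kernel's spinlocks; nothing here is the kernel code itself.

/* Toy userspace illustration of the local-pointer idiom -- not kernel code. */
#include <pthread.h>
#include <stdio.h>

#define SLOTS 4

struct toy_cache {
	int		values[SLOTS];
	pthread_mutex_t	locks[SLOTS];
};

static struct toy_cache cache;

static void bump(int hvalue)
{
	/* Hoist the long index expressions into locals once ... */
	int *slot = &cache.values[hvalue];
	pthread_mutex_t *lock = &cache.locks[hvalue];

	/* ... so the rest of the body reads in terms of slot/lock. */
	pthread_mutex_lock(lock);
	(*slot)++;
	pthread_mutex_unlock(lock);
}

int main(void)
{
	int i;

	for (i = 0; i < SLOTS; i++)
		pthread_mutex_init(&cache.locks[i], NULL);

	bump(1);
	bump(1);
	printf("slot 1 = %d\n", cache.values[1]);	/* 2 */
	return 0;
}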
From 26036651c562609d1f52d181f9d2cccbf89929b1 Mon Sep 17 00:00:00 2001
From: Eric Paris
Date: Thu, 12 Feb 2009 14:51:04 -0500
Subject: SELinux: convert the avc cache hash list to an hlist

We do not need O(1) access to the tail of the avc cache lists and so we
are wasting lots of space using struct list_head instead of struct
hlist_head. This patch converts the avc cache to use hlists in which
there is a single pointer from the head which saves us about 4k of global
memory. Resulted in about a 1.5% decrease in time spent in
avc_has_perm_noaudit based on oprofile sampling of tbench. Although
likely within the noise....
Signed-off-by: Eric Paris
Reviewed-by: Paul Moore
Signed-off-by: James Morris
---
 security/selinux/avc.c | 47 +++++++++++++++++++++++++++--------------------
 1 file changed, 27 insertions(+), 20 deletions(-)

(limited to 'security/selinux/avc.c')

diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 9dd5c506a82..7f9b5fac877 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -92,12 +92,12 @@ struct avc_entry {
 
 struct avc_node {
 	struct avc_entry	ae;
-	struct list_head	list; /* anchored in avc_cache->slots[i] */
+	struct hlist_node	list; /* anchored in avc_cache->slots[i] */
 	struct rcu_head		rhead;
 };
 
 struct avc_cache {
-	struct list_head	slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
+	struct hlist_head	slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
 	spinlock_t		slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
 	atomic_t		lru_hint;	/* LRU hint for reclaim scan */
 	atomic_t		active_nodes;
@@ -233,7 +233,7 @@ void __init avc_init(void)
 	int i;
 
 	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
-		INIT_LIST_HEAD(&avc_cache.slots[i]);
+		INIT_HLIST_HEAD(&avc_cache.slots[i]);
 		spin_lock_init(&avc_cache.slots_lock[i]);
 	}
 	atomic_set(&avc_cache.active_nodes, 0);
@@ -249,7 +249,7 @@ int avc_get_hash_stats(char *page)
 {
 	int i, chain_len, max_chain_len, slots_used;
 	struct avc_node *node;
-	struct list_head *head;
+	struct hlist_head *head;
 
 	rcu_read_lock();
 
@@ -257,10 +257,12 @@ int avc_get_hash_stats(char *page)
 	max_chain_len = 0;
 	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
 		head = &avc_cache.slots[i];
-		if (!list_empty(head)) {
+		if (!hlist_empty(head)) {
+			struct hlist_node *next;
+
 			slots_used++;
 			chain_len = 0;
-			list_for_each_entry_rcu(node, head, list)
+			hlist_for_each_entry_rcu(node, next, head, list)
 				chain_len++;
 			if (chain_len > max_chain_len)
 				max_chain_len = chain_len;
@@ -284,7 +286,7 @@ static void avc_node_free(struct rcu_head *rhead)
 
 static void avc_node_delete(struct avc_node *node)
 {
-	list_del_rcu(&node->list);
+	hlist_del_rcu(&node->list);
 	call_rcu(&node->rhead, avc_node_free);
 	atomic_dec(&avc_cache.active_nodes);
 }
@@ -298,7 +300,7 @@ static void avc_node_kill(struct avc_node *node)
 
 static void avc_node_replace(struct avc_node *new, struct avc_node *old)
 {
-	list_replace_rcu(&old->list, &new->list);
+	hlist_replace_rcu(&old->list, &new->list);
 	call_rcu(&old->rhead, avc_node_free);
 	atomic_dec(&avc_cache.active_nodes);
 }
@@ -308,7 +310,8 @@ static inline int avc_reclaim_node(void)
 	struct avc_node *node;
 	int hvalue, try, ecx;
 	unsigned long flags;
-	struct list_head *head;
+	struct hlist_head *head;
+	struct hlist_node *next;
 	spinlock_t *lock;
 
 	for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
@@ -320,7 +323,7 @@ static inline int avc_reclaim_node(void)
 			continue;
 
 		rcu_read_lock();
-		list_for_each_entry(node, head, list) {
+		hlist_for_each_entry(node, next, head, list) {
 			avc_node_delete(node);
 			avc_cache_stats_incr(reclaims);
 			ecx++;
@@ -346,7 +349,7 @@ static struct avc_node *avc_alloc_node(void)
 		goto out;
 
 	INIT_RCU_HEAD(&node->rhead);
-	INIT_LIST_HEAD(&node->list);
+	INIT_HLIST_NODE(&node->list);
 	avc_cache_stats_incr(allocations);
 
 	if (atomic_inc_return(&avc_cache.active_nodes) > avc_cache_threshold)
@@ -368,11 +371,12 @@ static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
 {
 	struct avc_node *node, *ret = NULL;
 	int hvalue;
-	struct list_head *head;
+	struct hlist_head *head;
+	struct hlist_node *next;
 
 	hvalue = avc_hash(ssid, tsid, tclass);
 	head = &avc_cache.slots[hvalue];
-	list_for_each_entry_rcu(node, head, list) {
+	hlist_for_each_entry_rcu(node, next, head, list) {
 		if (ssid == node->ae.ssid &&
 		    tclass == node->ae.tclass &&
 		    tsid == node->ae.tsid) {
@@ -465,7 +465,8 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
 
 	node = avc_alloc_node();
 	if (node) {
-		struct list_head *head;
+		struct hlist_head *head;
+		struct hlist_node *next;
 		spinlock_t *lock;
 
 		hvalue = avc_hash(ssid, tsid, tclass);
@@ -471,7 +476,7 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
 		lock = &avc_cache.slots_lock[hvalue];
 
 		spin_lock_irqsave(lock, flag);
-		list_for_each_entry(pos, head, list) {
+		hlist_for_each_entry(pos, next, head, list) {
 			if (pos->ae.ssid == ssid &&
 			    pos->ae.tsid == tsid &&
 			    pos->ae.tclass == tclass) {
@@ -479,7 +484,7 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
 				goto found;
 			}
 		}
-		list_add_rcu(&node->list, head);
+		hlist_add_head_rcu(&node->list, head);
 found:
 		spin_unlock_irqrestore(lock, flag);
 	}
@@ -750,7 +755,8 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
 	int hvalue, rc = 0;
 	unsigned long flag;
 	struct avc_node *pos, *node, *orig = NULL;
-	struct list_head *head;
+	struct hlist_head *head;
+	struct hlist_node *next;
 	spinlock_t *lock;
 
 	node = avc_alloc_node();
@@ -767,7 +773,7 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
 
 	spin_lock_irqsave(lock, flag);
 
-	list_for_each_entry(pos, head, list) {
+	hlist_for_each_entry(pos, next, head, list) {
 		if (ssid == pos->ae.ssid &&
 		    tsid == pos->ae.tsid &&
 		    tclass == pos->ae.tclass &&
@@ -827,7 +833,8 @@ int avc_ss_reset(u32 seqno)
 	int i, rc = 0, tmprc;
 	unsigned long flag;
 	struct avc_node *node;
-	struct list_head *head;
+	struct hlist_head *head;
+	struct hlist_node *next;
 	spinlock_t *lock;
 
 	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
@@ -840,7 +847,7 @@ int avc_ss_reset(u32 seqno)
 		 * prevent RCU grace periods from ending.
 		 */
 		rcu_read_lock();
-		list_for_each_entry(node, head, list)
+		hlist_for_each_entry(node, next, head, list)
 			avc_node_delete(node);
 		rcu_read_unlock();
 		spin_unlock_irqrestore(lock, flag);
-- cgit
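The memory saving comes purely from the head type. The userspace sketch below uses stand-in definitions (the real ones live in the kernel's list headers) to show the arithmetic: a doubly linked list head carries two pointers while an hlist head carries one, so 512 buckets shrink by roughly 4 KB on a typical 64-bit build, in line with the "about 4k" quoted in the commit message.

/* Stand-in definitions for illustration only -- not the kernel's headers. */
#include <stdio.h>

struct hlist_node { struct hlist_node *next, **pprev; };	/* per-entry links    */
struct hlist_head { struct hlist_node *first; };		/* head: one pointer  */
struct list_head  { struct list_head *next, *prev; };		/* head: two pointers */

#define AVC_CACHE_SLOTS 512	/* number of buckets used by avc.c */

int main(void)
{
	/* Typical 64-bit build: 512 * 16 = 8192 bytes vs 512 * 8 = 4096 bytes. */
	printf("list_head bucket array:  %zu bytes\n",
	       sizeof(struct list_head) * AVC_CACHE_SLOTS);
	printf("hlist_head bucket array: %zu bytes\n",
	       sizeof(struct hlist_head) * AVC_CACHE_SLOTS);
	return 0;
}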