author     Lukas Slebodnik <lslebodn@redhat.com>    2014-07-16 14:32:04 +0200
committer  Jakub Hrozek <jhrozek@redhat.com>        2014-07-23 21:08:28 +0200
commit     0d22416f94dff7756091e983518ed3684cc9597a (patch)
tree       f7b89ce36fcdd9ecf5ff26c76a401de3a9490827 /src/sss_client
parent     b53f1e74acee48c5f22f7532829e1934c68d4637 (diff)
sss_client: thread-safe initialisation of sss_cli_mc_ctx
In a multi-threaded application, several threads may call getpwuid() (or a similar function) before the sss client has initialised the structure for the fast memory cache. This structure is initialised only once, and after a successful initialisation multi-threaded applications have no problem. The race condition occurs when multiple threads try to initialise the structure sss_cli_mc_ctx in sss_nss_mc_get_ctx while ctx->initialized is still false.

Initialising the mmap cache takes some time: open the file, get its size, mmap it and fill in the structure sss_cli_mc_ctx. One of the problems is that the memory cache file can be opened multiple times (a file descriptor leak), but the real race is in initialising sss_cli_mc_ctx. One thread starts to initialise the structure; another thread assumes the structure is already initialised and checks its consistency. That check fails because the first thread has not finished the initialisation, so the second thread returns EINVAL and performs the cleanup in the done section: munmap, close the file and reset the structure data. When the first thread finishes and tries to use the memory cache, the structure has been zero-initialised by the second thread, which causes a NULL pointer dereference in the first thread (SIGSEGV) or a division by zero in the murmurhash function (SIGFPE).

The function sss_nss_mc_get_ctx was split into two parts to simplify locking and unlocking. The lock is taken only in the new static function sss_nss_mc_init_ctx. This function is not called very often, therefore the same mutex is used as in the other nss functions.

Resolves:
https://fedorahosted.org/sssd/ticket/2380

Reviewed-by: Michal Židek <mzidek@redhat.com>
Reviewed-by: Sumit Bose <sbose@redhat.com>
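The fix amounts to a lazy, lock-protected initialisation: the unlocked fast path only reads ctx->initialized, and the expensive setup is serialised behind a mutex that re-checks the flag inside the critical section. The following is a minimal standalone sketch of that scheme, assuming a plain pthread mutex in place of sss_nss_lock()/sss_nss_unlock(); struct mc_ctx, mc_get_ctx, mc_init_ctx and do_expensive_init are hypothetical names used only for illustration, not SSSD code.

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical stand-in for struct sss_cli_mc_ctx. */
struct mc_ctx {
    bool initialized;
    /* mmap base, size, fd, ... */
};

static pthread_mutex_t mc_lock = PTHREAD_MUTEX_INITIALIZER;

/* Placeholder for the expensive setup: open, fstat, mmap, fill in ctx. */
static int do_expensive_init(struct mc_ctx *ctx)
{
    ctx->initialized = true;
    return 0;
}

/* Slow path: only one thread at a time may perform the setup. */
static int mc_init_ctx(struct mc_ctx *ctx)
{
    int ret = 0;

    pthread_mutex_lock(&mc_lock);
    if (ctx->initialized) {
        /* Another thread completed the setup while we waited for the lock. */
        goto done;
    }
    ret = do_expensive_init(ctx);
done:
    pthread_mutex_unlock(&mc_lock);
    return ret;
}

/* Fast path: once initialised, callers never touch the mutex again. */
int mc_get_ctx(struct mc_ctx *ctx)
{
    if (ctx->initialized) {
        return 0;
    }
    return mc_init_ctx(ctx);
}

In the patch itself the consistency check (sss_nss_check_header) and the error cleanup stay in the unlocked caller, while the locked helper repeats the initialized check; this is what prevents a second thread from tearing down a context that the first thread is still constructing.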
Diffstat (limited to 'src/sss_client')
-rw-r--r--  src/sss_client/nss_mc_common.c | 44
1 file changed, 37 insertions(+), 7 deletions(-)
diff --git a/src/sss_client/nss_mc_common.c b/src/sss_client/nss_mc_common.c
index db9be94b4..cd1ac42da 100644
--- a/src/sss_client/nss_mc_common.c
+++ b/src/sss_client/nss_mc_common.c
@@ -31,6 +31,7 @@
#include <string.h>
#include <stdlib.h>
#include "nss_mc.h"
+#include "sss_cli.h"
#include "util/io.h"
/* FIXME: hook up to library destructor to avoid leaks */
@@ -101,18 +102,15 @@ errno_t sss_nss_check_header(struct sss_cli_mc_ctx *ctx)
return 0;
}
-errno_t sss_nss_mc_get_ctx(const char *name, struct sss_cli_mc_ctx *ctx)
+static errno_t sss_nss_mc_init_ctx(const char *name,
+ struct sss_cli_mc_ctx *ctx)
{
struct stat fdstat;
char *file = NULL;
- char *envval;
int ret;
- envval = getenv("SSS_NSS_USE_MEMCACHE");
- if (envval && strcasecmp(envval, "NO") == 0) {
- return EPERM;
- }
-
+ sss_nss_lock();
+ /* check if ctx is initialised by previous thread. */
if (ctx->initialized) {
ret = sss_nss_check_header(ctx);
goto done;
@@ -168,6 +166,38 @@ done:
memset(ctx, 0, sizeof(struct sss_cli_mc_ctx));
}
free(file);
+ sss_nss_unlock();
+
+ return ret;
+}
+
+errno_t sss_nss_mc_get_ctx(const char *name, struct sss_cli_mc_ctx *ctx)
+{
+ char *envval;
+ int ret;
+
+ envval = getenv("SSS_NSS_USE_MEMCACHE");
+ if (envval && strcasecmp(envval, "NO") == 0) {
+ return EPERM;
+ }
+
+ if (ctx->initialized) {
+ ret = sss_nss_check_header(ctx);
+ goto done;
+ }
+
+ ret = sss_nss_mc_init_ctx(name, ctx);
+
+done:
+ if (ret) {
+ if ((ctx->mmap_base != NULL) && (ctx->mmap_size != 0)) {
+ munmap(ctx->mmap_base, ctx->mmap_size);
+ }
+ if (ctx->fd != -1) {
+ close(ctx->fd);
+ }
+ memset(ctx, 0, sizeof(struct sss_cli_mc_ctx));
+ }
return ret;
}
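For reference, the scenario the commit message describes can be exercised with a small program along these lines: many threads resolving a user concurrently on a host where sssd backs NSS, so that the first callers race to initialise the fast memory cache. This is only an illustrative sketch, not part of the patch or of the SSSD test suite; the thread count is arbitrary and getpwuid_r is used instead of getpwuid so the example itself stays thread-safe.

#include <pthread.h>
#include <pwd.h>
#include <stdio.h>
#include <unistd.h>

#define NTHREADS 16

static void *worker(void *arg)
{
    struct passwd pwd, *result = NULL;
    char buf[4096];

    (void)arg;
    /* The first callers race to initialise the client-side memory cache. */
    if (getpwuid_r(getuid(), &pwd, buf, sizeof(buf), &result) == 0 &&
        result != NULL) {
        printf("resolved %s\n", result->pw_name);
    }
    return NULL;
}

int main(void)
{
    pthread_t tids[NTHREADS];

    for (int i = 0; i < NTHREADS; i++) {
        pthread_create(&tids[i], NULL, worker, NULL);
    }
    for (int i = 0; i < NTHREADS; i++) {
        pthread_join(tids[i], NULL);
    }
    return 0;
}

Built with -pthread, a pre-patch client could occasionally crash here with SIGSEGV or SIGFPE as described above; with the patch applied the first-time initialisation is serialised and the race disappears.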