author     Greg Hudson <ghudson@mit.edu>    2013-05-10 14:01:48 -0400
committer  Greg Hudson <ghudson@mit.edu>    2013-05-14 13:31:41 -0400
commit     6350fd0c909d84c00200885e722cc902049ada05 (patch)
tree       a880eae4b875d2b94747048a7092f619c79d33c2 /src/util
parent     1799f7b5d9cf4390148248d603d99a3695ddfafe (diff)
Assume mutex locking cannot fail
Locking and unlocking a non-recursive mutex is a simple memory operation
and should not fail on any reasonable platform with correct usage.  A
pthread mutex can return EDEADLK on lock or EPERM on unlock, or EINVAL if
the mutex is uninitialized, but all of these conditions would reflect
serious bugs in the calling code.

Change the k5_mutex_lock and k5_mutex_unlock wrappers to return void and
adjust all call sites.  Propagate this change through k5_cc_mutex_lock and
k5_cc_mutex_unlock as well.
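A minimal sketch of the resulting wrapper shape, assuming a pthread-based build; the real definitions live in k5-thread.h and differ in detail, and the k5_mutex_t layout (the "os" member below) is assumed here for illustration only:

```c
#include <assert.h>
#include <pthread.h>

typedef struct { pthread_mutex_t os; } k5_mutex_t;   /* layout is an assumption */

static inline void
k5_mutex_lock(k5_mutex_t *m)
{
    /* EDEADLK or EINVAL here would mean a bug in the caller, so treat
     * any failure as fatal instead of returning it. */
    int r = pthread_mutex_lock(&m->os);
    assert(r == 0);
    (void)r;    /* quiet unused-variable warnings under NDEBUG */
}

static inline void
k5_mutex_unlock(k5_mutex_t *m)
{
    /* Likewise, EPERM or EINVAL would indicate incorrect usage. */
    int r = pthread_mutex_unlock(&m->os);
    assert(r == 0);
    (void)r;
}
```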
Diffstat (limited to 'src/util')
-rw-r--r--  src/util/et/com_err.c          14
-rw-r--r--  src/util/et/error_message.c    26
-rw-r--r--  src/util/profile/prof_file.c   59
-rw-r--r--  src/util/profile/prof_init.c   15
-rw-r--r--  src/util/profile/prof_int.h     4
-rw-r--r--  src/util/profile/prof_set.c    16
-rw-r--r--  src/util/profile/prof_tree.c   34
-rw-r--r--  src/util/support/errors.c      10
-rw-r--r--  src/util/support/threads.c     96
9 files changed, 97 insertions, 177 deletions
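Most of the diff below is the same mechanical rewrite applied at each call site. A hypothetical before/after caller, reusing the definitions from the sketch above (do_work and the errcode_t stand-in are illustrative, not code from this patch):

```c
typedef long errcode_t;   /* stand-in for the com_err error type */

/*
 * Before this change, a caller looked roughly like:
 *
 *     retval = k5_mutex_lock(&lock);
 *     if (retval)
 *         return retval;
 *     ...critical section...
 *     return k5_mutex_unlock(&lock);
 *
 * With void-returning wrappers, the error plumbing drops out:
 */
static errcode_t
do_work(k5_mutex_t *lock)
{
    k5_mutex_lock(lock);
    /* ... critical section ... */
    k5_mutex_unlock(lock);
    return 0;
}
```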
diff --git a/src/util/et/com_err.c b/src/util/et/com_err.c
index 96922ec24f..c1e3be7725 100644
--- a/src/util/et/com_err.c
+++ b/src/util/et/com_err.c
@@ -106,9 +106,7 @@ void KRB5_CALLCONV com_err_va(const char *whoami,
err = com_err_finish_init();
if (err)
goto best_try;
- err = k5_mutex_lock(&com_err_hook_lock);
- if (err)
- goto best_try;
+ k5_mutex_lock(&com_err_hook_lock);
p = com_err_hook ? com_err_hook : default_com_err_proc;
(*p)(whoami, code, fmt, ap);
k5_mutex_unlock(&com_err_hook_lock);
@@ -144,9 +142,9 @@ void KRB5_CALLCONV_C com_err(const char *whoami,
/* Make a separate function because the assert invocations below
use the macro expansion on some platforms, which may be insanely
long and incomprehensible. */
-static int com_err_lock_hook_handle(void)
+static void com_err_lock_hook_handle(void)
{
- return k5_mutex_lock(&com_err_hook_lock);
+ k5_mutex_lock(&com_err_hook_lock);
}
et_old_error_hook_func set_com_err_hook (et_old_error_hook_func new_proc)
@@ -156,8 +154,7 @@ et_old_error_hook_func set_com_err_hook (et_old_error_hook_func new_proc)
/* Broken initialization? What can we do? */
if (com_err_finish_init() != 0)
abort();
- if (com_err_lock_hook_handle() != 0)
- abort();
+ com_err_lock_hook_handle();
x = com_err_hook;
com_err_hook = new_proc;
k5_mutex_unlock(&com_err_hook_lock);
@@ -171,8 +168,7 @@ et_old_error_hook_func reset_com_err_hook ()
/* Broken initialization? What can we do? */
if (com_err_finish_init() != 0)
abort();
- if (com_err_lock_hook_handle() != 0)
- abort();
+ com_err_lock_hook_handle();
x = com_err_hook;
com_err_hook = NULL;
k5_mutex_unlock(&com_err_hook_lock);
diff --git a/src/util/et/error_message.c b/src/util/et/error_message.c
index 01f2c03cab..50ede704d7 100644
--- a/src/util/et/error_message.c
+++ b/src/util/et/error_message.c
@@ -75,8 +75,7 @@ void com_err_terminate(void)
#endif
k5_key_delete(K5_KEY_COM_ERR);
k5_mutex_destroy(&com_err_hook_lock);
- if (k5_mutex_lock(&et_list_lock) != 0)
- return;
+ k5_mutex_lock(&et_list_lock);
for (e = et_list; e; e = enext) {
enext = e->next;
free(e);
@@ -121,7 +120,6 @@ error_message(long code)
unsigned int divisor = 100;
char *cp, *cp1;
const struct error_table *table;
- int merr;
l_offset = (unsigned long)code & ((1<<ERRCODE_RANGE)-1);
offset = l_offset;
@@ -159,9 +157,7 @@ error_message(long code)
if (CALL_INIT_FUNCTION(com_err_initialize))
return 0;
- merr = k5_mutex_lock(&et_list_lock);
- if (merr)
- goto oops;
+ k5_mutex_lock(&et_list_lock);
dprintf(("scanning list for %x\n", table_num));
for (e = et_list; e != NULL; e = e->next) {
dprintf(("\t%x = %s\n", e->table->base & ERRCODE_MAX,
@@ -276,7 +272,6 @@ errcode_t KRB5_CALLCONV
add_error_table(const struct error_table *et)
{
struct et_list *e;
- int merr;
if (CALL_INIT_FUNCTION(com_err_initialize))
return 0;
@@ -287,11 +282,7 @@ add_error_table(const struct error_table *et)
e->table = et;
- merr = k5_mutex_lock(&et_list_lock);
- if (merr) {
- free(e);
- return merr;
- }
+ k5_mutex_lock(&et_list_lock);
e->next = et_list;
et_list = e;
@@ -300,20 +291,18 @@ add_error_table(const struct error_table *et)
if (et->msgs[et->n_msgs] != NULL && et->msgs[et->n_msgs + 1] != NULL)
bindtextdomain(et->msgs[et->n_msgs], et->msgs[et->n_msgs + 1]);
- return k5_mutex_unlock(&et_list_lock);
+ k5_mutex_unlock(&et_list_lock);
+ return 0;
}
errcode_t KRB5_CALLCONV
remove_error_table(const struct error_table *et)
{
struct et_list **ep, *e;
- int merr;
if (CALL_INIT_FUNCTION(com_err_initialize))
return 0;
- merr = k5_mutex_lock(&et_list_lock);
- if (merr)
- return merr;
+ k5_mutex_lock(&et_list_lock);
/* Remove the entry that matches the error table instance. */
for (ep = &et_list; *ep; ep = &(*ep)->next) {
@@ -321,7 +310,8 @@ remove_error_table(const struct error_table *et)
e = *ep;
*ep = e->next;
free(e);
- return k5_mutex_unlock(&et_list_lock);
+ k5_mutex_unlock(&et_list_lock);
+ return 0;
}
}
k5_mutex_unlock(&et_list_lock);
diff --git a/src/util/profile/prof_file.c b/src/util/profile/prof_file.c
index b0bb087ebd..76411db8ab 100644
--- a/src/util/profile/prof_file.c
+++ b/src/util/profile/prof_file.c
@@ -239,13 +239,7 @@ errcode_t profile_open_file(const_profile_filespec_t filespec,
return ENOMEM;
}
- retval = k5_mutex_lock(&g_shared_trees_mutex);
- if (retval) {
- free(expanded_filename);
- free(prf);
- scan_shared_trees_unlocked();
- return retval;
- }
+ k5_mutex_lock(&g_shared_trees_mutex);
scan_shared_trees_locked();
for (data = g_shared_trees; data; data = data->next) {
if (!strcmp(data->filespec, expanded_filename)
@@ -255,7 +249,7 @@ errcode_t profile_open_file(const_profile_filespec_t filespec,
}
if (data) {
data->refcount++;
- (void) k5_mutex_unlock(&g_shared_trees_mutex);
+ k5_mutex_unlock(&g_shared_trees_mutex);
retval = profile_update_file_data(data, NULL);
free(expanded_filename);
if (retval) {
@@ -268,7 +262,7 @@ errcode_t profile_open_file(const_profile_filespec_t filespec,
scan_shared_trees_unlocked();
return 0;
}
- (void) k5_mutex_unlock(&g_shared_trees_mutex);
+ k5_mutex_unlock(&g_shared_trees_mutex);
data = profile_make_prf_data(expanded_filename);
if (data == NULL) {
free(prf);
@@ -291,18 +285,13 @@ errcode_t profile_open_file(const_profile_filespec_t filespec,
return retval;
}
- retval = k5_mutex_lock(&g_shared_trees_mutex);
- if (retval) {
- profile_close_file(prf);
- scan_shared_trees_unlocked();
- return retval;
- }
+ k5_mutex_lock(&g_shared_trees_mutex);
scan_shared_trees_locked();
data->flags |= PROFILE_FILE_SHARED;
data->next = g_shared_trees;
g_shared_trees = data;
scan_shared_trees_locked();
- (void) k5_mutex_unlock(&g_shared_trees_mutex);
+ k5_mutex_unlock(&g_shared_trees_mutex);
*ret_prof = prf;
return 0;
@@ -381,14 +370,12 @@ errcode_t profile_update_file_data_locked(prf_data_t data, char **ret_modspec)
errcode_t profile_update_file_data(prf_data_t data, char **ret_modspec)
{
- errcode_t retval, retval2;
+ errcode_t retval;
- retval = k5_mutex_lock(&data->lock);
- if (retval)
- return retval;
+ k5_mutex_lock(&data->lock);
retval = profile_update_file_data_locked(data, ret_modspec);
- retval2 = k5_mutex_unlock(&data->lock);
- return retval ? retval : retval2;
+ k5_mutex_unlock(&data->lock);
+ return retval;
}
static int
@@ -487,9 +474,8 @@ errout:
errcode_t profile_flush_file_data_to_buffer (prf_data_t data, char **bufp)
{
errcode_t retval;
- retval = k5_mutex_lock(&data->lock);
- if (retval)
- return retval;
+
+ k5_mutex_lock(&data->lock);
retval = profile_write_tree_to_buffer(data->root, bufp);
k5_mutex_unlock(&data->lock);
return retval;
@@ -502,9 +488,7 @@ errcode_t profile_flush_file_data(prf_data_t data)
if (!data || data->magic != PROF_MAGIC_FILE_DATA)
return PROF_MAGIC_FILE_DATA;
- retval = k5_mutex_lock(&data->lock);
- if (retval)
- return retval;
+ k5_mutex_lock(&data->lock);
if ((data->flags & PROFILE_FILE_DIRTY) == 0) {
k5_mutex_unlock(&data->lock);
@@ -523,9 +507,7 @@ errcode_t profile_flush_file_data_to_file(prf_data_t data, const char *outfile)
if (!data || data->magic != PROF_MAGIC_FILE_DATA)
return PROF_MAGIC_FILE_DATA;
- retval = k5_mutex_lock(&data->lock);
- if (retval)
- return retval;
+ k5_mutex_lock(&data->lock);
retval = write_data_to_file(data, outfile, 1);
k5_mutex_unlock(&data->lock);
return retval;
@@ -535,12 +517,9 @@ errcode_t profile_flush_file_data_to_file(prf_data_t data, const char *outfile)
void profile_dereference_data(prf_data_t data)
{
- int err;
- err = k5_mutex_lock(&g_shared_trees_mutex);
- if (err)
- return;
+ k5_mutex_lock(&g_shared_trees_mutex);
profile_dereference_data_locked(data);
- (void) k5_mutex_unlock(&g_shared_trees_mutex);
+ k5_mutex_unlock(&g_shared_trees_mutex);
}
void profile_dereference_data_locked(prf_data_t data)
{
@@ -551,13 +530,13 @@ void profile_dereference_data_locked(prf_data_t data)
scan_shared_trees_locked();
}
-int profile_lock_global()
+void profile_lock_global()
{
- return k5_mutex_lock(&g_shared_trees_mutex);
+ k5_mutex_lock(&g_shared_trees_mutex);
}
-int profile_unlock_global()
+void profile_unlock_global()
{
- return k5_mutex_unlock(&g_shared_trees_mutex);
+ k5_mutex_unlock(&g_shared_trees_mutex);
}
void profile_free_file(prf_file_t prf)
diff --git a/src/util/profile/prof_init.c b/src/util/profile/prof_init.c
index 5b2bb22261..4e3d84ee67 100644
--- a/src/util/profile/prof_init.c
+++ b/src/util/profile/prof_init.c
@@ -278,13 +278,7 @@ copy_vtable_profile(profile_t profile, profile_t *ret_new_profile)
/* Increment the refcount on the library handle if there is one. */
if (profile->lib_handle) {
- err = k5_mutex_lock(&profile->lib_handle->lock);
- if (err) {
- /* Don't decrement the refcount we failed to increment. */
- new_profile->lib_handle = NULL;
- profile_abandon(new_profile);
- return err;
- }
+ k5_mutex_lock(&profile->lib_handle->lock);
profile->lib_handle->refcount++;
k5_mutex_unlock(&profile->lib_handle->lock);
}
@@ -479,7 +473,6 @@ void KRB5_CALLCONV
profile_abandon(profile_t profile)
{
prf_file_t p, next;
- errcode_t err;
if (!profile || profile->magic != PROF_MAGIC_PROFILE)
return;
@@ -489,13 +482,13 @@ profile_abandon(profile_t profile)
profile->vt->cleanup(profile->cbdata);
if (profile->lib_handle) {
/* Decrement the refcount on the handle and maybe free it. */
- err = k5_mutex_lock(&profile->lib_handle->lock);
- if (!err && --profile->lib_handle->refcount == 0) {
+ k5_mutex_lock(&profile->lib_handle->lock);
+ if (--profile->lib_handle->refcount == 0) {
krb5int_close_plugin(profile->lib_handle->plugin_handle);
k5_mutex_unlock(&profile->lib_handle->lock);
k5_mutex_destroy(&profile->lib_handle->lock);
free(profile->lib_handle);
- } else if (!err)
+ } else
k5_mutex_unlock(&profile->lib_handle->lock);
}
free(profile->vt);
diff --git a/src/util/profile/prof_int.h b/src/util/profile/prof_int.h
index da1b937f10..6700b50760 100644
--- a/src/util/profile/prof_int.h
+++ b/src/util/profile/prof_int.h
@@ -244,8 +244,8 @@ int profile_file_is_writable
void profile_dereference_data (prf_data_t);
void profile_dereference_data_locked (prf_data_t);
-int profile_lock_global (void);
-int profile_unlock_global (void);
+void profile_lock_global (void);
+void profile_unlock_global (void);
/* prof_init.c -- included from profile.h */
errcode_t profile_ser_size
diff --git a/src/util/profile/prof_set.c b/src/util/profile/prof_set.c
index 52e5b0519c..b21023681b 100644
--- a/src/util/profile/prof_set.c
+++ b/src/util/profile/prof_set.c
@@ -34,9 +34,7 @@ static errcode_t rw_setup(profile_t profile)
file = profile->first_file;
- retval = profile_lock_global();
- if (retval)
- return retval;
+ profile_lock_global();
/* Don't update the file if we've already made modifications */
if (file->data->flags & PROFILE_FILE_DIRTY) {
@@ -106,9 +104,7 @@ profile_update_relation(profile_t profile, const char **names,
if (!old_value || !*old_value)
return PROF_EINVAL;
- retval = k5_mutex_lock(&profile->first_file->data->lock);
- if (retval)
- return retval;
+ k5_mutex_lock(&profile->first_file->data->lock);
section = profile->first_file->data->root;
for (cpp = names; cpp[1]; cpp++) {
state = 0;
@@ -214,9 +210,7 @@ profile_rename_section(profile_t profile, const char **names,
if (names == 0 || names[0] == 0 || names[1] == 0)
return PROF_BAD_NAMESET;
- retval = k5_mutex_lock(&profile->first_file->data->lock);
- if (retval)
- return retval;
+ k5_mutex_lock(&profile->first_file->data->lock);
section = profile->first_file->data->root;
for (cpp = names; cpp[1]; cpp++) {
state = 0;
@@ -273,9 +267,7 @@ profile_add_relation(profile_t profile, const char **names,
if (names == 0 || names[0] == 0 || names[1] == 0)
return PROF_BAD_NAMESET;
- retval = k5_mutex_lock(&profile->first_file->data->lock);
- if (retval)
- return retval;
+ k5_mutex_lock(&profile->first_file->data->lock);
section = profile->first_file->data->root;
for (cpp = names; cpp[1]; cpp++) {
state = 0;
diff --git a/src/util/profile/prof_tree.c b/src/util/profile/prof_tree.c
index 4543441883..cc5bdfd80b 100644
--- a/src/util/profile/prof_tree.c
+++ b/src/util/profile/prof_tree.c
@@ -468,11 +468,8 @@ errcode_t profile_node_iterator(void **iter_p,
* If the file has changed, then the node pointer is invalid,
* so we'll have search the file again looking for it.
*/
- if (iter->file) {
- retval = k5_mutex_lock(&iter->file->data->lock);
- if (retval)
- return retval;
- }
+ if (iter->file)
+ k5_mutex_lock(&iter->file->data->lock);
if (iter->node && (iter->file->data->upd_serial != iter->file_serial)) {
iter->flags &= ~PROFILE_ITER_FINAL_SEEN;
skip_num = iter->num;
@@ -503,13 +500,8 @@ get_new_file:
if (retval == ENOENT || retval == EACCES) {
/* XXX memory leak? */
iter->file = iter->file->next;
- if (iter->file) {
- retval = k5_mutex_lock(&iter->file->data->lock);
- if (retval) {
- profile_node_iterator_free(iter_p);
- return retval;
- }
- }
+ if (iter->file)
+ k5_mutex_lock(&iter->file->data->lock);
skip_num = 0;
retval = 0;
goto get_new_file;
@@ -541,13 +533,8 @@ get_new_file:
if (!section) {
k5_mutex_unlock(&iter->file->data->lock);
iter->file = iter->file->next;
- if (iter->file) {
- retval = k5_mutex_lock(&iter->file->data->lock);
- if (retval) {
- profile_node_iterator_free(iter_p);
- return retval;
- }
- }
+ if (iter->file)
+ k5_mutex_lock(&iter->file->data->lock);
skip_num = 0;
goto get_new_file;
}
@@ -579,13 +566,8 @@ get_new_file:
if (!p) {
k5_mutex_unlock(&iter->file->data->lock);
iter->file = iter->file->next;
- if (iter->file) {
- retval = k5_mutex_lock(&iter->file->data->lock);
- if (retval) {
- profile_node_iterator_free(iter_p);
- return retval;
- }
- }
+ if (iter->file)
+ k5_mutex_lock(&iter->file->data->lock);
iter->node = 0;
skip_num = 0;
goto get_new_file;
diff --git a/src/util/support/errors.c b/src/util/support/errors.c
index 1c13a4aa63..d820873e28 100644
--- a/src/util/support/errors.c
+++ b/src/util/support/errors.c
@@ -108,9 +108,10 @@ k5_get_error(struct errinfo *ep, long code)
if (code == ep->code && ep->msg != NULL)
return oom_check(strdup(ep->msg));
- if (initialize() || lock())
+ if (initialize())
return oom_check(strdup(_("Kerberos library initialization failure")));
+ lock();
if (fptr == NULL) {
unlock();
#ifdef HAVE_STRERROR_R
@@ -153,8 +154,7 @@ void
k5_set_error_info_callout_fn(const char *(KRB5_CALLCONV *f)(long))
{
initialize();
- if (lock() == 0) {
- fptr = f;
- unlock();
- }
+ lock();
+ fptr = f;
+ unlock();
}
diff --git a/src/util/support/threads.c b/src/util/support/threads.c
index 4370c0589d..a97789624e 100644
--- a/src/util/support/threads.c
+++ b/src/util/support/threads.c
@@ -180,35 +180,36 @@ static void thread_termination(void *);
static void thread_termination (void *tptr)
{
- int err = k5_mutex_lock(&key_lock);
- if (err == 0) {
- int i, pass, none_found;
- struct tsd_block *t = tptr;
-
- /* Make multiple passes in case, for example, a libkrb5 cleanup
- function wants to print out an error message, which causes
- com_err to allocate a thread-specific buffer, after we just
- freed up the old one.
-
- Shouldn't actually happen, if we're careful, but check just in
- case. */
-
- pass = 0;
- none_found = 0;
- while (pass < 4 && !none_found) {
- none_found = 1;
- for (i = 0; i < K5_KEY_MAX; i++) {
- if (destructors_set[i] && destructors[i] && t->values[i]) {
- void *v = t->values[i];
- t->values[i] = 0;
- (*destructors[i])(v);
- none_found = 0;
- }
+ int i, pass, none_found;
+ struct tsd_block *t = tptr;
+
+ k5_mutex_lock(&key_lock);
+
+ /*
+ * Make multiple passes in case, for example, a libkrb5 cleanup
+ * function wants to print out an error message, which causes
+ * com_err to allocate a thread-specific buffer, after we just
+ * freed up the old one.
+ *
+ * Shouldn't actually happen, if we're careful, but check just in
+ * case.
+ */
+
+ pass = 0;
+ none_found = 0;
+ while (pass < 4 && !none_found) {
+ none_found = 1;
+ for (i = 0; i < K5_KEY_MAX; i++) {
+ if (destructors_set[i] && destructors[i] && t->values[i]) {
+ void *v = t->values[i];
+ t->values[i] = 0;
+ (*destructors[i])(v);
+ none_found = 0;
}
}
- free (t);
- err = k5_mutex_unlock(&key_lock);
}
+ free (t);
+ k5_mutex_unlock(&key_lock);
/* remove thread from global linked list */
}
@@ -328,7 +329,6 @@ int k5_key_register (k5_key_t keynum, void (*destructor)(void *))
assert(destructors_set[keynum] == 0);
destructors[keynum] = destructor;
destructors_set[keynum] = 1;
- err = 0;
#elif defined(_WIN32)
@@ -338,17 +338,14 @@ int k5_key_register (k5_key_t keynum, void (*destructor)(void *))
destructors_set[keynum] = 1;
destructors[keynum] = destructor;
LeaveCriticalSection(&key_lock);
- err = 0;
#else /* POSIX */
- err = k5_mutex_lock(&key_lock);
- if (err == 0) {
- assert(destructors_set[keynum] == 0);
- destructors_set[keynum] = 1;
- destructors[keynum] = destructor;
- err = k5_mutex_unlock(&key_lock);
- }
+ k5_mutex_lock(&key_lock);
+ assert(destructors_set[keynum] == 0);
+ destructors_set[keynum] = 1;
+ destructors[keynum] = destructor;
+ k5_mutex_unlock(&key_lock);
#endif
return 0;
@@ -381,21 +378,12 @@ int k5_key_delete (k5_key_t keynum)
#else /* POSIX */
- {
- int err;
-
- /* XXX RESOURCE LEAK:
-
- Need to destroy the allocated objects first! */
-
- err = k5_mutex_lock(&key_lock);
- if (err == 0) {
- assert(destructors_set[keynum] == 1);
- destructors_set[keynum] = 0;
- destructors[keynum] = NULL;
- k5_mutex_unlock(&key_lock);
- }
- }
+ /* XXX RESOURCE LEAK: Need to destroy the allocated objects first! */
+ k5_mutex_lock(&key_lock);
+ assert(destructors_set[keynum] == 1);
+ destructors_set[keynum] = 0;
+ destructors[keynum] = NULL;
+ k5_mutex_unlock(&key_lock);
#endif
@@ -512,13 +500,13 @@ krb5int_mutex_free (k5_mutex_t *m)
}
/* Callable versions of the various macros. */
-int KRB5_CALLCONV
+void KRB5_CALLCONV
krb5int_mutex_lock (k5_mutex_t *m)
{
- return k5_mutex_lock (m);
+ k5_mutex_lock (m);
}
-int KRB5_CALLCONV
+void KRB5_CALLCONV
krb5int_mutex_unlock (k5_mutex_t *m)
{
- return k5_mutex_unlock (m);
+ k5_mutex_unlock (m);
}