author     Miloslav Trmač <mitr@redhat.com>  2010-06-30 14:05:58 +0200
committer  Nikos Mavrogiannopoulos <nmav@gnutls.org>  2010-07-19 09:20:54 +0200
commit     15f3c611f8d31cc0fc973481ffa3aec420ed004a (patch)
tree       9367dfaa55bcc8e0b43bee396dd3792c93090772
parent     c455b175a7cfdbcb11fd3662abcd83d6c6599091 (diff)
Fix a race in ncr_data_set.
More than one thread could pass the if (... > max_data_size) test, leading to multiple executions of "data->data_size += get.data_size" and resulting in data->data_size > data->max_data_size. This is a minimal fix that ensures kernel data structure consistency, but the behavior won't look atomic from user space (two threads appending N and M bytes could result in N, M, or N+M more bytes). It relies on the assumption that reads and writes of size_t are atomic.
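
As a user-space illustration of the race (a hypothetical stand-alone sketch, not driver code; buf, data_size and append are made-up names standing in for data->data, data->data_size and the append path), two threads that both pass the size check before either one updates data_size can push the total past the maximum:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MAX_DATA_SIZE 100

static char buf[2 * MAX_DATA_SIZE];  /* oversized so the overflow is visible, not fatal */
static size_t data_size;             /* stands in for data->data_size */

static void *append(void *arg)
{
	size_t n = *(size_t *)arg;

	/* Pre-fix check: both threads may read the same old data_size and pass. */
	if (n + data_size > MAX_DATA_SIZE)
		return NULL;
	memset(&buf[data_size], 'x', n);
	data_size += n;               /* racy read-modify-write */
	return NULL;
}

int main(void)
{
	size_t n = 60;
	pthread_t a, b;

	pthread_create(&a, NULL, append, &n);
	pthread_create(&b, NULL, append, &n);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* With an unlucky interleaving data_size ends up at 120 > 100. */
	printf("data_size = %zu (max %d)\n", data_size, MAX_DATA_SIZE);
	return 0;
}

With the fix below, each thread snapshots the old size into offset, checks offset + get.data_size against max_data_size, and stores offset + get.data_size, so the stored size can never exceed the allocation even under such an interleaving, at the cost that one of the two appends may be overwritten by the other.
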
-rw-r--r--  ncr-data.c  9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/ncr-data.c b/ncr-data.c
index 93c69ac..6a45e4d 100644
--- a/ncr-data.c
+++ b/ncr-data.c
@@ -282,21 +282,24 @@ int ncr_data_set(struct list_sem_st* lst, void __user* arg)
}
data->data_size = get.data_size;
} else {
+ size_t offset;
+
+ offset = data->data_size;
/* get.data_size <= data->max_data_size, which is limited in
data_alloc(), so there is no integer overflow. */
- if (get.data_size+data->data_size > data->max_data_size) {
+ if (get.data_size+offset > data->max_data_size) {
err();
ret = -EINVAL;
goto cleanup;
}
if (get.data != NULL) {
- ret = copy_from_user(&data->data[data->data_size], get.data, get.data_size);
+ ret = copy_from_user(&data->data[offset], get.data, get.data_size);
if (unlikely(ret)) {
err();
goto cleanup;
}
}
- data->data_size += get.data_size;
+ data->data_size = offset + get.data_size;
}
ret = 0;