path: root/ldap/servers/slapd/slapi_counter.c
/** BEGIN COPYRIGHT BLOCK
 * This Program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; version 2 of the License.
 * 
 * This Program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License along with
 * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place, Suite 330, Boston, MA 02111-1307 USA.
 * 
 * In addition, as a special exception, Red Hat, Inc. gives You the additional
 * right to link the code of this Program with code not covered under the GNU
 * General Public License ("Non-GPL Code") and to distribute linked combinations
 * including the two, subject to the limitations in this paragraph. Non-GPL Code
 * permitted under this exception must only link to the code of this Program
 * through those well defined interfaces identified in the file named EXCEPTION
 * found in the source code files (the "Approved Interfaces"). The files of
 * Non-GPL Code may instantiate templates or use macros or inline functions from
 * the Approved Interfaces without causing the resulting work to be covered by
 * the GNU General Public License. Only Red Hat, Inc. may make changes or
 * additions to the list of Approved Interfaces. You must obey the GNU General
 * Public License in all respects for all of the Program code and other code used
 * in conjunction with the Program except the Non-GPL Code covered by this
 * exception. If you modify this file, you may extend this exception to your
 * version of the file, but you are not obligated to do so. If you do not wish to
 * provide this exception without modification, you must delete this exception
 * statement from your version and license this file solely under the GPL without
 * exception. 
 * 
 * 
 * Copyright (C) 2008 Red Hat, Inc.
 * All rights reserved.
 * END COPYRIGHT BLOCK **/

#ifdef HAVE_CONFIG_H
#  include <config.h>
#endif

#include "slap.h"

#ifdef SOLARIS
PRUint64 _sparcv9_AtomicSet(PRUint64 *address, PRUint64 newval);
PRUint64 _sparcv9_AtomicAdd(PRUint64 *address, PRUint64 val);
PRUint64 _sparcv9_AtomicSub(PRUint64 *address, PRUint64 val);
#endif

#ifdef HPUX
#ifdef ATOMIC_64BIT_OPERATIONS
#include <machine/sys/inline.h>
#endif
#endif

#ifdef ATOMIC_64BIT_OPERATIONS
#if defined LINUX && (defined CPU_x86 || !HAVE_DECL___SYNC_ADD_AND_FETCH)
/* On systems that don't have the 64-bit GCC atomic builtins, we need to
 * implement our own atomic functions using inline assembly code. */
PRUint64 __sync_add_and_fetch_8(PRUint64 *ptr, PRUint64 addval);
PRUint64 __sync_sub_and_fetch_8(PRUint64 *ptr, PRUint64 subval);
#endif

#if defined LINUX && !HAVE_DECL___SYNC_ADD_AND_FETCH
/* Systems that have the atomic builtins defined, but don't have
 * implementations for 64-bit values will automatically try to
 * call the __sync_*_8 versions we provide.  If the atomic builtins
 * are not defined at all, we define them here to use our local
 * functions. */
#define __sync_add_and_fetch __sync_add_and_fetch_8
#define __sync_sub_and_fetch __sync_sub_and_fetch_8
#endif
#endif /* ATOMIC_64BIT_OPERATIONS */
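
/*
 * With the mapping above, a call such as
 *
 *     __sync_add_and_fetch(&(counter->value), 1);
 *
 * resolves to __sync_add_and_fetch_8(&(counter->value), 1), i.e. to the
 * inline-assembly implementation at the bottom of this file.  (Illustrative
 * note: the mapping is only in effect when the GCC builtins are not
 * declared at all.)
 */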


/*
 * Counter Structure
 */
typedef struct slapi_counter {
    PRUint64 value;
#ifndef ATOMIC_64BIT_OPERATIONS
    Slapi_Mutex *lock;
#endif
} slapi_counter;
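
/*
 * Typical lifecycle of a counter, shown as an illustrative sketch that
 * uses only the public functions defined in this file:
 *
 *     Slapi_Counter *ops = slapi_counter_new();
 *     slapi_counter_increment(ops);              (value is now 1)
 *     slapi_counter_add(ops, 41);                (value is now 42)
 *     PRUint64 current = slapi_counter_get_value(ops);
 *     slapi_counter_destroy(&ops);               (frees ops and NULLs it)
 */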

/*
 * slapi_counter_new()
 *
 * Allocates and initializes a new Slapi_Counter.
 */
Slapi_Counter *slapi_counter_new()
{
    Slapi_Counter *counter = NULL;

    counter = (Slapi_Counter *)slapi_ch_calloc(1, sizeof(Slapi_Counter));

    if (counter != NULL) {
        slapi_counter_init(counter);
    }

    return counter;
}

/*
 * slapi_counter_init()
 *
 * Initializes a Slapi_Counter.
 */
void slapi_counter_init(Slapi_Counter *counter)
{
    if (counter != NULL) {
#ifndef ATOMIC_64BIT_OPERATIONS
        /* Create the lock if necessary. */
        if (counter->lock == NULL) {
            counter->lock = slapi_new_mutex();
        }
#endif
        
        /* Set the value to 0. */
        slapi_counter_set_value(counter, 0);
    }
}

/*
 * slapi_counter_destroy()
 *
 * Destroys a Slapi_Counter and sets the
 * pointer to NULL to prevent reuse.
 */
void slapi_counter_destroy(Slapi_Counter **counter)
{
    if ((counter != NULL) && (*counter != NULL)) {
#ifndef ATOMIC_64BIT_OPERATIONS
        slapi_destroy_mutex((*counter)->lock);
#endif
        slapi_ch_free((void **)counter);
    }
}

/*
 * slapi_counter_increment()
 *
 * Atomically increments a Slapi_Counter.
 */
PRUint64 slapi_counter_increment(Slapi_Counter *counter)
{
    return slapi_counter_add(counter, 1);
}

/*
 * slapi_counter_decrement()
 *
 * Atomically decrements a Slapi_Counter. Note
 * that this will not prevent you from wrapping
 * around 0.
 */
PRUint64 slapi_counter_decrement(Slapi_Counter *counter)
{
    return slapi_counter_subtract(counter, 1);
}
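
/*
 * Because the value is an unsigned 64-bit integer, decrementing a counter
 * that is already at 0 wraps around.  An illustrative example:
 *
 *     Slapi_Counter *c = slapi_counter_new();    (value is 0)
 *     slapi_counter_decrement(c);                (value is now 2^64 - 1)
 */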

/*
 * slapi_counter_add()
 *
 * Atomically add a value to a Slapi_Counter.
 */
PRUint64 slapi_counter_add(Slapi_Counter *counter, PRUint64 addvalue)
{
    PRUint64 newvalue = 0;
#ifdef HPUX
    PRUint64 prev = 0;
#endif

    if (counter == NULL) {
        return newvalue;
    }

#ifndef ATOMIC_64BIT_OPERATIONS
    slapi_lock_mutex(counter->lock);
    counter->value += addvalue;
    newvalue = counter->value;
    slapi_unlock_mutex(counter->lock);
#else
#ifdef LINUX
    newvalue = __sync_add_and_fetch(&(counter->value), addvalue);
#elif defined(SOLARIS)
    newvalue = _sparcv9_AtomicAdd(&(counter->value), addvalue);
#elif defined(HPUX)
    /* fetchadd only works with values of 1, 4, 8, and 16.  In addition, it requires
     * its argument to be an integer constant. */
    if (addvalue == 1) {
        newvalue = _Asm_fetchadd(_FASZ_D, _SEM_ACQ, &(counter->value), 1, _LDHINT_NONE);
        newvalue += 1;
    } else if (addvalue == 4) {
        newvalue = _Asm_fetchadd(_FASZ_D, _SEM_ACQ, &(counter->value), 4, _LDHINT_NONE);
        newvalue += 4;
    } else if (addvalue == 8) {
        newvalue = _Asm_fetchadd(_FASZ_D, _SEM_ACQ, &(counter->value), 8, _LDHINT_NONE);
        newvalue += 8;
    } else if (addvalue == 16) {
        newvalue = _Asm_fetchadd(_FASZ_D, _SEM_ACQ, &(counter->value), 16, _LDHINT_NONE);
        newvalue += 16;
    } else {
        /* For other values, we have to use cmpxchg. */
        do {
            prev = slapi_counter_get_value(counter);
            newvalue = prev + addvalue;
            /* Put prev in a register for cmpxchg to compare against */
            _Asm_mov_to_ar(_AREG_CCV, prev);
        } while (prev != _Asm_cmpxchg(_FASZ_D, _SEM_ACQ, &(counter->value), newvalue, _LDHINT_NONE));
    }
#endif
#endif /* ATOMIC_64BIT_OPERATIONS */

    return newvalue;
}

/*
 * slapi_counter_subtract()
 *
 * Atomically subtract a value from a Slapi_Counter.  Note
 * that this will not prevent you from wrapping around 0.
 */
PRUint64 slapi_counter_subtract(Slapi_Counter *counter, PRUint64 subvalue)
{
    PRUint64 newvalue = 0;
#ifdef HPUX
    PRUint64 prev = 0;
#endif

    if (counter == NULL) {
        return newvalue;
    }

#ifndef ATOMIC_64BIT_OPERATIONS
    slapi_lock_mutex(counter->lock);
    counter->value -= subvalue;
    newvalue = counter->value;
    slapi_unlock_mutex(counter->lock);
#else
#ifdef LINUX
    newvalue = __sync_sub_and_fetch(&(counter->value), subvalue);
#elif defined(SOLARIS)
    newvalue = _sparcv9_AtomicSub(&(counter->value), subvalue);
#elif defined(HPUX)
    /* fetchadd only works with values of -1, -4, -8, and -16.  In addition, it requires
     * its argument to be an integer constant. */
    if (subvalue == 1) {
        newvalue = _Asm_fetchadd(_FASZ_D, _SEM_ACQ, &(counter->value), -1, _LDHINT_NONE);
        newvalue -= 1;
    } else if (subvalue == 4) {
        newvalue = _Asm_fetchadd(_FASZ_D, _SEM_ACQ, &(counter->value), -4, _LDHINT_NONE);
        newvalue -= 4;
    } else if (subvalue == 8) {
        newvalue = _Asm_fetchadd(_FASZ_D, _SEM_ACQ, &(counter->value), -8, _LDHINT_NONE);
        newvalue -= 8;
    } else if (subvalue == 16) {
        newvalue = _Asm_fetchadd(_FASZ_D, _SEM_ACQ, &(counter->value), -16, _LDHINT_NONE);
        newvalue -= 16;
    } else {
        /* For other values, we have to use cmpxchg. */
        do {
            prev = slapi_counter_get_value(counter);
            newvalue = prev - subvalue;
            /* Put prev in a register for cmpxchg to compare against */
            _Asm_mov_to_ar(_AREG_CCV, prev);
        } while (prev != _Asm_cmpxchg(_FASZ_D, _SEM_ACQ, &(counter->value), newvalue, _LDHINT_NONE));
    }
#endif
#endif /* ATOMIC_64BIT_OPERATIONS */

    return newvalue;
}

/*
 * slapi_counter_set_value()
 *
 * Atomically sets the value of a Slapi_Counter.
 */
PRUint64 slapi_counter_set_value(Slapi_Counter *counter, PRUint64 newvalue)
{
    PRUint64 value = 0;

    if (counter == NULL) {
        return value;
    }

#ifndef ATOMIC_64BIT_OPERATIONS
    slapi_lock_mutex(counter->lock);
    counter->value = newvalue;
    slapi_unlock_mutex(counter->lock);
    return newvalue;
#else
#ifdef LINUX
/* Use our own inline assembly for an atomic set if
 * the builtins aren't available. */
#if defined CPU_x86 || !HAVE_DECL___SYNC_ADD_AND_FETCH
    /*
     * %0 = counter->value
     * %1 = newvalue
     */
    __asm__ __volatile__(
#ifdef CPU_x86
        /* Save the PIC register */
        " pushl %%ebx;"
#endif /* CPU_x86 */
        /* Put value of counter->value in EDX:EAX */
        "retryset: movl %0, %%eax;"
        " movl 4%0, %%edx;"
        /* Put newval in ECX:EBX */
        " movl %1, %%ebx;"
        " movl 4+%1, %%ecx;"
        /* If EDX:EAX and counter-> are the same,
         * replace *ptr with ECX:EBX */
        " lock; cmpxchg8b %0;"
        " jnz retryset;"
#ifdef CPU_x86
        /* Restore the PIC register */
        " popl %%ebx"
#endif /* CPU_x86 */
        : "+o" (counter->value)
        : "m" (newvalue)
#ifdef CPU_x86
        : "memory", "eax", "ecx", "edx", "cc");
#else
        : "memory", "eax", "ebx", "ecx", "edx", "cc");
#endif

    return newvalue;
#else
    while (1) {
        value = counter->value;
        if (__sync_bool_compare_and_swap(&(counter->value), value, newvalue)) {
            return newvalue;
        }
    }
#endif /* CPU_x86 || !HAVE_DECL___SYNC_ADD_AND_FETCH */
#elif defined(SOLARIS)
    _sparcv9_AtomicSet(&(counter->value), newvalue);
    return newvalue;
#elif defined(HPUX)
    do {
        value = counter->value;
        /* Put value in a register for cmpxchg to compare against */
        _Asm_mov_to_ar(_AREG_CCV, value);
    } while (value != _Asm_cmpxchg(_FASZ_D, _SEM_ACQ, &(counter->value), newvalue, _LDHINT_NONE));
    return newvalue;
#endif
#endif /* ATOMIC_64BIT_OPERATIONS */
}

/*
 * slapi_counter_get_value()
 *
 * Returns the value of a Slapi_Counter.
 */
PRUint64 slapi_counter_get_value(Slapi_Counter *counter)
{
    PRUint64 value = 0;

    if (counter == NULL) {
        return value;
    }

#ifndef ATOMIC_64BIT_OPERATIONS
    slapi_lock_mutex(counter->lock);
    value = counter->value;
    slapi_unlock_mutex(counter->lock);
#else
#ifdef LINUX
/* Use our own inline assembly for an atomic get if
 * the builtins aren't available. */
#if defined CPU_x86 || !HAVE_DECL___SYNC_ADD_AND_FETCH
    /*
     * %0 = counter->value
     * %1 = value
     */
    __asm__ __volatile__(
#ifdef CPU_x86
        /* Save the PIC register */
        " pushl %%ebx;"
#endif /* CPU_x86 */
        /* Put value of counter->value in EDX:EAX */
        "retryget: movl %0, %%eax;"
        " movl 4%0, %%edx;"
        /* Copy EDX:EAX to ECX:EBX */
        " movl %%eax, %%ebx;"
        " movl %%edx, %%ecx;"
        /* If EDX:EAX and counter->value are the same,
         * replace *ptr with ECX:EBX */
        " lock; cmpxchg8b %0;"
        " jnz retryget;"
        /* Put retreived value into value */
        " movl %%ebx, %1;"
        " movl %%ecx, 4%1;"
#ifdef CPU_x86
        /* Restore the PIC register */
        " popl %%ebx"
#endif /* CPU_x86 */
        : "+o" (counter->value), "=m" (value)
        : 
#ifdef CPU_x86
        : "memory", "eax", "ecx", "edx", "cc");
#else
        : "memory", "eax", "ebx", "ecx", "edx", "cc");
#endif
#else
    while (1) {
        value = counter->value;
        if (__sync_bool_compare_and_swap(&(counter->value), value, value)) {
            break;
        }
    }
#endif /* CPU_x86 || !HAVE_DECL___SYNC_ADD_AND_FETCH */
#elif defined(SOLARIS)
    while (1) {
        value = counter->value;
        if (value == _sparcv9_AtomicSet(&(counter->value), value)) {
            break;
        }
    }
#elif defined(HPUX)
    do {
        value = counter->value;
        /* Put value in a register for cmpxchg to compare against */
        _Asm_mov_to_ar(_AREG_CCV, value);
    } while (value != _Asm_cmpxchg(_FASZ_D, _SEM_ACQ, &(counter->value), value, _LDHINT_NONE));
#endif
#endif /* ATOMIC_64BIT_OPERATIONS */

    return value;
}
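
/*
 * A note on the Linux read path above: on 32-bit x86 a 64-bit load takes
 * two instructions and is therefore not atomic.  The code instead performs
 * a cmpxchg8b that swaps the current value with itself; the swap changes
 * nothing, but it returns an atomic snapshot of all 64 bits.  (Explanatory
 * note inferred from the assembly above.)
 */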

#ifdef ATOMIC_64BIT_OPERATIONS
#if defined LINUX && (defined CPU_x86 || !HAVE_DECL___SYNC_ADD_AND_FETCH)
/* On systems that don't have the 64-bit GCC atomic builtins, we need to
 * implement our own atomic add and subtract functions using inline
 * assembly code. */
PRUint64 __sync_add_and_fetch_8(PRUint64 *ptr, PRUint64 addval)
{
    PRUint64 retval = 0;

    /*
     * %0 = *ptr
     * %1 = retval
     * %2 = addval
     */
    __asm__ __volatile__(
#ifdef CPU_x86
        /* Save the PIC register */
        " pushl %%ebx;"
#endif /* CPU_x86 */
        /* Put value of *ptr in EDX:EAX */
        "retryadd: movl %0, %%eax;"
        " movl 4%0, %%edx;"
        /* Put addval in ECX:EBX */
        " movl %2, %%ebx;"
        " movl 4+%2, %%ecx;"
        /* Add value from EDX:EAX to value in ECX:EBX */
        " addl %%eax, %%ebx;"
        " adcl %%edx, %%ecx;"
        /* If EDX:EAX and *ptr are the same, replace ptr with ECX:EBX */
        " lock; cmpxchg8b %0;"
        " jnz retryadd;"
        /* Put new value into retval */
        " movl %%ebx, %1;"
        " movl %%ecx, 4%1;"
#ifdef CPU_x86
        /* Restore the PIC register */
        " popl %%ebx"
#endif /* CPU_x86 */
        : "+o" (*ptr), "=m" (retval)
        : "m" (addval)
#ifdef CPU_x86
        : "memory", "eax", "ecx", "edx", "cc");
#else
        : "memory", "eax", "ebx", "ecx", "edx", "cc");
#endif

    return retval;
}
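
/*
 * For reference, the assembly above is a compare-and-swap retry loop.  An
 * equivalent sketch in C-like pseudocode (illustrative only; a real C
 * version would need the very 64-bit atomic builtins this function is
 * standing in for):
 *
 *     do {
 *         oldval = *ptr;
 *         newval = oldval + addval;
 *     } while (cmpxchg8b(ptr, oldval, newval) fails);
 *     return newval;
 */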

PRUint64 __sync_sub_and_fetch_8(PRUint64 *ptr, PRUint64 subval)
{
    PRUint64 retval = 0;

    /*
     * %0 = *ptr
     * %1 = retval
     * %2 = subval
     */
    __asm__ __volatile__(
#ifdef CPU_x86
        /* Save the PIC register */
        " pushl %%ebx;"
#endif /* CPU_x86 */
        /* Put value of *ptr in EDX:EAX */
        "retrysub: movl %0, %%eax;"
        " movl 4%0, %%edx;"
        /* Copy EDX:EAX to ECX:EBX */
        " movl %%eax, %%ebx;"
        " movl %%edx, %%ecx;"
        /* Subtract subval from value in ECX:EBX */
        " subl %2, %%ebx;"
        " sbbl 4+%2, %%ecx;"
        /* If EDX:EAX and ptr are the same, replace *ptr with ECX:EBX */
        " lock; cmpxchg8b %0;"
        " jnz retrysub;"
        /* Put new value into retval */
        " movl %%ebx, %1;"
        " movl %%ecx, 4%1;"
#ifdef CPU_x86
        /* Restore the PIC register */
        " popl %%ebx"
#endif /* CPU_x86 */
        : "+o" (*ptr), "=m" (retval)
        : "m" (subval)
#ifdef CPU_x86 
        : "memory", "eax", "ecx", "edx", "cc");
#else
        : "memory", "eax", "ebx", "ecx", "edx", "cc");
#endif

    return retval;
}
#endif /* LINUX && (defined CPU_x86 || !HAVE_DECL___SYNC_ADD_AND_FETCH) */
#endif /* ATOMIC_64BIT_OPERATIONS */