diffstat
 source3/configure.in   |  8 ++++++++
 source3/tdb/spinlock.c | 45 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 53 insertions(+), 0 deletions(-)
diff --git a/source3/configure.in b/source3/configure.in
index bf161947401..15defbe8f12 100644
--- a/source3/configure.in
+++ b/source3/configure.in
@@ -2013,6 +2013,14 @@ if test x"$samba_cv_SYSCONF_SC_NGROUPS_MAX" = x"yes"; then
     AC_DEFINE(SYSCONF_SC_NGROUPS_MAX,1,[Whether sysconf(_SC_NGROUPS_MAX) is available])
 fi

+AC_CACHE_CHECK([for sysconf(_SC_NPROC_ONLN)],samba_cv_SYSCONF_SC_NPROC_ONLN,[
+AC_TRY_RUN([#include <unistd.h>
+main() { exit(sysconf(_SC_NPROC_ONLN) == -1 ? 1 : 0); }],
+samba_cv_SYSCONF_SC_NPROC_ONLN=yes,samba_cv_SYSCONF_SC_NPROC_ONLN=no,samba_cv_SYSCONF_SC_NPROC_ONLN=cross)])
+if test x"$samba_cv_SYSCONF_SC_NPROC_ONLN" = x"yes"; then
+    AC_DEFINE(SYSCONF_SC_NPROC_ONLN,1,[Whether sysconf(_SC_NPROC_ONLN) is available])
+fi
+
 AC_CACHE_CHECK([for root],samba_cv_HAVE_ROOT,[
 AC_TRY_RUN([main() { exit(getuid() != 0); }],
 samba_cv_HAVE_ROOT=yes,samba_cv_HAVE_ROOT=no,samba_cv_HAVE_ROOT=cross)])
diff --git a/source3/tdb/spinlock.c b/source3/tdb/spinlock.c
index 3fddeafb2c1..3b3ebefded3 100644
--- a/source3/tdb/spinlock.c
+++ b/source3/tdb/spinlock.c
@@ -143,6 +143,47 @@ static inline int __spin_is_locked(spinlock_t *lock)
 	return (*lock != 1);
 }

+#elif defined(MIPS_SPINLOCKS) && defined(sgi) && (_COMPILER_VERSION >= 730)
+
+/* Implement spinlocks on IRIX using the MIPSPro atomic fetch operations. See
+ * sync(3) for the details of the intrinsic operations.
+ *
+ * "sgi" and "_COMPILER_VERSION" are always defined by MIPSPro.
+ */
+
+#if defined(STANDALONE)
+
+/* MIPSPro 7.3 has "__inline" as an extension, but not "inline". */
+#define inline __inline
+
+#endif /* STANDALONE */
+
+/* Returns 0 if the lock is acquired, EBUSY otherwise. */
+static inline int __spin_trylock(spinlock_t *lock)
+{
+	unsigned int val;
+	val = __lock_test_and_set(lock, 1);
+	return val == 0 ? 0 : EBUSY;
+}
+
+static inline void __spin_unlock(spinlock_t *lock)
+{
+	__lock_release(lock);
+}
+
+static inline void __spin_lock_init(spinlock_t *lock)
+{
+	__lock_release(lock);
+}
+
+/* Returns 1 if the lock is held, 0 otherwise. */
+static inline int __spin_is_locked(spinlock_t *lock)
+{
+	unsigned int val;
+	val = __add_and_fetch(lock, 0);
+	return val;
+}
+
 #elif defined(MIPS_SPINLOCKS)

 static inline unsigned int load_linked(unsigned long addr)
@@ -221,7 +262,11 @@ static void yield_cpu(void)

 static int this_is_smp(void)
 {
+#if defined(HAVE_SYSCONF) && defined(SYSCONF_SC_NPROC_ONLN)
+	return (sysconf(_SC_NPROC_ONLN) > 1) ? 1 : 0;
+#else
 	return 0;
+#endif
 }

 /*