author    Greg Hudson <ghudson@mit.edu>    2014-02-17 00:18:41 -0500
committer Greg Hudson <ghudson@mit.edu>    2014-02-19 16:21:01 -0500
commit    26d874412983c4c9979a9f5e7bec51834ad4cda5 (patch)
tree      ef6e3d847157b18baf192d9e25006d66dcf61a4a
parent    5489cb326bc5b13c9dcb0f76228cc964dd9fdb5d (diff)
Use TAILQ macros instead of CIRCLEQ in libdb2
The optimizer in gcc 4.8.1 (but not the current gcc head revision) breaks
the queue.h CIRCLEQ macros, apparently due to an overzealous strict
aliasing deduction.  Use TAILQ macros in the libdb2 mpool code instead.

ticket: 7860
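For illustration, here is a minimal sketch (not taken from this patch) of the two
traversal patterns involved.  The struct and function names are made up; the queue
macros and fields (CIRCLEQ_ENTRY, cqh_first, cqe_next, TAILQ_ENTRY, tqh_first,
tqe_next) are the standard BSD <sys/queue.h> ones, assuming a queue.h that still
provides the CIRCLEQ family (glibc's does; libdb2 carries its own copy of the macros).

    #include <stddef.h>
    #include <sys/queue.h>      /* libdb2 ships its own copy of these macros */

    struct elem {
        CIRCLEQ_ENTRY(elem) cq;     /* circular-queue linkage */
        TAILQ_ENTRY(elem)   tq;     /* tail-queue linkage */
        int                 val;
    };

    CIRCLEQ_HEAD(chead, elem);
    TAILQ_HEAD(thead, elem);

    /*
     * CIRCLEQ traversal: the list is circular, so "end of list" is the head
     * structure itself.  The termination test compares a struct elem pointer
     * against the address of the head cast to void *, i.e. it relies on two
     * unrelated pointer types referring to the same storage -- the situation
     * a strict-aliasing optimizer is allowed to assume never occurs.
     */
    static int
    sum_circleq(struct chead *h)
    {
        struct elem *e;
        int sum = 0;

        for (e = h->cqh_first; e != (void *)h; e = e->cq.cqe_next)
            sum += e->val;
        return sum;
    }

    /*
     * TAILQ traversal: the list is NULL-terminated, so the loop only compares
     * and dereferences struct elem pointers.  No cross-type comparison is
     * needed, so there is nothing for the optimizer to misjudge.
     */
    static int
    sum_tailq(struct thead *h)
    {
        struct elem *e;
        int sum = 0;

        for (e = h->tqh_first; e != NULL; e = e->tq.tqe_next)
            sum += e->val;
        return sum;
    }

The mpool loops below follow the second form after the change: the walk ends at
NULL rather than at the address of the queue head, so no BKT pointer ever has to
compare equal to a pointer to the head structure.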
-rw-r--r--  src/plugins/kdb/db2/libdb2/mpool/mpool.c  |  43
-rw-r--r--  src/plugins/kdb/db2/libdb2/mpool/mpool.h  |   8
2 files changed, 24 insertions, 27 deletions
diff --git a/src/plugins/kdb/db2/libdb2/mpool/mpool.c b/src/plugins/kdb/db2/libdb2/mpool/mpool.c
index acdc1b8276..7941a9f81c 100644
--- a/src/plugins/kdb/db2/libdb2/mpool/mpool.c
+++ b/src/plugins/kdb/db2/libdb2/mpool/mpool.c
@@ -81,9 +81,9 @@ mpool_open(key, fd, pagesize, maxcache)
/* Allocate and initialize the MPOOL cookie. */
if ((mp = (MPOOL *)calloc(1, sizeof(MPOOL))) == NULL)
return (NULL);
- CIRCLEQ_INIT(&mp->lqh);
+ TAILQ_INIT(&mp->lqh);
for (entry = 0; entry < HASHSIZE; ++entry)
- CIRCLEQ_INIT(&mp->hqh[entry]);
+ TAILQ_INIT(&mp->hqh[entry]);
mp->maxcache = maxcache;
mp->npages = sb.st_size / pagesize;
mp->pagesize = pagesize;
@@ -143,8 +143,8 @@ mpool_new(mp, pgnoaddr, flags)
bp->flags = MPOOL_PINNED | MPOOL_INUSE;
head = &mp->hqh[HASHKEY(bp->pgno)];
- CIRCLEQ_INSERT_HEAD(head, bp, hq);
- CIRCLEQ_INSERT_TAIL(&mp->lqh, bp, q);
+ TAILQ_INSERT_HEAD(head, bp, hq);
+ TAILQ_INSERT_TAIL(&mp->lqh, bp, q);
return (bp->page);
}
@@ -168,8 +168,8 @@ mpool_delete(mp, page)
/* Remove from the hash and lru queues. */
head = &mp->hqh[HASHKEY(bp->pgno)];
- CIRCLEQ_REMOVE(head, bp, hq);
- CIRCLEQ_REMOVE(&mp->lqh, bp, q);
+ TAILQ_REMOVE(head, bp, hq);
+ TAILQ_REMOVE(&mp->lqh, bp, q);
free(bp);
return (RET_SUCCESS);
@@ -208,10 +208,10 @@ mpool_get(mp, pgno, flags)
* of the lru chain.
*/
head = &mp->hqh[HASHKEY(bp->pgno)];
- CIRCLEQ_REMOVE(head, bp, hq);
- CIRCLEQ_INSERT_HEAD(head, bp, hq);
- CIRCLEQ_REMOVE(&mp->lqh, bp, q);
- CIRCLEQ_INSERT_TAIL(&mp->lqh, bp, q);
+ TAILQ_REMOVE(head, bp, hq);
+ TAILQ_INSERT_HEAD(head, bp, hq);
+ TAILQ_REMOVE(&mp->lqh, bp, q);
+ TAILQ_INSERT_TAIL(&mp->lqh, bp, q);
/* Return a pinned page. */
bp->flags |= MPOOL_PINNED;
@@ -261,8 +261,8 @@ mpool_get(mp, pgno, flags)
* of the lru chain.
*/
head = &mp->hqh[HASHKEY(bp->pgno)];
- CIRCLEQ_INSERT_HEAD(head, bp, hq);
- CIRCLEQ_INSERT_TAIL(&mp->lqh, bp, q);
+ TAILQ_INSERT_HEAD(head, bp, hq);
+ TAILQ_INSERT_TAIL(&mp->lqh, bp, q);
/* Run through the user's filter. */
if (mp->pgin != NULL)
@@ -311,8 +311,8 @@ mpool_close(mp)
BKT *bp;
/* Free up any space allocated to the lru pages. */
- while ((bp = mp->lqh.cqh_first) != (void *)&mp->lqh) {
- CIRCLEQ_REMOVE(&mp->lqh, mp->lqh.cqh_first, q);
+ while ((bp = mp->lqh.tqh_first) != NULL) {
+ TAILQ_REMOVE(&mp->lqh, mp->lqh.tqh_first, q);
free(bp);
}
@@ -332,8 +332,7 @@ mpool_sync(mp)
BKT *bp;
/* Walk the lru chain, flushing any dirty pages to disk. */
- for (bp = mp->lqh.cqh_first;
- bp != (void *)&mp->lqh; bp = bp->q.cqe_next)
+ for (bp = mp->lqh.tqh_first; bp != NULL; bp = bp->q.tqe_next)
if (bp->flags & MPOOL_DIRTY &&
mpool_write(mp, bp) == RET_ERROR)
return (RET_ERROR);
@@ -363,8 +362,7 @@ mpool_bkt(mp)
* off any lists. If we don't find anything we grow the cache anyway.
* The cache never shrinks.
*/
- for (bp = mp->lqh.cqh_first;
- bp != (void *)&mp->lqh; bp = bp->q.cqe_next)
+ for (bp = mp->lqh.tqh_first; bp != NULL; bp = bp->q.tqe_next)
if (!(bp->flags & MPOOL_PINNED)) {
/* Flush if dirty. */
if (bp->flags & MPOOL_DIRTY &&
@@ -375,8 +373,8 @@ mpool_bkt(mp)
#endif
/* Remove from the hash and lru queues. */
head = &mp->hqh[HASHKEY(bp->pgno)];
- CIRCLEQ_REMOVE(head, bp, hq);
- CIRCLEQ_REMOVE(&mp->lqh, bp, q);
+ TAILQ_REMOVE(head, bp, hq);
+ TAILQ_REMOVE(&mp->lqh, bp, q);
#if defined(DEBUG) && !defined(DEBUG_IDX0SPLIT)
{ void *spage;
spage = bp->page;
@@ -450,7 +448,7 @@ mpool_look(mp, pgno)
BKT *bp;
head = &mp->hqh[HASHKEY(pgno)];
- for (bp = head->cqh_first; bp != (void *)head; bp = bp->hq.cqe_next)
+ for (bp = head->tqh_first; bp != NULL; bp = bp->hq.tqe_next)
if ((bp->pgno == pgno) && (bp->flags & MPOOL_INUSE)) {
#ifdef STATISTICS
++mp->cachehit;
@@ -494,8 +492,7 @@ mpool_stat(mp)
sep = "";
cnt = 0;
- for (bp = mp->lqh.cqh_first;
- bp != (void *)&mp->lqh; bp = bp->q.cqe_next) {
+ for (bp = mp->lqh.tqh_first; bp != NULL; bp = bp->q.tqe_next) {
(void)fprintf(stderr, "%s%d", sep, bp->pgno);
if (bp->flags & MPOOL_DIRTY)
(void)fprintf(stderr, "d");
diff --git a/src/plugins/kdb/db2/libdb2/mpool/mpool.h b/src/plugins/kdb/db2/libdb2/mpool/mpool.h
index 627ad5b368..bed5ed3a1c 100644
--- a/src/plugins/kdb/db2/libdb2/mpool/mpool.h
+++ b/src/plugins/kdb/db2/libdb2/mpool/mpool.h
@@ -47,8 +47,8 @@
/* The BKT structures are the elements of the queues. */
typedef struct _bkt {
- CIRCLEQ_ENTRY(_bkt) hq; /* hash queue */
- CIRCLEQ_ENTRY(_bkt) q; /* lru queue */
+ TAILQ_ENTRY(_bkt) hq; /* hash queue */
+ TAILQ_ENTRY(_bkt) q; /* lru queue */
void *page; /* page */
db_pgno_t pgno; /* page number */
@@ -59,9 +59,9 @@ typedef struct _bkt {
} BKT;
typedef struct MPOOL {
- CIRCLEQ_HEAD(_lqh, _bkt) lqh; /* lru queue head */
+ TAILQ_HEAD(_lqh, _bkt) lqh; /* lru queue head */
/* hash queue array */
- CIRCLEQ_HEAD(_hqh, _bkt) hqh[HASHSIZE];
+ TAILQ_HEAD(_hqh, _bkt) hqh[HASHSIZE];
db_pgno_t curcache; /* current number of cached pages */
db_pgno_t maxcache; /* max number of cached pages */
db_pgno_t npages; /* number of pages in the file */