
r3381 - trunk/varnish-cache/bin/varnishd
Author: phk
Date: 2008-11-10 20:46:25 +0100 (Mon, 10 Nov 2008)
New Revision: 3381

Modified:
trunk/varnish-cache/bin/varnishd/Makefile.am
trunk/varnish-cache/bin/varnishd/cache.h
trunk/varnish-cache/bin/varnishd/cache_backend.c
trunk/varnish-cache/bin/varnishd/cache_backend.h
trunk/varnish-cache/bin/varnishd/cache_backend_cfg.c
trunk/varnish-cache/bin/varnishd/cache_ban.c
trunk/varnish-cache/bin/varnishd/cache_cli.c
trunk/varnish-cache/bin/varnishd/cache_expire.c
trunk/varnish-cache/bin/varnishd/cache_hash.c
trunk/varnish-cache/bin/varnishd/cache_main.c
trunk/varnish-cache/bin/varnishd/cache_pool.c
trunk/varnish-cache/bin/varnishd/cache_session.c
trunk/varnish-cache/bin/varnishd/cache_vcl.c
trunk/varnish-cache/bin/varnishd/hash_classic.c
trunk/varnish-cache/bin/varnishd/hash_simple_list.c
trunk/varnish-cache/bin/varnishd/shmlog.c
trunk/varnish-cache/bin/varnishd/storage_file.c
trunk/varnish-cache/bin/varnishd/storage_malloc.c
trunk/varnish-cache/bin/varnishd/storage_synth.c
trunk/varnish-cache/bin/varnishd/storage_umem.c
Log:
Take the full step and wrap all our mutex operations in proper C functions
instead of increasingly unwieldy macros.

Amongst other things, this will make it much easier to do lock
profiling, contention statistics, assertions, etc.

This commit is largely mechanically generated and should not result in
any changed functionality.

Locks retain the "mtx" moniker, as a reminder that they are mutexes.

No performance impact expected.
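
The new cache_lck.c itself is added by this commit and therefore does not
appear in the diff below, which lists only modified files. What follows is a
minimal sketch of what the wrappers plausibly look like, reconstructed from
the declarations added to cache.h and from the macros they replace; the
struct ilck layout and the held/owner bookkeeping are assumptions, not the
committed code:

    #include <errno.h>
    #include <pthread.h>
    #include <stdlib.h>

    #include "cache.h"      /* struct lock, AZ, XXXAN, VSL, params */

    struct ilck {
            pthread_mutex_t mtx;
            pthread_t       owner;  /* valid while held != 0 */
            int             held;
            const char      *w;     /* lock name from Lck_New's #a */
    };

    void
    Lck__New(struct lock *lck, const char *w)
    {
            struct ilck *ilck;

            ilck = calloc(sizeof *ilck, 1);
            XXXAN(ilck);
            ilck->w = w;
            AZ(pthread_mutex_init(&ilck->mtx, NULL));
            lck->priv = ilck;
    }

    void
    Lck__Lock(struct lock *lck, const char *p, const char *f, int l)
    {
            struct ilck *ilck = lck->priv;
            int r;

            if (!(params->diag_bitmap & 0x18)) {
                    AZ(pthread_mutex_lock(&ilck->mtx));
            } else {
                    r = pthread_mutex_trylock(&ilck->mtx);
                    assert(r == 0 || r == EBUSY);
                    if (r) {        /* contention: log it, then wait */
                            VSL(SLT_Debug, 0, "MTX_CONTEST(%s,%s,%d,%s)",
                                p, f, l, ilck->w);
                            AZ(pthread_mutex_lock(&ilck->mtx));
                    } else if (params->diag_bitmap & 0x8)
                            VSL(SLT_Debug, 0, "MTX_LOCK(%s,%s,%d,%s)",
                                p, f, l, ilck->w);
            }
            ilck->owner = pthread_self();
            ilck->held = 1;
    }

    void
    Lck__Unlock(struct lock *lck, const char *p, const char *f, int l)
    {
            struct ilck *ilck = lck->priv;

            ilck->held = 0;
            AZ(pthread_mutex_unlock(&ilck->mtx));
            if (params->diag_bitmap & 0x8)
                    VSL(SLT_Debug, 0, "MTX_UNLOCK(%s,%s,%d,%s)",
                        p, f, l, ilck->w);
    }

    void
    Lck__Assert(struct lock *lck, int held)
    {
            struct ilck *ilck = lck->priv;

            if (held)
                    assert(ilck->held &&
                        pthread_equal(ilck->owner, pthread_self()));
            else
                    assert(!ilck->held ||
                        !pthread_equal(ilck->owner, pthread_self()));
    }

Note that shmlog.c below moves its vsl_mtx the other way, to a raw
pthread_mutex_t, presumably because these wrappers log through VSL and the
log's own lock must not recurse into the logging code.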


Modified: trunk/varnish-cache/bin/varnishd/Makefile.am
===================================================================
--- trunk/varnish-cache/bin/varnishd/Makefile.am 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/Makefile.am 2008-11-10 19:46:25 UTC (rev 3381)
@@ -27,6 +27,7 @@
cache_http.c \
cache_httpconn.c \
cache_main.c \
+ cache_lck.c \
cache_panic.c \
cache_pipe.c \
cache_pool.c \

Modified: trunk/varnish-cache/bin/varnishd/cache.h
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache.h 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/cache.h 2008-11-10 19:46:25 UTC (rev 3381)
@@ -87,6 +87,7 @@
struct vrt_backend;
struct cli_proto;
struct ban;
+struct lock { void *priv; }; // Opaque

/*--------------------------------------------------------------------*/

@@ -295,7 +296,7 @@
#define OBJHEAD_MAGIC 0x1b96615d
void *hashpriv;

- pthread_mutex_t mtx;
+ struct lock mtx;
VTAILQ_HEAD(,object) objects;
char *hash;
unsigned hashlen;
@@ -512,6 +513,27 @@
void THR_SetSession(const struct sess *sp);
const struct sess * THR_GetSession(void);

+/* cache_lck.c */
+
+/* Internal functions, call only through macros below */
+void Lck__Lock(struct lock *lck, const char *p, const char *f, int l);
+void Lck__Unlock(struct lock *lck, const char *p, const char *f, int l);
+int Lck__Trylock(struct lock *lck, const char *p, const char *f, int l);
+void Lck__New(struct lock *lck, const char *w);
+void Lck__Assert(struct lock *lck, int held);
+
+/* public interface: */
+void LCK_Init(void);
+void Lck_Delete(struct lock *lck);
+void Lck_CondWait(pthread_cond_t *cond, struct lock *lck);
+
+#define Lck_New(a) Lck__New(a, #a)
+#define Lck_Lock(a) Lck__Lock(a, __func__, __FILE__, __LINE__)
+#define Lck_Unlock(a) Lck__Unlock(a, __func__, __FILE__, __LINE__)
+#define Lck_Trylock(a) Lck__Trylock(a, __func__, __FILE__, __LINE__)
+#define Lck_AssertHeld(a) Lck__Assert(a, 1)
+#define Lck_AssertNotHeld(a) Lck__Assert(a, 0)
+
/* cache_panic.c */
void PAN_Init(void);

@@ -612,72 +634,6 @@
struct vsb *SMS_Makesynth(struct object *obj);
void SMS_Finish(struct object *obj);

-#define MTX pthread_mutex_t
-#define MTX_INIT(foo) AZ(pthread_mutex_init(foo, NULL))
-#define MTX_DESTROY(foo) AZ(pthread_mutex_destroy(foo))
-
-#ifdef __flexelint_v9__
-#define TRYLOCK(foo, r) \
-do { \
- (r) = pthread_mutex_trylock(foo); \
-} while (0)
-#define LOCK(foo) \
-do { \
- AZ(pthread_mutex_lock(foo)); \
-} while (0)
-#define UNLOCK(foo) \
-do { \
- AZ(pthread_mutex_unlock(foo)); \
-} while (0)
-
-#else
-#define TRYLOCK(foo, r) \
-do { \
- (r) = pthread_mutex_trylock(foo); \
- assert(r == 0 || r == EBUSY); \
- if (params->diag_bitmap & 0x8) { \
- VSL(SLT_Debug, 0, \
- "MTX_TRYLOCK(%s,%s,%d," #foo ") = %d", \
- __func__, __FILE__, __LINE__, (r)); \
- } \
-} while (0)
-#define LOCK(foo) \
-do { \
- if (!(params->diag_bitmap & 0x18)) { \
- AZ(pthread_mutex_lock(foo)); \
- } else { \
- int ixjd = pthread_mutex_trylock(foo); \
- assert(ixjd == 0 || ixjd == EBUSY); \
- if (ixjd) { \
- VSL(SLT_Debug, 0, \
- "MTX_CONTEST(%s,%s,%d," #foo ")", \
- __func__, __FILE__, __LINE__); \
- AZ(pthread_mutex_lock(foo)); \
- } else if (params->diag_bitmap & 0x8) { \
- VSL(SLT_Debug, 0, \
- "MTX_LOCK(%s,%s,%d," #foo ")", \
- __func__, __FILE__, __LINE__); \
- } \
- } \
-} while (0)
-#define UNLOCK(foo) \
-do { \
- AZ(pthread_mutex_unlock(foo)); \
- if (params->diag_bitmap & 0x8) \
- VSL(SLT_Debug, 0, \
- "MTX_UNLOCK(%s,%s,%d," #foo ")", \
- __func__, __FILE__, __LINE__); \
-} while (0)
-#endif
-
-#if defined(HAVE_PTHREAD_MUTEX_ISOWNED_NP)
-#define ALOCKED(mutex) AN(pthread_mutex_isowned_np((mutex)))
-#elif defined(DIAGNOSTICS)
-#define ALOCKED(mutex) AN(pthread_mutex_trylock((mutex)))
-#else
-#define ALOCKED(mutex) (void)(mutex)
-#endif
-
/*
* A normal pointer difference is signed, but we never want a negative value
* so this little tool will make sure we don't get that.

Modified: trunk/varnish-cache/bin/varnishd/cache_backend.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_backend.c 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/cache_backend.c 2008-11-10 19:46:25 UTC (rev 3381)
@@ -147,11 +147,11 @@
struct bereq *bereq;
volatile unsigned len;

- LOCK(&VBE_mtx);
+ Lck_Lock(&VBE_mtx);
bereq = VTAILQ_FIRST(&bereq_head);
if (bereq != NULL)
VTAILQ_REMOVE(&bereq_head, bereq, list);
- UNLOCK(&VBE_mtx);
+ Lck_Unlock(&VBE_mtx);
if (bereq != NULL) {
CHECK_OBJ(bereq, BEREQ_MAGIC);
} else {
@@ -177,9 +177,9 @@

CHECK_OBJ_NOTNULL(bereq, BEREQ_MAGIC);
WS_Reset(bereq->ws, NULL);
- LOCK(&VBE_mtx);
+ Lck_Lock(&VBE_mtx);
VTAILQ_INSERT_HEAD(&bereq_head, bereq, list);
- UNLOCK(&VBE_mtx);
+ Lck_Unlock(&VBE_mtx);
}

/*--------------------------------------------------------------------
@@ -195,13 +195,13 @@

vc = VTAILQ_FIRST(&vbe_conns);
if (vc != NULL) {
- LOCK(&VBE_mtx);
+ Lck_Lock(&VBE_mtx);
vc = VTAILQ_FIRST(&vbe_conns);
if (vc != NULL) {
VSL_stats->backend_unused--;
VTAILQ_REMOVE(&vbe_conns, vc, list);
}
- UNLOCK(&VBE_mtx);
+ Lck_Unlock(&VBE_mtx);
}
if (vc != NULL)
return (vc);
@@ -222,10 +222,10 @@
assert(vc->fd < 0);

if (params->cache_vbe_conns) {
- LOCK(&VBE_mtx);
+ Lck_Lock(&VBE_mtx);
VTAILQ_INSERT_HEAD(&vbe_conns, vc, list);
VSL_stats->backend_unused++;
- UNLOCK(&VBE_mtx);
+ Lck_Unlock(&VBE_mtx);
} else {
VSL_stats->n_vbe_conn--;
free(vc);
@@ -239,10 +239,10 @@
{
int s;

- LOCK(&bp->mtx);
+ Lck_Lock(&bp->mtx);
bp->refcount++;
bp->n_conn++; /* It mostly works */
- UNLOCK(&bp->mtx);
+ Lck_Unlock(&bp->mtx);

s = -1;
assert(bp->ipv6 != NULL || bp->ipv4 != NULL);
@@ -257,10 +257,10 @@
s = VBE_TryConnect(sp, PF_INET6, bp->ipv6, bp->ipv6len, bp);

if (s < 0) {
- LOCK(&bp->mtx);
+ Lck_Lock(&bp->mtx);
bp->n_conn--;
bp->refcount--; /* Only keep ref on success */
- UNLOCK(&bp->mtx);
+ Lck_Unlock(&bp->mtx);
}
return (s);
}
@@ -295,7 +295,7 @@

/* first look for vbe_conn's we can recycle */
while (1) {
- LOCK(&bp->mtx);
+ Lck_Lock(&bp->mtx);
vc = VTAILQ_FIRST(&bp->connlist);
if (vc != NULL) {
bp->refcount++;
@@ -303,7 +303,7 @@
assert(vc->fd >= 0);
VTAILQ_REMOVE(&bp->connlist, vc, list);
}
- UNLOCK(&bp->mtx);
+ Lck_Unlock(&bp->mtx);
if (vc == NULL)
break;
if (VBE_CheckFd(vc->fd)) {
@@ -379,7 +379,7 @@
bp = sp->vbe->backend;

WSL(sp->wrk, SLT_BackendReuse, sp->vbe->fd, "%s", bp->vcl_name);
- LOCK(&bp->mtx);
+ Lck_Lock(&bp->mtx);
VSL_stats->backend_recycle++;
VTAILQ_INSERT_HEAD(&bp->connlist, sp->vbe, list);
sp->vbe = NULL;

Modified: trunk/varnish-cache/bin/varnishd/cache_backend.h
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_backend.h 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/cache_backend.h 2008-11-10 19:46:25 UTC (rev 3381)
@@ -109,7 +109,7 @@

VTAILQ_ENTRY(backend) list;
int refcount;
- pthread_mutex_t mtx;
+ struct lock mtx;

struct sockaddr *ipv4;
socklen_t ipv4len;
@@ -129,7 +129,7 @@
struct vbe_conn *VBE_GetVbe(struct sess *sp, struct backend *bp);

/* cache_backend_cfg.c */
-extern MTX VBE_mtx;
+extern struct lock VBE_mtx;
void VBE_DropRefConn(struct backend *);
void VBE_DropRef(struct backend *);
void VBE_DropRefLocked(struct backend *b);

Modified: trunk/varnish-cache/bin/varnishd/cache_backend_cfg.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_backend_cfg.c 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/cache_backend_cfg.c 2008-11-10 19:46:25 UTC (rev 3381)
@@ -48,7 +48,7 @@
#include "cache_backend.h"
#include "cli_priv.h"

-MTX VBE_mtx;
+struct lock VBE_mtx;

/*
* The list of backends is not locked, it is only ever accessed from
@@ -105,7 +105,7 @@
assert(b->refcount > 0);

i = --b->refcount;
- UNLOCK(&b->mtx);
+ Lck_Unlock(&b->mtx);
if (i > 0)
return;

@@ -128,7 +128,7 @@

CHECK_OBJ_NOTNULL(b, BACKEND_MAGIC);

- LOCK(&b->mtx);
+ Lck_Lock(&b->mtx);
VBE_DropRefLocked(b);
}

@@ -138,7 +138,7 @@

CHECK_OBJ_NOTNULL(b, BACKEND_MAGIC);

- LOCK(&b->mtx);
+ Lck_Lock(&b->mtx);
assert(b->n_conn > 0);
b->n_conn--;
VBE_DropRefLocked(b);
@@ -207,7 +207,7 @@
/* Create new backend */
ALLOC_OBJ(b, BACKEND_MAGIC);
XXXAN(b);
- MTX_INIT(&b->mtx);
+ Lck_New(&b->mtx);
b->refcount = 1;

VTAILQ_INIT(&b->connlist);
@@ -283,6 +283,6 @@
VBE_Init(void)
{

- MTX_INIT(&VBE_mtx);
+ Lck_New(&VBE_mtx);
CLI_AddFuncs(DEBUG_CLI, debug_cmds);
}

Modified: trunk/varnish-cache/bin/varnishd/cache_ban.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_ban.c 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/cache_ban.c 2008-11-10 19:46:25 UTC (rev 3381)
@@ -57,7 +57,7 @@
};

static VTAILQ_HEAD(banhead,ban) ban_head = VTAILQ_HEAD_INITIALIZER(ban_head);
-static MTX ban_mtx;
+static struct lock ban_mtx;

/*
* We maintain ban_start as a pointer to the first element of the list
@@ -95,7 +95,7 @@
b->hash = hash;
b->ban = strdup(regexp);
AN(b->ban);
- LOCK(&ban_mtx);
+ Lck_Lock(&ban_mtx);
VTAILQ_INSERT_HEAD(&ban_head, b, list);
ban_start = b;
VSL_stats->n_purge++;
@@ -106,7 +106,7 @@
be->refcount++;
} else
be = NULL;
- UNLOCK(&ban_mtx);
+ Lck_Unlock(&ban_mtx);

if (be == NULL)
return (0);
@@ -125,11 +125,11 @@
bi->flags |= BAN_F_GONE;
pcount++;
}
- LOCK(&ban_mtx);
+ Lck_Lock(&ban_mtx);
be->refcount--;
/* XXX: We should check if the tail can be removed */
VSL_stats->n_purge_dups += pcount;
- UNLOCK(&ban_mtx);
+ Lck_Unlock(&ban_mtx);

return (0);
}
@@ -140,10 +140,10 @@

CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
AZ(o->ban);
- LOCK(&ban_mtx);
+ Lck_Lock(&ban_mtx);
o->ban = ban_start;
ban_start->refcount++;
- UNLOCK(&ban_mtx);
+ Lck_Unlock(&ban_mtx);
}

void
@@ -155,7 +155,7 @@
if (o->ban == NULL)
return;
CHECK_OBJ_NOTNULL(o->ban, BAN_MAGIC);
- LOCK(&ban_mtx);
+ Lck_Lock(&ban_mtx);
o->ban->refcount--;
o->ban = NULL;

@@ -168,7 +168,7 @@
} else {
b = NULL;
}
- UNLOCK(&ban_mtx);
+ Lck_Unlock(&ban_mtx);
if (b != NULL) {
free(b->ban);
regfree(&b->regexp);
@@ -205,13 +205,13 @@
break;
}

- LOCK(&ban_mtx);
+ Lck_Lock(&ban_mtx);
o->ban->refcount--;
if (b == o->ban) /* not banned */
b0->refcount++;
VSL_stats->n_purge_obj_test++;
VSL_stats->n_purge_re_test += tests;
- UNLOCK(&ban_mtx);
+ Lck_Unlock(&ban_mtx);

if (b == o->ban) { /* not banned */
o->ban = b0;
@@ -285,7 +285,7 @@
BAN_Init(void)
{

- MTX_INIT(&ban_mtx);
+ Lck_New(&ban_mtx);
CLI_AddFuncs(PUBLIC_CLI, ban_cmds);
/* Add an initial ban, since the list can never be empty */
(void)BAN_Add(NULL, ".", 0);

Modified: trunk/varnish-cache/bin/varnishd/cache_cli.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_cli.c 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/cache_cli.c 2008-11-10 19:46:25 UTC (rev 3381)
@@ -53,8 +53,8 @@
#include "vlu.h"
#include "vsb.h"

-pthread_t cli_thread;
-static MTX cli_mtx;
+pthread_t cli_thread;
+static struct lock cli_mtx;

/*
* The CLI commandlist is split in three:
@@ -81,12 +81,12 @@
case DEBUG_CLI: cp = &ccf_debug_cli; break;
default: INCOMPL();
}
- LOCK(&cli_mtx);
+ Lck_Lock(&cli_mtx);
c = cli_concat(*cp, p);
AN(c);
free(*cp);
*cp = c;
- UNLOCK(&cli_mtx);
+ Lck_Unlock(&cli_mtx);
}

/*--------------------------------------------------------------------
@@ -105,7 +105,7 @@
VCL_Poll();
VBE_Poll();
vsb_clear(cli->sb);
- LOCK(&cli_mtx);
+ Lck_Lock(&cli_mtx);
cli_dispatch(cli, ccf_master_cli, p);
if (cli->result == CLIS_UNKNOWN) {
vsb_clear(cli->sb);
@@ -117,7 +117,7 @@
cli->result = CLIS_OK;
cli_dispatch(cli, ccf_debug_cli, p);
}
- UNLOCK(&cli_mtx);
+ Lck_Unlock(&cli_mtx);
vsb_finish(cli->sb);
AZ(vsb_overflowed(cli->sb));
i = cli_writeres(heritage.cli_out, cli);
@@ -242,7 +242,7 @@
CLI_Init(void)
{

- MTX_INIT(&cli_mtx);
+ Lck_New(&cli_mtx);
cli_thread = pthread_self();

CLI_AddFuncs(MASTER_CLI, master_cmds);

Modified: trunk/varnish-cache/bin/varnishd/cache_expire.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_expire.c 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/cache_expire.c 2008-11-10 19:46:25 UTC (rev 3381)
@@ -86,7 +86,7 @@

static pthread_t exp_thread;
static struct binheap *exp_heap;
-static MTX exp_mtx;
+static struct lock exp_mtx;
static VTAILQ_HEAD(,objexp) lru = VTAILQ_HEAD_INITIALIZER(lru);

/*
@@ -176,12 +176,12 @@
assert(o->entered != 0 && !isnan(o->entered));
oe->lru_stamp = o->entered;
update_object_when(o);
- LOCK(&exp_mtx);
+ Lck_Lock(&exp_mtx);
binheap_insert(exp_heap, oe);
assert(oe->timer_idx != BINHEAP_NOIDX);
VTAILQ_INSERT_TAIL(&lru, oe, list);
oe->on_lru = 1;
- UNLOCK(&exp_mtx);
+ Lck_Unlock(&exp_mtx);
}

/*--------------------------------------------------------------------
@@ -196,7 +196,6 @@
void
EXP_Touch(const struct object *o, double now)
{
- int i;
struct objexp *oe;

CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
@@ -206,8 +205,7 @@
CHECK_OBJ_NOTNULL(oe, OBJEXP_MAGIC);
if (oe->lru_stamp + params->lru_timeout > now)
return;
- TRYLOCK(&exp_mtx, i);
- if (i)
+ if (Lck_Trylock(&exp_mtx))
return;
if (oe->on_lru) {
VTAILQ_REMOVE(&lru, oe, list);
@@ -215,7 +213,7 @@
oe->lru_stamp = now;
VSL_stats->n_lru_moved++;
}
- UNLOCK(&exp_mtx);
+ Lck_Unlock(&exp_mtx);
}

/*--------------------------------------------------------------------
@@ -238,13 +236,13 @@
return;
CHECK_OBJ_NOTNULL(oe, OBJEXP_MAGIC);
update_object_when(o);
- LOCK(&exp_mtx);
+ Lck_Lock(&exp_mtx);
assert(oe->timer_idx != BINHEAP_NOIDX);
binheap_delete(exp_heap, oe->timer_idx); /* XXX: binheap_shuffle() ? */
assert(oe->timer_idx == BINHEAP_NOIDX);
binheap_insert(exp_heap, oe);
assert(oe->timer_idx != BINHEAP_NOIDX);
- UNLOCK(&exp_mtx);
+ Lck_Unlock(&exp_mtx);
}


@@ -278,11 +276,11 @@
VCL_Get(&sp->vcl);
t = TIM_real();
while (1) {
- LOCK(&exp_mtx);
+ Lck_Lock(&exp_mtx);
oe = binheap_root(exp_heap);
CHECK_OBJ_ORNULL(oe, OBJEXP_MAGIC);
if (oe == NULL || oe->timer_when > t) { /* XXX: > or >= ? */
- UNLOCK(&exp_mtx);
+ Lck_Unlock(&exp_mtx);
WSL_Flush(&ww, 0);
AZ(sleep(1));
VCL_Refresh(&sp->vcl);
@@ -305,7 +303,7 @@
}

assert(oe->on_lru);
- UNLOCK(&exp_mtx);
+ Lck_Unlock(&exp_mtx);

WSL(&ww, SLT_ExpPick, 0, "%u %s", o->xid, oe->timer_what);

@@ -319,10 +317,10 @@
o->xid);
}
update_object_when(o);
- LOCK(&exp_mtx);
+ Lck_Lock(&exp_mtx);
binheap_insert(exp_heap, oe);
assert(oe->timer_idx != BINHEAP_NOIDX);
- UNLOCK(&exp_mtx);
+ Lck_Unlock(&exp_mtx);
} else {
assert(oe->timer_what == tmr_ttl);
sp->obj = o;
@@ -332,11 +330,11 @@
assert(sp->handling == VCL_RET_DISCARD);
WSL(&ww, SLT_ExpKill, 0,
"%u %d", o->xid, (int)(o->ttl - t));
- LOCK(&exp_mtx);
+ Lck_Lock(&exp_mtx);
VTAILQ_REMOVE(&lru, o->objexp, list);
oe->on_lru = 0;
VSL_stats->n_expired++;
- UNLOCK(&exp_mtx);
+ Lck_Unlock(&exp_mtx);
del_objexp(o);
HSH_Deref(o);
}
@@ -367,7 +365,7 @@
* NB: Checking refcount here is no guarantee that it does not gain
* another ref while we ponder its destiny without the lock held.
*/
- LOCK(&exp_mtx);
+ Lck_Lock(&exp_mtx);
VTAILQ_FOREACH(oe, &lru, list) {
CHECK_OBJ_NOTNULL(oe, OBJEXP_MAGIC);
if (oe->timer_idx == BINHEAP_NOIDX) /* exp_timer has it */
@@ -388,7 +386,7 @@
assert(oe->timer_idx == BINHEAP_NOIDX);
VSL_stats->n_lru_nuked++;
}
- UNLOCK(&exp_mtx);
+ Lck_Unlock(&exp_mtx);

if (oe == NULL)
return (-1);
@@ -414,14 +412,14 @@
assert(sp->handling == VCL_RET_KEEP);

/* Insert in binheap and lru again */
- LOCK(&exp_mtx);
+ Lck_Lock(&exp_mtx);
VSL_stats->n_lru_nuked--; /* It was premature */
VSL_stats->n_lru_saved++;
binheap_insert(exp_heap, oe);
assert(oe->timer_idx != BINHEAP_NOIDX);
VTAILQ_INSERT_TAIL(&lru, oe, list);
oe->on_lru = 1;
- UNLOCK(&exp_mtx);
+ Lck_Unlock(&exp_mtx);
return (0);
}

@@ -456,7 +454,7 @@
EXP_Init(void)
{

- MTX_INIT(&exp_mtx);
+ Lck_New(&exp_mtx);
exp_heap = binheap_new(NULL, object_cmp, object_update);
XXXAN(exp_heap);
AZ(pthread_create(&exp_thread, NULL, exp_timer, NULL));
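
EXP_Touch above now treats a nonzero return from Lck_Trylock as "lock busy,
skip the LRU update", matching the old TRYLOCK macro where 0 and EBUSY were
the only tolerated results. A sketch of the trylock wrapper, under the same
struct ilck assumptions as the cache_lck.c sketch near the top:

    int
    Lck__Trylock(struct lock *lck, const char *p, const char *f, int l)
    {
            struct ilck *ilck = lck->priv;
            int r;

            r = pthread_mutex_trylock(&ilck->mtx);
            assert(r == 0 || r == EBUSY);
            if (params->diag_bitmap & 0x8)
                    VSL(SLT_Debug, 0, "MTX_TRYLOCK(%s,%s,%d,%s) = %d",
                        p, f, l, ilck->w, r);
            if (r == 0) {           /* acquired: record ownership */
                    ilck->owner = pthread_self();
                    ilck->held = 1;
            }
            return (r);
    }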

Modified: trunk/varnish-cache/bin/varnishd/cache_hash.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_hash.c 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/cache_hash.c 2008-11-10 19:46:25 UTC (rev 3381)
@@ -93,7 +93,7 @@
w->nobjhead->magic = OBJHEAD_MAGIC;
VTAILQ_INIT(&w->nobjhead->objects);
VTAILQ_INIT(&w->nobjhead->waitinglist);
- MTX_INIT(&w->nobjhead->mtx);
+ Lck_New(&w->nobjhead->mtx);
VSL_stats->n_objecthead++;
} else
CHECK_OBJ_NOTNULL(w->nobjhead, OBJHEAD_MAGIC);
@@ -205,13 +205,13 @@
oh = sp->objhead;
sp->objhead = NULL;
CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
- LOCK(&oh->mtx);
+ Lck_Lock(&oh->mtx);
} else {
oh = hash->lookup(sp, w->nobjhead);
CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
if (oh == w->nobjhead)
w->nobjhead = NULL;
- LOCK(&oh->mtx);
+ Lck_Lock(&oh->mtx);
}

busy_o = NULL;
@@ -257,7 +257,7 @@
o->refcnt++;
if (o->hits < INT_MAX)
o->hits++;
- UNLOCK(&oh->mtx);
+ Lck_Unlock(&oh->mtx);
if (params->log_hash)
WSP(sp, SLT_Hash, "%s", oh->hash);
(void)hash->deref(oh);
@@ -269,7 +269,7 @@
if (sp->esis == 0)
VTAILQ_INSERT_TAIL(&oh->waitinglist, sp, list);
sp->objhead = oh;
- UNLOCK(&oh->mtx);
+ Lck_Unlock(&oh->mtx);
return (NULL);
}

@@ -285,7 +285,7 @@
o->parent = grace_o;
grace_o->refcnt++;
}
- UNLOCK(&oh->mtx);
+ Lck_Unlock(&oh->mtx);
if (params->log_hash)
WSP(sp, SLT_Hash, "%s", oh->hash);
/*
@@ -333,7 +333,7 @@
oh = o->objhead;
if (oh != NULL) {
CHECK_OBJ(oh, OBJHEAD_MAGIC);
- LOCK(&oh->mtx);
+ Lck_Lock(&oh->mtx);
}
o->busy = 0;
if (oh != NULL)
@@ -343,7 +343,7 @@
if (parent != NULL)
parent->child = NULL;
if (oh != NULL)
- UNLOCK(&oh->mtx);
+ Lck_Unlock(&oh->mtx);
if (parent != NULL)
HSH_Deref(parent);
}
@@ -357,12 +357,12 @@
oh = o->objhead;
if (oh != NULL) {
CHECK_OBJ(oh, OBJHEAD_MAGIC);
- LOCK(&oh->mtx);
+ Lck_Lock(&oh->mtx);
}
assert(o->refcnt > 0);
o->refcnt++;
if (oh != NULL)
- UNLOCK(&oh->mtx);
+ Lck_Unlock(&oh->mtx);
}

void
@@ -377,7 +377,7 @@
CHECK_OBJ(oh, OBJHEAD_MAGIC);

/* drop ref on object */
- LOCK(&oh->mtx);
+ Lck_Lock(&oh->mtx);
}
assert(o->refcnt > 0);
r = --o->refcnt;
@@ -386,7 +386,7 @@
if (oh != NULL) {
if (!r)
VTAILQ_REMOVE(&oh->objects, o, list);
- UNLOCK(&oh->mtx);
+ Lck_Unlock(&oh->mtx);
}

/* If still referenced, done */
@@ -411,7 +411,7 @@
if (hash->deref(oh))
return;
assert(VTAILQ_EMPTY(&oh->objects));
- MTX_DESTROY(&oh->mtx);
+ Lck_Delete(&oh->mtx);
VSL_stats->n_objecthead--;
free(oh->hash);
FREE_OBJ(oh);

Modified: trunk/varnish-cache/bin/varnishd/cache_main.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_main.c 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/cache_main.c 2008-11-10 19:46:25 UTC (rev 3381)
@@ -101,6 +101,10 @@

THR_SetName("cache-main");

+ VSL_Init(); /* First, LCK needs it. */
+
+ LCK_Init(); /* Locking, must be first */
+
PAN_Init();
CLI_Init();
Fetch_Init();
@@ -113,7 +117,6 @@

VBE_Init();
VBP_Init();
- VSL_Init();
WRK_Init();

EXP_Init();

Modified: trunk/varnish-cache/bin/varnishd/cache_pool.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_pool.c 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/cache_pool.c 2008-11-10 19:46:25 UTC (rev 3381)
@@ -79,7 +79,7 @@
struct wq {
unsigned magic;
#define WQ_MAGIC 0x606658fa
- MTX mtx;
+ struct lock mtx;
struct workerhead idle;
VTAILQ_HEAD(, workreq) overflow;
unsigned nthr;
@@ -95,7 +95,7 @@
static unsigned nthr_max;

static pthread_cond_t herder_cond;
-static MTX herder_mtx;
+static struct lock herder_mtx;

/*--------------------------------------------------------------------
* Write data to fd
@@ -249,7 +249,7 @@

VSL(SLT_WorkThread, 0, "%p start", w);

- LOCK(&qp->mtx);
+ Lck_Lock(&qp->mtx);
qp->nthr++;
while (1) {
CHECK_OBJ_NOTNULL(w, WORKER_MAGIC);
@@ -263,20 +263,20 @@
if (isnan(w->lastused))
w->lastused = TIM_real();
VTAILQ_INSERT_HEAD(&qp->idle, w, list);
- AZ(pthread_cond_wait(&w->cond, &qp->mtx));
+ Lck_CondWait(&w->cond, &qp->mtx);
}
if (w->wrq == NULL)
break;
- UNLOCK(&qp->mtx);
+ Lck_Unlock(&qp->mtx);
AN(w->wrq);
AN(w->wrq->func);
w->lastused = NAN;
w->wrq->func(w, w->wrq->priv);
w->wrq = NULL;
- LOCK(&qp->mtx);
+ Lck_Lock(&qp->mtx);
}
qp->nthr--;
- UNLOCK(&qp->mtx);
+ Lck_Unlock(&qp->mtx);

VSL(SLT_WorkThread, 0, "%p end", w);
if (w->vcl != NULL)
@@ -285,7 +285,7 @@
if (w->srcaddr != NULL)
free(w->srcaddr);
if (w->nobjhead != NULL) {
- MTX_DESTROY(&w->nobjhead->mtx);
+ Lck_Delete(&w->nobjhead->mtx);
FREE_OBJ(w->nobjhead);
}
if (w->nobj != NULL)
@@ -318,13 +318,13 @@
qp = wq[onq];
nq = onq;

- LOCK(&qp->mtx);
+ Lck_Lock(&qp->mtx);

/* If there are idle threads, we tickle the first one into action */
w = VTAILQ_FIRST(&qp->idle);
if (w != NULL) {
VTAILQ_REMOVE(&qp->idle, w, list);
- UNLOCK(&qp->mtx);
+ Lck_Unlock(&qp->mtx);
w->wrq = wrq;
AZ(pthread_cond_signal(&w->cond));
return (0);
@@ -333,14 +333,14 @@
/* If we have too much in the overflow already, refuse. */
if (qp->nqueue > ovfl_max) {
qp->ndrop++;
- UNLOCK(&qp->mtx);
+ Lck_Unlock(&qp->mtx);
return (-1);
}

VTAILQ_INSERT_TAIL(&qp->overflow, wrq, list);
qp->noverflow++;
qp->nqueue++;
- UNLOCK(&qp->mtx);
+ Lck_Unlock(&qp->mtx);
AZ(pthread_cond_signal(&herder_cond));
return (0);
}
@@ -412,7 +412,7 @@
wq[u] = calloc(sizeof *wq[u], 1);
XXXAN(wq[u]);
wq[u]->magic = WQ_MAGIC;
- MTX_INIT(&wq[u]->mtx);
+ Lck_New(&wq[u]->mtx);
VTAILQ_INIT(&wq[u]->overflow);
VTAILQ_INIT(&wq[u]->idle);
}
@@ -429,7 +429,7 @@
{
struct worker *w = NULL;

- LOCK(&qp->mtx);
+ Lck_Lock(&qp->mtx);
vs->n_wrk += qp->nthr;
vs->n_wrk_queue += qp->nqueue;
vs->n_wrk_drop += qp->ndrop;
@@ -442,7 +442,7 @@
else
w = NULL;
}
- UNLOCK(&qp->mtx);
+ Lck_Unlock(&qp->mtx);

/* And give it a kiss on the cheek... */
if (w != NULL) {
@@ -572,9 +572,9 @@
* We cannot avoid getting a mutex, so we have a
* bogo mutex just for POSIX_STUPIDITY
*/
- AZ(pthread_mutex_lock(&herder_mtx));
- AZ(pthread_cond_wait(&herder_cond, &herder_mtx));
- AZ(pthread_mutex_unlock(&herder_mtx));
+ Lck_Lock(&herder_mtx);
+ Lck_CondWait(&herder_cond, &herder_mtx);
+ Lck_Unlock(&herder_mtx);
wrk_breed_flock(wq[u]);
}
}
@@ -588,7 +588,7 @@
pthread_t tp;

AZ(pthread_cond_init(&herder_cond, NULL));
- AZ(pthread_mutex_init(&herder_mtx, NULL));
+ Lck_New(&herder_mtx);

wrk_addpools(params->wthread_pools);
AZ(pthread_create(&tp, NULL, wrk_herdtimer_thread, NULL));
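
The cache_pool.c hunks above replace pthread_cond_wait() with Lck_CondWait(),
which has to reach through the opaque struct lock for the underlying mutex,
and should drop the held/owner bookkeeping for the duration of the wait
(pthread_cond_wait releases the mutex while blocked). A minimal sketch, again
under the same struct ilck assumption:

    void
    Lck_CondWait(pthread_cond_t *cond, struct lock *lck)
    {
            struct ilck *ilck = lck->priv;

            ilck->held = 0;         /* released while we wait */
            AZ(pthread_cond_wait(cond, &ilck->mtx));
            ilck->held = 1;         /* reacquired on wakeup */
            ilck->owner = pthread_self();
    }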

Modified: trunk/varnish-cache/bin/varnishd/cache_session.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_session.c 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/cache_session.c 2008-11-10 19:46:25 UTC (rev 3381)
@@ -78,7 +78,7 @@
};

static unsigned ses_qp;
-static MTX ses_mem_mtx;
+static struct lock ses_mem_mtx;

/*--------------------------------------------------------------------*/

@@ -103,11 +103,11 @@
unsigned magic;
#define SRCADDRHEAD_MAGIC 0x38231a8b
VTAILQ_HEAD(,srcaddr) head;
- MTX mtx;
+ struct lock mtx;
} *srchash;

static unsigned nsrchash;
-static MTX stat_mtx;
+static struct lock stat_mtx;

/*--------------------------------------------------------------------
* Assign a srcaddr to this session.
@@ -140,7 +140,7 @@
XXXAN(sp->wrk->srcaddr);
}

- LOCK(&ch->mtx);
+ Lck_Lock(&ch->mtx);
c3 = NULL;
VTAILQ_FOREACH_SAFE(c, &ch->head, list, c2) {
if (c->hash == u && !strcmp(c->addr, sp->addr)) {
@@ -155,7 +155,7 @@
VTAILQ_REMOVE(&ch->head, c3, list);
VSL_stats->n_srcaddr--;
}
- UNLOCK(&ch->mtx);
+ Lck_Unlock(&ch->mtx);
if (c3 != NULL)
free(c3);
return;
@@ -183,7 +183,7 @@
VSL_stats->n_srcaddr_act++;
VTAILQ_INSERT_TAIL(&ch->head, c3, list);
sp->srcaddr = c3;
- UNLOCK(&ch->mtx);
+ Lck_Unlock(&ch->mtx);
}

/*--------------------------------------------------------------------*/
@@ -198,13 +198,13 @@
CHECK_OBJ(sp->srcaddr, SRCADDR_MAGIC);
ch = sp->srcaddr->sah;
CHECK_OBJ(ch, SRCADDRHEAD_MAGIC);
- LOCK(&ch->mtx);
+ Lck_Lock(&ch->mtx);
assert(sp->srcaddr->nref > 0);
sp->srcaddr->nref--;
if (sp->srcaddr->nref == 0)
VSL_stats->n_srcaddr_act--;
sp->srcaddr = NULL;
- UNLOCK(&ch->mtx);
+ Lck_Unlock(&ch->mtx);
}

/*--------------------------------------------------------------------*/
@@ -228,21 +228,21 @@
if (sp->srcaddr != NULL) {
/* XXX: only report once per second ? */
CHECK_OBJ(sp->srcaddr, SRCADDR_MAGIC);
- LOCK(&sp->srcaddr->sah->mtx);
+ Lck_Lock(&sp->srcaddr->sah->mtx);
ses_sum_acct(&sp->srcaddr->acct, a);
b = sp->srcaddr->acct;
- UNLOCK(&sp->srcaddr->sah->mtx);
+ Lck_Unlock(&sp->srcaddr->sah->mtx);
WSL(sp->wrk, SLT_StatAddr, 0,
"%s 0 %.0f %ju %ju %ju %ju %ju %ju %ju",
sp->srcaddr->addr, sp->t_end - b.first,
b.sess, b.req, b.pipe, b.pass,
b.fetch, b.hdrbytes, b.bodybytes);
}
- LOCK(&stat_mtx);
+ Lck_Lock(&stat_mtx);
#define ACCT(foo) VSL_stats->s_##foo += a->foo;
#include "acct_fields.h"
#undef ACCT
- UNLOCK(&stat_mtx);
+ Lck_Unlock(&stat_mtx);
memset(a, 0, sizeof *a);
}

@@ -266,9 +266,9 @@
* If that queue is empty, flip queues holding the lock
* and try the new unlocked queue.
*/
- LOCK(&ses_mem_mtx);
+ Lck_Lock(&ses_mem_mtx);
ses_qp = 1 - ses_qp;
- UNLOCK(&ses_mem_mtx);
+ Lck_Unlock(&ses_mem_mtx);
sm = VTAILQ_FIRST(&ses_free_mem[ses_qp]);
}
if (sm != NULL) {
@@ -343,9 +343,9 @@
VSL_stats->n_sess_mem--;
free(sm);
} else {
- LOCK(&ses_mem_mtx);
+ Lck_Lock(&ses_mem_mtx);
VTAILQ_INSERT_HEAD(&ses_free_mem[1 - ses_qp], sm, list);
- UNLOCK(&ses_mem_mtx);
+ Lck_Unlock(&ses_mem_mtx);
}
}

@@ -362,8 +362,8 @@
for (i = 0; i < nsrchash; i++) {
srchash[i].magic = SRCADDRHEAD_MAGIC;
VTAILQ_INIT(&srchash[i].head);
- MTX_INIT(&srchash[i].mtx);
+ Lck_New(&srchash[i].mtx);
}
- MTX_INIT(&stat_mtx);
- MTX_INIT(&ses_mem_mtx);
+ Lck_New(&stat_mtx);
+ Lck_New(&ses_mem_mtx);
}

Modified: trunk/varnish-cache/bin/varnishd/cache_vcl.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_vcl.c 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/cache_vcl.c 2008-11-10 19:46:25 UTC (rev 3381)
@@ -64,7 +64,7 @@
VTAILQ_HEAD_INITIALIZER(vcl_head);


-static MTX vcl_mtx;
+static struct lock vcl_mtx;
static struct vcls *vcl_active; /* protected by vcl_mtx */

/*--------------------------------------------------------------------*/
@@ -83,13 +83,13 @@
VCL_Get(struct VCL_conf **vcc)
{

- LOCK(&vcl_mtx);
+ Lck_Lock(&vcl_mtx);
AN(vcl_active);
*vcc = vcl_active->conf;
AN(*vcc);
AZ((*vcc)->discard);
(*vcc)->busy++;
- UNLOCK(&vcl_mtx);
+ Lck_Unlock(&vcl_mtx);
}

void
@@ -100,14 +100,14 @@
vc = *vcc;
*vcc = NULL;

- LOCK(&vcl_mtx);
+ Lck_Lock(&vcl_mtx);
assert(vc->busy > 0);
vc->busy--;
/*
* We do not garbage collect discarded VCL's here, that happens
* in VCL_Poll() which is called from the CLI thread.
*/
- UNLOCK(&vcl_mtx);
+ Lck_Unlock(&vcl_mtx);
}

/*--------------------------------------------------------------------*/
@@ -167,10 +167,10 @@
}
REPLACE(vcl->name, name);
VTAILQ_INSERT_TAIL(&vcl_head, vcl, list);
- LOCK(&vcl_mtx);
+ Lck_Lock(&vcl_mtx);
if (vcl_active == NULL)
vcl_active = vcl;
- UNLOCK(&vcl_mtx);
+ Lck_Unlock(&vcl_mtx);
cli_out(cli, "Loaded \"%s\" as \"%s\"", fn , name);
vcl->conf->init_func(cli);
VSL_stats->n_vcl++;
@@ -264,9 +264,9 @@
cli_out(cli, "VCL '%s' unknown", av[2]);
return;
}
- LOCK(&vcl_mtx);
+ Lck_Lock(&vcl_mtx);
if (vcl == vcl_active) {
- UNLOCK(&vcl_mtx);
+ Lck_Unlock(&vcl_mtx);
cli_result(cli, CLIS_PARAM);
cli_out(cli, "VCL %s is the active VCL", av[2]);
return;
@@ -274,7 +274,7 @@
VSL_stats->n_vcl_discard++;
VSL_stats->n_vcl_avail--;
vcl->conf->discard = 1;
- UNLOCK(&vcl_mtx);
+ Lck_Unlock(&vcl_mtx);
if (vcl->conf->busy == 0)
VCL_Nuke(vcl);
}
@@ -292,9 +292,9 @@
cli_result(cli, CLIS_PARAM);
return;
}
- LOCK(&vcl_mtx);
+ Lck_Lock(&vcl_mtx);
vcl_active = vcl;
- UNLOCK(&vcl_mtx);
+ Lck_Unlock(&vcl_mtx);
}

/*--------------------------------------------------------------------*/
@@ -350,5 +350,5 @@
{

CLI_AddFuncs(MASTER_CLI, vcl_cmds);
- MTX_INIT(&vcl_mtx);
+ Lck_New(&vcl_mtx);
}

Modified: trunk/varnish-cache/bin/varnishd/hash_classic.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/hash_classic.c 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/hash_classic.c 2008-11-10 19:46:25 UTC (rev 3381)
@@ -58,7 +58,7 @@
unsigned magic;
#define HCL_HEAD_MAGIC 0x0f327016
VTAILQ_HEAD(, hcl_entry) head;
- MTX mtx;
+ struct lock mtx;
};

static unsigned hcl_nhash = 16383;
@@ -110,7 +110,7 @@

for (u = 0; u < hcl_nhash; u++) {
VTAILQ_INIT(&hcl_head[u].head);
- MTX_INIT(&hcl_head[u].mtx);
+ Lck_New(&hcl_head[u].mtx);
hcl_head[u].magic = HCL_HEAD_MAGIC;
}
}
@@ -151,7 +151,7 @@
he2 = NULL;

for (r = 0; r < 2; r++ ) {
- LOCK(&hp->mtx);
+ Lck_Lock(&hp->mtx);
VTAILQ_FOREACH(he, &hp->head, list) {
CHECK_OBJ_NOTNULL(he, HCL_ENTRY_MAGIC);
if (sp->lhashptr < he->oh->hashlen)
@@ -169,7 +169,7 @@
break;
he->refcnt++;
roh = he->oh;
- UNLOCK(&hp->mtx);
+ Lck_Unlock(&hp->mtx);
/*
* If we lose the race, we need to clean up
* the work we did for our second attempt.
@@ -183,7 +183,7 @@
return (roh);
}
if (noh == NULL) {
- UNLOCK(&hp->mtx);
+ Lck_Unlock(&hp->mtx);
return (NULL);
}
if (he2 != NULL) {
@@ -193,10 +193,10 @@
VTAILQ_INSERT_TAIL(&hp->head, he2, list);
he2->refcnt++;
noh = he2->oh;
- UNLOCK(&hp->mtx);
+ Lck_Unlock(&hp->mtx);
return (noh);
}
- UNLOCK(&hp->mtx);
+ Lck_Unlock(&hp->mtx);

he2 = calloc(sizeof *he2, 1);
XXXAN(he2);
@@ -234,12 +234,12 @@
assert(he->refcnt > 0);
assert(he->hash < hcl_nhash);
assert(hp == &hcl_head[he->hash]);
- LOCK(&hp->mtx);
+ Lck_Lock(&hp->mtx);
if (--he->refcnt == 0)
VTAILQ_REMOVE(&hp->head, he, list);
else
he = NULL;
- UNLOCK(&hp->mtx);
+ Lck_Unlock(&hp->mtx);
if (he == NULL)
return (1);
free(he);

Modified: trunk/varnish-cache/bin/varnishd/hash_simple_list.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/hash_simple_list.c 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/hash_simple_list.c 2008-11-10 19:46:25 UTC (rev 3381)
@@ -51,7 +51,7 @@
};

static VTAILQ_HEAD(, hsl_entry) hsl_head = VTAILQ_HEAD_INITIALIZER(hsl_head);
-static MTX hsl_mutex;
+static struct lock hsl_mtx;

/*--------------------------------------------------------------------
* The ->init method is called during process start and allows
@@ -62,7 +62,7 @@
hsl_start(void)
{

- MTX_INIT(&hsl_mutex);
+ Lck_New(&hsl_mtx);
}

/*--------------------------------------------------------------------
@@ -78,7 +78,7 @@
struct hsl_entry *he, *he2;
int i;

- LOCK(&hsl_mutex);
+ Lck_Lock(&hsl_mtx);
VTAILQ_FOREACH(he, &hsl_head, list) {
i = HSH_Compare(sp, he->obj);
if (i < 0)
@@ -87,7 +87,7 @@
break;
he->refcnt++;
nobj = he->obj;
- UNLOCK(&hsl_mutex);
+ Lck_Unlock(&hsl_mtx);
return (nobj);
}
if (nobj != NULL) {
@@ -107,7 +107,7 @@
else
VTAILQ_INSERT_TAIL(&hsl_head, he2, list);
}
- UNLOCK(&hsl_mutex);
+ Lck_Unlock(&hsl_mtx);
return (nobj);
}

@@ -123,14 +123,14 @@

AN(obj->hashpriv);
he = obj->hashpriv;
- LOCK(&hsl_mutex);
+ Lck_Lock(&hsl_mtx);
if (--he->refcnt == 0) {
VTAILQ_REMOVE(&hsl_head, he, list);
free(he);
ret = 0;
} else
ret = 1;
- UNLOCK(&hsl_mutex);
+ Lck_Unlock(&hsl_mtx);
return (ret);
}


Modified: trunk/varnish-cache/bin/varnishd/shmlog.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/shmlog.c 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/shmlog.c 2008-11-10 19:46:25 UTC (rev 3381)
@@ -64,7 +64,7 @@
struct varnish_stats *VSL_stats;
static struct shmloghead *loghead;
static unsigned char *logstart;
-static MTX vsl_mtx;
+static pthread_mutex_t vsl_mtx;


static void
@@ -287,7 +287,7 @@
assert(loghead->hdrsize == sizeof *loghead);
/* XXX more check sanity of loghead ? */
logstart = (unsigned char *)loghead + loghead->start;
- MTX_INIT(&vsl_mtx);
+ AZ(pthread_mutex_init(&vsl_mtx, NULL));
loghead->starttime = TIM_real();
loghead->panicstr[0] = '\0';
memset(VSL_stats, 0, sizeof *VSL_stats);

Modified: trunk/varnish-cache/bin/varnishd/storage_file.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/storage_file.c 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/storage_file.c 2008-11-10 19:46:25 UTC (rev 3381)
@@ -115,7 +115,7 @@
struct smfhead order;
struct smfhead free[NBUCKET];
struct smfhead used;
- MTX mtx;
+ struct lock mtx;
};

/*--------------------------------------------------------------------*/
@@ -609,7 +609,7 @@
/* XXX */
if (sum < MINPAGES * (off_t)getpagesize())
exit (2);
- MTX_INIT(&sc->mtx);
+ Lck_New(&sc->mtx);

VSL_stats->sm_bfree += sc->filesize;
}
@@ -625,18 +625,18 @@
assert(size > 0);
size += (sc->pagesize - 1);
size &= ~(sc->pagesize - 1);
- LOCK(&sc->mtx);
+ Lck_Lock(&sc->mtx);
VSL_stats->sm_nreq++;
smf = alloc_smf(sc, size);
if (smf == NULL) {
- UNLOCK(&sc->mtx);
+ Lck_Unlock(&sc->mtx);
return (NULL);
}
CHECK_OBJ_NOTNULL(smf, SMF_MAGIC);
VSL_stats->sm_nobj++;
VSL_stats->sm_balloc += smf->size;
VSL_stats->sm_bfree -= smf->size;
- UNLOCK(&sc->mtx);
+ Lck_Unlock(&sc->mtx);
CHECK_OBJ_NOTNULL(&smf->s, STORAGE_MAGIC); /*lint !e774 */
XXXAN(smf);
assert(smf->size == size);
@@ -668,12 +668,12 @@
size += (sc->pagesize - 1);
size &= ~(sc->pagesize - 1);
if (smf->size > size) {
- LOCK(&sc->mtx);
+ Lck_Lock(&sc->mtx);
VSL_stats->sm_balloc -= (smf->size - size);
VSL_stats->sm_bfree += (smf->size - size);
trim_smf(smf, size);
assert(smf->size == size);
- UNLOCK(&sc->mtx);
+ Lck_Unlock(&sc->mtx);
smf->s.space = size;
}
}
@@ -690,12 +690,12 @@
CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
CAST_OBJ_NOTNULL(smf, s->priv, SMF_MAGIC);
sc = smf->sc;
- LOCK(&sc->mtx);
+ Lck_Lock(&sc->mtx);
VSL_stats->sm_nobj--;
VSL_stats->sm_balloc -= smf->size;
VSL_stats->sm_bfree += smf->size;
free_smf(smf);
- UNLOCK(&sc->mtx);
+ Lck_Unlock(&sc->mtx);
}

/*--------------------------------------------------------------------*/

Modified: trunk/varnish-cache/bin/varnishd/storage_malloc.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/storage_malloc.c 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/storage_malloc.c 2008-11-10 19:46:25 UTC (rev 3381)
@@ -44,7 +44,7 @@
#include "stevedore.h"

static size_t sma_max = SIZE_MAX;
-static MTX sma_mtx;
+static struct lock sma_mtx;

struct sma {
struct storage s;
@@ -56,7 +56,7 @@
{
struct sma *sma;

- LOCK(&sma_mtx);
+ Lck_Lock(&sma_mtx);
VSL_stats->sma_nreq++;
if (VSL_stats->sma_nbytes + size > sma_max)
size = 0;
@@ -65,7 +65,7 @@
VSL_stats->sma_nbytes += size;
VSL_stats->sma_balloc += size;
}
- UNLOCK(&sma_mtx);
+ Lck_Unlock(&sma_mtx);

if (size == 0)
return (NULL);
@@ -94,11 +94,11 @@
CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
sma = s->priv;
assert(sma->sz == sma->s.space);
- LOCK(&sma_mtx);
+ Lck_Lock(&sma_mtx);
VSL_stats->sma_nobj--;
VSL_stats->sma_nbytes -= sma->sz;
VSL_stats->sma_bfree += sma->sz;
- UNLOCK(&sma_mtx);
+ Lck_Unlock(&sma_mtx);
free(sma->s.ptr);
free(sma);
}
@@ -113,11 +113,11 @@
sma = s->priv;
assert(sma->sz == sma->s.space);
if ((p = realloc(sma->s.ptr, size)) != NULL) {
- LOCK(&sma_mtx);
+ Lck_Lock(&sma_mtx);
VSL_stats->sma_nbytes -= (sma->sz - size);
VSL_stats->sma_bfree += sma->sz - size;
sma->sz = size;
- UNLOCK(&sma_mtx);
+ Lck_Unlock(&sma_mtx);
sma->s.ptr = p;
sma->s.space = size;
}
@@ -150,7 +150,7 @@
sma_open(const struct stevedore *st)
{
(void)st;
- AZ(pthread_mutex_init(&sma_mtx, NULL));
+ Lck_New(&sma_mtx);
}

struct stevedore sma_stevedore = {

Modified: trunk/varnish-cache/bin/varnishd/storage_synth.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/storage_synth.c 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/storage_synth.c 2008-11-10 19:46:25 UTC (rev 3381)
@@ -43,18 +43,18 @@
#include "vsb.h"
#include "stevedore.h"

-static MTX sms_mtx;
+static struct lock sms_mtx;

static void
sms_free(struct storage *sto)
{

CHECK_OBJ_NOTNULL(sto, STORAGE_MAGIC);
- LOCK(&sms_mtx);
+ Lck_Lock(&sms_mtx);
VSL_stats->sms_nobj--;
VSL_stats->sms_nbytes -= sto->len;
VSL_stats->sms_bfree += sto->len;
- UNLOCK(&sms_mtx);
+ Lck_Unlock(&sms_mtx);
vsb_delete(sto->priv);
free(sto);
}
@@ -63,7 +63,7 @@
SMS_Init(void)
{

- AZ(pthread_mutex_init(&sms_mtx, NULL));
+ Lck_New(&sms_mtx);
}

static struct stevedore sms_stevedore = {
@@ -82,10 +82,10 @@
HSH_Freestore(obj);
obj->len = 0;

- LOCK(&sms_mtx);
+ Lck_Lock(&sms_mtx);
VSL_stats->sms_nreq++;
VSL_stats->sms_nobj++;
- UNLOCK(&sms_mtx);
+ Lck_Unlock(&sms_mtx);

sto = calloc(sizeof *sto, 1);
XXXAN(sto);

Modified: trunk/varnish-cache/bin/varnishd/storage_umem.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/storage_umem.c 2008-11-10 12:43:58 UTC (rev 3380)
+++ trunk/varnish-cache/bin/varnishd/storage_umem.c 2008-11-10 19:46:25 UTC (rev 3381)
@@ -61,7 +61,7 @@
{
struct smu *smu;

- LOCK(&smu_mtx);
+ Lck_Lock(&smu_mtx);
VSL_stats->sma_nreq++;
if (VSL_stats->sma_nbytes + size > smu_max)
size = 0;
@@ -70,7 +70,7 @@
VSL_stats->sma_nbytes += size;
VSL_stats->sma_balloc += size;
}
- UNLOCK(&smu_mtx);
+ Lck_Unlock(&smu_mtx);

if (size == 0)
return (NULL);
@@ -99,11 +99,11 @@
CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
smu = s->priv;
assert(smu->sz == smu->s.space);
- LOCK(&smu_mtx);
+ Lck_Lock(&smu_mtx);
VSL_stats->sma_nobj--;
VSL_stats->sma_nbytes -= smu->sz;
VSL_stats->sma_bfree += smu->sz;
- UNLOCK(&smu_mtx);
+ Lck_Unlock(&smu_mtx);
umem_free(smu->s.ptr, smu->s.space);
umem_free(smu, sizeof *smu);
}
@@ -120,11 +120,11 @@
if ((p = umem_alloc(size, UMEM_DEFAULT)) != NULL) {
memcpy(p, smu->s.ptr, size);
umem_free(smu->s.ptr, smu->s.space);
- LOCK(&smu_mtx);
+ Lck_Lock(&smu_mtx);
VSL_stats->sma_nbytes -= (smu->sz - size);
VSL_stats->sma_bfree += smu->sz - size;
smu->sz = size;
- UNLOCK(&smu_mtx);
+ Lck_Unlock(&smu_mtx);
smu->s.ptr = p;
smu->s.space = size;
}