Mailing List Archive

r895 - trunk/varnish-cache/bin/varnishd
Author: phk
Date: 2006-08-22 12:46:16 +0200 (Tue, 22 Aug 2006)
New Revision: 895

Modified:
trunk/varnish-cache/bin/varnishd/shmlog.c
Log:
Optimize shmlog writing:

If we know the record length, only hold the mutex while we reserve
the space. Until we change the first byte, nothing bad can happen.

XXX: a memory barrier is strictly speaking necessary before we assign
the first byte.

If there is no '%' in the format string, treat it as a fixed-length
record for speed.



Modified: trunk/varnish-cache/bin/varnishd/shmlog.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/shmlog.c 2006-08-22 10:40:55 UTC (rev 894)
+++ trunk/varnish-cache/bin/varnishd/shmlog.c 2006-08-22 10:46:16 UTC (rev 895)
@@ -61,6 +61,7 @@
e = b + l;
}

+ /* Only hold the lock while we find our space */
AZ(pthread_mutex_lock(&vsl_mtx));
assert(loghead->ptr < loghead->size);

@@ -68,17 +69,18 @@
if (loghead->ptr + 5 + l + 1 > loghead->size)
vsl_wrap();
p = logstart + loghead->ptr;
+ loghead->ptr += 5 + l;
+ p[5 + l] = SLT_ENDMARKER;
+ assert(loghead->ptr < loghead->size);
+ AZ(pthread_mutex_unlock(&vsl_mtx));
+
p[1] = l & 0xff;
p[2] = (id >> 8) & 0xff;
p[3] = id & 0xff;
memcpy(p + 4, b, l);
p[4 + l] = '\0';
- p[5 + l] = SLT_ENDMARKER;
+ /* XXX: memory barrier */
p[0] = tag;
-
- loghead->ptr += 5 + l;
- assert(loghead->ptr < loghead->size);
- AZ(pthread_mutex_unlock(&vsl_mtx));
}


@@ -91,6 +93,12 @@

va_start(ap, fmt);

+ p = strchr(fmt, '%');
+ if (p == NULL) {
+ VSLR(tag, id, fmt, NULL);
+ return;
+ }
+
AZ(pthread_mutex_lock(&vsl_mtx));
assert(loghead->ptr < loghead->size);