Mailing List Archive

[master] 98b6b9548 Optimize 304 template object copying
commit 98b6b954848b3a0fe1d81c852142d79d95edf395
Author: Nils Goroll <nils.goroll@uplex.de>
Date: Wed Nov 1 10:48:25 2023 +0100

Optimize 304 template object copying

When copying a stale object after a 304 revalidation, we iterated over
it and allocated new storage separately for each of its segments.

So, at best, we kept the fragmentation of the existing object; at
worst, we made it even worse. This is especially relevant when the
existing object was created from a chunked response, in which case the
original segments might have been particularly small and, consequently,
many in number.

Because we wait for the stale object to complete, we know its total
length (OA_LEN) up front and can ask the storage serving the copy for
exactly the right amount. This is much more efficient: fragmentation is
reduced, and the storage engine can make a much better allocation
decision when it knows the full length rather than receiving piecemeal
requests.

We implement the improved allocation scheme through additional state
kept for the duration of the template object copy: struct
vbf_objiter_priv holds a pointer to the newly created busy object, the
total length not yet allocated (initialized to OA_LEN of the existing
object), and a pointer/length pair for the unused portion of the
currently open storage allocation, if any.

diff --git a/bin/varnishd/cache/cache_fetch.c b/bin/varnishd/cache/cache_fetch.c
index e365f6c88..9a6ed4ec6 100644
--- a/bin/varnishd/cache/cache_fetch.c
+++ b/bin/varnishd/cache/cache_fetch.c
@@ -760,28 +760,56 @@ vbf_stp_fetchend(struct worker *wrk, struct busyobj *bo)
/*--------------------------------------------------------------------
*/

+struct vbf_objiter_priv {
+	unsigned magic;
+#define VBF_OBITER_PRIV_MAGIC 0x3c272a17
+	struct busyobj *bo;
+	// not yet allocated
+	ssize_t l;
+	// current allocation
+	uint8_t *p;
+	ssize_t pl;
+};
+
static int v_matchproto_(objiterate_f)
vbf_objiterate(void *priv, unsigned flush, const void *ptr, ssize_t len)
{
-	struct busyobj *bo;
+	struct vbf_objiter_priv *vop;
	ssize_t l;
	const uint8_t *ps = ptr;
-	uint8_t *pd;

-	CAST_OBJ_NOTNULL(bo, priv, BUSYOBJ_MAGIC);
+	CAST_OBJ_NOTNULL(vop, priv, VBF_OBITER_PRIV_MAGIC);
+	CHECK_OBJ_NOTNULL(vop->bo, BUSYOBJ_MAGIC);

	flush &= OBJ_ITER_END;

	while (len > 0) {
-		l = len;
-		if (VFP_GetStorage(bo->vfc, &l, &pd) != VFP_OK)
-			return (1);
-		l = vmin(l, len);
-		memcpy(pd, ps, l);
-		VFP_Extend(bo->vfc, l, flush && l == len ? VFP_END : VFP_OK);
+		if (vop->pl == 0) {
+			vop->p = NULL;
+			AN(vop->l);
+			vop->pl = vop->l;
+			if (VFP_GetStorage(vop->bo->vfc, &vop->pl, &vop->p)
+			    != VFP_OK)
+				return (1);
+			if (vop->pl < vop->l)
+				vop->l -= vop->pl;
+			else
+				vop->l = 0;
+		}
+		AN(vop->pl);
+		AN(vop->p);
+
+		l = vmin(vop->pl, len);
+		memcpy(vop->p, ps, l);
+		VFP_Extend(vop->bo->vfc, l,
+		    flush && l == len ? VFP_END : VFP_OK);
		ps += l;
+		vop->p += l;
		len -= l;
+		vop->pl -= l;
	}
+	if (flush)
+		AZ(vop->l);
	return (0);
}

@@ -791,6 +819,7 @@ vbf_stp_condfetch(struct worker *wrk, struct busyobj *bo)
	struct boc *stale_boc;
	enum boc_state_e stale_state;
	struct objcore *oc, *stale_oc;
+	struct vbf_objiter_priv vop[1];

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
@@ -850,7 +879,10 @@ vbf_stp_condfetch(struct worker *wrk, struct busyobj *bo)
		ObjSetState(wrk, oc, BOS_STREAM);
	}

-	if (ObjIterate(wrk, stale_oc, bo, vbf_objiterate, 0))
+	INIT_OBJ(vop, VBF_OBITER_PRIV_MAGIC);
+	vop->bo = bo;
+	vop->l = ObjGetLen(bo->wrk, stale_oc);
+	if (ObjIterate(wrk, stale_oc, vop, vbf_objiterate, 0))
		(void)VFP_Error(bo->vfc, "Template object failed");

	if (bo->vfc->failed) {
_______________________________________________
varnish-commit mailing list
varnish-commit@varnish-cache.org
https://www.varnish-cache.org/lists/mailman/listinfo/varnish-commit