summaryrefslogtreecommitdiff
path: root/vere/pkg/past
diff options
context:
space:
mode:
Diffstat (limited to 'vere/pkg/past')
-rw-r--r--vere/pkg/past/build.zig88
-rw-r--r--vere/pkg/past/build.zig.zon23
-rw-r--r--vere/pkg/past/migrate.h13
-rw-r--r--vere/pkg/past/migrate_v2.c368
-rw-r--r--vere/pkg/past/migrate_v3.c100
-rw-r--r--vere/pkg/past/migrate_v4.c29
-rw-r--r--vere/pkg/past/migrate_v5.c167
-rw-r--r--vere/pkg/past/v1.c636
-rw-r--r--vere/pkg/past/v1.h42
-rw-r--r--vere/pkg/past/v2.c116
-rw-r--r--vere/pkg/past/v2.h216
-rw-r--r--vere/pkg/past/v3.c22
-rw-r--r--vere/pkg/past/v3.h92
-rw-r--r--vere/pkg/past/v4.c826
-rw-r--r--vere/pkg/past/v4.h242
-rw-r--r--vere/pkg/past/v5.h60
16 files changed, 3040 insertions, 0 deletions
diff --git a/vere/pkg/past/build.zig b/vere/pkg/past/build.zig
new file mode 100644
index 0000000..6f059af
--- /dev/null
+++ b/vere/pkg/past/build.zig
@@ -0,0 +1,88 @@
+const std = @import("std");
+
+pub fn build(b: *std.Build) !void {
+ const target = b.standardTargetOptions(.{});
+ const optimize = b.standardOptimizeOption(.{});
+
+ const copts: []const []const u8 =
+ b.option([]const []const u8, "copt", "") orelse &.{};
+
+ const pkg_past = b.addStaticLibrary(.{
+ .name = "past",
+ .target = target,
+ .optimize = optimize,
+ });
+
+ if (target.result.os.tag.isDarwin() and !target.query.isNative()) {
+ const macos_sdk = b.lazyDependency("macos_sdk", .{
+ .target = target,
+ .optimize = optimize,
+ });
+ if (macos_sdk != null) {
+ pkg_past.addSystemIncludePath(macos_sdk.?.path("usr/include"));
+ pkg_past.addLibraryPath(macos_sdk.?.path("usr/lib"));
+ pkg_past.addFrameworkPath(macos_sdk.?.path("System/Library/Frameworks"));
+ }
+ }
+
+ const pkg_c3 = b.dependency("pkg_c3", .{
+ .target = target,
+ .optimize = optimize,
+ .copt = copts,
+ });
+
+ const pkg_noun = b.dependency("pkg_noun", .{
+ .target = target,
+ .optimize = optimize,
+ .copt = copts,
+ });
+
+ const gmp = b.dependency("gmp", .{
+ .target = target,
+ .optimize = optimize,
+ });
+
+ pkg_past.linkLibC();
+
+ pkg_past.linkLibrary(pkg_c3.artifact("c3"));
+ pkg_past.linkLibrary(pkg_noun.artifact("noun"));
+ pkg_past.linkLibrary(gmp.artifact("gmp"));
+
+ var flags = std.ArrayList([]const u8).init(b.allocator);
+ defer flags.deinit();
+ try flags.appendSlice(&.{
+ // "-pedantic",
+ "-std=gnu23",
+ });
+ try flags.appendSlice(copts);
+
+ pkg_past.addCSourceFiles(.{
+ .root = b.path(""),
+ .files = &c_source_files,
+ .flags = flags.items,
+ });
+
+ for (install_headers) |h| pkg_past.installHeader(b.path(h), h);
+
+ b.installArtifact(pkg_past);
+}
+
+const c_source_files = [_][]const u8{
+ "v1.c",
+ "v2.c",
+ "v3.c",
+ "v4.c",
+ "migrate_v2.c",
+ "migrate_v3.c",
+ "migrate_v4.c",
+ "migrate_v5.c",
+};
+
+const install_headers = [_][]const u8{
+ "v1.h",
+ "v2.h",
+ "v3.h",
+ "v4.h",
+ "v5.h",
+ "migrate.h",
+};
diff --git a/vere/pkg/past/build.zig.zon b/vere/pkg/past/build.zig.zon
new file mode 100644
index 0000000..f69189b
--- /dev/null
+++ b/vere/pkg/past/build.zig.zon
@@ -0,0 +1,23 @@
+.{
+ .name = "past",
+ .version = "0.0.1",
+ .dependencies = .{
+ .macos_sdk = .{
+ .url = "https://github.com/joseluisq/macosx-sdks/releases/download/14.5/MacOSX14.5.sdk.tar.xz",
+ .hash = "N-V-__8AAKtK4FMzqcFsY_ZrpMg9bGH0h7BqZDXtVyAerMtM",
+ .lazy = true,
+ },
+ .pkg_c3 = .{
+ .path = "../c3",
+ },
+ .pkg_noun = .{
+ .path = "../noun",
+ },
+ .gmp = .{
+ .path = "../../ext/gmp",
+ },
+ },
+ .paths = .{
+ "",
+ },
+}
diff --git a/vere/pkg/past/migrate.h b/vere/pkg/past/migrate.h
new file mode 100644
index 0000000..14f6022
--- /dev/null
+++ b/vere/pkg/past/migrate.h
@@ -0,0 +1,13 @@
+#ifndef U3_MIGRATE_H
+#define U3_MIGRATE_H
+
+void
+u3_migrate_v2(c3_d eve_d);
+void
+u3_migrate_v3(c3_d eve_d);
+void
+u3_migrate_v4(c3_d eve_d);
+void
+u3_migrate_v5(c3_d eve_d);
+
+#endif /* U3_MIGRATE_H */
diff --git a/vere/pkg/past/migrate_v2.c b/vere/pkg/past/migrate_v2.c
new file mode 100644
index 0000000..2aed4d9
--- /dev/null
+++ b/vere/pkg/past/migrate_v2.c
@@ -0,0 +1,368 @@
+#include "v1.h"
+#include "v2.h"
+#include "options.h"
+
+static void
+_migv2h_rewrite(u3p(u3h_root) har_p);
+
+
+/*** allocate.c
+***/
+
+static u3_noun
+_migv2_rewritten_noun(u3_noun som)
+{
+ if ( c3y == u3a_v2_is_cat(som) ) {
+ return som;
+ }
+ u3_post som_p = u3a_v2_rewritten(u3a_v1_to_off(som));
+
+ if ( c3y == u3a_v2_is_pug(som) ) {
+ som_p = u3a_v2_to_pug(som_p);
+ }
+ else {
+ som_p = u3a_v2_to_pom(som_p);
+ }
+
+ return som_p;
+}
+
+static void
+_migv2_rewrite_noun(u3_noun som)
+{
+ if ( c3n == u3a_v2_is_cell(som) ) {
+ return;
+ }
+
+ if ( c3n == u3a_v2_rewrite_ptr(u3a_v1_to_ptr((som))) ) return;
+
+ u3a_v2_cell* cel = (u3a_v2_cell*) u3a_v1_to_ptr(som);
+
+ _migv2_rewrite_noun(cel->hed);
+ _migv2_rewrite_noun(cel->tel);
+
+ cel->hed = _migv2_rewritten_noun(cel->hed);
+ cel->tel = _migv2_rewritten_noun(cel->tel);
+}
+
+/* _migv2a_rewrite_compact(): rewrite pointers in ad-hoc persistent road structures.
+*/
+void
+_migv2a_rewrite_compact(void)
+{
+ _migv2_rewrite_noun(u3R_v2->ski.gul);
+ _migv2_rewrite_noun(u3R_v2->bug.tax);
+ _migv2_rewrite_noun(u3R_v2->bug.mer);
+ _migv2_rewrite_noun(u3R_v2->pro.don);
+ _migv2_rewrite_noun(u3R_v2->pro.day);
+ _migv2_rewrite_noun(u3R_v2->pro.trace);
+ _migv2h_rewrite(u3R_v2->cax.har_p);
+
+ u3R_v2->ski.gul = _migv2_rewritten_noun(u3R_v2->ski.gul);
+ u3R_v2->bug.tax = _migv2_rewritten_noun(u3R_v2->bug.tax);
+ u3R_v2->bug.mer = _migv2_rewritten_noun(u3R_v2->bug.mer);
+ u3R_v2->pro.don = _migv2_rewritten_noun(u3R_v2->pro.don);
+ u3R_v2->pro.day = _migv2_rewritten_noun(u3R_v2->pro.day);
+ u3R_v2->pro.trace = _migv2_rewritten_noun(u3R_v2->pro.trace);
+ u3R_v2->cax.har_p = u3a_v2_rewritten(u3R_v2->cax.har_p);
+}
+
+
+/*** hashtable.c
+***/
+
+/* _migv2h_rewrite_buck(): rewrite buck for compaction.
+*/
+void
+_migv2h_rewrite_buck(u3h_v2_buck* hab_u)
+{
+ if ( c3n == u3a_v2_rewrite_ptr(hab_u) ) return;
+ c3_w i_w;
+
+ for ( i_w = 0; i_w < hab_u->len_w; i_w++ ) {
+ u3_noun som = u3h_v2_slot_to_noun(hab_u->sot_w[i_w]);
+ hab_u->sot_w[i_w] = u3h_v2_noun_to_slot(_migv2_rewritten_noun(som));
+ _migv2_rewrite_noun(som);
+ }
+}
+
+/* _migv2h_rewrite_node(): rewrite node for compaction.
+*/
+void
+_migv2h_rewrite_node(u3h_v2_node* han_u, c3_w lef_w)
+{
+ if ( c3n == u3a_v2_rewrite_ptr(han_u) ) return;
+
+ c3_w len_w = c3_pc_w(han_u->map_w);
+ c3_w i_w;
+
+ lef_w -= 5;
+
+ for ( i_w = 0; i_w < len_w; i_w++ ) {
+ c3_w sot_w = han_u->sot_w[i_w];
+
+ if ( _(u3h_v2_slot_is_noun(sot_w)) ) {
+ u3_noun kev = u3h_v2_slot_to_noun(sot_w);
+ han_u->sot_w[i_w] = u3h_v2_noun_to_slot(_migv2_rewritten_noun(kev));
+
+ _migv2_rewrite_noun(kev);
+ }
+ else {
+ void* hav_v = u3h_v1_slot_to_node(sot_w);
+ u3h_v2_node* nod_u = u3v2to(u3h_v2_node, u3a_v2_rewritten(u3v2of(u3h_v2_node,hav_v)));
+
+ han_u->sot_w[i_w] = u3h_v2_node_to_slot(nod_u);
+
+ if ( 0 == lef_w ) {
+ _migv2h_rewrite_buck(hav_v);
+ } else {
+ _migv2h_rewrite_node(hav_v, lef_w);
+ }
+ }
+ }
+}
+
+/* _migv2h_rewrite(): rewrite pointers during compaction.
+*/
+void
+_migv2h_rewrite(u3p(u3h_v2_root) har_p)
+{
+ u3h_v2_root* har_u = u3v2to(u3h_v2_root, har_p);
+ c3_w i_w;
+
+ if ( c3n == u3a_v2_rewrite_ptr(har_u) ) return;
+
+ for ( i_w = 0; i_w < 64; i_w++ ) {
+ c3_w sot_w = har_u->sot_w[i_w];
+
+ if ( _(u3h_v2_slot_is_noun(sot_w)) ) {
+ u3_noun kev = u3h_v2_slot_to_noun(sot_w);
+ har_u->sot_w[i_w] = u3h_v2_noun_to_slot(_migv2_rewritten_noun(kev));
+
+ _migv2_rewrite_noun(kev);
+ }
+ else if ( _(u3h_v2_slot_is_node(sot_w)) ) {
+ u3h_v2_node* han_u = (u3h_v2_node*) u3h_v1_slot_to_node(sot_w);
+ u3h_v2_node* nod_u = u3v2to(u3h_v2_node, u3a_v2_rewritten(u3v2of(u3h_v2_node,han_u)));
+
+ har_u->sot_w[i_w] = u3h_v2_node_to_slot(nod_u);
+
+ _migv2h_rewrite_node(han_u, 25);
+ }
+ }
+}
+
+
+/* _migv2j_rewrite_compact(): rewrite jet state for compaction.
+ *
+ * NB: u3R_v2->jed.han_p *must* be cleared (currently via u3j_v2_reclaim above)
+ * since it contains hanks which are not nouns but have loom pointers.
+ * Alternately, rewrite the entries with u3h_v2_walk, using u3j_v2_mark as a
+ * template for how to walk. There's an untested attempt at this in git
+ * history at e8a307a.
+*/
+void
+_migv2j_rewrite_compact(void)
+{
+ _migv2h_rewrite(u3R_v2->jed.war_p);
+ _migv2h_rewrite(u3R_v2->jed.cod_p);
+ _migv2h_rewrite(u3R_v2->jed.han_p);
+ _migv2h_rewrite(u3R_v2->jed.bas_p);
+
+ _migv2h_rewrite(u3R_v2->jed.hot_p);
+ u3R_v2->jed.hot_p = u3a_v2_rewritten(u3R_v2->jed.hot_p);
+
+ u3R_v2->jed.war_p = u3a_v2_rewritten(u3R_v2->jed.war_p);
+ u3R_v2->jed.cod_p = u3a_v2_rewritten(u3R_v2->jed.cod_p);
+ u3R_v2->jed.han_p = u3a_v2_rewritten(u3R_v2->jed.han_p);
+ u3R_v2->jed.bas_p = u3a_v2_rewritten(u3R_v2->jed.bas_p);
+}
+
+/* _migv2n_rewrite_compact(): rewrite the bytecode cache for compaction.
+ *
+ * NB: u3R_v2->byc.har_p *must* be cleared (currently via u3n_v2_reclaim above),
+ * since it contains things that look like nouns but aren't.
+ * Specifically, it contains "cells" where the tail is a
+ * pointer to a u3a_v2_malloc'ed block that contains loom pointers.
+ *
+ * You should be able to walk this with u3h_v2_walk and rewrite the
+ * pointers, but you need to be careful to handle that u3a_v2_malloc
+ * pointers can't be turned into a box by stepping back two words. You
+ * must step back one word to get the padding, then step back that
+ * many more words (plus one?).
+ */
+void
+_migv2n_rewrite_compact(void)
+{
+ _migv2h_rewrite(u3R_v2->byc.har_p);
+ u3R_v2->byc.har_p = u3a_v2_rewritten(u3R_v2->byc.har_p);
+}
+
+/* _migv2v_rewrite_compact(): rewrite arvo kernel for compaction.
+*/
+void
+_migv2v_rewrite_compact(void)
+{
+ u3v_v2_arvo* arv_u = &(u3H_v2->arv_u);
+
+ _migv2_rewrite_noun(arv_u->roc);
+ _migv2_rewrite_noun(arv_u->now);
+ _migv2_rewrite_noun(arv_u->yot);
+
+ arv_u->roc = _migv2_rewritten_noun(arv_u->roc);
+ arv_u->now = _migv2_rewritten_noun(arv_u->now);
+ arv_u->yot = _migv2_rewritten_noun(arv_u->yot);
+}
+
+/* _cm_pack_rewrite(): trace through arena, rewriting pointers.
+*/
+static void
+_cm_pack_rewrite(void)
+{
+ _migv2v_rewrite_compact();
+ _migv2j_rewrite_compact();
+ _migv2n_rewrite_compact();
+ _migv2a_rewrite_compact();
+}
+
+static void
+_migrate_reclaim(void)
+{
+ // XX update this and similar printfs
+ fprintf(stderr, "loom: migration reclaim\r\n");
+ u3m_v1_reclaim();
+}
+
+static void
+_migrate_seek(const u3a_v2_road *rod_u)
+{
+ /*
+ very much like u3a_v2_pack_seek with the following changes:
+ - there is no need to account for free space as |pack is performed before
+ the migration
+ - odd sized boxes will be padded by one word to achieve an even size
+ - rut will be moved from one word ahead of u3_Loom to two words ahead
+ */
+ c3_w * box_w = u3a_v2_into(rod_u->rut_p);
+ c3_w * end_w = u3a_v2_into(rod_u->hat_p);
+ u3_post new_p = (rod_u->rut_p + 1 + c3_wiseof(u3a_v2_box));
+ u3a_v2_box * box_u = (void *)box_w;
+
+ fprintf(stderr, "loom: migration seek\r\n");
+
+ for (; box_w < end_w
+ ; box_w += box_u->siz_w
+ , box_u = (void*)box_w)
+ {
+ if (!box_u->use_w)
+ continue;
+ u3_assert(box_u->siz_w);
+ u3_assert(box_u->use_w);
+ box_w[box_u->siz_w - 1] = new_p;
+ new_p = c3_align(new_p + box_u->siz_w, 2, C3_ALGHI);
+ }
+}
+
+static void
+_migrate_rewrite(void)
+{
+ fprintf(stderr, "loom: migration rewrite\r\n");
+
+ _cm_pack_rewrite();
+}
+
+static void
+_migrate_move(u3a_v2_road *rod_u)
+{
+ fprintf(stderr, "loom: migration move\r\n");
+
+ c3_z hiz_z = u3a_v2_heap(rod_u) * sizeof(c3_w);
+
+ /* calculate required shift distance to prevent write head overlapping read head */
+ c3_w off_w = 1; /* at least 1 word because u3R_v1->rut_p migrates from 1 to 2 */
+ for (u3a_v2_box *box_u = u3a_v2_into(rod_u->rut_p)
+ ; (void *)box_u < u3a_v2_into(rod_u->hat_p)
+ ; box_u = (void *)((c3_w *)box_u + box_u->siz_w))
+ off_w += box_u->siz_w & 1; /* odd-sized boxes are padded by one word */
+
+ /* shift */
+ memmove(u3a_v2_into(u3H_v2->rod_u.rut_p + off_w),
+ u3a_v2_into(u3H_v2->rod_u.rut_p),
+ hiz_z);
+ /* manually zero the former rut */
+ *(c3_w *)u3a_v2_into(rod_u->rut_p) = 0;
+
+ /* relocate boxes to DWORD-aligned addresses stored in trailing size word */
+ c3_w *box_w = u3a_v2_into(rod_u->rut_p + off_w);
+ c3_w *end_w = u3a_v2_into(rod_u->hat_p + off_w);
+ u3a_v2_box *old_u = (void *)box_w;
+ c3_w siz_w = old_u->siz_w;
+ u3p(c3_w) new_p = rod_u->rut_p + 1 + c3_wiseof(u3a_v2_box);
+ c3_w *new_w;
+
+ for (; box_w < end_w
+ ; box_w += siz_w
+ , old_u = (void *)box_w
+ , siz_w = old_u->siz_w) {
+ old_u->use_w &= 0x7fffffff;
+
+ if (!old_u->use_w)
+ continue;
+
+ new_w = (void *)u3a_v2_botox(u3a_v2_into(new_p));
+ u3_assert(box_w[siz_w - 1] == new_p);
+ u3_assert(new_w <= box_w);
+
+ c3_w i_w;
+ for (i_w = 0; i_w < siz_w - 1; i_w++)
+ new_w[i_w] = box_w[i_w];
+
+ if (siz_w & 1) {
+ new_w[i_w++] = 0; /* pad odd sized boxes */
+ new_w[i_w++] = siz_w + 1; /* restore trailing size word */
+ new_w[0] = siz_w + 1; /* and the leading size word */
+ }
+ else {
+ new_w[i_w++] = siz_w;
+ }
+
+ new_p += i_w;
+ }
+
+ /* restore proper heap state */
+ rod_u->rut_p = 2;
+ rod_u->hat_p = new_p - c3_wiseof(u3a_v2_box);
+
+ /* like |pack, clear the free lists and cell allocator */
+ for (c3_w i_w = 0; i_w < u3a_v2_fbox_no; i_w++)
+ u3R_v1->all.fre_p[i_w] = 0;
+
+ u3R_v1->all.fre_w = 0;
+ u3R_v1->all.cel_p = 0;
+}
+
+void
+u3_migrate_v2(c3_d eve_d)
+{
+ u3_v1_load(u3C.wor_i);
+
+ if ( eve_d != u3H_v1->arv_u.eve_d ) {
+ fprintf(stderr, "loom: migrate (v2) stale snapshot: have %"
+ PRIu64 ", need %" PRIu64 "\r\n",
+ u3H_v1->arv_u.eve_d, eve_d);
+ abort();
+ }
+
+ fprintf(stderr, "loom: pointer compression migration running...\r\n");
+
+ /* perform the migration in a pattern similar to |pack */
+ _migrate_reclaim();
+ _migrate_seek(&u3H_v1->rod_u);
+ _migrate_rewrite();
+ _migrate_move(&u3H_v1->rod_u);
+
+ /* finally update the version and commit to disk */
+ u3H_v1->ver_w = U3V_VER2;
+
+ fprintf(stderr, "loom: pointer compression migration done\r\n");
+}
diff --git a/vere/pkg/past/migrate_v3.c b/vere/pkg/past/migrate_v3.c
new file mode 100644
index 0000000..9c79413
--- /dev/null
+++ b/vere/pkg/past/migrate_v3.c
@@ -0,0 +1,100 @@
+#include "v2.h"
+#include "v3.h"
+#include "options.h"
+
+/* u3_migrate_v3: migrate the loom from v2 to v3 (aborts on stale snapshot).
+*/
+void
+u3_migrate_v3(c3_d eve_d)
+{
+ u3_v2_load(u3C.wor_i);
+
+ if ( eve_d != u3H_v2->arv_u.eve_d ) {
+ fprintf(stderr, "loom: migrate (v3) stale snapshot: have %"
+ PRIu64 ", need %" PRIu64 "\r\n",
+ u3H_v2->arv_u.eve_d, eve_d);
+ abort();
+ }
+
+ fprintf(stderr, "loom: memoization migration running...\r\n");
+
+ // set globals (required for aliased functions)
+ u3R_v3 = (u3a_v3_road*)u3R_v2;
+ u3H_v3 = (u3v_v3_home*)u3H_v2;
+ u3a_v2_ream();
+
+ // free bytecode caches in old road
+ u3j_v2_reclaim();
+ u3n_v2_reclaim();
+
+ // old road
+ u3v_v2_home* hum_u = u3H_v2;
+ u3a_v2_road* rud_u = &hum_u->rod_u;
+
+ // new home, new road
+ u3v_v3_home hom_u = {0};
+ u3a_v3_road rod_u = {0};
+
+ // copy members, one-by-one, from old road to new road
+ rod_u.par_p = rud_u->par_p;
+ rod_u.kid_p = rud_u->kid_p;
+ rod_u.nex_p = rud_u->nex_p;
+
+ rod_u.cap_p = rud_u->cap_p;
+ rod_u.hat_p = rud_u->hat_p;
+ rod_u.mat_p = rud_u->mat_p;
+ rod_u.rut_p = rud_u->rut_p;
+ rod_u.ear_p = rud_u->ear_p;
+
+ // no need to zero-out fut_w
+ // no need to do anything with esc
+
+ rod_u.how.fag_w = rud_u->how.fag_w;
+
+ memcpy(rod_u.all.fre_p, rud_u->all.fre_p, sizeof(rud_u->all.fre_p));
+ rod_u.all.cel_p = rud_u->all.cel_p;
+ rod_u.all.fre_w = rud_u->all.fre_w;
+ rod_u.all.max_w = rud_u->all.max_w;
+
+ rod_u.jed.hot_p = rud_u->jed.hot_p;
+ rod_u.jed.war_p = rud_u->jed.war_p;
+ rod_u.jed.cod_p = rud_u->jed.cod_p;
+ rod_u.jed.han_p = rud_u->jed.han_p;
+ rod_u.jed.bas_p = rud_u->jed.bas_p;
+
+ rod_u.byc.har_p = rud_u->byc.har_p;
+
+ rod_u.ski.gul = rud_u->ski.gul;
+
+ rod_u.bug.tax = rud_u->bug.tax;
+ rod_u.bug.mer = rud_u->bug.mer;
+
+ rod_u.pro.nox_d = rud_u->pro.nox_d;
+ rod_u.pro.cel_d = rud_u->pro.cel_d;
+ rod_u.pro.don = rud_u->pro.don;
+ rod_u.pro.trace = rud_u->pro.trace;
+ rod_u.pro.day = rud_u->pro.day;
+
+ rod_u.cax.har_p = rud_u->cax.har_p;
+
+ // prepare the new home, update the version
+ hom_u.arv_u = hum_u->arv_u;
+ hom_u.rod_u = rod_u;
+
+ // place the new home over the old one
+ c3_w *mem_w = u3_Loom_v3 + u3a_v3_walign;
+ c3_w len_w = u3C.wor_i - u3a_v3_walign;
+ c3_w siz_w = c3_wiseof(u3v_v3_home);
+ c3_w *mat_w = c3_align(mem_w + len_w - siz_w, u3a_v3_balign, C3_ALGLO);
+ memcpy(mat_w, &hom_u, sizeof(u3v_v3_home));
+
+ // set globals
+ u3H_v3 = (void*)mat_w;
+ u3R_v3 = &u3H_v3->rod_u;
+ u3H_v3->ver_w = U3V_VER3;
+
+ // initialize persistent cache
+ u3R_v3->cax.per_p = u3h_v3_new_cache(u3C.per_w);
+
+ fprintf(stderr, "loom: memoization migration done\r\n");
+}
diff --git a/vere/pkg/past/migrate_v4.c b/vere/pkg/past/migrate_v4.c
new file mode 100644
index 0000000..3ca4176
--- /dev/null
+++ b/vere/pkg/past/migrate_v4.c
@@ -0,0 +1,29 @@
+#include "v3.h"
+#include "v4.h"
+#include "options.h"
+
+# define u3m_v3_reclaim u3m_v4_reclaim
+
+/* u3_migrate_v4: migrate the loom from v3 to v4 (aborts on stale snapshot).
+*/
+void
+u3_migrate_v4(c3_d eve_d)
+{
+ u3_v3_load(u3C.wor_i);
+
+ if ( eve_d != u3H_v3->arv_u.eve_d ) {
+ fprintf(stderr, "loom: migrate (v4) stale snapshot: have %"
+ PRIu64 ", need %" PRIu64 "\r\n",
+ u3H_v3->arv_u.eve_d, eve_d);
+ abort();
+ }
+
+ fprintf(stderr, "loom: bytecode alignment migration running...\r\n");
+
+ u3m_v3_reclaim();
+
+ u3H_v4 = u3H_v3;
+ u3H_v4->ver_w = U3V_VER4;
+
+ fprintf(stderr, "loom: bytecode alignment migration done\r\n");
+}
diff --git a/vere/pkg/past/migrate_v5.c b/vere/pkg/past/migrate_v5.c
new file mode 100644
index 0000000..8ceb5db
--- /dev/null
+++ b/vere/pkg/past/migrate_v5.c
@@ -0,0 +1,167 @@
+#include "v4.h"
+#include "options.h"
+
+static c3_d
+_v4_hash(u3_noun foo)
+{
+ return foo * 11400714819323198485ULL;
+}
+
+static c3_i
+_v4_cmp(u3_noun a, u3_noun b)
+{
+ return a == b;
+}
+
+#define NAME _v4_to_v5
+#define KEY_TY u3_noun
+#define VAL_TY u3_noun
+#define HASH_FN _v4_hash
+#define CMPR_FN _v4_cmp
+#include "verstable.h"
+
+typedef struct {
+ u3_weak hed;
+ u3_v4_noun cel;
+} _copy_frame;
+
+typedef struct {
+ _v4_to_v5 map_u;
+ c3_w len_w;
+ c3_w siz_w;
+ _copy_frame *tac;
+ u3_post ham_p;
+} _copy_ctx;
+
+static u3_atom
+_copy_atom(u3_atom old)
+{
+ u3a_v4_atom *old_u = u3a_v4_to_ptr(old);
+ c3_w *nov_w = u3a_v5_walloc(old_u->len_w + c3_wiseof(u3a_v5_atom));
+ u3a_v5_atom *vat_u = (void *)nov_w;
+
+ vat_u->use_w = 1;
+ vat_u->mug_w = old_u->mug_w;
+ vat_u->len_w = old_u->len_w;
+
+ memcpy(vat_u->buf_w, old_u->buf_w, old_u->len_w << 2);
+
+ return u3a_v5_to_pug(u3a_v5_outa(nov_w));
+}
+
+static u3_noun
+_copy_v4_next(_copy_ctx *cop_u, u3_noun old)
+{
+ _v4_to_v5_itr vit_u;
+ _copy_frame *top_u;
+
+ while ( 1 ) {
+ if ( c3y == u3a_v4_is_cat(old) ) return old;
+
+ vit_u = vt_get(&(cop_u->map_u), old);
+
+ if ( !vt_is_end(vit_u) ) return u3a_v5_gain(vit_u.data->val);
+
+ if ( c3n == u3a_v4_is_cell(old) ) {
+ u3_atom new = _copy_atom(old);
+ vit_u = vt_insert( &(cop_u->map_u), old, new );
+ u3_assert( !vt_is_end(vit_u) );
+ return new;
+ }
+
+ if ( cop_u->len_w == cop_u->siz_w ) {
+ cop_u->siz_w += c3_min(cop_u->siz_w, 1024);
+ cop_u->tac = c3_realloc(cop_u->tac, sizeof(*cop_u->tac) * cop_u->siz_w);
+ }
+
+ top_u = &(cop_u->tac[cop_u->len_w++]);
+ top_u->hed = u3_v4_none;
+ top_u->cel = old;
+ old = u3a_v4_head(old);
+ continue;
+ }
+}
+
+static u3_noun
+_copy_v4_noun(_copy_ctx *cop_u, u3_noun old)
+{
+ _v4_to_v5_itr vit_u;
+ _copy_frame *top_u;
+ u3a_v4_cell *cel_u;
+ u3_noun new;
+
+ cop_u->len_w = 0;
+
+ new = _copy_v4_next(cop_u, old);
+
+ while ( cop_u->len_w ) {
+ top_u = &(cop_u->tac[cop_u->len_w - 1]);
+
+ if ( u3_none == top_u->hed ) {
+ top_u->hed = new;
+ new = _copy_v4_next(cop_u, u3a_v4_tail(top_u->cel));
+ }
+ else {
+ new = u3i_v5_cell(top_u->hed, new);
+ vit_u = vt_insert( &(cop_u->map_u), top_u->cel, new );
+ u3_assert( !vt_is_end(vit_u) );
+ cop_u->len_w--;
+ }
+ }
+
+ return new;
+}
+
+static void
+_copy_v4_hamt(u3_noun kev, void* ptr_v)
+{
+ _copy_ctx *cop_u = ptr_v;
+ u3_noun key = _copy_v4_noun(cop_u, u3a_v4_head(kev));
+ u3_noun val = _copy_v4_noun(cop_u, u3a_v4_tail(kev));
+ u3h_v5_put(cop_u->ham_p, key, val);
+ u3a_v5_lose(key);
+}
+
+void
+u3_migrate_v5(c3_d eve_d)
+{
+ _copy_ctx cop_u = {0};
+
+ // XX assumes u3m_init() and u3m_pave(c3y) have already been called
+
+ u3_v4_load(u3C.wor_i);
+
+ if ( eve_d != u3A_v4->eve_d ) {
+ fprintf(stderr, "loom: migrate (v5) stale snapshot: have %"
+ PRIu64 ", need %" PRIu64 "\r\n",
+ u3A_v4->eve_d, eve_d);
+ abort();
+ }
+
+ fprintf(stderr, "loom: allocator migration running...\r\n");
+
+ cop_u.siz_w = 32;
+ cop_u.tac = c3_malloc(sizeof(*cop_u.tac) * cop_u.siz_w);
+ vt_init(&(cop_u.map_u));
+
+ // XX install cel_p temporarily?
+
+ u3A_v5->eve_d = u3A_v4->eve_d;
+ u3A_v5->roc = _copy_v4_noun(&cop_u, u3A_v4->roc);
+
+ cop_u.ham_p = u3R_v5->jed.cod_p;
+ u3h_v4_walk_with(u3R_v4->jed.cod_p, _copy_v4_hamt, &cop_u);
+ cop_u.ham_p = u3R_v5->cax.per_p;
+ u3h_v4_walk_with(u3R_v4->cax.per_p, _copy_v4_hamt, &cop_u);
+
+ // NB: pave does *not* allocate hot_p
+ //
+ u3j_v5_boot(c3y);
+ u3j_v5_ream();
+
+ vt_cleanup(&cop_u.map_u);
+
+ c3_free(cop_u.tac);
+
+ fprintf(stderr, "loom: allocator migration done\r\n");
+}
diff --git a/vere/pkg/past/v1.c b/vere/pkg/past/v1.c
new file mode 100644
index 0000000..e5349ad
--- /dev/null
+++ b/vere/pkg/past/v1.c
@@ -0,0 +1,636 @@
+#include "v1.h"
+
+ /*** allocate.h
+ ***/
+
+# define u3a_v1_botox u3a_v2_botox
+# define u3a_v1_box u3a_v2_box
+# define u3a_v1_cell u3a_v2_cell
+# define u3a_v1_fbox u3a_v2_fbox
+# define u3a_v1_fbox_no u3a_v2_fbox_no
+# define u3a_v1_into u3a_v2_into
+# define u3a_v1_is_cat u3a_v2_is_cat
+# define u3a_v1_is_north u3a_v2_is_north
+# define u3a_v1_is_pom u3a_v2_is_pom
+# define u3a_v1_minimum u3a_v2_minimum
+# define u3a_v1_outa u3a_v2_outa
+
+# define u3v1to u3v2to
+# define u3v1of u3v2of
+
+ /*** hashtable.h
+ ***/
+
+# define u3h_v1_buck u3h_v2_buck
+# define u3h_v1_node u3h_v2_node
+# define u3h_v1_root u3h_v2_root
+# define u3h_v1_slot_is_node u3h_v2_slot_is_node
+# define u3h_v1_slot_is_noun u3h_v2_slot_is_noun
+# define u3h_v1_slot_to_noun u3h_v2_slot_to_noun
+
+ /* u3h_v1_free_nodes(): free hashtable nodes.
+ */
+ void
+ u3h_v1_free_nodes(u3p(u3h_v1_root) har_p);
+
+
+ /*** jets.h
+ ***/
+# define u3j_v1_fink u3j_v2_fink
+# define u3j_v1_fist u3j_v2_fist
+# define u3j_v1_hank u3j_v2_hank
+# define u3j_v1_rite u3j_v2_rite
+# define u3j_v1_site u3j_v2_site
+
+
+ /*** nock.h
+ ***/
+# define u3n_v1_memo u3n_v2_memo
+# define u3n_v1_prog u3n_v2_prog
+
+
+ /*** vortex.h
+ ***/
+# define u3A_v1 u3A_v2
+
+
+/*** allocate.c
+***/
+
+/* _box_v1_slot(): select the right free list to search for a block.
+*/
+static c3_w
+_box_v1_slot(c3_w siz_w)
+{
+ if ( siz_w < u3a_v1_minimum ) {
+ return 0;
+ }
+ else {
+ c3_w i_w = 1;
+
+ while ( 1 ) {
+ if ( i_w == u3a_v1_fbox_no ) {
+ return (i_w - 1);
+ }
+ if ( siz_w < 16 ) {
+ return i_w;
+ }
+ siz_w = (siz_w + 1) >> 1;
+ i_w += 1;
+ }
+ }
+}
+
+/* _box_v1_make(): construct a box.
+*/
+static u3a_v1_box*
+_box_v1_make(void* box_v, c3_w siz_w, c3_w use_w)
+{
+ u3a_v1_box* box_u = box_v;
+ c3_w* box_w = box_v;
+
+ u3_assert(siz_w >= u3a_v1_minimum);
+
+ box_w[0] = siz_w;
+ box_w[siz_w - 1] = siz_w;
+ box_u->use_w = use_w;
+
+ return box_u;
+}
+
+/* _box_v1_attach(): attach a box to the free list.
+*/
+static void
+_box_v1_attach(u3a_v1_box* box_u)
+{
+ u3_assert(box_u->siz_w >= (1 + c3_wiseof(u3a_v1_fbox)));
+ u3_assert(0 != u3v1of(u3a_v1_fbox, box_u));
+
+ {
+ c3_w sel_w = _box_v1_slot(box_u->siz_w);
+ u3p(u3a_v1_fbox) fre_p = u3v1of(u3a_v1_fbox, box_u);
+ u3p(u3a_v1_fbox)* pfr_p = &u3R_v1->all.fre_p[sel_w];
+ u3p(u3a_v1_fbox) nex_p = *pfr_p;
+
+ u3v1to(u3a_v1_fbox, fre_p)->pre_p = 0;
+ u3v1to(u3a_v1_fbox, fre_p)->nex_p = nex_p;
+ if ( u3v1to(u3a_v1_fbox, fre_p)->nex_p ) {
+ u3v1to(u3a_v1_fbox, u3v1to(u3a_v1_fbox, fre_p)->nex_p)->pre_p = fre_p;
+ }
+ (*pfr_p) = fre_p;
+ }
+}
+
+/* _box_v1_detach(): detach a box from the free list.
+*/
+static void
+_box_v1_detach(u3a_v1_box* box_u)
+{
+ u3p(u3a_v1_fbox) fre_p = u3v1of(u3a_v1_fbox, box_u);
+ u3p(u3a_v1_fbox) pre_p = u3v1to(u3a_v1_fbox, fre_p)->pre_p;
+ u3p(u3a_v1_fbox) nex_p = u3v1to(u3a_v1_fbox, fre_p)->nex_p;
+
+
+ if ( nex_p ) {
+ if ( u3v1to(u3a_v1_fbox, nex_p)->pre_p != fre_p ) {
+ u3_assert(!"loom: corrupt");
+ }
+ u3v1to(u3a_v1_fbox, nex_p)->pre_p = pre_p;
+ }
+ if ( pre_p ) {
+ if( u3v1to(u3a_v1_fbox, pre_p)->nex_p != fre_p ) {
+ u3_assert(!"loom: corrupt");
+ }
+ u3v1to(u3a_v1_fbox, pre_p)->nex_p = nex_p;
+ }
+ else {
+ c3_w sel_w = _box_v1_slot(box_u->siz_w);
+
+ if ( fre_p != u3R_v1->all.fre_p[sel_w] ) {
+ u3_assert(!"loom: corrupt");
+ }
+ u3R_v1->all.fre_p[sel_w] = nex_p;
+ }
+}
+
+/* _box_v1_free(): free and coalesce.
+*/
+static void
+_box_v1_free(u3a_v1_box* box_u)
+{
+ c3_w* box_w = (c3_w *)(void *)box_u;
+
+ u3_assert(box_u->use_w != 0);
+ box_u->use_w -= 1;
+ if ( 0 != box_u->use_w ) {
+ return;
+ }
+
+ // we're always migrating a north road, so no need to check for it
+ {
+ /* Try to coalesce with the block below.
+ */
+ if ( box_w != u3a_v1_into(u3R_v1->rut_p) ) {
+ c3_w laz_w = *(box_w - 1);
+ u3a_v1_box* pox_u = (u3a_v1_box*)(void *)(box_w - laz_w);
+
+ if ( 0 == pox_u->use_w ) {
+ _box_v1_detach(pox_u);
+ _box_v1_make(pox_u, (laz_w + box_u->siz_w), 0);
+
+ box_u = pox_u;
+ box_w = (c3_w*)(void *)pox_u;
+ }
+ }
+
+ /* Try to coalesce with the block above, or the wilderness.
+ */
+ if ( (box_w + box_u->siz_w) == u3a_v1_into(u3R_v1->hat_p) ) {
+ u3R_v1->hat_p = u3a_v1_outa(box_w);
+ }
+ else {
+ u3a_v1_box* nox_u = (u3a_v1_box*)(void *)(box_w + box_u->siz_w);
+
+ if ( 0 == nox_u->use_w ) {
+ _box_v1_detach(nox_u);
+ _box_v1_make(box_u, (box_u->siz_w + nox_u->siz_w), 0);
+ }
+ _box_v1_attach(box_u);
+ }
+ }
+}
+
+/* u3a_v1_wfree(): free storage.
+*/
+void
+u3a_v1_wfree(void* tox_v)
+{
+ _box_v1_free(u3a_v1_botox(tox_v));
+}
+
+/* u3a_v1_free(): free for aligned malloc.
+*/
+void
+u3a_v1_free(void* tox_v)
+{
+ if (NULL == tox_v)
+ return;
+
+ c3_w* tox_w = tox_v;
+ c3_w pad_w = tox_w[-1];
+ c3_w* org_w = tox_w - (pad_w + 1);
+
+ u3a_v1_wfree(org_w);
+}
+
+/* u3a_v1_reclaim(): clear ad-hoc persistent caches to reclaim memory.
+*/
+void
+u3a_v1_reclaim(void)
+{
+ // clear the memoization cache
+ //
+ u3h_v1_free_nodes(u3R_v1->cax.har_p);
+}
+
+/* _me_v1_lose_north(): lose on a north road.
+*/
+static void
+_me_v1_lose_north(u3_noun dog)
+{
+top:
+ {
+ c3_w* dog_w = u3a_v1_to_ptr(dog);
+ u3a_v1_box* box_u = u3a_v1_botox(dog_w);
+
+ if ( box_u->use_w > 1 ) {
+ box_u->use_w -= 1;
+ }
+ else {
+ if ( 0 == box_u->use_w ) {
+ fprintf(stderr, "bail: foul\r\n");
+ abort();
+ }
+ else {
+ if ( _(u3a_v1_is_pom(dog)) ) {
+ u3a_v1_cell* dog_u = (void *)dog_w;
+ u3_noun h_dog = dog_u->hed;
+ u3_noun t_dog = dog_u->tel;
+
+ if ( !_(u3a_v1_is_cat(h_dog)) ) {
+ _me_v1_lose_north(h_dog);
+ }
+ u3a_v1_wfree(dog_w);
+ if ( !_(u3a_v1_is_cat(t_dog)) ) {
+ dog = t_dog;
+ goto top;
+ }
+ }
+ else {
+ u3a_v1_wfree(dog_w);
+ }
+ }
+ }
+ }
+}
+
+/* u3a_v1_lose(): lose a reference count.
+*/
+void
+u3a_v1_lose(u3_noun som)
+{
+ if ( !_(u3a_v1_is_cat(som)) ) {
+ _me_v1_lose_north(som);
+ }
+}
+
+
+/*** hashtable.c
+***/
+
+/* _ch_v1_free_buck(): free bucket
+*/
+static void
+_ch_v1_free_buck(u3h_v1_buck* hab_u)
+{
+ c3_w i_w;
+
+ for ( i_w = 0; i_w < hab_u->len_w; i_w++ ) {
+ u3a_v1_lose(u3h_v1_slot_to_noun(hab_u->sot_w[i_w]));
+ }
+ u3a_v1_wfree(hab_u);
+}
+
+/* _ch_v1_free_node(): free node.
+*/
+static void
+_ch_v1_free_node(u3h_v1_node* han_u, c3_w lef_w)
+{
+ c3_w len_w = c3_pc_w(han_u->map_w);
+ c3_w i_w;
+
+ lef_w -= 5;
+
+ for ( i_w = 0; i_w < len_w; i_w++ ) {
+ c3_w sot_w = han_u->sot_w[i_w];
+
+ if ( _(u3h_v1_slot_is_noun(sot_w)) ) {
+ u3a_v1_lose(u3h_v1_slot_to_noun(sot_w));
+ }
+ else {
+ void* hav_v = u3h_v1_slot_to_node(sot_w);
+
+ if ( 0 == lef_w ) {
+ _ch_v1_free_buck(hav_v);
+ } else {
+ _ch_v1_free_node(hav_v, lef_w);
+ }
+ }
+ }
+ u3a_v1_wfree(han_u);
+}
+
+/* u3h_v1_free_nodes(): free hashtable nodes.
+*/
+void
+u3h_v1_free_nodes(u3p(u3h_v1_root) har_p)
+{
+ u3h_v1_root* har_u = u3v1to(u3h_v1_root, har_p);
+ c3_w i_w;
+
+ for ( i_w = 0; i_w < 64; i_w++ ) {
+ c3_w sot_w = har_u->sot_w[i_w];
+
+ if ( _(u3h_v1_slot_is_noun(sot_w)) ) {
+ u3a_v1_lose(u3h_v1_slot_to_noun(sot_w));
+ }
+ else if ( _(u3h_v1_slot_is_node(sot_w)) ) {
+ u3h_v1_node* han_u = (u3h_v1_node*) u3h_v1_slot_to_node(sot_w);
+
+ _ch_v1_free_node(han_u, 25);
+ }
+ har_u->sot_w[i_w] = 0;
+ }
+ har_u->use_w = 0;
+ har_u->arm_u.mug_w = 0;
+ har_u->arm_u.inx_w = 0;
+}
+
+/* _ch_v1_walk_buck(): walk bucket for gc.
+*/
+static void
+_ch_v1_walk_buck(u3h_v1_buck* hab_u, void (*fun_f)(u3_noun, void*), void* wit)
+{
+ c3_w i_w;
+
+ for ( i_w = 0; i_w < hab_u->len_w; i_w++ ) {
+ fun_f(u3h_v1_slot_to_noun(hab_u->sot_w[i_w]), wit);
+ }
+}
+
+/* _ch_v1_walk_node(): walk node for gc.
+*/
+static void
+_ch_v1_walk_node(u3h_v1_node* han_u, c3_w lef_w, void (*fun_f)(u3_noun, void*), void* wit)
+{
+ c3_w len_w = c3_pc_w(han_u->map_w);
+ c3_w i_w;
+
+ lef_w -= 5;
+
+ for ( i_w = 0; i_w < len_w; i_w++ ) {
+ c3_w sot_w = han_u->sot_w[i_w];
+
+ if ( _(u3h_v1_slot_is_noun(sot_w)) ) {
+ u3_noun kev = u3h_v1_slot_to_noun(sot_w);
+
+ fun_f(kev, wit);
+ }
+ else {
+ void* hav_v = u3h_v1_slot_to_node(sot_w);
+
+ if ( 0 == lef_w ) {
+ _ch_v1_walk_buck(hav_v, fun_f, wit);
+ } else {
+ _ch_v1_walk_node(hav_v, lef_w, fun_f, wit);
+ }
+ }
+ }
+}
+
+/* u3h_v1_walk_with(): traverse hashtable with key, value fn and data
+ * argument; RETAINS.
+*/
+void
+u3h_v1_walk_with(u3p(u3h_v1_root) har_p,
+ void (*fun_f)(u3_noun, void*),
+ void* wit)
+{
+ u3h_v1_root* har_u = u3v1to(u3h_v1_root, har_p);
+ c3_w i_w;
+
+ for ( i_w = 0; i_w < 64; i_w++ ) {
+ c3_w sot_w = har_u->sot_w[i_w];
+
+ if ( _(u3h_v1_slot_is_noun(sot_w)) ) {
+ u3_noun kev = u3h_v1_slot_to_noun(sot_w);
+
+ fun_f(kev, wit);
+ }
+ else if ( _(u3h_v1_slot_is_node(sot_w)) ) {
+ u3h_v1_node* han_u = (u3h_v1_node*) u3h_v1_slot_to_node(sot_w);
+
+ _ch_v1_walk_node(han_u, 25, fun_f, wit);
+ }
+ }
+}
+
+/* _ch_v1_walk_plain(): use plain u3_noun fun_f for each node
+ */
+static void
+_ch_v1_walk_plain(u3_noun kev, void* wit)
+{
+ void (*fun_f)(u3_noun) = (void (*)(u3_noun))wit;
+ fun_f(kev);
+}
+
+/* u3h_v1_walk(): u3h_v1_walk_with, but with no data argument
+ */
+void
+u3h_v1_walk(u3p(u3h_v1_root) har_p, void (*fun_f)(u3_noun))
+{
+ u3h_v1_walk_with(har_p, _ch_v1_walk_plain, (void *)fun_f);
+}
+
+
+/*** jets.c
+***/
+
+/* _cj_v1_fink_free(): lose and free everything in a u3j_v1_fink.
+*/
+static void
+_cj_v1_fink_free(u3p(u3j_v1_fink) fin_p)
+{
+ c3_w i_w;
+ u3j_v1_fink* fin_u = u3v1to(u3j_v1_fink, fin_p);
+ u3a_v1_lose(fin_u->sat);
+ for ( i_w = 0; i_w < fin_u->len_w; ++i_w ) {
+ u3j_v1_fist* fis_u = &(fin_u->fis_u[i_w]);
+ u3a_v1_lose(fis_u->bat);
+ u3a_v1_lose(fis_u->pax);
+ }
+ u3a_v1_wfree(fin_u);
+}
+
+/* u3j_v1_rite_lose(): lose references of u3j_v1_rite (but do not free).
+ */
+void
+u3j_v1_rite_lose(u3j_v1_rite* rit_u)
+{
+ if ( (c3y == rit_u->own_o) && u3_none != rit_u->clu ) {
+ u3a_v1_lose(rit_u->clu);
+ _cj_v1_fink_free(rit_u->fin_p);
+ }
+}
+
+
+/* u3j_v1_site_lose(): lose references of u3j_v1_site (but do not free).
+ */
+void
+u3j_v1_site_lose(u3j_v1_site* sit_u)
+{
+ u3a_v1_lose(sit_u->axe);
+ if ( u3_none != sit_u->bat ) {
+ u3a_v1_lose(sit_u->bat);
+ }
+ if ( u3_none != sit_u->bas ) {
+ u3a_v1_lose(sit_u->bas);
+ }
+ if ( u3_none != sit_u->loc ) {
+ u3a_v1_lose(sit_u->loc);
+ u3a_v1_lose(sit_u->lab);
+ if ( c3y == sit_u->fon_o ) {
+ if ( sit_u->fin_p ) {
+ _cj_v1_fink_free(sit_u->fin_p);
+ }
+ }
+ }
+}
+
+/* _cj_v1_free_hank(): free an entry from the hank cache.
+*/
+static void
+_cj_v1_free_hank(u3_noun kev)
+{
+ u3a_v1_cell* cel_u = (u3a_v1_cell*) u3a_v1_to_ptr(kev);
+ u3j_v1_hank* han_u = u3v1to(u3j_v1_hank, cel_u->tel);
+ if ( u3_none != han_u->hax ) {
+ u3a_v1_lose(han_u->hax);
+ u3j_v1_site_lose(&(han_u->sit_u));
+ }
+ u3a_v1_wfree(han_u);
+}
+
+/* u3j_v1_reclaim(): clear ad-hoc persistent caches to reclaim memory.
+*/
+void
+u3j_v1_reclaim(void)
+{
+ // clear the jet hank cache
+ //
+ u3h_v1_walk(u3R_v1->jed.han_p, _cj_v1_free_hank);
+ u3h_v1_free_nodes(u3R_v1->jed.han_p);
+}
+
+
+/*** nock.c
+***/
+
+/* _cn_v1_prog_free(): free memory retained by program pog_u
+*/
+static void
+_cn_v1_prog_free(u3n_v1_prog* pog_u)
+{
+ // fix up pointers for loom portability
+ pog_u->byc_u.ops_y = (c3_y*) ((void*) pog_u) + sizeof(u3n_v1_prog);
+ pog_u->lit_u.non = (u3_noun*) (pog_u->byc_u.ops_y + pog_u->byc_u.len_w);
+ pog_u->mem_u.sot_u = (u3n_v1_memo*) (pog_u->lit_u.non + pog_u->lit_u.len_w);
+ pog_u->cal_u.sit_u = (u3j_v1_site*) (pog_u->mem_u.sot_u + pog_u->mem_u.len_w);
+ pog_u->reg_u.rit_u = (u3j_v1_rite*) (pog_u->cal_u.sit_u + pog_u->cal_u.len_w);
+
+ c3_w dex_w;
+ for (dex_w = 0; dex_w < pog_u->lit_u.len_w; ++dex_w) {
+ u3a_v1_lose(pog_u->lit_u.non[dex_w]);
+ }
+ for (dex_w = 0; dex_w < pog_u->mem_u.len_w; ++dex_w) {
+ u3a_v1_lose(pog_u->mem_u.sot_u[dex_w].key);
+ }
+ for (dex_w = 0; dex_w < pog_u->cal_u.len_w; ++dex_w) {
+ u3j_v1_site_lose(&(pog_u->cal_u.sit_u[dex_w]));
+ }
+ for (dex_w = 0; dex_w < pog_u->reg_u.len_w; ++dex_w) {
+ u3j_v1_rite_lose(&(pog_u->reg_u.rit_u[dex_w]));
+ }
+ u3a_v1_free(pog_u);
+}
+
+/* _n_v1_feb(): u3h_v1_walk helper for u3n_v1_free
+ */
+static void
+_n_v1_feb(u3_noun kev)
+{
+ u3a_v1_cell *cel_u = (u3a_v1_cell*) u3a_v1_to_ptr(kev);
+ _cn_v1_prog_free(u3v1to(u3n_v1_prog, cel_u->tel));
+}
+
+/* u3n_v1_free(): free bytecode cache
+ */
+void
+u3n_v1_free(void)
+{
+ u3p(u3h_v1_root) har_p = u3R_v1->byc.har_p;
+ u3h_v1_walk(har_p, _n_v1_feb);
+ u3h_v1_free_nodes(har_p);
+}
+
+/* u3n_v1_reclaim(): clear ad-hoc persistent caches to reclaim memory.
+*/
+void
+u3n_v1_reclaim(void)
+{
+ // clear the bytecode cache
+ //
+ // We can't just u3h_v1_free() -- the value is a post to a u3n_v1_prog.
+ // Note that the hank cache *must* also be freed (in u3j_v1_reclaim())
+ //
+ u3n_v1_free();
+}
+
+
+/*** vortex.c
+***/
+
+/* u3v_v1_reclaim(): clear ad-hoc persistent caches to reclaim memory.
+*/
+void
+u3v_v1_reclaim(void)
+{
+ // clear the u3v_wish cache
+ //
+ u3a_v1_lose(u3A_v1->yot);
+ u3A_v1->yot = u3_nul;
+}
+
+/*** init
+***/
+
+void
+u3_v1_load(c3_z wor_i)
+{
+ c3_w len_w = wor_i - 1;
+ c3_w ver_w = *(u3_Loom_v1 + len_w);
+
+ u3_assert( U3V_VER1 == ver_w );
+
+ c3_w* mem_w = u3_Loom_v1 + 1;
+ c3_w siz_w = c3_wiseof(u3v_v1_home);
+ c3_w* mat_w = (mem_w + len_w) - siz_w;
+
+ u3H_v1 = (void *)mat_w;
+ u3R_v1 = &u3H_v1->rod_u;
+
+ u3R_v1->cap_p = u3R_v1->mat_p = u3a_v1_outa(u3H_v1);
+}
+
+
+/*** manage.c
+***/
+
+void
+u3m_v1_reclaim(void)
+{
+ u3v_v1_reclaim();
+ u3j_v1_reclaim();
+ u3n_v1_reclaim();
+ u3a_v1_reclaim();
+}
diff --git a/vere/pkg/past/v1.h b/vere/pkg/past/v1.h
new file mode 100644
index 0000000..cf811d4
--- /dev/null
+++ b/vere/pkg/past/v1.h
@@ -0,0 +1,42 @@
+#ifndef U3_V1_H
+#define U3_V1_H
+
+#include "v2.h"
+
+ /*** allocate.h
+ ***/
+# define u3_Loom_v1 u3_Loom_v2
+
+# define u3a_v1_to_off(som) ((som) & 0x3fffffff)
+# define u3a_v1_to_ptr(som) (u3a_v1_into(u3a_v1_to_off(som)))
+
+# define u3a_v1_into u3a_v2_into
+# define u3a_v1_outa u3a_v2_outa
+# define u3R_v1 u3R_v2
+
+
+ /*** hashtable.h
+ ***/
+# define u3h_v1_slot_to_node(sot) (u3a_v1_into((sot) & 0x3fffffff))
+
+
+ /*** manage.h
+ ***/
+ /* u3m_v1_reclaim: clear persistent caches to reclaim memory
+ */
+ void
+ u3m_v1_reclaim(void);
+
+
+ /*** vortex.h
+ ***/
+# define u3H_v1 u3H_v2
+# define u3v_v1_home u3v_v2_home
+
+
+ /*** init
+ ***/
+ void
+ u3_v1_load(c3_z wor_i);
+
+#endif /* U3_V1_H */
diff --git a/vere/pkg/past/v2.c b/vere/pkg/past/v2.c
new file mode 100644
index 0000000..46ef8ea
--- /dev/null
+++ b/vere/pkg/past/v2.c
@@ -0,0 +1,116 @@
+#include "v2.h"
+
+# define u3h_v2_free u3h_v3_free
+# define u3h_v2_walk u3h_v3_walk
+# define u3h_v2_new u3h_v3_new
+
+u3a_v2_road* u3a_v2_Road;
+u3v_v2_home* u3v_v2_Home;
+
+
+/*** jets.c
+***/
+
+/* u3j_v2_reclaim(): clear ad-hoc persistent caches to reclaim memory.
+*/
+void
+u3j_v2_reclaim(void)
+{
+ // set globals (required for aliased functions)
+ // XX confirm
+ u3H_v3 = (u3v_v3_home*) u3H_v2;
+ u3R_v3 = (u3a_v3_road*) u3R_v2;
+
+ // clear the jet hank cache
+ //
+ u3h_v2_walk(u3R_v2->jed.han_p, u3j_v2_free_hank);
+ u3h_v2_free(u3R_v2->jed.han_p);
+ u3R_v2->jed.han_p = u3h_v2_new();
+}
+
+
+/*** nock.c
+***/
+
+/* _cn_v2_prog_free(): free memory retained by program pog_u
+*/
+static void
+_cn_v2_prog_free(u3n_v2_prog* pog_u)
+{
+ // fix up pointers for loom portability
+ pog_u->byc_u.ops_y = (c3_y*) ((void*) pog_u) + sizeof(u3n_v2_prog);
+ pog_u->lit_u.non = (u3_noun*) (pog_u->byc_u.ops_y + pog_u->byc_u.len_w);
+ pog_u->mem_u.sot_u = (u3n_v2_memo*) (pog_u->lit_u.non + pog_u->lit_u.len_w);
+ pog_u->cal_u.sit_u = (u3j_v2_site*) (pog_u->mem_u.sot_u + pog_u->mem_u.len_w);
+ pog_u->reg_u.rit_u = (u3j_v2_rite*) (pog_u->cal_u.sit_u + pog_u->cal_u.len_w);
+
+ c3_w dex_w;
+ for (dex_w = 0; dex_w < pog_u->lit_u.len_w; ++dex_w) {
+ u3a_v2_lose(pog_u->lit_u.non[dex_w]);
+ }
+ for (dex_w = 0; dex_w < pog_u->mem_u.len_w; ++dex_w) {
+ u3a_v2_lose(pog_u->mem_u.sot_u[dex_w].key);
+ }
+ for (dex_w = 0; dex_w < pog_u->cal_u.len_w; ++dex_w) {
+ u3j_v2_site_lose(&(pog_u->cal_u.sit_u[dex_w]));
+ }
+ for (dex_w = 0; dex_w < pog_u->reg_u.len_w; ++dex_w) {
+ u3j_v2_rite_lose(&(pog_u->reg_u.rit_u[dex_w]));
+ }
+ u3a_v2_free(pog_u);
+}
+
+/* _n_v2_feb(): u3h_v2_walk helper for u3n_v2_free
+ */
+static void
+_n_v2_feb(u3_noun kev)
+{
+ u3a_v2_cell *cel_u = (u3a_v2_cell*) u3a_v2_to_ptr(kev);
+ _cn_v2_prog_free(u3v2to(u3n_v2_prog, cel_u->tel));
+}
+
+/* u3n_v2_free(): free bytecode cache
+ */
+void
+u3n_v2_free(void)
+{
+ u3p(u3h_v2_root) har_p = u3R_v2->byc.har_p;
+ u3h_v2_walk(har_p, _n_v2_feb);
+ u3h_v2_free(har_p);
+}
+
+/* u3n_v2_reclaim(): clear ad-hoc persistent caches to reclaim memory.
+*/
+void
+u3n_v2_reclaim(void)
+{
+ // set globals (required for aliased functions)
+ u3H_v3 = (u3v_v3_home*) u3H_v2;
+ u3R_v3 = (u3a_v3_road*) u3R_v2;
+
+ // clear the bytecode cache
+ u3n_v2_free();
+ u3R_v2->byc.har_p = u3h_v2_new();
+}
+
+
+/*** init
+***/
+
+void
+u3_v2_load(c3_z wor_i)
+{
+ c3_w ver_w = *(u3_Loom_v2 + wor_i - 1);
+
+ u3_assert( U3V_VER2 == ver_w );
+
+ c3_w *mem_w = u3_Loom_v2 + u3a_v2_walign;
+ c3_w len_w = wor_i - u3a_v2_walign;
+ c3_w suz_w = c3_wiseof(u3v_v2_home);
+ c3_w *mut_w = c3_align(mem_w + len_w - suz_w, u3a_v2_balign, C3_ALGLO);
+
+ // set v2 globals
+ u3H_v2 = (void *)mut_w;
+ u3R_v2 = &u3H_v2->rod_u;
+ u3R_v2->cap_p = u3R_v2->mat_p = u3a_v2_outa(u3H_v2);
+}
diff --git a/vere/pkg/past/v2.h b/vere/pkg/past/v2.h
new file mode 100644
index 0000000..cf042ec
--- /dev/null
+++ b/vere/pkg/past/v2.h
@@ -0,0 +1,216 @@
+#ifndef U3_V2_H
+#define U3_V2_H
+
+#include "v3.h"
+
+ /*** allocate.h
+ ***/
+# define u3_Loom_v2 u3_Loom_v3
+
+# define u3a_v2_heap u3a_v3_heap
+# define u3a_v2_is_cat u3a_v3_is_cat
+# define u3a_v2_is_cell u3a_v3_is_cell
+# define u3a_v2_is_north u3a_v3_is_north
+# define u3a_v2_is_pom u3a_v3_is_pom
+# define u3a_v2_is_pug u3a_v3_is_pug
+# define u3a_v2_vits u3a_v3_vits
+
+# define u3a_v2_into u3a_v3_into
+# define u3a_v2_outa u3a_v3_outa
+
+# define u3a_v2_botox u3a_v3_botox
+# define u3a_v2_box u3a_v3_box
+# define u3a_v2_cell u3a_v3_cell
+# define u3a_v2_fbox u3a_v3_fbox
+# define u3a_v2_fbox_no u3a_v3_fbox_no
+# define u3a_v2_minimum u3a_v3_minimum
+# define u3a_v2_rewrite_ptr u3a_v3_rewrite_ptr
+# define u3a_v2_rewritten u3a_v3_rewritten
+# define u3a_v2_to_pug u3a_v3_to_pug
+# define u3a_v2_to_pom u3a_v3_to_pom
+# define u3a_v2_wfree u3a_v3_wfree
+
+# define u3v2to u3v3to
+# define u3v2of u3v3of
+
+# define u3a_v2_free u3a_v3_free
+# define u3a_v2_lose u3a_v3_lose
+# define u3a_v2_to_off u3a_v3_to_off
+# define u3a_v2_to_ptr u3a_v3_to_ptr
+# define u3a_v2_ream u3a_v3_ream
+# define u3a_v2_balign u3a_v3_balign
+# define u3a_v2_walign u3a_v3_walign
+
+ /* u3a_v2_road: contiguous allocation and execution context.
+ */
+ typedef struct _u3a_v2_road {
+ u3p(struct _u3a_v2_road) par_p; // parent road
+ u3p(struct _u3a_v2_road) kid_p; // child road list
+ u3p(struct _u3a_v2_road) nex_p; // sibling road
+
+ u3p(c3_w) cap_p; // top of transient region
+ u3p(c3_w) hat_p; // top of durable region
+ u3p(c3_w) mat_p; // bottom of transient region
+ u3p(c3_w) rut_p; // bottom of durable region
+ u3p(c3_w) ear_p; // original cap if kid is live
+
+ c3_w fut_w[32]; // futureproof buffer
+
+ struct { // escape buffer
+ union {
+ jmp_buf buf;
+ c3_w buf_w[256]; // futureproofing
+ };
+ } esc;
+
+ struct { // miscellaneous config
+ c3_w fag_w; // flag bits
+ } how; //
+
+ struct { // allocation pools
+ u3p(u3a_v2_fbox) fre_p[u3a_v2_fbox_no]; // heap by node size log
+ u3p(u3a_v2_fbox) cel_p; // custom cell allocator
+ c3_w fre_w; // number of free words
+ c3_w max_w; // maximum allocated
+ } all;
+
+ struct {
+ u3p(u3h_root) hot_p; // hot state (home road only)
+ u3p(u3h_root) war_p; // warm state
+ u3p(u3h_root) cod_p; // cold state
+ u3p(u3h_root) han_p; // hank cache
+ u3p(u3h_root) bas_p; // battery hashes
+ } jed; // jet dashboard
+
+ struct { // bytecode state
+ u3p(u3h_root) har_p; // formula->post of bytecode
+ } byc;
+
+ struct { // namespace
+ u3_noun gul; // (list $+(* (unit (unit)))) now
+ } ski;
+
+ struct { // trace stack
+ u3_noun tax; // (list ,*)
+ u3_noun mer; // emergency buffer to release
+ } bug;
+
+ struct { // profile stack
+ c3_d nox_d; // nock steps
+ c3_d cel_d; // cell allocations
+ u3_noun don; // (list batt)
+ u3_noun trace; // (list trace)
+ u3_noun day; // doss, only in u3H (moveme)
+ } pro;
+
+ struct { // memoization
+ u3p(u3h_v2_root) har_p; // (map (pair term noun) noun)
+ } cax;
+ } u3a_v2_road;
+
+ /** Globals.
+ **/
+ /// Current road (thread-local).
+ extern u3a_v2_road* u3a_v2_Road;
+# define u3R_v2 u3a_v2_Road
+
+
+ /*** jets.h
+ ***/
+# define u3j_v2_fink u3j_v3_fink
+# define u3j_v2_fist u3j_v3_fist
+# define u3j_v2_hank u3j_v3_hank
+# define u3j_v2_rite u3j_v3_rite
+# define u3j_v2_site u3j_v3_site
+# define u3j_v2_rite_lose u3j_v3_rite_lose
+# define u3j_v2_site_lose u3j_v3_site_lose
+# define u3j_v2_free_hank u3j_v3_free_hank
+
+ /* u3j_v2_reclaim(): clear ad-hoc persistent caches to reclaim memory.
+ */
+ void
+ u3j_v2_reclaim(void);
+
+ /*** hashtable.h
+ ***/
+# define u3h_v2_buck u3h_v3_buck
+# define u3h_v2_node u3h_v3_node
+# define u3h_v2_root u3h_v3_root
+# define u3h_v2_slot_is_node u3h_v3_slot_is_node
+# define u3h_v2_slot_is_noun u3h_v3_slot_is_noun
+# define u3h_v2_noun_to_slot u3h_v3_noun_to_slot
+# define u3h_v2_slot_to_noun u3h_v3_slot_to_noun
+
+# define u3h_v2_slot_to_node(sot) (u3a_v2_into(((sot) & 0x3fffffff) << u3a_v2_vits))
+# define u3h_v2_node_to_slot(ptr) ((u3a_v2_outa((ptr)) >> u3a_v2_vits) | 0x40000000)
+
+
+ /*** nock.h
+ ***/
+ /* u3n_v2_memo: %memo hint space
+ */
+ typedef struct {
+ c3_l sip_l;
+ u3_noun key;
+ } u3n_v2_memo;
+
+ /* u3n_v2_prog: program compiled from nock
+ */
+ typedef struct _u3n_v2_prog {
+ struct {
+ c3_o own_o; // program owns ops_y?
+ c3_w len_w; // length of bytecode (bytes)
+ c3_y* ops_y; // actual array of bytes
+ } byc_u; // bytecode
+ struct {
+ c3_w len_w; // number of literals
+ u3_noun* non; // array of literals
+ } lit_u; // literals
+ struct {
+ c3_w len_w; // number of memo slots
+ u3n_v2_memo* sot_u; // array of memo slots
+ } mem_u; // memo slot data
+ struct {
+ c3_w len_w; // number of call sites
+ u3j_v2_site* sit_u; // array of sites
+ } cal_u; // call site data
+ struct {
+ c3_w len_w; // number of registration sites
+ u3j_v2_rite* rit_u; // array of sites
+ } reg_u; // registration site data
+ } u3n_v2_prog;
+
+ /* u3n_v2_reclaim(): clear ad-hoc persistent caches to reclaim memory.
+ */
+ void
+ u3n_v2_reclaim(void);
+
+
+ /*** vortex.h
+ ***/
+# define u3v_v2_arvo u3v_v3_arvo
+# define u3v_v2_version u3v_v3_version
+
+ /* u3v_v2_home: all internal (within image) state.
+ ** NB: version must be last for discriminability in north road
+ */
+ typedef struct _u3v_v2_home {
+ u3a_v2_road rod_u; // storage state
+ u3v_v2_arvo arv_u; // arvo state
+ u3v_v2_version ver_w; // version number
+ } u3v_v2_home;
+
+ /** Globals.
+ **/
+ /// Arvo internal state.
+ extern u3v_v2_home* u3v_v2_Home;
+# define u3H_v2 u3v_v2_Home
+# define u3A_v2 (&(u3v_v2_Home->arv_u))
+
+
+ /*** init
+ ***/
+ void
+ u3_v2_load(c3_z wor_i);
+
+#endif /* U3_V2_H */
diff --git a/vere/pkg/past/v3.c b/vere/pkg/past/v3.c
new file mode 100644
index 0000000..05326c0
--- /dev/null
+++ b/vere/pkg/past/v3.c
@@ -0,0 +1,22 @@
+#include "v3.h"
+
+/*** init
+***/
+
+void
+u3_v3_load(c3_z wor_i)
+{
+ c3_w ver_w = *(u3_Loom_v3 + wor_i - 1);
+
+ u3_assert( U3V_VER3 == ver_w );
+
+ c3_w* mem_w = u3_Loom_v3 + u3a_v3_walign;
+ c3_w siz_w = c3_wiseof(u3v_v3_home);
+ c3_w len_w = wor_i - u3a_v3_walign;
+ c3_w* mat_w = c3_align(mem_w + len_w - siz_w, u3a_v3_balign, C3_ALGLO);
+
+ u3H_v3 = (void *)mat_w;
+ u3R_v3 = &u3H_v3->rod_u;
+
+ u3R_v3->cap_p = u3R_v3->mat_p = u3a_v3_outa(u3H_v3);
+}
diff --git a/vere/pkg/past/v3.h b/vere/pkg/past/v3.h
new file mode 100644
index 0000000..ba3b8c2
--- /dev/null
+++ b/vere/pkg/past/v3.h
@@ -0,0 +1,92 @@
+#ifndef U3_V3_H
+#define U3_V3_H
+
+#include "v4.h"
+
+ /*** allocate.h
+ ***/
+# define u3_Loom_v3 u3_Loom_v4
+# define u3R_v3 u3a_v4_Road
+
+# define u3a_v3_heap u3a_v4_heap
+# define u3a_v3_is_cat u3a_v4_is_cat
+# define u3a_v3_is_cell u3a_v4_is_cell
+# define u3a_v3_is_north u3a_v4_is_north
+# define u3a_v3_is_pom u3a_v4_is_pom
+# define u3a_v3_is_pug u3a_v4_is_pug
+# define u3a_v3_vits u3a_v4_vits
+
+# define u3a_v3_road u3a_v4_road
+# define u3a_v3_walloc u3a_v4_walloc
+# define u3a_v3_into u3a_v4_into
+# define u3a_v3_outa u3a_v4_outa
+
+# define u3a_v3_botox u3a_v4_botox
+# define u3a_v3_box u3a_v4_box
+# define u3a_v3_cell u3a_v4_cell
+# define u3a_v3_fbox u3a_v4_fbox
+# define u3a_v3_fbox_no u3a_v4_fbox_no
+# define u3a_v3_minimum u3a_v4_minimum
+# define u3a_v3_rewrite u3a_v4_rewrite
+# define u3a_v3_rewrite_ptr u3a_v4_rewrite_ptr
+# define u3a_v3_rewritten u3a_v4_rewritten
+# define u3a_v3_to_pug u3a_v4_to_pug
+# define u3a_v3_to_pom u3a_v4_to_pom
+# define u3a_v3_wfree u3a_v4_wfree
+
+# define u3v3to u3v4to
+# define u3v3of u3v4of
+
+# define u3a_v3_free u3a_v4_free
+# define u3a_v3_lose u3a_v4_lose
+# define u3a_v3_to_off u3a_v4_to_off
+# define u3a_v3_to_ptr u3a_v4_to_ptr
+# define u3a_v3_ream u3a_v4_ream
+# define u3a_v3_balign u3a_v4_balign
+# define u3a_v3_walign u3a_v4_walign
+
+
+ /*** jets.h
+ ***/
+# define u3j_v3_fink u3j_v4_fink
+# define u3j_v3_fist u3j_v4_fist
+# define u3j_v3_hank u3j_v4_hank
+# define u3j_v3_rite u3j_v4_rite
+# define u3j_v3_site u3j_v4_site
+
+# define u3j_v3_rite_lose u3j_v4_rite_lose
+# define u3j_v3_site_lose u3j_v4_site_lose
+# define u3j_v3_free_hank u3j_v4_free_hank
+
+
+ /*** hashtable.h
+ ***/
+# define u3h_v3_buck u3h_v4_buck
+# define u3h_v3_node u3h_v4_node
+# define u3h_v3_root u3h_v4_root
+
+# define u3h_v3_free u3h_v4_free
+# define u3h_v3_walk u3h_v4_walk
+# define u3h_v3_new u3h_v4_new
+# define u3h_v3_new_cache u3h_v4_new_cache
+# define u3h_v3_slot_is_node u3h_v4_slot_is_node
+# define u3h_v3_slot_is_noun u3h_v4_slot_is_noun
+# define u3h_v3_noun_to_slot u3h_v4_noun_to_slot
+# define u3h_v3_slot_to_noun u3h_v4_slot_to_noun
+
+
+ /*** vortex.h
+ ***/
+# define u3v_v3_version u3v_v4_version
+# define u3v_v3_arvo u3v_v4_arvo
+# define u3H_v3 u3v_v4_Home
+# define u3A_v3 (&(u3H_v3)->arv_u)
+# define u3v_v3_home u3v_v4_home
+
+
+ /*** init
+ ***/
+ void
+ u3_v3_load(c3_z wor_i);
+
+#endif /* U3_V3_H */
diff --git a/vere/pkg/past/v4.c b/vere/pkg/past/v4.c
new file mode 100644
index 0000000..2d9bd05
--- /dev/null
+++ b/vere/pkg/past/v4.c
@@ -0,0 +1,826 @@
+#include "v4.h"
+
+#include "nock.h"
+
+ /*** current
+ ***/
+# define u3a_v4_is_pom u3a_v5_is_pom
+# define u3a_v4_north_is_normal u3a_v5_north_is_normal
+# define u3n_v4_prog u3n_v5_prog
+
+# define u3a_v4_boxed(len_w) (len_w + c3_wiseof(u3a_v4_box) + 1)
+# define u3a_v4_boxto(box_v) ( (void *) \
+ ( (u3a_v4_box *)(void *)(box_v) + 1 ) )
+# define u3a_v4_botox(tox_v) ( (u3a_v4_box *)(void *)(tox_v) - 1 )
+# define u3h_v4_slot_to_node(sot) (u3a_v4_into(((sot) & 0x3fffffff) << u3a_v4_vits))
+
+u3a_v4_road* u3a_v4_Road;
+u3v_v4_home* u3v_v4_Home;
+
+/*** allocate.c
+***/
+
+static c3_w
+_box_v4_slot(c3_w siz_w)
+{
+ if ( u3a_v4_minimum == siz_w ) {
+ return 0;
+ }
+ else if ( !(siz_w >> 4) ) {
+ return 1;
+ }
+ else {
+ c3_w bit_w = c3_bits_word(siz_w) - 3;
+ c3_w max_w = u3a_v4_fbox_no - 1;
+ return c3_min(bit_w, max_w);
+ }
+}
+
+static u3a_v4_box*
+_box_v4_make(void* box_v, c3_w siz_w, c3_w use_w)
+{
+ u3a_v4_box* box_u = box_v;
+ c3_w* box_w = box_v;
+
+ u3_assert(siz_w >= u3a_v4_minimum);
+
+ box_u->siz_w = siz_w;
+ box_w[siz_w - 1] = siz_w; /* store size at end of allocation as well */
+ box_u->use_w = use_w;
+
+ return box_u;
+}
+
+/* _box_v4_attach(): attach a box to the free list.
+*/
+static void
+_box_v4_attach(u3a_v4_box* box_u)
+{
+ u3_assert(box_u->siz_w >= (1 + c3_wiseof(u3a_v4_fbox)));
+ u3_assert(0 != u3v4of(u3a_v4_fbox, box_u));
+
+ {
+ c3_w sel_w = _box_v4_slot(box_u->siz_w);
+ u3p(u3a_v4_fbox) fre_p = u3v4of(u3a_v4_fbox, box_u);
+ u3p(u3a_v4_fbox)* pfr_p = &u3R_v4->all.fre_p[sel_w];
+ u3p(u3a_v4_fbox) nex_p = *pfr_p;
+
+ u3v4to(u3a_v4_fbox, fre_p)->pre_p = 0;
+ u3v4to(u3a_v4_fbox, fre_p)->nex_p = nex_p;
+ if ( u3v4to(u3a_v4_fbox, fre_p)->nex_p ) {
+ u3v4to(u3a_v4_fbox, u3v4to(u3a_v4_fbox, fre_p)->nex_p)->pre_p = fre_p;
+ }
+ (*pfr_p) = fre_p;
+ }
+}
+
+/* _box_v4_detach(): detach a box from the free list.
+*/
+static void
+_box_v4_detach(u3a_v4_box* box_u)
+{
+ u3p(u3a_v4_fbox) fre_p = u3v4of(u3a_v4_fbox, box_u);
+ u3p(u3a_v4_fbox) pre_p = u3v4to(u3a_v4_fbox, fre_p)->pre_p;
+ u3p(u3a_v4_fbox) nex_p = u3v4to(u3a_v4_fbox, fre_p)->nex_p;
+
+ if ( nex_p ) {
+ if ( u3v4to(u3a_v4_fbox, nex_p)->pre_p != fre_p ) {
+ u3_assert(!"loom: corrupt");
+ }
+ u3v4to(u3a_v4_fbox, nex_p)->pre_p = pre_p;
+ }
+ if ( pre_p ) {
+ if( u3v4to(u3a_v4_fbox, pre_p)->nex_p != fre_p ) {
+ u3_assert(!"loom: corrupt");
+ }
+ u3v4to(u3a_v4_fbox, pre_p)->nex_p = nex_p;
+ }
+ else {
+ c3_w sel_w = _box_v4_slot(box_u->siz_w);
+
+ if ( fre_p != u3R_v4->all.fre_p[sel_w] ) {
+ u3_assert(!"loom: corrupt");
+ }
+ u3R_v4->all.fre_p[sel_w] = nex_p;
+ }
+}
+
+/* _ca_v4_box_make_hat(): allocate directly on the hat.
+*/
+static u3a_v4_box*
+_ca_v4_box_make_hat(c3_w len_w, c3_w ald_w, c3_w off_w, c3_w use_w)
+{
+ c3_w
+ pad_w, /* padding between returned pointer and box */
+ siz_w; /* total size of allocation */
+ u3_post
+ box_p, /* start of box */
+ all_p; /* start of returned pointer */
+
+ // NB: always north
+ {
+ box_p = all_p = u3R_v4->hat_p;
+ all_p += c3_wiseof(u3a_v4_box) + off_w;
+ pad_w = c3_align(all_p, ald_w, C3_ALGHI)
+ - all_p;
+ siz_w = c3_align(len_w + pad_w, u3a_v4_walign, C3_ALGHI);
+
+ if ( (siz_w >= (u3R_v4->cap_p - u3R_v4->hat_p)) ) {
+ // XX wat do
+ fprintf(stderr, "bail: meme\r\n");
+ abort();
+ return 0;
+ }
+ u3R_v4->hat_p += siz_w;
+ }
+
+
+ return _box_v4_make(u3a_v4_into(box_p), siz_w, use_w);
+}
+
+
+/* _ca_v4_willoc(): u3a_v4_walloc() internals.
+*/
+static void*
+_ca_v4_willoc(c3_w len_w, c3_w ald_w, c3_w off_w)
+{
+ c3_w siz_w = c3_max(u3a_v4_minimum, u3a_v4_boxed(len_w));
+ c3_w sel_w = _box_v4_slot(siz_w);
+
+ if ( (sel_w != 0) && (sel_w != u3a_v4_fbox_no - 1) ) {
+ sel_w += 1;
+ }
+
+ while ( 1 ) {
+ u3p(u3a_v4_fbox) *pfr_p = &u3R_v4->all.fre_p[sel_w];
+
+ while ( 1 ) {
+ /* increment until we get a non-null freelist */
+ if ( 0 == *pfr_p ) {
+ if ( sel_w < (u3a_v4_fbox_no - 1) ) {
+ sel_w += 1;
+ break;
+ }
+ else {
+ // nothing in top free list; chip away at the hat
+ //
+ u3a_v4_box* box_u;
+ box_u = _ca_v4_box_make_hat(siz_w, ald_w, off_w, 1);
+ return u3a_v4_boxto(box_u);
+ }
+ }
+ else { /* we got a non-null freelist */
+ u3_post all_p = *pfr_p;
+ all_p += c3_wiseof(u3a_v4_box) + off_w;
+ c3_w pad_w = c3_align(all_p, ald_w, C3_ALGHI) - all_p;
+ c3_w des_w = c3_align(siz_w + pad_w, u3a_v4_walign, C3_ALGHI);
+
+ if ( (des_w) > u3v4to(u3a_v4_fbox, *pfr_p)->box_u.siz_w ) {
+ /* This free block is too small. Continue searching.
+ */
+ pfr_p = &(u3v4to(u3a_v4_fbox, *pfr_p)->nex_p);
+ continue;
+ }
+ else { /* free block fits desired alloc size */
+ u3a_v4_box* box_u = &(u3v4to(u3a_v4_fbox, *pfr_p)->box_u);
+
+ /* We have found a free block of adequate size. Remove it
+ ** from the free list.
+ */
+ /* misc free list consistency checks.
+ TODO: in the future should probably only run for C3DBG builds */
+ {
+ if ( (0 != u3v4to(u3a_v4_fbox, *pfr_p)->pre_p) &&
+ (u3v4to(u3a_v4_fbox, u3v4to(u3a_v4_fbox, *pfr_p)->pre_p)->nex_p
+ != (*pfr_p)) )
+ { /* this->pre->nex isn't this */
+ u3_assert(!"loom: corrupt");
+ }
+
+ if( (0 != u3v4to(u3a_v4_fbox, *pfr_p)->nex_p) &&
+ (u3v4to(u3a_v4_fbox, u3v4to(u3a_v4_fbox, *pfr_p)->nex_p)->pre_p
+ != (*pfr_p)) )
+ { /* this->nex->pre isn't this */
+ u3_assert(!"loom: corrupt");
+ }
+
+ /* pop the block */
+ /* this->nex->pre = this->pre */
+ if ( 0 != u3v4to(u3a_v4_fbox, *pfr_p)->nex_p ) {
+ u3v4to(u3a_v4_fbox, u3v4to(u3a_v4_fbox, *pfr_p)->nex_p)->pre_p =
+ u3v4to(u3a_v4_fbox, *pfr_p)->pre_p;
+ }
+ /* this = this->nex */
+ *pfr_p = u3v4to(u3a_v4_fbox, *pfr_p)->nex_p;
+ }
+
+ /* If we can chop off another block, do it.
+ */
+ if ( (des_w + u3a_v4_minimum) <= box_u->siz_w ) {
+ /* Split the block.
+ */
+ c3_w* box_w = ((c3_w *)(void *)box_u);
+ c3_w* end_w = box_w + des_w;
+ c3_w lef_w = (box_u->siz_w - des_w);
+
+ _box_v4_attach(_box_v4_make(end_w, lef_w, 0));
+ return u3a_v4_boxto(_box_v4_make(box_w, des_w, 1));
+ }
+ else {
+ u3_assert(0 == box_u->use_w);
+ box_u->use_w = 1;
+ return u3a_v4_boxto(box_u);
+ }
+ }
+ }
+ }
+ }
+}
+
+static void*
+_ca_v4_walloc(c3_w len_w, c3_w ald_w, c3_w off_w)
+{
+ void* ptr_v;
+ ptr_v = _ca_v4_willoc(len_w, ald_w, off_w);
+ return ptr_v;
+}
+
+/* u3a_v4_walloc(): allocate storage words on hat heap.
+*/
+void*
+u3a_v4_walloc(c3_w len_w)
+{
+ void* ptr_v;
+ ptr_v = _ca_v4_walloc(len_w, 1, 0);
+ return ptr_v;
+}
+
+
+/* _box_v4_free(): free and coalesce.
+*/
+static void
+_box_v4_free(u3a_v4_box* box_u)
+{
+ c3_w* box_w = (c3_w *)(void *)box_u;
+
+ u3_assert(box_u->use_w != 0);
+ box_u->use_w -= 1;
+ if ( 0 != box_u->use_w ) {
+ return;
+ }
+
+ // always north
+ { /* north */
+ /* Try to coalesce with the block below.
+ */
+ if ( box_w != u3a_v4_into(u3R_v4->rut_p) ) {
+ c3_w laz_w = *(box_w - 1); /* the size of a box stored at the end of its allocation */
+ u3a_v4_box* pox_u = (u3a_v4_box*)(void *)(box_w - laz_w); /* the head of the adjacent box below */
+
+ if ( 0 == pox_u->use_w ) {
+ _box_v4_detach(pox_u);
+ _box_v4_make(pox_u, (laz_w + box_u->siz_w), 0);
+
+ box_u = pox_u;
+ box_w = (c3_w*)(void *)pox_u;
+ }
+ }
+
+ /* Try to coalesce with the block above, or the wilderness.
+ */
+ if ( (box_w + box_u->siz_w) == u3a_v4_into(u3R_v4->hat_p) ) {
+ u3R_v4->hat_p = u3a_v4_outa(box_w);
+ }
+ else {
+ u3a_v4_box* nox_u = (u3a_v4_box*)(void *)(box_w + box_u->siz_w);
+
+ if ( 0 == nox_u->use_w ) {
+ _box_v4_detach(nox_u);
+ _box_v4_make(box_u, (box_u->siz_w + nox_u->siz_w), 0);
+ }
+ _box_v4_attach(box_u);
+ }
+ } /* end north */
+}
+
+/* u3a_v4_wfree(): free storage.
+*/
+void
+u3a_v4_wfree(void* tox_v)
+{
+ _box_v4_free(u3a_v4_botox(tox_v));
+}
+
+/* u3a_v4_free(): free for aligned malloc.
+*/
+void
+u3a_v4_free(void* tox_v)
+{
+ if (NULL == tox_v)
+ return;
+
+ c3_w* tox_w = tox_v;
+ c3_w pad_w = tox_w[-1];
+ c3_w* org_w = tox_w - (pad_w + 1);
+
+ u3a_v4_wfree(org_w);
+}
+
+/* _me_v4_lose_north(): lose on a north road.
+*/
+static void
+_me_v4_lose_north(u3_noun dog)
+{
+top:
+ if ( c3y == u3a_v4_north_is_normal(u3R_v4, dog) ) {
+ c3_w* dog_w = u3a_v4_to_ptr(dog);
+ u3a_v4_box* box_u = u3a_v4_botox(dog_w);
+
+ if ( box_u->use_w > 1 ) {
+ box_u->use_w -= 1;
+ }
+ else {
+ if ( 0 == box_u->use_w ) {
+ fprintf(stderr, "bail: foul\r\n");
+ abort();
+ }
+ else {
+ if ( _(u3a_v4_is_pom(dog)) ) {
+ u3a_v4_cell* dog_u = (void *)dog_w;
+ u3_noun h_dog = dog_u->hed;
+ u3_noun t_dog = dog_u->tel;
+
+ if ( !_(u3a_v4_is_cat(h_dog)) ) {
+ _me_v4_lose_north(h_dog);
+ }
+ u3a_v4_wfree(dog_w); // same as cfree at home
+ if ( !_(u3a_v4_is_cat(t_dog)) ) {
+ dog = t_dog;
+ goto top;
+ }
+ }
+ else {
+ u3a_v4_wfree(dog_w);
+ }
+ }
+ }
+ }
+}
+
+/* u3a_v4_lose(): lose a reference count.
+*/
+void
+u3a_v4_lose(u3_noun som)
+{
+ if ( !_(u3a_v4_is_cat(som)) ) {
+ _me_v4_lose_north(som);
+ }
+}
+
+/* u3a_v4_ream(): ream free-lists.
+*/
+void
+u3a_v4_ream(void)
+{
+ u3p(u3a_v4_fbox) lit_p;
+ u3a_v4_fbox* fox_u;
+ c3_w sel_w, i_w;
+
+ for ( i_w = 0; i_w < u3a_v4_fbox_no; i_w++ ) {
+ lit_p = u3R_v4->all.fre_p[i_w];
+
+ while ( lit_p ) {
+ fox_u = u3v4to(u3a_v4_fbox, lit_p);
+ lit_p = fox_u->nex_p;
+ sel_w = _box_v4_slot(fox_u->box_u.siz_w);
+
+ if ( sel_w != i_w ) {
+ // inlined _box_detach()
+ //
+ {
+ u3p(u3a_v4_fbox) fre_p = u3v4of(u3a_v4_fbox, &(fox_u->box_u));
+ u3p(u3a_v4_fbox) pre_p = u3v4to(u3a_v4_fbox, fre_p)->pre_p;
+ u3p(u3a_v4_fbox) nex_p = u3v4to(u3a_v4_fbox, fre_p)->nex_p;
+
+ if ( nex_p ) {
+ if ( u3v4to(u3a_v4_fbox, nex_p)->pre_p != fre_p ) {
+ u3_assert(!"loom: corrupt");
+ }
+ u3v4to(u3a_v4_fbox, nex_p)->pre_p = pre_p;
+ }
+ if ( pre_p ) {
+ if( u3v4to(u3a_v4_fbox, pre_p)->nex_p != fre_p ) {
+ u3_assert(!"loom: corrupt");
+ }
+ u3v4to(u3a_v4_fbox, pre_p)->nex_p = nex_p;
+ }
+ else {
+ if ( fre_p != u3R_v4->all.fre_p[i_w] ) {
+ u3_assert(!"loom: corrupt");
+ }
+ u3R_v4->all.fre_p[i_w] = nex_p;
+ }
+ }
+
+ // inlined _box_attach()
+ {
+ u3p(u3a_v4_fbox) fre_p = u3v4of(u3a_v4_fbox, &(fox_u->box_u));
+ u3p(u3a_v4_fbox)* pfr_p = &u3R_v4->all.fre_p[sel_w];
+ u3p(u3a_v4_fbox) nex_p = *pfr_p;
+
+ u3v4to(u3a_v4_fbox, fre_p)->pre_p = 0;
+ u3v4to(u3a_v4_fbox, fre_p)->nex_p = nex_p;
+ if ( nex_p ) {
+ u3v4to(u3a_v4_fbox, nex_p)->pre_p = fre_p;
+ }
+ (*pfr_p) = fre_p;
+ }
+ }
+ }
+ }
+}
+
+/* u3a_v4_rewrite_ptr(): mark a pointer as already having been rewritten
+*/
+c3_o
+u3a_v4_rewrite_ptr(void* ptr_v)
+{
+ u3a_v4_box* box_u = u3a_v4_botox(ptr_v);
+ if ( box_u->use_w & 0x80000000 ) {
+ /* Already rewritten.
+ */
+ return c3n;
+ }
+ box_u->use_w |= 0x80000000;
+ return c3y;
+}
+
+u3_post
+u3a_v4_rewritten(u3_post ptr_v)
+{
+ u3a_v4_box* box_u = u3a_v4_botox(u3a_v4_into(ptr_v));
+ c3_w* box_w = (c3_w*) box_u;
+ return (u3_post)box_w[box_u->siz_w - 1];
+}
+
+ /*** jets.h
+ ***/
+/* _cj_v4_fink_free(): lose and free everything in a u3j_v4_fink.
+*/
+static void
+_cj_v4_fink_free(u3p(u3j_v4_fink) fin_p)
+{
+ c3_w i_w;
+ u3j_v4_fink* fin_u = u3v4to(u3j_v4_fink, fin_p);
+ u3a_v4_lose(fin_u->sat);
+ for ( i_w = 0; i_w < fin_u->len_w; ++i_w ) {
+ u3j_v4_fist* fis_u = &(fin_u->fis_u[i_w]);
+ u3a_v4_lose(fis_u->bat);
+ u3a_v4_lose(fis_u->pax);
+ }
+ u3a_v4_wfree(fin_u);
+}
+
+/* u3j_v4_site_lose(): lose references of u3j_site (but do not free).
+ */
+void
+u3j_v4_site_lose(u3j_v4_site* sit_u)
+{
+ u3a_v4_lose(sit_u->axe);
+ if ( u3_none != sit_u->bat ) {
+ u3a_v4_lose(sit_u->bat);
+ }
+ if ( u3_none != sit_u->bas ) {
+ u3a_v4_lose(sit_u->bas);
+ }
+ if ( u3_none != sit_u->loc ) {
+ u3a_v4_lose(sit_u->loc);
+ u3a_v4_lose(sit_u->lab);
+ if ( c3y == sit_u->fon_o ) {
+ _cj_v4_fink_free(sit_u->fin_p);
+ }
+ }
+}
+
+/* u3j_v4_rite_lose(): lose references of u3j_v4_rite (but do not free).
+ */
+void
+u3j_v4_rite_lose(u3j_v4_rite* rit_u)
+{
+ if ( (c3y == rit_u->own_o) && u3_none != rit_u->clu ) {
+ u3a_v4_lose(rit_u->clu);
+ _cj_v4_fink_free(rit_u->fin_p);
+ }
+}
+
+/* u3j_v4_free_hank(): free an entry from the hank cache.
+*/
+void
+u3j_v4_free_hank(u3_noun kev)
+{
+ u3a_v4_cell* kev_u = (u3a_v4_cell*) u3a_v4_to_ptr(kev);
+ u3j_v4_hank* han_u = u3v4to(u3j_v4_hank, kev_u->tel);
+ if ( u3_none != han_u->hax ) {
+ u3a_v4_lose(han_u->hax);
+ u3j_v4_site_lose(&(han_u->sit_u));
+ }
+ u3a_v4_wfree(han_u);
+}
+
+
+ /*** hashtable.h
+ ***/
+/* u3h_v4_new_cache(): create hashtable with bounded size.
+*/
+u3p(u3h_v4_root)
+u3h_v4_new_cache(c3_w max_w)
+{
+ u3h_v4_root* har_u = u3a_v4_walloc(c3_wiseof(u3h_v4_root));
+ u3p(u3h_v4_root) har_p = u3v4of(u3h_v4_root, har_u);
+ c3_w i_w;
+
+ har_u->max_w = max_w;
+ har_u->use_w = 0;
+ har_u->arm_u.mug_w = 0;
+ har_u->arm_u.inx_w = 0;
+
+ for ( i_w = 0; i_w < 64; i_w++ ) {
+ har_u->sot_w[i_w] = 0;
+ }
+ return har_p;
+}
+
+/* u3h_v4_new(): create hashtable.
+*/
+u3p(u3h_v4_root)
+u3h_v4_new(void)
+{
+ return u3h_v4_new_cache(0);
+}
+
+/* _ch_free_buck(): free bucket
+*/
+static void
+_ch_v4_free_buck(u3h_v4_buck* hab_u)
+{
+ //fprintf(stderr, "free buck\r\n");
+ c3_w i_w;
+
+ for ( i_w = 0; i_w < hab_u->len_w; i_w++ ) {
+ u3a_v4_lose(u3h_v4_slot_to_noun(hab_u->sot_w[i_w]));
+ }
+ u3a_v4_wfree(hab_u);
+}
+
+/* _ch_free_node(): free node.
+*/
+static void
+_ch_v4_free_node(u3h_v4_node* han_u, c3_w lef_w, c3_o pin_o)
+{
+ c3_w len_w = c3_pc_w(han_u->map_w);
+ c3_w i_w;
+
+ lef_w -= 5;
+
+ for ( i_w = 0; i_w < len_w; i_w++ ) {
+ c3_w sot_w = han_u->sot_w[i_w];
+ if ( _(u3h_v4_slot_is_null(sot_w))) {
+ } else if ( _(u3h_v4_slot_is_noun(sot_w)) ) {
+ u3a_v4_lose(u3h_v4_slot_to_noun(sot_w));
+ } else {
+ void* hav_v = u3h_v4_slot_to_node(sot_w);
+
+ if ( 0 == lef_w ) {
+ _ch_v4_free_buck(hav_v);
+ } else {
+ _ch_v4_free_node(hav_v, lef_w, pin_o);
+ }
+ }
+ }
+ u3a_v4_wfree(han_u);
+}
+
+/* u3h_v4_free(): free hashtable.
+*/
+void
+u3h_v4_free(u3p(u3h_v4_root) har_p)
+{
+ u3h_v4_root* har_u = u3v4to(u3h_v4_root, har_p);
+ c3_w i_w;
+
+ for ( i_w = 0; i_w < 64; i_w++ ) {
+ c3_w sot_w = har_u->sot_w[i_w];
+
+ if ( _(u3h_v4_slot_is_noun(sot_w)) ) {
+ u3a_v4_lose(u3h_v4_slot_to_noun(sot_w));
+ }
+ else if ( _(u3h_v4_slot_is_node(sot_w)) ) {
+ u3h_v4_node* han_u = u3h_v4_slot_to_node(sot_w);
+
+ _ch_v4_free_node(han_u, 25, i_w == 57);
+ }
+ }
+ u3a_v4_wfree(har_u);
+}
+
+
+/* _ch_walk_buck(): walk bucket for gc.
+*/
+static void
+_ch_v4_walk_buck(u3h_v4_buck* hab_u, void (*fun_f)(u3_noun, void*), void* wit)
+{
+ c3_w i_w;
+
+ for ( i_w = 0; i_w < hab_u->len_w; i_w++ ) {
+ fun_f(u3h_v4_slot_to_noun(hab_u->sot_w[i_w]), wit);
+ }
+}
+
+/* _ch_walk_node(): walk node for gc.
+*/
+static void
+_ch_v4_walk_node(u3h_v4_node* han_u, c3_w lef_w, void (*fun_f)(u3_noun, void*), void* wit)
+{
+ c3_w len_w = c3_pc_w(han_u->map_w);
+ c3_w i_w;
+
+ lef_w -= 5;
+
+ for ( i_w = 0; i_w < len_w; i_w++ ) {
+ c3_w sot_w = han_u->sot_w[i_w];
+
+ if ( _(u3h_v4_slot_is_noun(sot_w)) ) {
+ u3_noun kev = u3h_v4_slot_to_noun(sot_w);
+
+ fun_f(kev, wit);
+ }
+ else {
+ void* hav_v = u3h_v4_slot_to_node(sot_w);
+
+ if ( 0 == lef_w ) {
+ _ch_v4_walk_buck(hav_v, fun_f, wit);
+ } else {
+ _ch_v4_walk_node(hav_v, lef_w, fun_f, wit);
+ }
+ }
+ }
+}
+
+/* u3h_v4_walk_with(): traverse hashtable with key, value fn and data
+ * argument; RETAINS.
+*/
+void
+u3h_v4_walk_with(u3p(u3h_v4_root) har_p,
+ void (*fun_f)(u3_noun, void*),
+ void* wit)
+{
+ u3h_v4_root* har_u = u3v4to(u3h_v4_root, har_p);
+ c3_w i_w;
+
+ for ( i_w = 0; i_w < 64; i_w++ ) {
+ c3_w sot_w = har_u->sot_w[i_w];
+
+ if ( _(u3h_v4_slot_is_noun(sot_w)) ) {
+ u3_noun kev = u3h_v4_slot_to_noun(sot_w);
+
+ fun_f(kev, wit);
+ }
+ else if ( _(u3h_v4_slot_is_node(sot_w)) ) {
+ u3h_v4_node* han_u = u3h_v4_slot_to_node(sot_w);
+
+ _ch_v4_walk_node(han_u, 25, fun_f, wit);
+ }
+ }
+}
+
+/* _ch_walk_plain(): use plain u3_noun fun_f for each node
+ */
+static void
+_ch_v4_walk_plain(u3_noun kev, void* wit)
+{
+ void (*fun_f)(u3_noun) = (void (*)(u3_noun))wit;
+ fun_f(kev);
+}
+
+/* u3h_v4_walk(): u3h_v4_walk_with, but with no data argument
+*/
+void
+u3h_v4_walk(u3p(u3h_v4_root) har_p, void (*fun_f)(u3_noun))
+{
+ u3h_v4_walk_with(har_p, _ch_v4_walk_plain, (void *)fun_f);
+}
+
+/* u3j_v4_reclaim(): clear ad-hoc persistent caches to reclaim memory.
+*/
+void
+u3j_v4_reclaim(void)
+{
+ // clear the jet hank cache
+ //
+ u3h_v4_walk(u3R_v4->jed.han_p, u3j_v4_free_hank);
+ u3h_v4_free(u3R_v4->jed.han_p);
+ u3R_v4->jed.han_p = u3h_v4_new();
+}
+
+
+/* _cn_prog_free(): free memory retained by program pog_u
+*/
+static void
+_cn_v4_prog_free(u3n_v4_prog* pog_u)
+{
+ // ream pointers inline
+ //
+ c3_w pad_w = (8 - pog_u->byc_u.len_w % 8) % 8;
+ c3_w pod_w = pog_u->lit_u.len_w % 2;
+ c3_w ped_w = pog_u->mem_u.len_w % 2;
+
+ pog_u->byc_u.ops_y = (c3_y*)((void*) pog_u) + sizeof(u3n_v4_prog);
+ pog_u->lit_u.non = (u3_noun*) (pog_u->byc_u.ops_y + pog_u->byc_u.len_w + pad_w);
+ pog_u->mem_u.sot_u = (u3n_memo*) (pog_u->lit_u.non + pog_u->lit_u.len_w + pod_w);
+ pog_u->cal_u.sit_u = (u3j_v4_site*) (pog_u->mem_u.sot_u + pog_u->mem_u.len_w + ped_w);
+ pog_u->reg_u.rit_u = (u3j_v4_rite*) (pog_u->cal_u.sit_u + pog_u->cal_u.len_w);
+
+ // NB: site reaming elided
+
+ c3_w dex_w;
+ for (dex_w = 0; dex_w < pog_u->lit_u.len_w; ++dex_w) {
+ u3a_v4_lose(pog_u->lit_u.non[dex_w]);
+ }
+ for (dex_w = 0; dex_w < pog_u->mem_u.len_w; ++dex_w) {
+ u3a_v4_lose(pog_u->mem_u.sot_u[dex_w].key);
+ }
+ for (dex_w = 0; dex_w < pog_u->cal_u.len_w; ++dex_w) {
+ u3j_v4_site_lose(&(pog_u->cal_u.sit_u[dex_w]));
+ }
+ for (dex_w = 0; dex_w < pog_u->reg_u.len_w; ++dex_w) {
+ u3j_v4_rite_lose(&(pog_u->reg_u.rit_u[dex_w]));
+ }
+ u3a_v4_free(pog_u);
+}
+
+/* _n_v4_feb(): u3h_v4_walk helper for u3n_v4_free
+ */
+static void
+_n_v4_feb(u3_noun kev)
+{
+ u3a_v4_cell* kev_u = (u3a_v4_cell*) u3a_v4_to_ptr(kev);
+ _cn_v4_prog_free(u3v4to(u3n_v4_prog, kev_u->tel));
+}
+
+/* u3n_v4_free(): free bytecode cache
+ */
+void
+u3n_v4_free()
+{
+ u3p(u3h_v4_root) har_p = u3R_v4->byc.har_p;
+ u3h_v4_walk(har_p, _n_v4_feb);
+ u3h_v4_free(har_p);
+}
+
+/* u3n_v4_reclaim(): clear ad-hoc persistent caches to reclaim memory.
+*/
+void
+u3n_v4_reclaim(void)
+{
+ // clear the bytecode cache
+ //
+ // We can't just u3h_free() -- the value is a post to a u3n_prog.
+ // Note that the hank cache *must* also be freed (in u3j_reclaim())
+ //
+ u3n_v4_free();
+ u3R_v4->byc.har_p = u3h_v4_new();
+}
+
+ /*** manage.h
+ ***/
+
+/* u3m_v4_reclaim: clear persistent caches to reclaim memory.
+*/
+void
+u3m_v4_reclaim(void)
+{
+ // NB: subset: u3a and u3v excluded
+ u3j_v4_reclaim();
+ u3n_v4_reclaim();
+}
+
+/*** init
+***/
+
+void
+u3_v4_load(c3_z wor_i)
+{
+ c3_w ver_w = *(u3_Loom_v4 + wor_i - 1);
+
+ u3_assert( U3V_VER4 == ver_w );
+
+ c3_w* mem_w = u3_Loom_v4 + u3a_v4_walign;
+ c3_w siz_w = c3_wiseof(u3v_v4_home);
+ c3_w len_w = wor_i - u3a_v4_walign;
+ c3_w* mat_w = c3_align(mem_w + len_w - siz_w, u3a_v4_balign, C3_ALGLO);
+
+ u3H_v4 = (void *)mat_w;
+ u3R_v4 = &u3H_v4->rod_u;
+
+ u3R_v4->cap_p = u3R_v4->mat_p = u3a_v4_outa(u3H_v4);
+} \ No newline at end of file
diff --git a/vere/pkg/past/v4.h b/vere/pkg/past/v4.h
new file mode 100644
index 0000000..0d0e536
--- /dev/null
+++ b/vere/pkg/past/v4.h
@@ -0,0 +1,242 @@
+#ifndef U3_V4_H
+#define U3_V4_H
+
+#include "v5.h"
+
+ /*** current
+ ***/
+# define u3_v4_noun u3_v5_noun
+# define u3_v4_none u3_v5_none
+
+# define u3a_v4_heap u3a_v5_heap
+# define u3a_v4_is_cat u3a_v5_is_cat
+# define u3a_v4_is_cell u3a_v5_is_cell
+# define u3a_v4_is_north u3a_v5_is_north
+# define u3a_v4_is_pom u3a_v5_is_pom
+# define u3a_v4_is_pug u3a_v5_is_pug
+# define u3a_v4_north_is_normal u3a_v5_north_is_normal
+
+# define u3j_v4_fink u3j_v5_fink
+# define u3j_v4_fist u3j_v5_fist
+# define u3j_v4_hank u3j_v5_hank
+# define u3j_v4_rite u3j_v5_rite
+# define u3j_v4_site u3j_v5_site
+
+# define u3h_v4_buck u3h_v5_buck
+# define u3h_v4_node u3h_v5_node
+# define u3h_v4_root u3h_v5_root
+# define u3h_v4_slot_is_node u3h_v5_slot_is_node
+# define u3h_v4_slot_is_noun u3h_v5_slot_is_noun
+# define u3h_v4_slot_is_null u3h_v5_slot_is_null
+# define u3h_v4_noun_to_slot u3h_v5_noun_to_slot
+# define u3h_v4_slot_to_noun u3h_v5_slot_to_noun
+
+ /*** allocate.h
+ ***/
+
+# define u3_Loom_v4 (u3_Loom + ((c3_z)1 << u3a_bits_max))
+# define u3a_v4_vits 1
+# define u3a_v4_walign (1 << u3a_v4_vits)
+# define u3a_v4_balign (sizeof(c3_w)*u3a_v4_walign)
+# define u3a_v4_fbox_no 27
+# define u3a_v4_minimum ((c3_w)( 1 + c3_wiseof(u3a_v4_box) + c3_wiseof(u3a_v4_cell) ))
+# define u3a_v4_into(x) ((void *)(u3_Loom_v4 + (x)))
+# define u3a_v4_outa(p) ((c3_w *)(void *)(p) - u3_Loom_v4)
+# define u3v4to(type, x) ((type *)u3a_v4_into(x))
+# define u3v4of(type, x) (u3a_v4_outa((type*)x))
+
+
+ /* u3a_v4_noun: header common to every boxed v4 noun (cached mug). */
+ typedef struct {
+ c3_w mug_w;
+ } u3a_v4_noun;
+
+ /* u3a_v4_atom: indirect atom; len_w words of data follow in buf_w. */
+ typedef struct {
+ c3_w mug_w;
+ c3_w len_w;
+ c3_w buf_w[0];
+ } u3a_v4_atom;
+
+ /* u3a_v4_cell: cell with head and tail nouns.
+ ** NOTE(review): hed/tel are spelled with the current u3_noun rather
+ ** than u3_v4_noun; the two are the same type via the v5.h aliases.
+ */
+ typedef struct {
+ c3_w mug_w;
+ u3_noun hed;
+ u3_noun tel;
+ } u3a_v4_cell;
+
+ /* u3a_v4_box: allocation header preceding every boxed allocation. */
+ typedef struct _u3a_v4_box {
+ c3_w siz_w; // size of this box
+ c3_w use_w; // reference count; free if 0
+ } u3a_v4_box;
+
+ /* u3a_v4_fbox: free box, linked through pre_p/nex_p posts on the
+ ** road's per-size free lists (all.fre_p) or cell list (all.cel_p).
+ */
+ typedef struct _u3a_v4_fbox {
+ u3a_v4_box box_u;
+ u3p(struct _u3a_v4_fbox) pre_p;
+ u3p(struct _u3a_v4_fbox) nex_p;
+ } u3a_v4_fbox;
+
+ /* u3a_v4_road: v4 layout of the road (allocation frame), retained
+ ** here so migration code can read v4 images in place.
+ */
+ typedef struct _u3a_v4_road {
+ u3p(struct _u3a_v4_road) par_p; // parent road
+ u3p(struct _u3a_v4_road) kid_p; // child road list
+ u3p(struct _u3a_v4_road) nex_p; // sibling road
+
+ u3p(c3_w) cap_p; // top of transient region
+ u3p(c3_w) hat_p; // top of durable region
+ u3p(c3_w) mat_p; // bottom of transient region
+ u3p(c3_w) rut_p; // bottom of durable region
+ u3p(c3_w) ear_p; // original cap if kid is live
+
+ c3_w off_w; // spin stack offset
+ c3_w fow_w; // spin stack overflow count
+
+ c3_w fut_w[30]; // futureproof buffer
+
+ struct { // escape buffer
+ union {
+ jmp_buf buf;
+ c3_w buf_w[256]; // futureproofing
+ };
+ } esc;
+
+ struct { // miscellaneous config
+ c3_w fag_w; // flag bits
+ } how; //
+
+ struct { // allocation pools
+ u3p(u3a_v4_fbox) fre_p[u3a_v4_fbox_no]; // heap by node size log
+ u3p(u3a_v4_fbox) cel_p; // custom cell allocator
+ c3_w fre_w; // number of free words
+ c3_w max_w; // maximum allocated
+ } all;
+
+ // NOTE(review): the jed tables are spelled with the current
+ // u3h_root rather than u3h_v4_root; the names are aliases of the
+ // same type via v5.h, so the layout is unaffected.
+ struct {
+ u3p(u3h_root) hot_p; // hot state (home road only)
+ u3p(u3h_root) war_p; // warm state
+ u3p(u3h_root) cod_p; // cold state
+ u3p(u3h_root) han_p; // hank cache
+ u3p(u3h_root) bas_p; // battery hashes
+ } jed; // jet dashboard
+
+ struct { // bytecode state
+ u3p(u3h_v4_root) har_p; // formula->post of bytecode
+ } byc;
+
+ struct { // scry namespace
+ u3_noun gul; // (list $+(* (unit (unit)))) now
+ } ski;
+
+ struct { // trace stack
+ u3_noun tax; // (list ,*)
+ u3_noun mer; // emergency buffer to release
+ } bug;
+
+ struct { // profile stack
+ c3_d nox_d; // nock steps
+ c3_d cel_d; // cell allocations
+ u3_noun don; // (list batt)
+ u3_noun trace; // (list trace)
+ u3_noun day; // doss, only in u3H (moveme)
+ } pro;
+
+ struct { // memoization caches
+ u3p(u3h_v4_root) har_p; // transient
+ u3p(u3h_v4_root) per_p; // persistent
+ } cax;
+ } u3a_v4_road;
+ typedef u3a_v4_road u3_v4_road;
+
+ extern u3a_v4_road* u3a_v4_Road;
+# define u3R_v4 u3a_v4_Road
+
+# define u3a_v4_to_off(som) (((som) & 0x3fffffff) << u3a_v4_vits)
+# define u3a_v4_to_ptr(som) (u3a_v4_into(u3a_v4_to_off(som)))
+# define u3a_v4_to_pug(off) ((off >> u3a_v4_vits) | 0x80000000)
+# define u3a_v4_to_pom(off) ((off >> u3a_v4_vits) | 0xc0000000)
+# define u3a_v4_botox(tox_v) ( (u3a_v4_box *)(void *)(tox_v) - 1 )
+
+// XX abort() instead of u3m_bail?
+//
+# define u3a_v4_head(som) \
+ ( _(u3a_v4_is_cell(som)) \
+ ? ( ((u3a_v4_cell *)u3a_v4_to_ptr(som))->hed )\
+ : u3m_bail(c3__exit) )
+# define u3a_v4_tail(som) \
+ ( _(u3a_v4_is_cell(som)) \
+ ? ( ((u3a_v4_cell *)u3a_v4_to_ptr(som))->tel )\
+ : u3m_bail(c3__exit) )
+
+ /* v4 ports of the corresponding allocate.h routines, operating on
+ ** the v4 loom (u3_Loom_v4) and road (u3R_v4).  The ream/rewrite
+ ** entry points presumably adjust loom posts during migration --
+ ** TODO confirm against migrate_v*.c.
+ */
+ void*
+ u3a_v4_walloc(c3_w len_w);
+ void
+ u3a_v4_wfree(void* lag_v);
+ void
+ u3a_v4_free(void* tox_v);
+ void
+ u3a_v4_lose(u3_weak som);
+ void
+ u3a_v4_ream(void);
+ c3_o
+ u3a_v4_rewrite_ptr(void* ptr_v);
+ u3_post
+ u3a_v4_rewritten(u3_post som_p);
+
+
+ /*** jets.h
+ ***/
+ /* release a registration / call site; free one hank-cache entry */
+ void
+ u3j_v4_rite_lose(u3j_v4_rite* rit_u);
+ void
+ u3j_v4_site_lose(u3j_v4_site* sit_u);
+ void
+ u3j_v4_free_hank(u3_noun kev);
+
+
+ /*** hashtable.h
+ ***/
+ /* v4 hashtable lifecycle: free, create (plain or bounded cache),
+ ** and walk every stored noun, optionally with a context pointer.
+ */
+ void
+ u3h_v4_free(u3p(u3h_v4_root) har_p);
+ u3p(u3h_v4_root)
+ u3h_v4_new(void);
+ u3p(u3h_v4_root)
+ u3h_v4_new_cache(c3_w max_w);
+ void
+ u3h_v4_walk(u3p(u3h_v4_root) har_p, void (*fun_f)(u3_noun));
+ void
+ u3h_v4_walk_with(u3p(u3h_v4_root) har_p,
+ void (*fun_f)(u3_noun, void*),
+ void* wit);
+
+ /*** manage.h
+ ***/
+ void
+ u3m_v4_reclaim(void);
+
+ /*** vortex.h
+ ***/
+ /* u3v_v4_arvo: v4 arvo state.  Packed so the in-memory layout
+ ** matches the v4 image exactly -- presumably required for reading
+ ** snapshots; confirm against the migration code.
+ */
+ typedef struct __attribute__((__packed__)) _u3v_v4_arvo {
+ c3_d eve_d; // event number
+ u3_noun yot; // cached gates
+ u3_noun now; // current time
+ u3_noun roc; // kernel core
+ } u3v_v4_arvo;
+
+ typedef c3_w u3v_v4_version;
+
+ /* u3v_home: all internal (within image) state.
+ ** NB: version must be last for discriminability in north road
+ */
+ typedef struct _u3v_v4_home {
+ u3a_v4_road rod_u; // storage state
+ u3v_v4_arvo arv_u; // arvo state
+ u3v_v4_version ver_w; // version number
+ } u3v_v4_home;
+
+/* global home-road pointer and accessors, mirroring u3H/u3A */
+extern u3v_v4_home* u3v_v4_Home;
+# define u3H_v4 u3v_v4_Home
+# define u3A_v4 (&(u3v_v4_Home->arv_u))
+
+
+ /*** init
+ ***/
+ void
+ u3_v4_load(c3_z wor_i);
+
+#endif /* U3_V4_H */
diff --git a/vere/pkg/past/v5.h b/vere/pkg/past/v5.h
new file mode 100644
index 0000000..4dcdb86
--- /dev/null
+++ b/vere/pkg/past/v5.h
@@ -0,0 +1,60 @@
+#ifndef U3_V5_H
+#define U3_V5_H
+
+#include "allocate.h"
+#include "hashtable.h"
+#include "imprison.h"
+#include "jets.h"
+#include "nock.h"
+#include "retrieve.h"
+#include "vortex.h"
+
+ /*** current
+ ***/
+// v5_* names alias the current (unversioned) u3 names directly; the
+// older versioned headers (v4.h etc.) chain their aliases to these.
+//
+// Fix: the original block defined u3a_v5_is_pom twice (once in each
+// group below); the redundant redefinition has been removed.
+# define u3_v5_cell u3_cell
+# define u3_v5_noun u3_noun
+# define u3_v5_none u3_none
+
+# define u3A_v5 u3A
+# define u3R_v5 u3R
+# define u3j_v5_boot u3j_boot
+# define u3j_v5_ream u3j_ream
+# define u3a_v5_walloc u3a_walloc
+# define u3a_v5_to_pug u3a_to_pug
+# define u3a_v5_outa u3a_outa
+# define u3a_v5_gain u3a_gain
+# define u3i_v5_cell u3i_cell
+# define u3h_v5_put u3h_put
+# define u3a_v5_lose u3a_lose
+
+# define u3a_v5_atom u3a_atom
+# define u3a_v5_is_atom u3a_is_atom
+# define u3a_v5_is_pom u3a_is_pom
+# define u3a_v5_north_is_normal u3a_north_is_normal
+# define u3n_v5_prog u3n_prog
+# define u3r_v5_mug_both u3r_mug_both
+# define u3r_v5_mug_words u3r_mug_words
+
+# define u3a_v5_heap u3a_heap
+# define u3a_v5_is_cat u3a_is_cat
+# define u3a_v5_is_cell u3a_is_cell
+# define u3a_v5_is_north u3a_is_north
+# define u3a_v5_is_pug u3a_is_pug
+
+# define u3j_v5_fink u3j_fink
+# define u3j_v5_fist u3j_fist
+# define u3j_v5_hank u3j_hank
+# define u3j_v5_rite u3j_rite
+# define u3j_v5_site u3j_site
+
+# define u3h_v5_buck u3h_buck
+# define u3h_v5_node u3h_node
+# define u3h_v5_root u3h_root
+# define u3h_v5_slot_is_node u3h_slot_is_node
+# define u3h_v5_slot_is_noun u3h_slot_is_noun
+# define u3h_v5_slot_is_null u3h_slot_is_null
+# define u3h_v5_noun_to_slot u3h_noun_to_slot
+# define u3h_v5_slot_to_noun u3h_slot_to_noun
+
+#endif /* U3_V5_H */