summaryrefslogtreecommitdiff
path: root/vere/pkg/noun/v2/manage.c
blob: 16981d21cc66aa49d388aa7eda057dcee4c25520 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
/// @file

#include "v2/manage.h"

#include "v1/allocate.h"
#include "v2/allocate.h"
#include "v2/hashtable.h"
#include "v2/jets.h"
#include "v2/nock.h"
#include "v2/options.h"
#include "../vortex.h"
#include "v1/vortex.h"
#include "v2/vortex.h"

/* _cm_pack_rewrite(): trace through arena, rewriting pointers.
**
**   delegates to the per-subsystem compaction rewriters, in dependency
**   order: vortex (kernel state), jets, nock (bytecode), and finally the
**   allocator's own structures.  NOTE(review): the call order appears
**   significant (later passes rely on earlier state being rewritten) —
**   do not reorder without confirming against the v2 migration design.
*/
static void
_cm_pack_rewrite(void)
{
  u3v_v2_mig_rewrite_compact();
  u3j_v2_mig_rewrite_compact();
  u3n_v2_mig_rewrite_compact();
  u3a_v2_mig_rewrite_compact();
}

/* _migrate_reclaim(): free persistent caches, via the v1 reclaimer.
**
**   first phase of the migration; mirrors the reclaim step of |pack,
**   but uses the v1 entry point since the loom is still in v1 format.
*/
static void
_migrate_reclaim(void)
{
  //  XX update this and similar printfs
  fprintf(stderr, "loom: migration reclaim\r\n");
  u3m_v1_reclaim();
}

/* _migrate_seek(): compute and record the post-move address of each live box.
**
**   walks every box on the home-road heap and stashes its destination
**   post (new_p) in the box's trailing size word, to be consumed by
**   _migrate_rewrite()/_migrate_move().
**
**   very much like u3a_v2_pack_seek with the following changes:
**   - there is no need to account for free space as |pack is performed before
**     the migration
**   - odd sized boxes will be padded by one word to achieve an even size
**   - rut will be moved from one word ahead of u3_Loom to two words ahead
*/
static void
_migrate_seek(const u3a_v2_road *rod_u)
{
  c3_w *    box_w = u3a_v2_into(rod_u->rut_p);
  c3_w *    end_w = u3a_v2_into(rod_u->hat_p);
  u3_post   new_p = (rod_u->rut_p + 1 + c3_wiseof(u3a_v2_box));
  u3a_v2_box * box_u = (void *)box_w;

  fprintf(stderr, "loom: migration seek\r\n");

  for (; box_w < end_w
         ; box_w += box_u->siz_w
         , box_u = (void*)box_w)
    {
      //  a zero-sized box (used or free) would loop forever below;
      //  assert before the free-box early-out, not after it
      u3_assert(box_u->siz_w);

      if (!box_u->use_w)
        continue;

      //  record destination in the trailing size word; _migrate_move()
      //  asserts this value matches its own recomputation
      box_w[box_u->siz_w - 1] = new_p;
      //  destinations are kept dword-aligned: odd sizes pad by one word
      new_p = c3_align(new_p + box_u->siz_w, 2, C3_ALGHI);
    }
}

/* _migrate_rewrite(): rewrite all loom pointers to their post-move values.
**
**   third phase of the migration; delegates to _cm_pack_rewrite(), which
**   runs the per-subsystem compaction rewriters.
*/
static void
_migrate_rewrite(void)
{
  fprintf(stderr, "loom: migration rewrite\r\n");

  _cm_pack_rewrite();
}

/* _migrate_move(): relocate each live box to the address recorded by
** _migrate_seek(), padding odd-sized boxes to even size, then reset the
** road and allocator state to the compacted heap.
**
**   the heap is first shifted up by off_w words so that every box's
**   destination is at or below its shifted position, letting the copy
**   loop run low-to-high without the write head overtaking the read head.
*/
static void
_migrate_move(u3a_v2_road *rod_u)
{
  fprintf(stderr, "loom: migration move\r\n");

  //  size of the live heap in bytes (rut..hat)
  c3_z hiz_z = u3a_v2_heap(rod_u) * sizeof(c3_w);

  /* calculate required shift distance to prevent write head overlapping read head */
  c3_w  off_w = 1;  /* at least 1 word because u3R_v1->rut_p migrates from 1 to 2 */
  for (u3a_v2_box *box_u = u3a_v2_into(rod_u->rut_p)
         ; (void *)box_u < u3a_v2_into(rod_u->hat_p)
         ; box_u = (void *)((c3_w *)box_u + box_u->siz_w))
    off_w += box_u->siz_w & 1; /* odd-sized boxes are padded by one word */

  /* shift */
  //  regions overlap, hence memmove rather than memcpy
  memmove(u3a_v2_into(u3H_v2->rod_u.rut_p + off_w),
          u3a_v2_into(u3H_v2->rod_u.rut_p),
          hiz_z);
  /* manually zero the former rut */
  *(c3_w *)u3a_v2_into(rod_u->rut_p) = 0;

  /* relocate boxes to DWORD-aligned addresses stored in trailing size word */
  c3_w *box_w = u3a_v2_into(rod_u->rut_p + off_w);
  c3_w *end_w = u3a_v2_into(rod_u->hat_p + off_w);
  u3a_v2_box *old_u = (void *)box_w;
  //  siz_w is cached before each body runs, since the copy below may
  //  clobber the old box header
  c3_w siz_w = old_u->siz_w;
  //  first destination: must match _migrate_seek()'s starting new_p
  u3p(c3_w) new_p = rod_u->rut_p + 1 + c3_wiseof(u3a_v2_box);
  c3_w *new_w;

  for (; box_w < end_w
         ; box_w += siz_w
         , old_u = (void *)box_w
         , siz_w = old_u->siz_w) {
    //  clear the top bit of use_w
    //  NOTE(review): presumably a mark bit set during the rewrite pass —
    //  confirm against the v2 rewrite implementation
    old_u->use_w &= 0x7fffffff;

    if (!old_u->use_w)
      continue;

    new_w = (void *)u3a_v2_botox(u3a_v2_into(new_p));
    //  the trailing size word must hold the destination stashed by seek
    u3_assert(box_w[siz_w - 1] == new_p);
    //  destination never lies above the (shifted) source: safe to copy forward
    u3_assert(new_w <= box_w);

    //  copy payload (all words except the trailing size word)
    c3_w i_w;
    for (i_w = 0; i_w < siz_w - 1; i_w++)
      new_w[i_w] = box_w[i_w];

    if (siz_w & 1) {
      new_w[i_w++] = 0;         /* pad odd sized boxes */
      new_w[i_w++] = siz_w + 1; /* restore trailing size word */
      new_w[0] = siz_w + 1;     /* and the leading size word */
    }
    else {
      new_w[i_w++] = siz_w;
    }

    //  i_w is now the (even) size of the relocated box
    new_p += i_w;
  }

  /* restore proper heap state */
  rod_u->rut_p = 2;
  rod_u->hat_p = new_p - c3_wiseof(u3a_v2_box);

  /* like |pack, clear the free lists and cell allocator */
  for (c3_w i_w = 0; i_w < u3a_v2_fbox_no; i_w++)
    u3R_v1->all.fre_p[i_w] = 0;

  u3R_v1->all.fre_w = 0;
  u3R_v1->all.cel_p = 0;
}


/* u3m_v2_migrate: perform loom migration if necessary.
*/
void
u3m_v2_migrate(void)
{
  c3_w len_w = u3C_v2.wor_i - 1;
  c3_w ver_w = *(u3_Loom + len_w);

  u3_assert( U3V_VER1 == ver_w );

  c3_w* mem_w = u3_Loom + 1;
  c3_w  siz_w = c3_wiseof(u3v_v1_home);
  c3_w* mat_w = (mem_w + len_w) - siz_w;

  u3H_v1 = (void *)mat_w;
  u3R_v1 = &u3H_v1->rod_u;

  u3R_v1->cap_p = u3R_v1->mat_p = u3a_v1_outa(u3H_v1);

  fprintf(stderr, "loom: pointer compression migration running...\r\n");

  /* perform the migration in a pattern similar to |pack */
  _migrate_reclaim();
  _migrate_seek(&u3H_v1->rod_u);
  _migrate_rewrite();
  _migrate_move(&u3H_v1->rod_u);

  /* finally update the version and commit to disk */
  u3H_v1->ver_w = U3V_VER2;

  fprintf(stderr, "loom: pointer compression migration done\r\n");
}