flok 0.0.41 → 0.0.42

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: d5f0a382decc21b5e55f99815a7b23ecf1f53009
- data.tar.gz: b5f74e961712e188e97fc94da0b51fbac5e664b2
+ metadata.gz: bebf06696d30319a7889c7039753077ad38eda75
+ data.tar.gz: 4c6bc9ed05585a54a65a5a4e995e16a60c85351f
  SHA512:
- metadata.gz: 56c35e867d9766f03239f2ce0343fac6ba34f4af931b6b30e1b69ce3bd5c13e2d3dd8b9a11e9e6d16dbf87247a1757f68d7eedd26fe20da14297a558cd95394c
- data.tar.gz: 48bb0f9c0b18b43582b0960bcd661f233fbe3d471025b41402a32dc32a343a40a23e108741f5fcb9b076e74555c572979828f88cdc1bf3f4cc8d23c254a6d68c
+ metadata.gz: 2075905fba3ce9231fd8cdec4ed32e5ac7bed5b41be301cb75e9de651951b631239e4fa5d0f2166f2d1a4f89c3e6e04ab4319ee8e6d6a7c8bb03c54d4cdef322
+ data.tar.gz: 591a0e811654e08ad243c574cfa58a8681cf522df86bf610c3c8ebdff141700f6d4fec6c35c05066a7019d8f9940b852f1dd31fb893dcf7c23b83249dc935604
@@ -0,0 +1,18 @@
+ //Configure pg_dummy0
+ <% [0].each do |i| %>
+ function pg_dummy<%= i %>_init(ns, options) {
+ pg_dummy<%= i %>_init_params = {ns: ns, options: options};
+ pg_dummy<%= i %>_ns = ns;
+
+ pg_dummy<%= i %>_spec_did_init = true;
+ }
+
+ function pg_dummy<%= i %>_watch(id, page) {
+ }
+
+ function pg_dummy<%= i %>_unwatch(id) {
+ }
+
+ function pg_dummy<%= i %>_write(page) {
+ }
+ <% end %>
@@ -24,8 +24,25 @@ service :vm do
 
  //Cache
  function vm_cache_write(ns, page) {
+ <% if @debug %>
+ if (vm_transaction_in_progress === false) { throw "vm_cache_write called but a transaction was not in progress. Make sure to call vm_transaction_begin and vm_transaction_end" }
+ if (vm_transaction_ns !== null && vm_transaction_ns !== ns) { throw "vm_cache_write called, and is within a vm_transaction but the ns given: " + ns + " does not match the transaction ns of: " + vm_transaction_ns };
+ <% end %>
+
+ //Namespace is needed for vm_transaction_end
+ vm_transaction_ns = ns;
+
+ vm_rehash_page(page);
+
  var old = vm_cache[ns][page._id];
- if (old && old._hash == page._hash) { return; }
+ if (old) {
+ //Same, don't do anything
+ if (old._hash === page._hash) { return; }
+
+ //Diff
+ vm_transaction_diffs.push(vm_diff(old, page));
+ vm_transaction_changed_ids.push(page._id);
+ }
 
  vm_dirty[ns][page._id] = page;
  vm_cache[ns][page._id] = page;
@@ -39,39 +56,6 @@ service :vm do
  }
  }
 
- function vm_rehash_page(page) {
- var z = 0;
-
- //head and next are optional
- if (page._head) { var z = crc32(0, page._head) }
- if (page._next) { z = crc32(z, page._next) }
-
- z = crc32(z, page._id)
-
- //Hash differently based on type
- if (page._type === "array") {
- var e = page.entries;
- for (var i = 0; i < e.length; ++i) {
- z = crc32(z, e[i]._sig);
- }
- } else if (page._type === "hash") {
- var keys = Object.keys(page.entries);
- var e = page.entries;
- var q = 0;
- for (var i = 0; i < keys.length; ++i) {
- var _sig = e[keys[i]]._sig;
- var r = crc32(0, _sig);
- q = q + r;
- }
- q = +q;
- z = crc32(z, q.toString());
- } <% if @debug %> else {
- throw "vm_rehash_page got an unspported type: "+page._type;
- }
- <% end %>
- page._hash = z.toString();
- }
-
  function vm_pageout() {
  <% @options[:pagers].each do |p| %>
  //Get id_to_page mappings
@@ -111,6 +95,349 @@ service :vm do
  <% if @debug %>
  vm_write_list = [];
  <% end %>
+
+ //Generic Page Helpers
+ ///////////////////////////////////////////////////////////////////////////
+ function vm_create_page(id) {
+ if (id === undefined) {
+ id = gen_id();
+ }
+
+ var page = {
+ _id: id,
+ _head: null,
+ _next: null,
+ _hash: null,
+ entries: [],
+ __index: {},
+ };
+
+ return page;
+ }
+
+ function vm_copy_page(page) {
+ var page = {
+ _id: page._id,
+ _head: page._head,
+ _next: page._next,
+ _hash: page._hash,
+ entries: JSON.parse(JSON.stringify(page.entries)),
+ };
+
+ return page;
+ }
+
+ function vm_rehash_page(page) {
+ var z = 0;
+
+ //head and next are optional
+ if (page._head) { var z = crc32(0, page._head) }
+ if (page._next) { z = crc32(z, page._next) }
+
+ z = crc32(z, page._id)
+
+ //Hash differently based on type
+ var e = page.entries;
+ for (var i = 0; i < e.length; ++i) {
+ z = crc32(z, e[i]._sig);
+ }
+
+ page._hash = z.toString();
+ }
+
+ function vm_reindex_page(page) {
+ page.__index = {};
+ for (var i = 0; i < page.entries.length; ++i) {
+ page.__index[page.entries[i]._id] = i;
+ }
+ }
+ ///////////////////////////////////////////////////////////////////////////
+
+ //vm_diff helpers
+ ///////////////////////////////////////////////////////////////////////////
+ function vm_diff(old_page, new_page) {
+ var diff_log = [];
+ if (old_page._head !== new_page._head) {
+ diff_log.push(["HEAD_M", new_page._head])
+ }
+
+ if (old_page._next !== new_page._next) {
+ diff_log.push(["NEXT_M", new_page._next])
+ }
+
+ var from_entries = old_page.entries;
+ var to_entries = new_page.entries;
+
+ //Calculated lists
+ var ins = [];
+ var dels = [];
+ var moves = [];
+ var modify = [];
+
+ //a_prime is Union (ordered) of from
+ //b_prime is Union (ordered) of to
+ var a_prime = [];
+ var b_prime = [];
+
+ //Save all entry sigs
+ var from_entries_sig = [];
+ for (var i = 0; i < from_entries.length; ++i) {
+ from_entries_sig[from_entries[i]._id] = from_entries[i]._sig;
+ }
+
+ //Need to re-index page for the modify code which needs to know the index
+ //of the id of the new entry
+ vm_reindex_page(new_page);
+
+ //Save all the entry sigs
+ var to_entries_sig = [];
+ for (var i = 0; i < to_entries.length; ++i) {
+ to_entries_sig[to_entries[i]._id] = to_entries[i]._sig;
+ }
+
+ //I. Calculate all elements in to_entries that are not in from_entries
+ //for each one of those elements, mark it as insertion and remove them in reverse order.
+ for (var i = 0; i < to_entries.length; ++i) {
+ //Does the entry *not* exist in from_entries?
+ var to_entry_id = to_entries[i]._id;
+ if (from_entries_sig[to_entry_id] === undefined) {
+ ins.push(["+", i, to_entries[i]]);
+ } else {
+ //The entry *does* exist, therefore it must be part of the shared
+ b_prime.push(to_entries[i]._id);
+ }
+ }
+
+ for (var i = 0; i < from_entries.length; ++i) {
+ var from_entry_id = from_entries[i]._id;
+ if (to_entries_sig[from_entry_id] === undefined) {
+ dels.push(["-", from_entries[i]._id]);
+ } else {
+ a_prime.push(from_entries[i]._id);
+
+ if (from_entries[i]._sig != to_entries_sig[from_entry_id]) {
+ modify.push(["M", new_page.entries[new_page.__index[from_entry_id]]]);
+ }
+ }
+ }
+
+ //*==================================*
+ //| Wild UNOPTIMIZED ALGORITHM |
+ //| |
+ //| appeared! |
+ //| v |
+ //*==================================*
+ while(1) {
+ var wdiff = 0;
+ var wb_index;
+ var wa_index;
+
+ for (var i = 0; i < a_prime.length; ++i) {
+ var b_index = b_prime.indexOf(a_prime[i]);
+ var diff = b_index - i;
+
+ if (Math.abs(diff) > Math.abs(wdiff)) {
+ wdiff = diff;
+ wa_index = i;
+ wb_index = b_index;
+ }
+ }
+
+ if (Math.abs(wdiff) > 0) {
+ var r = a_prime.splice(wa_index, 1);
+ a_prime.splice(wb_index, 0, r[0]);
+
+ moves.push([">", wb_index, r[0]]);
+ } else {
+ break
+ }
+ }
+
+ var res = diff_log.concat(dels).concat(modify).concat(moves).concat(ins);
+ return res;
+ }
+
+ function vm_diff_replay(page, diff) {
+ for (var i = 0; i < diff.length; ++i) {
+ vm_reindex_page(page);
+ var e = diff[i];
+
+ //vm_diff type
+ var type = e[0];
+ if (type === "+") {
+ var eindex = e[1];
+ var entry = e[2];
+
+ //Ignore insertion if an element already exists with the given id
+ if (page["__index"][entry["_id"]] === undefined) {
+ //Insertion
+ page.entries.splice(eindex, 0, entry);
+ }
+ } else if (type === ">") {
+ var eindex = e[1];
+ var entry_id = e[2];
+
+ var current_index = page["__index"][entry_id];
+ if (current_index !== undefined) {
+ var entry = page.entries.splice(current_index, 1)[0];
+ page.entries.splice(eindex, 0, entry);
+ }
+ } else if (type === "M") {
+ var entry = e[1];
+
+ //Take out old, put in new
+ if (page["__index"][entry["_id"]] !== undefined) {
+ page.entries.splice(page["__index"][entry["_id"]], 1, entry);
+ }
+ } else if (type === "-") {
+ var eid = e[1];
+
+ var index = page.__index[eid];
+
+ //Take out
+ if (page["__index"][eid] !== undefined) {
+ page.entries.splice(index, 1);
+ }
+ } else if (type === "HEAD_M") {
+ page._head = e[1];
+ } else if (type === "NEXT_M") {
+ page._next = e[1];
+ }
+ }
+ }
+ ///////////////////////////////////////////////////////////////////////////
+
+ //Commit helpers
+ ///////////////////////////////////////////////////////////////////////////
+ function vm_commit(older, newer) {
+ newer.__changes_id = gen_id();
+
+ if (older.__changes && !older.__base) {
+ newer.__base = older;
+ } else if (older.__changes) {
+ newer.__base = older.__base;
+ }
+
+ if (older.__base) {
+ newer.__changes = vm_diff(older.__base, newer);
+ } else {
+ newer.__changes = vm_diff(older, newer);
+ }
+ }
+
+ function vm_rebase(newer, older) {
+ if (newer.__changes && !newer.__base) {
+ <% if @debug %>
+ if (newer.__changes_id === undefined) {
+ throw "__changes_id did not exist on newer: " + JSON.stringify(newer) + " but it did have __changes";
+ }
+ <% end %>
+ older.__changes = newer.__changes;
+ older.__changes_id = newer.__changes_id;
+
+ vm_diff_replay(older, older.__changes);
+ } else if (newer.__changes && newer.__base) {
+ <% if @debug %>
+ if (newer.__changes_id === undefined) {
+ throw "__changes_id did not exist on newer: " + JSON.stringify(newer) + " but it did have __changes";
+ }
+ <% end %>
+
+ //Reconstruct the __base by playing newer.__base.__changes ontop of older (which is the base we are rebasing on)
+ //Imagine that you texted a teacher changes, but are unsure whether that teacher has received those changes, meanwhile,
+ //the teacher texts you a new fresh copy of the page. You must now keep track of the changes you texted her (newer.__base.__changes)
+ //while still being able to create a new list of changes for any future changes that you make (as we diff pages to create the changes)
+ //So we reconstruct the newer.__base page by taking what the teacher gave us, trash the newer.__base page, but replay the changes
+ //that newer.__base.__changes had onto the copy the teacher gave us. E.g. we cross out "Sally" on our list, text teacher that we crossed
+ //out sally. Teacher gave us a new list that has "Bill" Crossed out. We Then take the new list and cross out "Sally" and call that our new
+ //base page.
+ vm_diff_replay(older, newer.__base.__changes);
+
+ //Copy the page, we need to use the copy as a '__base' page because we want the non-copied older page to be the non-base version. (And we
+ //will make it the 'non' base version by again, replaying changes from the 'newer.__changes') after setting the __base to the copy.
+ var older_copy = vm_copy_page(older);
+ older_copy.__changes = newer.__base.__changes;
+ older_copy.__changes_id = newer.__base.__changes_id;
+ vm_reindex_page(older_copy);
+ older.__base = older_copy;
+
+ //Now update the older page w/ the `newer.__changes`
+ vm_diff_replay(older, newer.__changes);
+
+ //Calculate diff for older
+ older.__changes = vm_diff(older.__base, older);
+ older.__changes_id = gen_id();
+ }
+ }
+
+ function vm_mark_changes_synced(page, changes_id) {
+ if (page.__base === undefined && changes_id === page.__changes_id) {
+ delete page.__changes;
+ delete page.__changes_id;
+ } else if (page.__base !== undefined && changes_id === page.__base.__changes_id) {
+ delete page.__base;
+ }
+ }
+ ///////////////////////////////////////////////////////////////////////////
+
+ //vm transaction helpers
+ ///////////////////////////////////////////////////////////////////////////
+ vm_transaction_in_progress = false;
+ function vm_transaction_begin() {
+ <% if @debug %>
+ if (vm_transaction_in_progress === true) { throw "vm_transaction_begin called but a transaction was already in progress" }
+ <% end %>
+ vm_transaction_in_progress = true;
+ vm_transaction_diffs = [];
+ vm_transaction_changed_ids = [];
+ vm_transaction_ns = null;
+ }
+
+ function vm_transaction_end() {
+ <% if @debug %>
+ if (vm_transaction_in_progress === false) { throw "vm_transaction_end called but vm_transaction_begin was never called" }
+ <% end %>
+ vm_transaction_in_progress = false;
+
+ for (var i = 0; i < vm_transaction_changed_ids.length; ++i) {
+ var page_id = vm_transaction_changed_ids[i];
+ var bps = vm_notify_map[vm_transaction_ns][page_id];
+ if (bps !== undefined) {
+ pieces = [];
+ for (var x = 0; x < vm_transaction_diffs[i].length; ++x) {
+ //Get diff entry
+ var diff_entry = vm_transaction_diffs[i][x];
+ pieces.push(diff_entry);
+
+ //For all listening controllers
+ for (var y = 0; y < bps.length; ++y) {
+ var bp = bps[y];
+
+ if (diff_entry[0] === "M") {
+ int_event_defer(bp, "entry_modify", {page_id: page_id, entry: diff_entry[1]});
+ } else if (diff_entry[0] === "-") {
+ int_event_defer(bp, "entry_del", {page_id: page_id, entry_id: diff_entry[1]});
+ } else if (diff_entry[0] === ">") {
+ var eindex = diff_entry[1];
+ var eid = diff_entry[2];
+ int_event_defer(bp, "entry_move", {entry_id: eid, from_page_id: page_id, to_page_id: page_id, to_page_index: eindex});
+ } else if (diff_entry[0] === "+") {
+ var eindex = diff_entry[1];
+ var entry = diff_entry[2];
+ int_event_defer(bp, "entry_ins", {page_id: page_id, index: eindex, entry: entry});
+ } else if (diff_entry[0] === "NEXT_M") {
+ int_event_defer(bp, "next_changed", {page_id: page_id, value: diff_entry[1]});
+ } else if (diff_entry[0] === "HEAD_M") {
+ int_event_defer(bp, "head_changed", {page_id: page_id, value: diff_entry[1]});
+ }
+ }
+ }
+
+ //throw JSON.stringify(pieces);
+ }
+ }
+ }
+ ///////////////////////////////////////////////////////////////////////////
  }
 
  on_wakeup %{
@@ -8,35 +8,20 @@ Each pager belongs to a *namespace*; page faults hit a namespace and then the pa
  Fun aside: because of the hashing semantics, this paging system solves the age-old problem of showing that data has changed *now* while still being assured that you have perfectly synchronized data with the server, which would normally require a 3-way handshake on every update. You could have a network server pager that supports writes but doesn't forward those to the network. That way, you can locally modify the page, and if the modifications were guessed correctly, the server would not even send back a page modification update (locally, the page would have been propagated as well). In the meantime, after modifying the local page, you would send a real network request to the server, which would in turn update its own paging system; at that point the server would check in with you about your pages, but miraculously, because you guessed the updated page correctly, no modifications would need to be made. You could even purposefully put a 'not_synced' key in and actually show the user when the page was correctly synchronized.
 
  ##Pages
- Each page is either of a `array` type or `hash` type.
-
- ###Array type
+ ###Example
  ```ruby
  page_example = {
  _head: <<uuid STR or NULL>>,
- _type: "array",
  _next: <<uuid STR or NULL>,
  _id: <<uuid STR>,
  entries: [
  {_id: <<uuid STR>>, _sig: <<random_signature for inserts and modifies STR>>},
  ...
  ],
- _hash: <<CRC32 >
- }
- ```
-
- ###Hash type
- ```ruby
- page_example = {
- _head: <<uuid STR or NULL>>,
- _type: "hash",
- _next: <<uuid STR or NULL>,
- _id: <<uuid STR>
- entries: {
- "my_id0" => {_sig: <<random_signature for inserts and modifies STR>>},
- ...
- },
- _hash: <<CRC32 >
+ _hash: <<CRC32>>,
+ __index: {
+ entry_id: entry_index,
+ }
  }
  ```
 
@@ -44,10 +29,8 @@ page_example = {
  * `_next (string or null)` - The next element on this list. If `_next` is non-existent, then this page is the endpoint of the list.
  * `_id (string)` - The name of this page. Even if every key changed, the `_id` will not change. This is supposed to indicate, semantically, that this page still *means* the same thing. For example, imagine a page. If all entries were to be **removed** from this page and new entries were **inserted** on this page, then it would be semantically sound to say that the entries were **changed**.
  * `entries`
- * `_type == 'array'`
- * An array of dictionaries. Each element contains a `_id` that is analogous to the page `_id`. (These are not the same, but carry the same semantics). Entries also have a `_sig` which should be a generated hash value that changes when the entry changes.
- * `_type == 'hash'`
- * A dictionary of dictionaries. Entries have a `_sig` which should be a generated hash value that changes when the entry changes.
+ * An array of dictionaries. Each element contains a `_id` that is analogous to the page `_id`. (These are not the same, but carry the same semantics). Entries also have a `_sig` which should be a generated hash value that changes when the entry changes.
+ * `__index` - A dictionary mapping entry `_id` into an index of the `entries` array.
  * `_hash (string)` - All entry `_id's`, `_next`, the page `_id`, and `_head` are hashed together. Any changes to this page will cause this `_hash` to change, which makes it a useful way to check if a page is modified and needs to be updated. The hash function is an ordered CRC32 function run in the following order. See [Calculating Page Hash](#calculating_page_hash).
35
 
53
36
  ------
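
A minimal sketch of how the page `_hash` is computed, mirroring the `vm_rehash_page` implementation added in this release (the `crc32(seed, string)` helper is the one already used by the kernel; the sample field values are made up for illustration):

```js
//Illustrative only: ordered CRC32 over _head, _next, _id, then each entry _sig
var page = {_id: "page0", _head: null, _next: "page1", entries: [{_id: "e0", _sig: "sig0"}]};

var z = 0;
if (page._head) { z = crc32(0, page._head); }   //optional
if (page._next) { z = crc32(z, page._next); }   //optional
z = crc32(z, page._id);
for (var i = 0; i < page.entries.length; ++i) {
  z = crc32(z, page.entries[i]._sig);
}
page._hash = z.toString();
```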
@@ -71,6 +54,21 @@ Assuming a crc function of `crc32(seed, string)`
 
  ------
 
+ ##Schemas & Data-Types
+
+ ####`vm_diff_entry`
+ See [VM Diff](./vm/diff.md) for specific information.
+
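
For orientation, here is the shape of the diff entries that `vm_diff` produces in this release (see `./vm/diff.md` for the authoritative description; the ids and sigs below are made up):

```js
//Illustrative only: a vm_diff result is an array of tagged tuples, ordered as
//head/next changes, deletions, modifications, moves, then insertions.
var diff = vm_diff(old_page, new_page);
//e.g.
//[
//  ["HEAD_M", "new_head_id"],          //_head changed
//  ["NEXT_M", "new_next_id"],          //_next changed
//  ["-", "entry_id"],                  //entry removed
//  ["M", {_id: "e0", _sig: "sig2"}],   //entry modified (new entry given)
//  [">", 0, "entry_id"],               //entry moved to index 0
//  ["+", 2, {_id: "e9", _sig: "sig9"}] //entry inserted at index 2
//]
//vm_diff_replay(page, diff) applies these entries, in order, to a page.
```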
+ ###`Based page`
+ A based page contains the additional keys `__base` and `__changes`, and these keys are not `null`. Optionally, it may contain the key
+ `__base_sync` (which is also not null).
+ * `__base` - A copy of the fully synchronized page (fully embedded)
+ * `__changes` - A `vm_diff` array of changes from either `__base`, or, if not `null` and not `undefined`, the `__base_sync` page.
+ * `__base_sync` - An optional key that serves the same purpose as `__base`, but when synchronizing, `__base_sync` is used for `__changes` as
+ `__base_sync` holds a full copy of the currently in-sync page.
+
+ Pages that are being synchronized are known as a `based in-sync page`.
+
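
A based page might look like the following (illustrative only; the `__base`/`__changes`/`__changes_id` keys match the commit helpers added in this release, and the ids are made up):

```js
//Illustrative only: a locally modified page that still remembers its synchronized base
var based_page = {
  _id: "page0",
  _head: null,
  _next: null,
  _hash: "123456789",
  entries: [{_id: "e0", _sig: "sig1"}],
  __index: {e0: 0},
  __base: {/* full copy of the last fully synchronized version of this page */},
  __changes: [["M", {_id: "e0", _sig: "sig1"}]],  //vm_diff from __base to this page
  __changes_id: "c0"                              //identifies this batch of changes
};
```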
  ##Configuration
  The paging service may be configured in your `./config/services.rb`. You must set an array of pagers where each pager is responsible for a particular
  namespace. See [VM Pagers](./vm/pagers.md) for more info.
@@ -130,7 +128,8 @@ if (page is not resident in memory && not_synchronous) {
  * Parameters
  * `ns` - The namespace of the page, e.g. 'user'
  * `id` - Watch the page whose `_id` field contains this value
- * `sync (optional)` - If set to `true` then the disk read and cache read will be performed synchronously. Additionally, all future cache reads / updates will be performed synchronously.
+ * `sync (optional)` - If set to `true` then the cache read will be performed synchronously; however, the disk read will still be performed
+ asynchronously. Additionally, all future cache reads / updates will be performed synchronously.
  * Event Responses
  * `read_res` - Sent whenever a change occurs to the page, and on the first read.
  * Returns an immutable page in params
@@ -167,12 +166,44 @@ Pageout is embodied in the function named `vm_pageout()`. This will asynchronous
  must support `unwatch` removal, for which we only receive the `bp`, `ns`, and `key`.
 
  ##Helper Methods
- ###Pager specific
- * `vm_cache_write(ns, page)` - Save a page to cache memory. This will not recalculate the page hash. The page will be stored in `vm_cache[ns][id]` by.
 
- ###Page modification
- * `vm_rehash_page(page)` - Calculates the hash for a page and modifies that page with the new `_hash` field. If the `_hash` field does not exist, it
+ ###Functional
+ ####Page modification (assuming inputs are modifiable)
+ * **Generic Page**
+ * `vm_create_page(id)` - Returns a new blank page with the given `_id`. **This does not write anything to memory; it has no side effects except returning a hash.**
+ * `vm_create_page()` - Same as vm_create_page, but generates an id for you.
+ * `vm_copy_page(page)` - Creates a copy of the page. Only copies the `_head`, `_next`, `_id`, `entries`, and `_hash` fields.
+ * `vm_rehash_page(page)` - Calculates the hash for a page and modifies that page with the new `_hash` field. If the `_hash` field does not exist, it
  will create it.
+ * `vm_reindex_page(page)` - Recalculates the `__index` field of the page. If `__index` does not exist, it is added.
+ * **Diff helpers**
+ * See [VM Diff](./vm/diff.md) section on *Functional Kernel*.
+ * **Commit helpers**
+ * `vm_commit(older, newer)` - Modifications will be done to `newer`. It is assumed that `newer` is neither based nor changed. This is typical of a
+ new page creation. It is assumed that `older` is either `[unbased, nochanges]`, `[unbased, changes]` or `[based[unbased, changes], changes]`.
+ You would use this when a page is being written over a page that already exists. This will mark the page as having changes (see the usage sketch after this list).
+ 1. `older: [unbased, nochanges]` - `newer.__changes` will equal `vm_diff(older, newer)` and `newer.__changes_id` will be generated.
+ 2. `older: [unbased, changes]` - `newer.__base` will point to `older`. `newer.__changes` will equal `vm_diff(older, newer)` and
+ `newer.__changes_id` will be generated.
+ 3. `older: [based[unbased, changes], changes]` - `newer.__base` will point to `older.__base`. Then `newer.__changes` will equal
+ `vm_diff(older.__base, newer)` and `newer.__changes_id` will be generated.
+ * `vm_rebase(newer, older)` - Modifications are done to `older`. It is assumed that `older` is neither based nor changed. This is typical of a
+ synchronized page from a server. It is assumed that `newer` is either `[unbased, nochanges]`, `[unbased, changes]` or `[based[unbased,
+ changes], changes]`.
+ 1. `newer: [unbased, nochanges]` - No changes, as `newer` does not contain any changes; therefore, `older` is the *truth*.
+ 2. `newer: [unbased, changes]` - `older` takes `newer.__changes` and `newer.__changes_id`. `older` then replays `older.__changes` on itself.
+ 3. `newer: [based[unbased, changes], changes]`
+ 1. `older` takes `newer.__base.__changes` and `newer.__base.__changes_id`. `older` then replays `older.__changes` onto itself.
+ 2. `older` clones itself; let that clone be called `oldest`. `older.__base` is set to `oldest`.
+ 3. `older` replays `newer.__changes` onto itself.
+ 4. `older` then calculates `__changes` based off `oldest`.
+ * `vm_mark_changes_synced(page, changes_id)` - Will reverse the steps of `vm_commit`. If the page has changes but is not based, then the changes are removed if the
+ `__changes_id` of the page matches `changes_id`. If the page is based (implying the base page has changes and the page has changes, as all base
+ pages have changes), then if the `changes_id` matches the **base** `__changes_id`, the `__base` is removed from the page. If `changes_id`
+ does not match in either of the cases, then nothing happens. This may happen if a synchronization erroneously comes in.
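
A minimal usage sketch of the helpers above, assuming a `cached_page` already exists for a namespace and a fresh synchronized copy later arrives from a pager (`cached_page` and `page_from_pager` are hypothetical names; `gen_id` is the id helper used by the kernel code above):

```js
//Locally edit a copy of the cached page, then record the edits as changes.
var older = cached_page;                 //what we currently have (unbased, no changes)
var newer = vm_copy_page(cached_page);   //our working copy
newer.entries.push({_id: gen_id(), _sig: gen_id(), value: "hello"});  //entry payload is illustrative
vm_rehash_page(newer);
vm_reindex_page(newer);

//newer.__changes = vm_diff(older, newer), and newer.__changes_id is generated
vm_commit(older, newer);

//A fully synchronized copy arrives (unbased, no changes); replay our pending
//changes on top of it so nothing the user did is lost.
var fresh = page_from_pager;             //hypothetical: page handed to us by a pager
vm_rebase(newer, fresh);                 //fresh now carries __changes / __changes_id

//Later, the remote side acknowledges that batch of changes.
vm_mark_changes_synced(fresh, fresh.__changes_id);
```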
+ ###Non functional
+ ####Pager specific
+ * `vm_cache_write(ns, page)` - Save a page to cache memory. The page will be stored in `vm_cache[ns][id]`. As of this release this recalculates the page hash (via `vm_rehash_page`) and must be called inside a vm transaction (see the sketch below).
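
A minimal sketch of a pager writing into the cache under the transaction API added in this release (the `"user"` namespace and the `page` variable are hypothetical):

```js
//Cache writes now happen inside a vm transaction so that watching controllers
//are notified of the resulting diffs when the transaction ends.
vm_transaction_begin();
vm_cache_write("user", page);   //rehashes the page and records a diff against the cached copy
vm_transaction_end();           //dispatches entry_ins / entry_del / entry_modify / entry_move /
                                //head_changed / next_changed events to watching controllers
```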
 
  ### <a name='user_page_modification_helpers'></a>User page modification helpers (Controller Macros)
  You should never directly edit a page in user land; if you do, the pager has no way of knowing that you made modifications. Additionally, if you have multiple controllers watching a page, and it is modified in one controller, those other controllers
@@ -183,19 +214,17 @@ Aside, modifying a page goes against the semantics of the vm system; you're thin
  If you're creating a new page, please use these macros as well; just switch out `CopyPage` for `NewPage`.
 
  ####Per entry
- * `NewPage(type, id)` - Returns a new blank page; internally creates a page that has a null `_next`, `_head`, and `entries` array with 0 elements. type can either be `array` or `hash`. `_id` is generated if it is not passed.
+ * `NewPage(type, id)` - Returns a new blank page; internally creates a page that has a null `_next`, `_head`, and `entries` array with 0 elements. `_id` is generated if it is not passed.
  * `CopyPage(page)` - Copies a page and returns the new page. Internally this copies the entire page with the exception of the
  `_hash` field.
-
- * For both `array` and `hashes`, the following functions work (albeit different semantics). For array types, the `eindex` is an integer in the array, For hash types, the `eindex` is a key inside the dictionary.
- * `EntryDel(page, eindex)` - Remove a single entry from a page. (Internally this deletes the array entry).
- * `EntryInsert(page, eindex, entry)` - Insert an entry, entry should be a dictionary value.
- * For arrays, this generates the `_sig` and `_id` for you.
- * For hashes, this generates the `_sig` for you.
- * `EntryMutable(page, eindex)` - Set a mutable entry at a specific index which you can then modify. The signature is changed for you. You can not
- use this with dot syntax like `EntryMutable(page, eindex).id = 'foo'`, you may only get a variable.
- * `SetPageNext(page, id)` - Sets the `_next` id for the page
- * `SetPageHead(page, id)` - Sets the `_head` id for the page
+ * `EntryDel(page, eid)` - Remove a single entry from a page. (Internally this deletes the array entry).
+ * `EntryInsertAtIndex(page, eindex, entry)` - Insert an entry at a specific index. This generates the `_sig` and `_id` for you.
+ * `EntryInsertAtId(page, eid, entry)` - Insert an entry with a particular `_id`. This generates `_sig` for you. It will be put at the end of the
+ array.
+ * `EntryMutable(page, eid)` - Returns a mutable entry for the given entry `_id` which you can then modify. The signature is changed for you. You cannot
+ use this with dot syntax like `EntryMutable(page, eid).id = 'foo'`, you may only get a variable.
+ * `SetPageNext(page, id)` - Sets the `_next` id for the page
+ * `SetPageHead(page, id)` - Sets the `_head` id for the page
 
  Here is an example of a page being modified inside a controller after a `read_res`
  ```js