flok 0.0.82 → 0.0.83
- checksums.yaml +4 -4
- data/app/kern/pagers/pg_sockio.js +47 -1
- data/app/kern/services/vm.rb +3 -0
- data/docs/kernel_api.md +3 -0
- data/docs/mod/persist.md +1 -1
- data/docs/services/vm.md +11 -1
- data/docs/services/vm/pagers.md +4 -1
- data/docs/todo.md +2 -0
- data/lib/flok/version.rb +1 -1
- data/spec/env/kern.rb +5 -2
- data/spec/kern/assets/vm/controller12.rb +25 -30
- data/spec/kern/assets/vm/pg_sockio/unmark_changes.rb +34 -0
- data/spec/kern/assets/vm/pg_sockio/watch.rb +6 -5
- data/spec/kern/assets/vm/pg_sockio/watch3.rb +31 -0
- data/spec/kern/vm_service_spec.rb +19 -16
- data/spec/kern/vm_sockio_pager_spec.rb +365 -29
- metadata +6 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA1:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 7bd8fa8fdb50bddf42da16826b0046fb59193ce7
|
4
|
+
data.tar.gz: f65050b61273c66c424aeae68568cf0eb7272512
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: d70b9cdc7e87a2b068b55b255a112838a04bac27dca48d5d05b1d6e5023d71a05930856dfe3a256161f4b557a1fbb13996cd7d561c7b45808882256d39057c34
|
7
|
+
data.tar.gz: 36c1164fdefca9a1f9a53b9e845d4cdebfd66f1ae8084bfa0f3d4433a8bcf834003694d71453fb81d46a55ffe73b95c919c5799652b0d14de3d2bf01b40a1f58
|
@@ -2,10 +2,14 @@
|
|
2
2
|
<% [0].each do |i| %>
|
3
3
|
//Destination for events sent from the sockio driver
|
4
4
|
function __pg_sockio<%= i %>_xevent_handler(ep, ename, einfo) {
|
5
|
+
//Events
|
6
|
+
//update - From the sockio interface, an update refers to a page update
|
7
|
+
//interval - Every 15 seconds, from the callout timer.
|
5
8
|
if (ename === "update") {
|
6
9
|
//If changes_id was given
|
7
10
|
if (einfo.changes_id !== undefined) {
|
8
|
-
|
11
|
+
//This is a friendly function that will ignore mis-matches of changes_id
|
12
|
+
vm_mark_changes_synced(vm_cache[pg_sockio<%= i %>_ns][einfo.page._id], einfo.changes_id);
|
9
13
|
}
|
10
14
|
|
11
15
|
//If page exists, then we need to rebase the page, this will actually
|
@@ -15,10 +19,39 @@
|
|
15
19
|
vm_rebase(vm_cache[pg_sockio<%= i %>_ns][einfo.page._id], einfo.page);
|
16
20
|
}
|
17
21
|
|
22
|
+
//Mark page as synced if it contains no changes, we don't need to check base
|
23
|
+
//here because if it has base, it has __changes as well
|
24
|
+
if (einfo.page.__changes === undefined) {
|
25
|
+
vm_pg_unmark_needs_sync(pg_sockio<%= i %>_ns, einfo.page._id)
|
26
|
+
}
|
27
|
+
|
18
28
|
//Write out page
|
19
29
|
vm_transaction_begin();
|
20
30
|
vm_cache_write(pg_sockio<%= i %>_ns, einfo.page);
|
21
31
|
vm_transaction_end();
|
32
|
+
} else if (ename === "interval") {
|
33
|
+
|
34
|
+
//Create watch_list
|
35
|
+
var watch_list = [];
|
36
|
+
var watched_keys = Object.keys(vm_notify_map[pg_sockio<%= i %>_ns]);
|
37
|
+
for (var i = 0; i < watched_keys.length; ++i) {
|
38
|
+
var key = watched_keys[i];
|
39
|
+
watch_list.push(key);
|
40
|
+
|
41
|
+
//If entry exists, put in the hash value
|
42
|
+
if (vm_cache[pg_sockio<%= i %>_ns][key] === undefined) {
|
43
|
+
watch_list.push(null);
|
44
|
+
} else {
|
45
|
+
watch_list.push(vm_cache[pg_sockio<%= i %>_ns][key]._hash);
|
46
|
+
}
|
47
|
+
}
|
48
|
+
|
49
|
+
//Synchronize the watchlist with the server
|
50
|
+
var resync_info = {
|
51
|
+
watch_list: watch_list
|
52
|
+
};
|
53
|
+
|
54
|
+
SEND("net", "if_sockio_send", pg_sockio<%= i %>_bp, "resync", resync_info);
|
22
55
|
} else {
|
23
56
|
<% if @debug %>
|
24
57
|
throw "pg_sockio<%= i %>_xevent_handler received an event called: " + ename + "that it does not know how to handle. This event should never have even been forwarded, but you may have missed adding the handler code if you did request a forward"
|
@@ -46,6 +79,9 @@
|
|
46
79
|
//Signal that the socket.io driver should forward all events to the socket defined by pg_sockio{N}_bp
|
47
80
|
//to the endpoint (with the same reference)
|
48
81
|
SEND("net", "if_sockio_fwd", pg_sockio<%= i %>_bp, "update", pg_sockio<%= i %>_bp);
|
82
|
+
|
83
|
+
//Request a timer every 15 seconds
|
84
|
+
reg_interval(pg_sockio<%= i %>_bp, "interval", 15*4);
|
49
85
|
}
|
50
86
|
|
51
87
|
function pg_sockio<%= i %>_watch(id, page) {
|
@@ -56,6 +92,10 @@
|
|
56
92
|
}
|
57
93
|
|
58
94
|
function pg_sockio<%= i %>_unwatch(id) {
|
95
|
+
var info = {
|
96
|
+
page_id: id
|
97
|
+
}
|
98
|
+
SEND("net", "if_sockio_send", pg_sockio<%= i %>_bp, "unwatch", info);
|
59
99
|
}
|
60
100
|
|
61
101
|
function pg_sockio<%= i %>_write(page) {
|
@@ -70,6 +110,12 @@
|
|
70
110
|
vm_cache_write(pg_sockio<%= i %>_ns, page);
|
71
111
|
vm_transaction_end();
|
72
112
|
|
113
|
+
//Mark pages as needing a synchronization
|
114
|
+
vm_pg_mark_needs_sync(pg_sockio<%= i %>_ns, page._id);
|
115
|
+
}
|
116
|
+
|
117
|
+
function pg_sockio<%= i %>_sync(page_id) {
|
118
|
+
var page = vm_cache[pg_sockio<%= i %>_ns][page_id];
|
73
119
|
//Clone page and send a copy to the server
|
74
120
|
var copied = vm_copy_page(page);
|
75
121
|
var info = {page: copied, changes: page.__changes, changes_id: page.__changes_id};
|
data/app/kern/services/vm.rb
CHANGED
@@ -793,6 +793,9 @@ service :vm do
|
|
793
793
|
}
|
794
794
|
|
795
795
|
vm_notify_map[params.ns][params.id].splice(midx, 1);
|
796
|
+
|
797
|
+
//Remove from notify map if no entries exist
|
798
|
+
if (vm_notify_map[params.ns][params.id].length === 0) { delete vm_notify_map[params.ns][params.id]; }
|
796
799
|
|
797
800
|
delete vm_bp_to_nmap[bp][params.ns][params.id];
|
798
801
|
|
data/docs/kernel_api.md
CHANGED
@@ -39,6 +39,9 @@ instead.
|
|
39
39
|
0 is the master root view. Returns base pointer. `event_gw` is a pointer to a `vc`. If it is null, then any events coming in will not be sent to
|
40
40
|
somewhere else if they do not match any 'on' for the current action.
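For reference, the specs elsewhere in this diff embed a controller like this (a minimal sketch; the controller name is taken from the spec assets):

```js
//Embed "my_controller" on 0 (the master root view) with an empty initial
//context and a null event_gw (unmatched events are then dropped rather than
//forwarded); the returned base pointer addresses the controller instance.
var base = _embed("my_controller", 0, {}, null);
```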
|
41
41
|
|
42
|
+
##Timers
|
43
|
+
* See [Callout](./callout.md) for how to register a timer (sketched below)
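As a concrete point of reference, the sockio pager added in this release registers its periodic timer as follows (a sketch; the `15*4` tick count assumes the callout timer fires four times per second, which is an assumption here, not something this document states):

```js
//Deliver an "interval" event to the pager's socket base pointer roughly every
//15 seconds, expressed as 15*4 callout ticks (assuming quarter-second ticks).
reg_interval(pg_sockio0_bp, "interval", 15*4);
```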
|
44
|
+
|
42
45
|
##Things that are compiled into the kernel from user given data
|
43
46
|
`MODS` - A list of modules that was specified in `./app/drivers/$PLATFORM/config.yml`
|
44
47
|
`PLATFORM` - The platform that this kernel was compiled with
|
data/docs/mod/persist.md
CHANGED
@@ -11,7 +11,7 @@ Persistance management. Loosely based on redis.
|
|
11
11
|
`if_per_set_f(ns, key, tp)` - Tell the driver to dereference the telepointer and to save it to disk.
|
12
12
|
|
13
13
|
For race conditions, e.g., where an asynchronous set is followed by a synchronous get, the behavior is undefined.
|
14
|
-
|
14
|
+
If the page does not exist, the hash value is null. It is expected that the kernel should manage the write-back cache and that the driver should not attempt a write-back cache unless
|
15
15
|
it is convenient to do so.
|
16
16
|
|
17
17
|
###Kernel interrupts
|
data/docs/services/vm.md
CHANGED
@@ -222,6 +222,15 @@ The pager synchronization daemon is embodied in the function called `vm_pg_sync_
|
|
222
222
|
`__changes_id` of the page matches `changes_id`. If the page is based (implying the base page has changes and the page has changes as all base
|
223
223
|
pages have changes), then if the `changes_id` matches the **base** `__changes_id`, the `__base` is removed from the page. If `changes_id`
|
224
224
|
does not match in either of the cases, then nothing happens. This may happen if a synchronization erroneously comes in.
|
225
|
+
* **Why do we have both `vm_rebase` and `vm_mark_changes_synced`?**
|
226
|
+
* They are used under similar circumstances. You always call `vm_mark_changes_synced` before calling `vm_rebase` on a page. The reasoning is that
|
227
|
+
`vm_rebase` will assume that the cached page does not contain changes if they are present in `older`. If you didn't do this, then the
|
228
|
+
cached page would be rebased and could contain changes even though it's already been rebased on an older page. E.g. `newer[changes, nobase]`
|
229
|
+
rebased would be `older[changes, nobase]`, where `changes` are equal on `newer` and `older`, but clearly that's incorrect. Another way of
|
230
|
+
looking at it is that `vm_rebase` is saying that you are rebasing **on an authority** page and therefore needs no evidence that the page was
|
231
|
+
an authority (which is why the `changes_id` can be stripped). A third way of looking at it is that `vm_rebase` on a `newer[changes,
|
232
|
+
based[changes, nobase]]` with `older` where `older` contains the changes of `newer.__base.__changes`, would result in `older` having
|
233
|
+
`newer.__base.__changes` fast-forwarded over it, even though it already contains those changes (see the sketch below).
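A condensed sketch of that ordering, adapted from the sockio pager's `update` handler earlier in this diff (`server_page` and `changes_id` stand in for the fields of the incoming event):

```js
var cached = vm_cache[pg_sockio0_ns][server_page._id];
if (cached !== undefined) {
  //1. Mark the acknowledged changes as synced first; this "friendly" call
  //   silently ignores a mis-matched changes_id.
  if (changes_id !== undefined) {
    vm_mark_changes_synced(cached, changes_id);
  }

  //2. Only then rebase, replaying whatever changes remain on top of the
  //   authoritative server page.
  vm_rebase(cached, server_page);
}
```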
|
225
234
|
###Non functional (functional as is in lambda calculus, or lisp (no **global** state changes but may modify parameters)
|
226
235
|
####Pager specific
|
227
236
|
* `vm_cache_write(ns, page)` - Save a page to cache memory. This will not recalculate the page hash. The page will be stored in `vm_cache[ns][id]`.
|
@@ -229,7 +238,8 @@ The pager synchronization daemon is embodied in the function called `vm_pg_sync_
|
|
229
238
|
* The page_id is added to the `vm_unsynced` with the value of 0; see above in `Datatypes & Structures` for details. i.e.
|
230
239
|
`vm_unsynced[$PAGER_NS][page_id] = 0`
|
231
240
|
* the pager's routine of `sync` is called immediately. The page must exist in cache at this point.
|
232
|
-
* `vm_pg_unmark_needs_sync(ns, page_id)` - Removes the page from the pending synchronization queue `delete vm_unsynced[$PAGER_NS][page_id]`)
|
241
|
+
* `vm_pg_unmark_needs_sync(ns, page_id)` - Removes the page from the pending synchronization queue (`delete vm_unsynced[$PAGER_NS][page_id]`). If
|
242
|
+
it's not in the synchronization queue, then nothing will happen (see the sketch below).
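In practice the bookkeeping looks like this (a sketch using the `sockio` namespace and the page id that appear in the specs in this diff):

```js
//Queue the page for the pager synchronization daemon; per the docs above this
//also invokes the pager's sync routine immediately.
vm_pg_mark_needs_sync("sockio", "test");    //vm_unsynced["sockio"]["test"] === 0

//Remove it from the queue again; a no-op if the page was never queued.
vm_pg_unmark_needs_sync("sockio", "test");  //vm_unsynced["sockio"]["test"] === undefined
```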
|
233
243
|
|
234
244
|
### <a name='user_page_modification_helpers'></a>User page modification helpers (Controller Macros)
|
235
245
|
You should never directly edit a page in user land; if you do, the pager has no way of knowing that you made modifications. Additionally, if you have multiple controllers watching a page, and it is modified in one controller, those other controllers
|
data/docs/services/vm/pagers.md
CHANGED
@@ -78,7 +78,10 @@ This pager connects to a socket.io server via the `sockio` module.
|
|
78
78
|
that the endpoint shares the same value as the socket's base pointer.
|
79
79
|
* **Functions**
|
80
80
|
* `init` - Will begin trying to establish a connection to the server. When pages are written,
|
81
|
-
* `watch` - Signals to the socket.io server that a page is being watched via `watch` event with parameters `{page_id:}
|
81
|
+
* `watch` - Signals to the socket.io server that a page is being watched via a `watch` event with parameters `{page_id:}`. Additionally, at a periodic interval (15 seconds by default),
|
82
|
+
the server will be notified of the entire watch-list via the `resync` event, which contains the key `watch_list`: an array of `[page_id, page_hash, page_id, page_hash, ...]` where `page_id` is the page's `_id` and
|
83
|
+
`page_hash` is the page's `_hash`. If the page does not exist in cache, the hash value is null (see the example below).
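For example, with two watched pages where only `test1` is cached, the `resync` payload sent to the driver looks roughly like this (ids taken from the `watch3.rb` spec in this diff; the hash value shown is hypothetical):

```js
var resync_info = {
  //Flattened [page_id, page_hash, ...] pairs; null means the page is watched
  //but not currently present in vm_cache.
  watch_list: ["test0", null, "test1", "9a3f..."]
};
SEND("net", "if_sockio_send", pg_sockio0_bp, "resync", resync_info);
```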
|
84
|
+
|
82
85
|
* `unwatch` - Signals to the socket.io server that a page is no longer being watched via `unwatch` event with parameters `{page_id:}`
|
83
86
|
* `write` -
|
84
87
|
* Sends the sockio server the `write` event with a hash containing `page`, and optionally (`changes`, and `changes_id`). The `page` contains only the basic
|
data/docs/todo.md
CHANGED
@@ -10,6 +10,8 @@
|
|
10
10
|
2. The pager synchronization daemon and functions associated with marking pages as needing synchronization will attempt to sync at one time and not
|
11
11
|
store the data if there is a crash or exit before synchronization completes. Furthermore, too many unsynced pages will wreak havoc as they will be
|
12
12
|
dispatched at the same time via the synchronization daemon.
|
13
|
+
3. A `watch` request on a page technically dispatches an asynchronous disk read request at the same time the pager's request goes through. If a pager were to write
|
14
|
+
a page before this disk read, directly via `vm_cache_write`, then the cached page would not be loaded in time.
|
13
15
|
|
14
16
|
###Ideas for improvement
|
15
17
|
0. The `Raise` function should not actually signal the controller, it could just inline that
|
data/lib/flok/version.rb
CHANGED
data/spec/env/kern.rb
CHANGED
@@ -31,8 +31,7 @@ shared_context "kern" do
|
|
31
31
|
|
32
32
|
#Will return everything put into the 'dump' dictionary (pre-defined for your convenience)
|
33
33
|
def evald str
|
34
|
-
self.eval "dump = {}"
|
35
|
-
self.eval str
|
34
|
+
self.eval "dump = {}; #{str}"
|
36
35
|
_dump = self.dump("dump")
|
37
36
|
|
38
37
|
return DumpHelper.new(_dump)
|
@@ -204,6 +203,10 @@ shared_context "kern" do
|
|
204
203
|
end
|
205
204
|
end
|
206
205
|
|
206
|
+
def ignore
|
207
|
+
@q.shift
|
208
|
+
end
|
209
|
+
|
207
210
|
#Expect the queue to not contain a message matching
|
208
211
|
def expect_not_to_contain msg_name, &block
|
209
212
|
original_q = JSON.parse(@q.to_json)
|
@@ -1,43 +1,38 @@
|
|
1
1
|
controller :my_controller do
|
2
2
|
services :vm
|
3
3
|
|
4
|
+
on_entry %{
|
5
|
+
read_res_params = [];
|
6
|
+
}
|
7
|
+
|
4
8
|
action :my_action do
|
5
9
|
on_entry %{
|
6
|
-
|
7
|
-
hello: "world"
|
8
|
-
}
|
9
|
-
|
10
|
-
page = NewPage("array", "test");
|
11
|
-
SetPageHead(page, "head");
|
12
|
-
SetPageNext(page, "next");
|
13
|
-
EntryInsert(page, 0, entry);
|
14
|
-
|
15
|
-
page2 = CopyPage(page)
|
16
|
-
EntryInsert(page, 0, entry);
|
17
|
-
|
18
|
-
|
19
|
-
var watch_info = {
|
20
|
-
ns: "spec",
|
21
|
-
id: "test"
|
22
|
-
}
|
23
|
-
|
24
|
-
var write_info = {
|
25
|
-
ns: "spec",
|
26
|
-
page: page
|
27
|
-
};
|
10
|
+
}
|
28
11
|
|
29
|
-
|
30
|
-
|
31
|
-
|
32
|
-
|
12
|
+
on "write_first", %{
|
13
|
+
var page = vm_create_page("test")
|
14
|
+
info_write = {ns: "spec", page: page}
|
15
|
+
Request("vm", "write", info_write);
|
16
|
+
context.page = page;
|
17
|
+
}
|
33
18
|
|
19
|
+
on "watch_first", %{
|
20
|
+
var info_watch = {ns: "spec", id: "test"}
|
21
|
+
Request("vm", "watch", info_watch);
|
22
|
+
}
|
34
23
|
|
35
|
-
|
36
|
-
|
37
|
-
|
38
|
-
|
24
|
+
on "modify_first", %{
|
25
|
+
var page = vm_copy_page(context.page);
|
26
|
+
page.entries.push({
|
27
|
+
_sig: "test",
|
28
|
+
_id: "test",
|
29
|
+
value: "test"
|
30
|
+
});
|
31
|
+
var info_write = {ns: "spec", page: page};
|
32
|
+
Request("vm", "write", info_write);
|
39
33
|
}
|
40
34
|
|
35
|
+
|
41
36
|
on "read_res", %{
|
42
37
|
read_res_params.push(params);
|
43
38
|
}
|
@@ -0,0 +1,34 @@
|
|
1
|
+
controller :my_controller do
|
2
|
+
services :vm
|
3
|
+
|
4
|
+
action :my_action do
|
5
|
+
on_entry %{
|
6
|
+
}
|
7
|
+
|
8
|
+
on "create_page", %{
|
9
|
+
var page = vm_create_page("test");
|
10
|
+
var info = {ns: "sockio", page: page};
|
11
|
+
Request("vm", "write", info);
|
12
|
+
|
13
|
+
var info2 = {ns: "sockio", id: "test"};
|
14
|
+
Request("vm", "watch", info2);
|
15
|
+
}
|
16
|
+
|
17
|
+
on "modify_page", %{
|
18
|
+
//Modify the page
|
19
|
+
var new_page = vm_copy_page(read_page);
|
20
|
+
|
21
|
+
new_page.entries.push({
|
22
|
+
_id: gen_id(),
|
23
|
+
_sig: gen_id(),
|
24
|
+
value: "foo"
|
25
|
+
});
|
26
|
+
var info = {ns: "sockio", page: new_page}
|
27
|
+
Request("vm", "write", info);
|
28
|
+
}
|
29
|
+
|
30
|
+
on "read_res", %{
|
31
|
+
read_page = params;
|
32
|
+
}
|
33
|
+
end
|
34
|
+
end
|
@@ -3,16 +3,17 @@ controller :my_controller do
|
|
3
3
|
|
4
4
|
action :my_action do
|
5
5
|
on_entry %{
|
6
|
-
var watch_info = {
|
7
|
-
ns: "sockio",
|
8
|
-
id: "test",
|
9
|
-
};
|
10
|
-
|
6
|
+
var watch_info = { ns: "sockio", id: "test", };
|
11
7
|
Request("vm", "watch", watch_info);
|
12
8
|
}
|
13
9
|
|
14
10
|
on "read_res", %{
|
15
11
|
read_res_params = params;
|
16
12
|
}
|
13
|
+
|
14
|
+
on "unwatch", %{
|
15
|
+
var unwatch_info = { ns: "sockio", id: "test"};
|
16
|
+
Request("vm", "unwatch", unwatch_info);
|
17
|
+
}
|
17
18
|
end
|
18
19
|
end
|
@@ -0,0 +1,31 @@
|
|
1
|
+
controller :my_controller do
|
2
|
+
services :vm
|
3
|
+
|
4
|
+
action :my_action do
|
5
|
+
on_entry %{
|
6
|
+
//Setup watch
|
7
|
+
var watch_info0 = { ns: "sockio", id: "test0", };
|
8
|
+
var watch_info1 = { ns: "sockio", id: "test1", };
|
9
|
+
|
10
|
+
Request("vm", "watch", watch_info0);
|
11
|
+
Request("vm", "watch", watch_info1);
|
12
|
+
}
|
13
|
+
|
14
|
+
on "write_test1", %{
|
15
|
+
//Write test1
|
16
|
+
var test1 = vm_create_page("test1");
|
17
|
+
var write_info1 = { ns: "sockio", page: test1};
|
18
|
+
Request("vm", "write", write_info1);
|
19
|
+
}
|
20
|
+
|
21
|
+
on "read_res", %{
|
22
|
+
read_res_params = params;
|
23
|
+
}
|
24
|
+
|
25
|
+
#Unwatch test1 page
|
26
|
+
on "unwatch_test1", %{
|
27
|
+
var unwatch_info1 = { ns: "sockio", id: "test1", };
|
28
|
+
Request("vm", "unwatch", unwatch_info1);
|
29
|
+
}
|
30
|
+
end
|
31
|
+
end
|
@@ -363,32 +363,35 @@ RSpec.describe "kern:vm_service" do
|
|
363
363
|
expect(read_res_params).to eq(vm_write_list)
|
364
364
|
end
|
365
365
|
|
366
|
-
it "non-sync watch does send two watch callbacks to a controller if there is cached content" do
|
366
|
+
it "non-sync watch does send two watch callbacks to a controller if there is cached content followed by a write" do
|
367
367
|
ctx = flok_new_user File.read('./spec/kern/assets/vm/controller12.rb'), File.read("./spec/kern/assets/vm/config4.rb")
|
368
368
|
|
369
369
|
ctx.eval %{
|
370
370
|
base = _embed("my_controller", 1, {}, null);
|
371
371
|
}
|
372
372
|
|
373
|
-
#
|
374
|
-
|
375
|
-
|
373
|
+
#Step 1. Write a page into cache
|
374
|
+
################################################################################
|
375
|
+
#Trigger controller 'write_first'
|
376
|
+
@driver.int "int_event", [ ctx.eval("base"), "write_first", {} ]
|
376
377
|
|
377
|
-
|
378
|
+
#Write should have triggered a disk read (to ensure there is no page in cache) to which we respond
|
379
|
+
#with nothing
|
380
|
+
@driver.ignore_up_to "if_per_get"
|
381
|
+
@driver.int "int_per_get_res", ["vm", "spec", "test", nil]
|
382
|
+
################################################################################
|
378
383
|
|
379
|
-
#
|
380
|
-
|
381
|
-
|
384
|
+
#Step 2. Watch that page
|
385
|
+
################################################################################
|
386
|
+
#Trigger controller 'watch_first'
|
387
|
+
@driver.int "int_event", [ ctx.eval("base"), "watch_first", {} ]
|
382
388
|
|
383
|
-
|
389
|
+
#Asynchronous dispatch
|
390
|
+
100.times { @ctx.eval("int_dispatch()")}
|
384
391
|
|
385
|
-
#
|
386
|
-
|
387
|
-
|
388
|
-
|
389
|
-
#And they should have been read in order
|
390
|
-
vm_write_list = JSON.parse(ctx.eval("JSON.stringify(vm_write_list)"));
|
391
|
-
expect(read_res_params).to eq(vm_write_list)
|
392
|
+
#Should have triggered the read_res
|
393
|
+
expect(@ctx.dump("read_res_params").count).to eq(1)
|
394
|
+
################################################################################
|
392
395
|
end
|
393
396
|
|
394
397
|
it "vm_cache_write does not tell controllers an update has occurred if the page requested to cache was already cached" do
|
@@ -60,6 +60,26 @@ RSpec.describe "kern:sockio_pager" do
|
|
60
60
|
@driver.mexpect "if_sockio_fwd", [Integer, "update", dump["pg_sockio0_bp"]], 1
|
61
61
|
end
|
62
62
|
|
63
|
+
it "Does send an unwatch request via socket.io when a page is unwatched" do
|
64
|
+
ctx = flok_new_user File.read('./spec/kern/assets/vm/pg_sockio/watch.rb'), File.read("./spec/kern/assets/vm/pg_sockio/config.rb")
|
65
|
+
ctx.eval %{
|
66
|
+
//Call embed on main root view
|
67
|
+
base = _embed("my_controller", 0, {}, null);
|
68
|
+
|
69
|
+
//Drain queue
|
70
|
+
int_dispatch([]);
|
71
|
+
}
|
72
|
+
|
73
|
+
@driver.int "int_event", [ @ctx.eval("base"), "unwatch", {} ]
|
74
|
+
|
75
|
+
#Expect an unwatch request
|
76
|
+
@driver.ignore_up_to "if_sockio_send", 1 do |e|
|
77
|
+
e[1] == "unwatch"
|
78
|
+
end
|
79
|
+
unwatch_msg = @driver.get "if_sockio_send", 1
|
80
|
+
expect(unwatch_msg[2]).to eq({"page_id" => "test"})
|
81
|
+
end
|
82
|
+
|
63
83
|
it "Does send a watch request via socket.io when a page is watched" do
|
64
84
|
ctx = flok_new_user File.read('./spec/kern/assets/vm/pg_sockio/watch.rb'), File.read("./spec/kern/assets/vm/pg_sockio/config.rb")
|
65
85
|
ctx.eval %{
|
@@ -77,7 +97,90 @@ RSpec.describe "kern:sockio_pager" do
|
|
77
97
|
}], 1
|
78
98
|
end
|
79
99
|
|
80
|
-
it "Does
|
100
|
+
it "Does send a watch request at periodic intervals of all pages that are currently watched and then does not send pages that have been unwatched" do
|
101
|
+
ctx = flok_new_user File.read('./spec/kern/assets/vm/pg_sockio/watch3.rb'), File.read("./spec/kern/assets/vm/pg_sockio/config.rb")
|
102
|
+
ctx.eval %{
|
103
|
+
//Call embed on main root view
|
104
|
+
base = _embed("my_controller", 0, {}, null);
|
105
|
+
|
106
|
+
//Drain queue
|
107
|
+
int_dispatch([]);
|
108
|
+
}
|
109
|
+
|
110
|
+
#Get through the first two watches triggered by the original watch
|
111
|
+
@driver.ignore_up_to "if_sockio_send", 1
|
112
|
+
@driver.mexpect "if_sockio_send", [Integer, "watch", {
|
113
|
+
"page_id" => "test0"
|
114
|
+
}], 1
|
115
|
+
@driver.ignore_up_to "if_sockio_send", 1
|
116
|
+
@driver.mexpect "if_sockio_send", [Integer, "watch", {
|
117
|
+
"page_id" => "test1"
|
118
|
+
}], 1
|
119
|
+
|
120
|
+
#Now we wait 15 seconds
|
121
|
+
(15*4).times { @driver.int "int_timer" }
|
122
|
+
|
123
|
+
#Now we should have a message for the synchronization of the watchlist
|
124
|
+
@driver.ignore_up_to("if_sockio_send", 1) { |e| next e[1] == "resync" }
|
125
|
+
resync_res = @driver.get "if_sockio_send", 1
|
126
|
+
resync_info = resync_res[2] #Hash on end contains the actual data from the message
|
127
|
+
expect(resync_info.keys).to include("watch_list"); #Should have a watch list
|
128
|
+
|
129
|
+
#Check the watchlist we got, first get the hash values for the pages
|
130
|
+
expected_watch_list = []
|
131
|
+
expected_watch_list += ["test0", nil]
|
132
|
+
expected_watch_list += ["test1", nil]
|
133
|
+
expect(resync_info["watch_list"]).to eq(expected_watch_list)
|
134
|
+
|
135
|
+
#Test 2 - Now we are changing a page, so test1 should have a hash value
|
136
|
+
#######################################################################################################
|
137
|
+
#Update test1, which will try to read from disk, respond with a blank page
|
138
|
+
@driver.int "int_event", [ @ctx.eval("base"), "write_test1", {} ]
|
139
|
+
@driver.ignore_up_to "if_per_get", 2 do |e|
|
140
|
+
next e[2] == "test1"
|
141
|
+
end
|
142
|
+
@driver.int "int_per_get_res", ["vm", "sockio", "test1", nil]
|
143
|
+
|
144
|
+
#Now we wait 15 seconds (again)
|
145
|
+
(15*4).times { @driver.int "int_timer" }
|
146
|
+
|
147
|
+
#Now we should have a message for the synchronization of the watchlist
|
148
|
+
@driver.ignore #it's incomplete... so
|
149
|
+
@driver.ignore_up_to("if_sockio_send", 1) { |e| next e[1] == "resync" }
|
150
|
+
resync_res = @driver.get "if_sockio_send", 1
|
151
|
+
resync_info = resync_res[2] #Hash on end contains the actual data from the message
|
152
|
+
expect(resync_info.keys).to include("watch_list"); #Should have a watch list
|
153
|
+
|
154
|
+
#Check the watchlist we got, first get the hash values for the pages
|
155
|
+
expected_watch_list = []
|
156
|
+
expected_watch_list += ["test0", nil]
|
157
|
+
expected_watch_list += ["test1", @ctx.eval("vm_cache.sockio.test1._hash")]
|
158
|
+
expect(resync_info["watch_list"]).to eq(expected_watch_list)
|
159
|
+
#######################################################################################################
|
160
|
+
|
161
|
+
#Test 3 - Now we unwatch a page
|
162
|
+
#######################################################################################################
|
163
|
+
#Unwatch test1
|
164
|
+
@driver.int "int_event", [ @ctx.eval("base"), "unwatch_test1", {} ]
|
165
|
+
|
166
|
+
#Now we wait 15 seconds (again)
|
167
|
+
(15*4).times { @driver.int "int_timer" }
|
168
|
+
|
169
|
+
#Now we should have a message for the synchronization of the watchlist
|
170
|
+
@driver.ignore #it's incomplete... so
|
171
|
+
@driver.ignore_up_to("if_sockio_send", 1) { |e| next e[1] == "resync" }
|
172
|
+
resync_res = @driver.get "if_sockio_send", 1
|
173
|
+
resync_info = resync_res[2] #Hash on end contains the actual data from the message
|
174
|
+
expect(resync_info.keys).to include("watch_list"); #Should have a watch list
|
175
|
+
|
176
|
+
#Check the watchlist we got, first get the hash values for the pages
|
177
|
+
expected_watch_list = []
|
178
|
+
expected_watch_list += ["test0", nil]
|
179
|
+
expect(resync_info["watch_list"]).to eq(expected_watch_list)
|
180
|
+
#######################################################################################################
|
181
|
+
end
|
182
|
+
|
183
|
+
it "Does write a page to vm_cache that **does** already exist as <unbased, nochanges> the page receives an 'update' response from the external socket.io without a changes id (server result should be written into cache as-is); should no longer exist in unsynced" do
|
81
184
|
ctx = flok_new_user File.read('./spec/kern/assets/vm/pg_sockio/watch2.rb'), File.read("./spec/kern/assets/vm/pg_sockio/config.rb")
|
82
185
|
dump = ctx.evald %{
|
83
186
|
//Call embed on main root view
|
@@ -88,6 +191,9 @@ RSpec.describe "kern:sockio_pager" do
|
|
88
191
|
|
89
192
|
//pg_sockio0 socket address & the endpoint for the event callback
|
90
193
|
dump.pg_sockio0_bp = pg_sockio0_bp;
|
194
|
+
|
195
|
+
//Mark page as unsynced manually
|
196
|
+
vm_unsynced["sockio"]["test"] = 0;
|
91
197
|
}
|
92
198
|
|
93
199
|
#sockio driver should have been signaled (to which it should respond asynchronously, and presumably after the disk)
|
@@ -144,9 +250,14 @@ RSpec.describe "kern:sockio_pager" do
|
|
144
250
|
expect(post_read_res_dump["read_res_params"][1]["entries"]).to eq([
|
145
251
|
{"_id" => "foo3", "_sig" => "foo3", "value" => "bar3"}
|
146
252
|
])
|
253
|
+
|
254
|
+
#Should no longer be unsynced
|
255
|
+
vm_unsynced = @ctx.dump("vm_unsynced")
|
256
|
+
expect(vm_unsynced["sockio"]).to eq({
|
257
|
+
})
|
147
258
|
end
|
148
259
|
|
149
|
-
it "Does write a page to vm_cache that **does** already exist as <unbased, changes> receives an 'update' response from the external socket.io without a changes_id" do
|
260
|
+
it "Does write a page to vm_cache that **does** already exist as <unbased, changes> receives an 'update' response from the external socket.io without a changes_id. Should still be in the vm_unsynced" do
|
150
261
|
ctx = flok_new_user File.read('./spec/kern/assets/vm/pg_sockio/watch2.rb'), File.read("./spec/kern/assets/vm/pg_sockio/config.rb")
|
151
262
|
dump = ctx.evald %{
|
152
263
|
//Call embed on main root view
|
@@ -157,6 +268,9 @@ RSpec.describe "kern:sockio_pager" do
|
|
157
268
|
|
158
269
|
//pg_sockio0 socket address & the endpoint for the event callback
|
159
270
|
dump.pg_sockio0_bp = pg_sockio0_bp;
|
271
|
+
|
272
|
+
//Mark page as unsynced manually
|
273
|
+
vm_unsynced["sockio"]["test"] = 0;
|
160
274
|
}
|
161
275
|
|
162
276
|
#sockio driver should have been signaled (to which it should respond asynchronously, and presumably after the disk)
|
@@ -225,9 +339,195 @@ RSpec.describe "kern:sockio_pager" do
|
|
225
339
|
{"_id" => "foo2", "_sig" => "foo2", "value" => "bar2"},
|
226
340
|
{"_id" => "foo4", "_sig" => "foo4", "value" => "bar4"},
|
227
341
|
])
|
342
|
+
|
343
|
+
#Should still be unsynced as it contains changes (we only removed changes on __base which is double buffered)
|
344
|
+
vm_unsynced = @ctx.dump("vm_unsynced")
|
345
|
+
expect(vm_unsynced["sockio"]).to eq({
|
346
|
+
"test" => 0
|
347
|
+
})
|
348
|
+
|
349
|
+
end
|
350
|
+
|
351
|
+
it "Does write a page to vm_cache that **does** already exist as <unbased, changes> receives an 'update' response from the external socket.io with a mis-matching changes_id. Should still be in the vm_unsynced" do
|
352
|
+
ctx = flok_new_user File.read('./spec/kern/assets/vm/pg_sockio/watch2.rb'), File.read("./spec/kern/assets/vm/pg_sockio/config.rb")
|
353
|
+
dump = ctx.evald %{
|
354
|
+
//Call embed on main root view
|
355
|
+
dump.base = _embed("my_controller", 0, {}, null);
|
356
|
+
|
357
|
+
//Drain queue
|
358
|
+
int_dispatch([]);
|
359
|
+
|
360
|
+
//pg_sockio0 socket address & the endpoint for the event callback
|
361
|
+
dump.pg_sockio0_bp = pg_sockio0_bp;
|
362
|
+
|
363
|
+
//Mark page as unsynced manually
|
364
|
+
vm_unsynced["sockio"]["test"] = 0;
|
365
|
+
}
|
366
|
+
|
367
|
+
#sockio driver should have been signaled (to which it should respond asynchronously, and presumably after the disk)
|
368
|
+
@driver.ignore_up_to "if_sockio_send"
|
369
|
+
@driver.mexpect "if_sockio_send", [Integer, "watch", {
|
370
|
+
"page_id" => "test"
|
371
|
+
}], 1
|
372
|
+
|
373
|
+
#The disk should have been signaled
|
374
|
+
@driver.ignore_up_to "if_per_get"
|
375
|
+
@driver.mexpect "if_per_get", ["vm", "sockio", "test"], 2
|
376
|
+
|
377
|
+
|
378
|
+
#The disk should respond with a page that contains changes
|
379
|
+
@driver.int "int_per_get_res", ["vm", "sockio", "test", {
|
380
|
+
"_id" => "test",
|
381
|
+
"_next" => nil,
|
382
|
+
"_head" => nil,
|
383
|
+
"entries" => [
|
384
|
+
{"_id" => "foo1", "_sig" => "foo1", "value" => "bar1"},
|
385
|
+
{"_id" => "foo2", "_sig" => "foo2", "value" => "bar2"}
|
386
|
+
],
|
387
|
+
"__changes" => [
|
388
|
+
["+", 0, {"_id" => "foo1", "_sig" => "foo1", "value" => "bar1"}],
|
389
|
+
["+", 1, {"_id" => "foo2", "_sig" => "foo2", "value" => "bar2"}],
|
390
|
+
["-", "foo3"],
|
391
|
+
],
|
392
|
+
"__changes_id" => "foo"
|
393
|
+
}]
|
394
|
+
|
395
|
+
#We (driver sockio) received a watch request for a page with the id 'test'
|
396
|
+
#Now we are imagining that the socket.io driver received back some
|
397
|
+
#data and is now signaling to the kernel that data is available (as it sends to an
|
398
|
+
#event endpoint equal to the socket bp)
|
399
|
+
@driver.int "int_event", [dump["pg_sockio0_bp"], "update", {page: {
|
400
|
+
_id: "test",
|
401
|
+
_next: nil,
|
402
|
+
_head: nil,
|
403
|
+
entries: [
|
404
|
+
{"_id" => "foo3", "_sig" => "foo3", "value" => "bar3"},
|
405
|
+
{"_id" => "foo4", "_sig" => "foo4", "value" => "bar4"}
|
406
|
+
],
|
407
|
+
}, changes_id: "foo2"}]
|
408
|
+
|
409
|
+
post_read_res_dump = ctx.evald %{
|
410
|
+
for (var i = 0; i < 100; ++i) {
|
411
|
+
//Drain queue (vm_cache_write defers to controller)
|
412
|
+
int_dispatch([]);
|
413
|
+
}
|
414
|
+
|
415
|
+
dump.read_res_params = read_res_params;
|
416
|
+
}
|
417
|
+
|
418
|
+
#The controller should have received a notification that a page was updated twice, one
|
419
|
+
#for the disk response and one for the pager response
|
420
|
+
expect(post_read_res_dump["read_res_params"].length).to eq(2)
|
421
|
+
expect(post_read_res_dump["read_res_params"][0]["entries"]).to eq([
|
422
|
+
{"_id" => "foo1", "_sig" => "foo1", "value" => "bar1"},
|
423
|
+
{"_id" => "foo2", "_sig" => "foo2", "value" => "bar2"}
|
424
|
+
])
|
425
|
+
|
426
|
+
#Next page should be rebased on top of the incoming page, such that changes are played *over* it
|
427
|
+
#which includes deletion of foo3
|
428
|
+
expect(post_read_res_dump["read_res_params"][1]["entries"]).to eq([
|
429
|
+
{"_id" => "foo1", "_sig" => "foo1", "value" => "bar1"},
|
430
|
+
{"_id" => "foo2", "_sig" => "foo2", "value" => "bar2"},
|
431
|
+
{"_id" => "foo4", "_sig" => "foo4", "value" => "bar4"},
|
432
|
+
])
|
433
|
+
|
434
|
+
#Should still be unsynced as it contains changes (we only removed changes on __base which is double buffered)
|
435
|
+
vm_unsynced = @ctx.dump("vm_unsynced")
|
436
|
+
expect(vm_unsynced["sockio"]).to eq({
|
437
|
+
"test" => 0
|
438
|
+
})
|
439
|
+
end
|
440
|
+
|
441
|
+
it "Does write a page to vm_cache that **does** already exist as <unbased, changes> receives an 'update' response from the external socket.io with matching changes_id. Should not still be in the vm_unsynced" do
|
442
|
+
ctx = flok_new_user File.read('./spec/kern/assets/vm/pg_sockio/watch2.rb'), File.read("./spec/kern/assets/vm/pg_sockio/config.rb")
|
443
|
+
dump = ctx.evald %{
|
444
|
+
//Call embed on main root view
|
445
|
+
dump.base = _embed("my_controller", 0, {}, null);
|
446
|
+
|
447
|
+
//Drain queue
|
448
|
+
int_dispatch([]);
|
449
|
+
|
450
|
+
//pg_sockio0 socket address & the endpoint for the event callback
|
451
|
+
dump.pg_sockio0_bp = pg_sockio0_bp;
|
452
|
+
|
453
|
+
//Mark page as unsynced manually
|
454
|
+
vm_unsynced["sockio"]["test"] = 0;
|
455
|
+
}
|
456
|
+
|
457
|
+
#sockio driver should have been signaled (to which it should respond asynchronously, and presumably after the disk)
|
458
|
+
@driver.ignore_up_to "if_sockio_send"
|
459
|
+
@driver.mexpect "if_sockio_send", [Integer, "watch", {
|
460
|
+
"page_id" => "test"
|
461
|
+
}], 1
|
462
|
+
|
463
|
+
#The disk should have been signaled
|
464
|
+
@driver.ignore_up_to "if_per_get"
|
465
|
+
@driver.mexpect "if_per_get", ["vm", "sockio", "test"], 2
|
466
|
+
|
467
|
+
|
468
|
+
#The disk should respond with a page that contains changes
|
469
|
+
@driver.int "int_per_get_res", ["vm", "sockio", "test", {
|
470
|
+
"_id" => "test",
|
471
|
+
"_next" => nil,
|
472
|
+
"_head" => nil,
|
473
|
+
"entries" => [
|
474
|
+
{"_id" => "foo1", "_sig" => "foo1", "value" => "bar1"},
|
475
|
+
{"_id" => "foo2", "_sig" => "foo2", "value" => "bar2"}
|
476
|
+
],
|
477
|
+
"__changes" => [
|
478
|
+
["+", 0, {"_id" => "foo1", "_sig" => "foo1", "value" => "bar1"}],
|
479
|
+
["+", 1, {"_id" => "foo2", "_sig" => "foo2", "value" => "bar2"}],
|
480
|
+
["-", "foo3"],
|
481
|
+
],
|
482
|
+
"__changes_id" => "foo"
|
483
|
+
}]
|
484
|
+
|
485
|
+
#We (driver sockio) received a watch request for a page with the id 'test'
|
486
|
+
#Now we are imagining that the socket.io driver received back some
|
487
|
+
#data and is now signaling to the kernel that data is available (as it sends to an
|
488
|
+
#event endpoint equal to the socket bp)
|
489
|
+
@driver.int "int_event", [dump["pg_sockio0_bp"], "update", {page: {
|
490
|
+
_id: "test",
|
491
|
+
_next: nil,
|
492
|
+
_head: nil,
|
493
|
+
entries: [
|
494
|
+
{"_id" => "foo3", "_sig" => "foo3", "value" => "bar3"},
|
495
|
+
{"_id" => "foo4", "_sig" => "foo4", "value" => "bar4"}
|
496
|
+
],
|
497
|
+
}, changes_id: "foo"}]
|
498
|
+
|
499
|
+
post_read_res_dump = ctx.evald %{
|
500
|
+
for (var i = 0; i < 100; ++i) {
|
501
|
+
//Drain queue (vm_cache_write defers to controller)
|
502
|
+
int_dispatch([]);
|
503
|
+
}
|
504
|
+
|
505
|
+
dump.read_res_params = read_res_params;
|
506
|
+
}
|
507
|
+
|
508
|
+
#The controller should have received a notification that a page was updated twice, one
|
509
|
+
#for the disk response and one for the pager response
|
510
|
+
expect(post_read_res_dump["read_res_params"].length).to eq(2)
|
511
|
+
expect(post_read_res_dump["read_res_params"][0]["entries"]).to eq([
|
512
|
+
{"_id" => "foo1", "_sig" => "foo1", "value" => "bar1"},
|
513
|
+
{"_id" => "foo2", "_sig" => "foo2", "value" => "bar2"}
|
514
|
+
])
|
515
|
+
|
516
|
+
#Next page should be rebased on top of the incoming page, such that changes are played *over* it
|
517
|
+
#which includes deletion of foo3
|
518
|
+
expect(post_read_res_dump["read_res_params"][1]["entries"]).to eq([
|
519
|
+
{"_id" => "foo3", "_sig" => "foo3", "value" => "bar3"},
|
520
|
+
{"_id" => "foo4", "_sig" => "foo4", "value" => "bar4"},
|
521
|
+
])
|
522
|
+
|
523
|
+
#Should still be unsynced as it contains changes (we only removed changes on __base which is double buffered)
|
524
|
+
vm_unsynced = @ctx.dump("vm_unsynced")
|
525
|
+
expect(vm_unsynced["sockio"]).to eq({
|
526
|
+
})
|
228
527
|
end
|
229
528
|
|
230
|
-
|
529
|
+
|
530
|
+
it "Does write a page to vm_cache that **does** already exist as <based<unbased, changes>, changes> receives an 'update' response from the external socket.io without a changes_id. Should still exist in vm_unsynced" do
|
231
531
|
ctx = flok_new_user File.read('./spec/kern/assets/vm/pg_sockio/watch2.rb'), File.read("./spec/kern/assets/vm/pg_sockio/config.rb")
|
232
532
|
dump = ctx.evald %{
|
233
533
|
//Call embed on main root view
|
@@ -238,6 +538,9 @@ RSpec.describe "kern:sockio_pager" do
|
|
238
538
|
|
239
539
|
//pg_sockio0 socket address & the endpoint for the event callback
|
240
540
|
dump.pg_sockio0_bp = pg_sockio0_bp;
|
541
|
+
|
542
|
+
//Mark page as unsynced manually
|
543
|
+
vm_unsynced["sockio"]["test"] = 0;
|
241
544
|
}
|
242
545
|
|
243
546
|
#sockio driver should have been signaled (to which it should respond asynchronously, and presumably after the disk)
|
@@ -323,9 +626,15 @@ RSpec.describe "kern:sockio_pager" do
|
|
323
626
|
{"_id" => "foo4", "_sig" => "foo4", "value" => "bar4"},
|
324
627
|
{"_id" => "foo5", "_sig" => "foo5", "value" => "bar5"},
|
325
628
|
])
|
629
|
+
|
630
|
+
#Should still be unsynced as it contains changes (we only removed changes on __base which is double buffered)
|
631
|
+
vm_unsynced = @ctx.dump("vm_unsynced")
|
632
|
+
expect(vm_unsynced["sockio"]).to eq({
|
633
|
+
"test" => 0
|
634
|
+
})
|
326
635
|
end
|
327
636
|
|
328
|
-
it "Does write a page to vm_cache that **does** already exist as
|
637
|
+
it "Does write a page to vm_cache that **does** already exist as [changes, based[changes, unbased]] receives an 'update' response from the external socket.io **with** an existing changes_id but keeps that page in vm_unsynced" do
|
329
638
|
ctx = flok_new_user File.read('./spec/kern/assets/vm/pg_sockio/watch2.rb'), File.read("./spec/kern/assets/vm/pg_sockio/config.rb")
|
330
639
|
dump = ctx.evald %{
|
331
640
|
//Call embed on main root view
|
@@ -336,6 +645,9 @@ RSpec.describe "kern:sockio_pager" do
|
|
336
645
|
|
337
646
|
//pg_sockio0 socket address & the endpoint for the event callback
|
338
647
|
dump.pg_sockio0_bp = pg_sockio0_bp;
|
648
|
+
|
649
|
+
//Mark page as unsynced manually
|
650
|
+
vm_unsynced["sockio"]["test"] = 0;
|
339
651
|
}
|
340
652
|
|
341
653
|
#sockio driver should have been signaled (to which it should respond asynchronously, and presumably after the disk)
|
@@ -349,7 +661,7 @@ RSpec.describe "kern:sockio_pager" do
|
|
349
661
|
@driver.mexpect "if_per_get", ["vm", "sockio", "test"], 2
|
350
662
|
|
351
663
|
|
352
|
-
#The disk should respond with a page that contains changes
|
664
|
+
#The disk should respond with a page that contains <based<nobase, changes>, changes>
|
353
665
|
@driver.int "int_per_get_res", ["vm", "sockio", "test", {
|
354
666
|
"_id" => "test",
|
355
667
|
"_next" => nil,
|
@@ -363,25 +675,37 @@ RSpec.describe "kern:sockio_pager" do
|
|
363
675
|
["+", 1, {"_id" => "foo2", "_sig" => "foo2", "value" => "bar2"}],
|
364
676
|
["-", "foo3"],
|
365
677
|
],
|
366
|
-
"__changes_id" => "foo"
|
678
|
+
"__changes_id" => "foo",
|
679
|
+
"__base" => {
|
680
|
+
"_id" => "test",
|
681
|
+
"_next" => nil,
|
682
|
+
"_head" => nil,
|
683
|
+
"entries" => [
|
684
|
+
{"_id" => "fooX", "_sig" => "fooX", "value" => "barX"},
|
685
|
+
{"_id" => "foo3", "_sig" => "foo3", "value" => "bar3"}
|
686
|
+
],
|
687
|
+
"__changes_id" => "foo2",
|
688
|
+
"__changes" => [
|
689
|
+
["-", "fooX"],
|
690
|
+
["+", 1, {"_id" => "foo3", "_sig" => "foo3", "value" => "bar3"}]
|
691
|
+
]
|
692
|
+
}
|
367
693
|
}]
|
368
694
|
|
369
695
|
#We (driver sockio) received a watch request for a page with the id 'test'
|
370
696
|
#Now we are imagining that the socket.io driver received back some
|
371
697
|
#data and is now signaling to the kernel that data is available (as it sends to an
|
372
698
|
#event endpoint equal to the socket bp)
|
373
|
-
@driver.int "int_event", [dump["pg_sockio0_bp"], "update", {
|
374
|
-
|
375
|
-
|
376
|
-
|
377
|
-
|
378
|
-
|
379
|
-
|
380
|
-
|
381
|
-
|
382
|
-
|
383
|
-
changes_id: "foo"
|
384
|
-
}]
|
699
|
+
@driver.int "int_event", [dump["pg_sockio0_bp"], "update", {page: {
|
700
|
+
_id: "test",
|
701
|
+
_next: nil,
|
702
|
+
_head: nil,
|
703
|
+
entries: [
|
704
|
+
{"_id" => "foo4", "_sig" => "foo4", "value" => "bar4"},
|
705
|
+
{"_id" => "foo5", "_sig" => "foo5", "value" => "bar5"},
|
706
|
+
{"_id" => "fooX", "_sig" => "fooX", "value" => "barX"},
|
707
|
+
],
|
708
|
+
}, changes_id: "foo2"}]
|
385
709
|
|
386
710
|
post_read_res_dump = ctx.evald %{
|
387
711
|
for (var i = 0; i < 100; ++i) {
|
@@ -399,22 +723,26 @@ RSpec.describe "kern:sockio_pager" do
|
|
399
723
|
{"_id" => "foo1", "_sig" => "foo1", "value" => "bar1"},
|
400
724
|
{"_id" => "foo2", "_sig" => "foo2", "value" => "bar2"}
|
401
725
|
])
|
402
|
-
#Changes should be present in second copy
|
403
|
-
expect(post_read_res_dump["read_res_params"][0]["__changes_id"]).not_to eq(nil)
|
404
|
-
|
405
726
|
|
406
|
-
#Next
|
407
|
-
#
|
408
|
-
#
|
727
|
+
#Next version is a double replay. First, the server page is called the new 'base', then changes from the
|
728
|
+
#old base are played on top of the server page. Then the top-level changes are recalculated based on this new page,
|
729
|
+
#and then replayed on the server's page *again* (a linked copy where the first replay sits at __base).
|
409
730
|
expect(post_read_res_dump["read_res_params"][1]["entries"]).to eq([
|
410
|
-
|
411
|
-
|
731
|
+
{"_id" => "foo1", "_sig" => "foo1", "value" => "bar1"},
|
732
|
+
{"_id" => "foo2", "_sig" => "foo2", "value" => "bar2"},
|
733
|
+
{"_id" => "foo4", "_sig" => "foo4", "value" => "bar4"},
|
734
|
+
{"_id" => "foo5", "_sig" => "foo5", "value" => "bar5"},
|
735
|
+
{"_id" => "fooX", "_sig" => "fooX", "value" => "barX"},
|
412
736
|
])
|
413
|
-
|
414
|
-
|
737
|
+
|
738
|
+
#Should still be unsynced as it contains changes (we only removed changes on __base which is double buffered)
|
739
|
+
vm_unsynced = @ctx.dump("vm_unsynced")
|
740
|
+
expect(vm_unsynced["sockio"]).to eq({
|
741
|
+
"test" => 0
|
742
|
+
})
|
415
743
|
end
|
416
744
|
|
417
|
-
it "Does write a page to vm_cache that **does not** already exist when the page receives an 'update' response from the external socket.io" do
|
745
|
+
it "Does write a page to vm_cache that **does not** already exist when the page receives an 'update' response from the external socket.io. Should not exist in vm_unsynced anymore" do
|
418
746
|
ctx = flok_new_user File.read('./spec/kern/assets/vm/pg_sockio/watch.rb'), File.read("./spec/kern/assets/vm/pg_sockio/config.rb")
|
419
747
|
dump = ctx.evald %{
|
420
748
|
//Call embed on main root view
|
@@ -425,6 +753,9 @@ RSpec.describe "kern:sockio_pager" do
|
|
425
753
|
|
426
754
|
//pg_sockio0 socket address & the endpoint for the event callback
|
427
755
|
dump.pg_sockio0_bp = pg_sockio0_bp;
|
756
|
+
|
757
|
+
//Mark page as unsynced manually
|
758
|
+
vm_unsynced["sockio"]["test"] = 0;
|
428
759
|
}
|
429
760
|
|
430
761
|
#We received a watch request for a page with the id 'test'
|
@@ -450,6 +781,11 @@ RSpec.describe "kern:sockio_pager" do
|
|
450
781
|
expect(post_read_res_dump["read_res_params"]["entries"]).to eq([
|
451
782
|
{"_id" => "foo", "_sig" => "foo", "value" => "bar"}
|
452
783
|
])
|
784
|
+
|
785
|
+
#Should not be in the queue anymore
|
786
|
+
vm_unsynced = @ctx.dump("vm_unsynced")
|
787
|
+
expect(vm_unsynced["sockio"]).to eq({
|
788
|
+
})
|
453
789
|
end
|
454
790
|
|
455
791
|
it "Does accept writes of pages that don't currently exist in cache; they go into vm_cache as-is and are sent to the sockio driver" do
|
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: flok
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.0.
|
4
|
+
version: 0.0.83
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- seo
|
8
8
|
autorequire:
|
9
9
|
bindir: bin
|
10
10
|
cert_chain: []
|
11
|
-
date: 2015-08-
|
11
|
+
date: 2015-08-14 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
name: execjs
|
@@ -1278,8 +1278,10 @@ files:
|
|
1278
1278
|
- spec/kern/assets/vm/pg_sockio/config.rb
|
1279
1279
|
- spec/kern/assets/vm/pg_sockio/config_no_url.rb
|
1280
1280
|
- spec/kern/assets/vm/pg_sockio/nothing.rb
|
1281
|
+
- spec/kern/assets/vm/pg_sockio/unmark_changes.rb
|
1281
1282
|
- spec/kern/assets/vm/pg_sockio/watch.rb
|
1282
1283
|
- spec/kern/assets/vm/pg_sockio/watch2.rb
|
1284
|
+
- spec/kern/assets/vm/pg_sockio/watch3.rb
|
1283
1285
|
- spec/kern/assets/vm/pg_sockio/write.rb
|
1284
1286
|
- spec/kern/assets/vm/pg_sockio/write2.rb
|
1285
1287
|
- spec/kern/assets/vm/pg_sockio/write3.rb
|
@@ -2230,8 +2232,10 @@ test_files:
|
|
2230
2232
|
- spec/kern/assets/vm/pg_sockio/config.rb
|
2231
2233
|
- spec/kern/assets/vm/pg_sockio/config_no_url.rb
|
2232
2234
|
- spec/kern/assets/vm/pg_sockio/nothing.rb
|
2235
|
+
- spec/kern/assets/vm/pg_sockio/unmark_changes.rb
|
2233
2236
|
- spec/kern/assets/vm/pg_sockio/watch.rb
|
2234
2237
|
- spec/kern/assets/vm/pg_sockio/watch2.rb
|
2238
|
+
- spec/kern/assets/vm/pg_sockio/watch3.rb
|
2235
2239
|
- spec/kern/assets/vm/pg_sockio/write.rb
|
2236
2240
|
- spec/kern/assets/vm/pg_sockio/write2.rb
|
2237
2241
|
- spec/kern/assets/vm/pg_sockio/write3.rb
|