flok 0.0.38 → 0.0.39
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/app/drivers/chrome/src/dispatch.js +41 -6
- data/app/drivers/chrome/src/persist.js +1 -10
- data/app/kern/dispatch.js +17 -23
- data/app/kern/gen_id.js +8 -0
- data/app/kern/macro.rb +20 -18
- data/app/kern/pagers/pg_spec0.js +20 -0
- data/app/kern/services/vm.rb +176 -30
- data/docs/client_api.md +3 -1
- data/docs/compilation.md +1 -1
- data/docs/dispatch.md +91 -0
- data/docs/kernel_api.md +3 -2
- data/docs/messaging.md +6 -1
- data/docs/mod/persist.md +4 -3
- data/docs/project_layout.md +2 -2
- data/docs/services/vm.md +116 -41
- data/docs/services/vm/pagers.md +38 -46
- data/lib/flok.rb +1 -0
- data/lib/flok/build.rb +3 -4
- data/lib/flok/macro.rb +27 -0
- data/lib/flok/services_compiler.rb +12 -8
- data/lib/flok/user_compiler.rb +131 -4
- data/lib/flok/version.rb +1 -1
- data/spec/env/kern.rb +71 -0
- data/spec/etc/macro_spec.rb +3 -8
- data/spec/etc/service_compiler/service3.rb +27 -0
- data/spec/etc/services_compiler_spec.rb +35 -27
- data/spec/iface/driver/dispatch_spec.rb +20 -0
- data/spec/iface/driver/persist_spec.rb +9 -24
- data/spec/iface/kern/ping_spec.rb +3 -24
- data/spec/kern/assets/vm/config4.rb +12 -0
- data/spec/kern/assets/vm/controller10.rb +26 -0
- data/spec/kern/assets/vm/controller11.rb +33 -0
- data/spec/kern/assets/vm/controller12.rb +45 -0
- data/spec/kern/assets/vm/controller13.rb +40 -0
- data/spec/kern/assets/vm/controller14.rb +14 -0
- data/spec/kern/assets/vm/controller15.rb +15 -0
- data/spec/kern/assets/vm/controller16.rb +29 -0
- data/spec/kern/assets/vm/controller17.rb +30 -0
- data/spec/kern/assets/vm/controller18.rb +28 -0
- data/spec/kern/assets/vm/controller19.rb +14 -0
- data/spec/kern/assets/vm/controller19b.rb +15 -0
- data/spec/kern/assets/vm/controller20.rb +19 -0
- data/spec/kern/assets/vm/controller21.rb +40 -0
- data/spec/kern/assets/vm/controller7.rb +18 -0
- data/spec/kern/assets/vm/controller8.rb +38 -0
- data/spec/kern/assets/vm/controller8b.rb +18 -0
- data/spec/kern/assets/vm/controller9.rb +20 -0
- data/spec/kern/assets/vm/controller_exc_2watch.rb +15 -0
- data/spec/kern/assets/vm/controller_exc_ewatch.rb +14 -0
- data/spec/kern/assets/vm/macros/copy_page_c.rb +23 -0
- data/spec/kern/assets/vm/macros/entry_del_c.rb +18 -0
- data/spec/kern/assets/vm/macros/entry_insert_c.rb +21 -0
- data/spec/kern/assets/vm/macros/entry_mutable_c.rb +33 -0
- data/spec/kern/assets/vm/macros/new_page_c.rb +7 -0
- data/spec/kern/assets/vm/macros/new_page_c2.rb +7 -0
- data/spec/kern/assets/vm/macros/set_page_head_c.rb +18 -0
- data/spec/kern/assets/vm/macros/set_page_next_c.rb +18 -0
- data/spec/kern/controller_macro_spec.rb +186 -0
- data/spec/kern/dispatch_spec.rb +125 -0
- data/spec/kern/functions_spec.rb +15 -0
- data/spec/kern/vm_service_spec.rb +874 -173
- metadata +70 -5
- data/docs/scheduling.md +0 -46
- data/spec/kern/rest_service_spec.rb +0 -45
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 069696218ad132364486c4d0256e3f274f8e19e3
+  data.tar.gz: 4d2f67fe000dbeb7baf2077632ede49358f5dc6e
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 127c0e54aa11cf77024c208600cff1027350272b39d57f8abd28f6e6030a8765483f3a732cdb7cdc39d221ba0ead994228b811e4f0da87efec5d68b9506ab516
+  data.tar.gz: 8c2cd9a6c868e0ffbfc8359817cea534221ab0a8e623fc75f70c64e49233716a78ee68cc5e968624efb0e35041648d4d0281b9a5501b2c3fa501673361054ccb
data/app/drivers/chrome/src/dispatch.js
CHANGED
@@ -7,6 +7,13 @@
 //Here is an example with two successive calls
 // [2, 'mul', 3, 4, 1, 'print', 'hello world']
 function if_dispatch(qq) {
+  if (qq[0] == 'i') {
+    qq.shift();
+    if_dispatch_call_int_end = true
+  } else {
+    if_dispatch_call_int_end = false
+  }
+
   //If debug socket is attached, forward events to it
   //and do not process the events
   <% if @mods.include? "debug" %>
@@ -22,13 +29,32 @@ function if_dispatch(qq) {
   //The very first thing is the queue type
   var queueType = q.shift();

-  //
-
-
-
+  //Main queue events are run synchronously w.r.t. this thread of execution
+  //Asynchronous events are dispatched individually
+  if (queueType === 0) {
+    //While there are still things left on the queue
+    while (q.length > 0) {
+      //Grab the first thing off the queue, this is the arg count
+      var argc = q.shift();

-
-
+      //Grab the next thing and look that up in the function table. Pass args left
+      this[q.shift()].apply(null, q.splice(0, argc));
+    }
+  } else {
+    //Dispatch asynchronous queue events
+    while (q.length > 0) {
+      //Grab the next thing and look that up in the function table. Pass args left
+      (function() {
+        var argc = q.shift();
+        var q0 = q.shift();
+        var q1 = q.splice(0, argc);
+        async_call = function() {
+          this[q0].apply(null, q1);
+        }
+
+        setTimeout(async_call, 0);
+      })();
+    }
   }
 }

@@ -36,6 +62,12 @@ function if_dispatch(qq) {
   <% if @mods.include? "debug" %>
   }
   <% end %>
+
+
+  if (if_dispatch_call_int_end) {
+    if_dispatch_call_int_end = false;
+    int_dispatch([])
+  }
 }

 function ping() {
@@ -50,3 +82,6 @@ function ping2(arg1, arg2) {
   int_dispatch([1, "pong2", arg1])
   int_dispatch([2, "pong2", arg1, arg2])
 }
+
+function ping_nothing() {
+}
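The queue payloads above use a flat framed-call format: each message is an argument count, a function-table name, then that many arguments, all concatenated into one array. A minimal runnable sketch of a consumer follows; `mul` and `print` are the hypothetical functions from the comment in the diff, and the local `fns` table stands in for the driver's real function-table lookup (`this[...]`):

    // Each framed call is [argc, fname, arg1..argN]; calls are concatenated flat.
    // 'mul' and 'print' are illustrative names from the comment above, not flok APIs.
    var fns = {
      mul: function (a, b) { console.log(a * b); },
      print: function (s) { console.log(s); }
    };

    function drain(q) {
      while (q.length > 0) {
        var argc = q.shift();                       // leading argument count
        var fname = q.shift();                      // function-table key
        fns[fname].apply(null, q.splice(0, argc));  // invoke with the next argc items
      }
    }

    drain([2, 'mul', 3, 4, 1, 'print', 'hello world']); // prints 12, then "hello world"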
data/app/drivers/chrome/src/persist.js
CHANGED
@@ -14,16 +14,7 @@ function if_per_del_ns(ns) {
 }

 function if_per_get(s, ns, key) {
-  function async() {
-    var _ns = store.namespace(ns);
-    var res = _ns.get(key);
-    int_dispatch([2, "int_per_get_res", s, res]);
-  }
-  setTimeout(async, 0);
-}
-
-function if_per_get_sync(s, ns, key) {
   var _ns = store.namespace(ns);
   var res = _ns.get(key);
-  int_dispatch([2, "int_per_get_res", s, res]);
+  int_dispatch([2, "int_per_get_res", s, ns, res]);
 }
data/app/kern/dispatch.js
CHANGED
@@ -30,6 +30,9 @@ function int_dispatch(q) {
   //Now push all of what we can back
   var dump = [];

+  //Add 'i' to start
+  var incomplete = false;
+
   //Send main queue
   if (main_q.length > 0) {
     var out = [0];
@@ -43,20 +46,21 @@ function int_dispatch(q) {
   if (net_q.length > 0 && net_q_rem > 0) {
     //Always pick the minimum between the amount remaining and the q length
     var n = net_q.length < net_q_rem ? net_q.length : net_q_rem;
+    if (n != net_q.length) { incomplete = true; }

     var out = [1];
     var piece = net_q.splice(0, n);
     for (var i = 0; i < piece.length; ++i) {
       out.push.apply(out, piece[i]);
     }
-    dump.push(out);

-
+    dump.push(out);
   }

   if (disk_q.length > 0 && disk_q_rem > 0) {
     //Always pick the minimum between the amount remaining and the q length
     var n = disk_q.length < disk_q_rem ? disk_q.length : disk_q_rem;
+    if (n != disk_q.length) { incomplete = true; }

     var out = [2];
     var piece = disk_q.splice(0, n);
@@ -64,13 +68,12 @@ function int_dispatch(q) {
       out.push.apply(out, piece[i]);
     }
     dump.push(out);
-
-    disk_q_rem -= n;
   }

   if (cpu_q.length > 0 && cpu_q_rem > 0) {
     //Always pick the minimum between the amount remaining and the q length
     var n = cpu_q.length < cpu_q_rem ? cpu_q.length : cpu_q_rem;
+    if (n != cpu_q.length) { incomplete = true; }

     var out = [3];
     var piece = cpu_q.splice(0, n);
@@ -78,13 +81,12 @@ function int_dispatch(q) {
       out.push.apply(out, piece[i]);
     }
     dump.push(out);
-
-    cpu_q_rem -= n;
   }

   if (gpu_q.length > 0 && gpu_q_rem > 0) {
     //Always pick the minimum between the amount remaining and the q length
     var n = gpu_q.length < gpu_q_rem ? gpu_q.length : gpu_q_rem;
+    if (n != gpu_q.length) { incomplete = true; }

     var out = [4];
     var piece = gpu_q.splice(0, n);
@@ -92,19 +94,9 @@ function int_dispatch(q) {
       out.push.apply(out, piece[i]);
     }
     dump.push(out);
-
-    gpu_q_rem -= n;
   }

-
-  if (async_q.length > 0) {
-    var out = [5];
-    for (var i = 0; i < async_q.length; ++i) {
-      out.push.apply(out, async_q[i]);
-    }
-    dump.push(out);
-    async_q = [];
-  }
+  if (incomplete) { dump.unshift("i"); }

   if (dump.length != 0) {
     if_dispatch(dump);
@@ -135,8 +127,6 @@ function ping3(arg1) {
     SEND("cpu", "pong3");
   } else if (arg1 == "gpu") {
     SEND("gpu", "pong3");
-  } else if (arg1 == "async") {
-    SEND("async", "pong3");
   }
 }

@@ -151,8 +141,6 @@ function ping4(arg1) {
     SEND("cpu", "pong4");
   } else if (arg1 == "gpu") {
     SEND("gpu", "pong4");
-  } else if (arg1 == "async") {
-    SEND("async", "pong4");
   }
 }

@@ -166,7 +154,6 @@ function ping4_int(arg1) {
     ++cpu_q_rem;
   } else if (arg1 == "gpu") {
     ++gpu_q_rem;
-  } else if (arg1 == "async") {
   }
 }

@@ -176,7 +163,6 @@ net_q = [];
 disk_q = [];
 cpu_q = [];
 gpu_q = [];
-async_q = [];

 //Each queue has a max # of things that can be en-queued
 //These are decremented when the message is sent (not just queued)
@@ -185,3 +171,11 @@ net_q_rem = 5;
 disk_q_rem = 5;
 cpu_q_rem = 5;
 gpu_q_rem = 5;
+
+<% if @debug %>
+function spec_dispatch_q(queue, count) {
+  for (var i = 0; i < count; ++i) {
+    queue.push([0, "spec"]);
+  }
+}
+<% end %>
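Taken together with the driver-side change above, the "i" marker closes a flow-control loop: when a rate-limited queue cannot be flushed completely, the kernel prefixes the dump with "i", and the driver answers the batch with an empty int_dispatch([]) so the remainder gets flushed. A sketch of the shape of such a dump (the handler name and arguments here are illustrative, not flok APIs):

    // One dump sent to if_dispatch: an optional "i" marker followed by
    // per-queue arrays of [queueType, argc, fname, args...] frames.
    // "if_fake_handler" is an illustrative name, not part of flok.
    var dump = [[1, 2, "if_fake_handler", "GET", "/a"]]; // queueType 1 = net
    var incomplete = true;                               // e.g. net_q_rem was exhausted
    if (incomplete) { dump.unshift("i"); }
    // dump: ["i", [1, 2, "if_fake_handler", "GET", "/a"]]
    // The driver strips "i", dispatches the frames, then calls int_dispatch([])
    // to give the kernel another chance to flush what was held back.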
data/app/kern/gen_id.js
ADDED
data/app/kern/macro.rb
CHANGED
@@ -1,24 +1,26 @@
-
-
-
+module Flok
+  #Process one js code file at a time
+  def self.macro_process text
+    out = StringIO.new

-
-
-
-
-
-
-
-
+    text.split("\n").each do |l|
+      #Send macro
+      if l =~ /SEND/
+        l.strip!
+        l.gsub!(/SEND\(/, "")
+        l.gsub! /\)$/, ""
+        l.gsub! /\);$/, ""
+        o = l.split(",").map{|e| e.strip}

-
+        queue_name = o.shift.gsub(/"/, "")

-
-
-
-
+        res = %{#{queue_name}_q.push([#{o.count-1}, #{o.join(", ")}])}
+        out.puts res
+      else
+        out.puts l
+      end
     end
-  end

-
+    return out.string
+  end
 end
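As a worked example of the rewrite rule above: the quoted queue name is shifted off and unquoted, and the count of what remains minus one (the handler name) becomes the leading argc. The handler name and arguments here are illustrative:

    // Input line in a kernel source file (illustrative handler name):
    SEND("net", "if_fake_handler", "GET", url);
    // Output of Flok.macro_process: o = [handler, "GET", url], so o.count-1 == 2:
    net_q.push([2, "if_fake_handler", "GET", url])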
data/app/kern/pagers/pg_spec0.js
ADDED
@@ -0,0 +1,20 @@
+<% if @debug %>
+function pg_spec0_init(ns, options) {
+  pg_spec0_watchlist = [];
+  pg_spec0_unwatchlist = [];
+  pg_spec0_init_params = {ns: ns, options: options};
+  pg_spec0_ns = ns;
+}
+
+function pg_spec0_watch(id, page) {
+  pg_spec0_watchlist.push({id: id, page: page});
+}
+
+function pg_spec0_unwatch(id) {
+  pg_spec0_unwatchlist.push(id);
+}
+
+function pg_spec0_write(page) {
+  vm_cache_write(pg_spec0_ns, page);
+}
+<% end %>
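pg_spec0 is a debug-only stub pager: it records every watch/unwatch it receives and forwards writes straight into the cache via vm_cache_write, which lets specs assert on pager traffic. A sketch of what it accumulates (the id and page values are illustrative):

    // After the vm service routes a watch and a write to pg_spec0:
    pg_spec0_watch("page0", null);                      // no cache entry yet
    pg_spec0_write({_id: "page0", _hash: "1", entries: []});
    // pg_spec0_watchlist => [{id: "page0", page: null}]
    // and the page now sits in vm_cache via vm_cache_write(pg_spec0_ns, page)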
data/app/kern/services/vm.rb
CHANGED
@@ -7,31 +7,91 @@ service :vm do
   <% end %>
   };

+  vm_dirty = {
+    <% @options[:pagers].each do |p| %>
+      <%= p[:namespace] %>: {},
+    <% end %>
+  };
+
+  vm_bp_to_nmap = {};
+
   //Notification listeners, converts ns+key to an array of base pointers
   vm_notify_map = {};

   //Cache
-  function vm_cache_write(ns,
-    vm_cache[ns][
-
+  function vm_cache_write(ns, page) {
+    var old = vm_cache[ns][page._id];
+    if (old && old._hash == page._hash) { return; }
+
+    vm_dirty[ns][page._id] = page;
+    vm_cache[ns][page._id] = page;

-  //Notification of a change
-  function vm_notify(ns, key) {
     var a = vm_notify_map[ns];
     if (a) {
-      var b = a[
+      var b = a[page._id];

       if (b) {
         for (var i = 0; i < b.length; ++i) {
-
-          if (ns === "<%= p[:namespace] %>") {
-            <%= p[:name] %>_read(ns, b[i], key);
-          }
-        <% end %>
+          int_event(b[i], "read_res", page);
         }
       }
     }
   }
+
+  function vm_rehash_page(page) {
+    var z = 0;
+
+    //head and next are optional
+    if (page._head) { z = crc32(0, page._head) }
+    if (page._next) { z = crc32(z, page._next) }
+
+    z = crc32(z, page._id)
+
+    var e = page.entries;
+    for (var i = 0; i < e.length; ++i) {
+      z = crc32(z, e[i]._sig);
+    }
+
+    page._hash = z.toString();
+  }
+
+  function vm_pageout() {
+    <% @options[:pagers].each do |p| %>
+    //Get id_to_page mappings
+    var id_to_page = vm_dirty["<%= p[:namespace] %>"];
+    if (id_to_page) {
+      var ids = Object.keys(id_to_page);
+
+      //For each mapping, write the page
+      for (var i = 0; i < ids.length; ++i) {
+        var p = id_to_page[ids[i]];
+        SEND("main", "if_per_set", "<%= p[:namespace] %>", ids[i], p);
+      }
+    <% end %>
+    }
+
+    //Clear dirty list
+    vm_dirty = {
+      <% @options[:pagers].each do |p| %>
+        <%= p[:namespace] %>: {},
+      <% end %>
+    };
+  }
+
+  //Part of the persist module
+  //res is page
+  function int_per_get_res(s, ns, res) {
+    //If there is already a cached entry, a pager beat us to it
+    //ignore this for now because the pager should be more up to
+    //date
+    if (vm_cache[ns][res._id]) { return };
+
+    vm_cache_write(ns, res);
+  }
+
+  <% if @debug %>
+  vm_write_list = [];
+  <% end %>
 }

 on_wakeup %{
@@ -44,7 +104,7 @@ service :vm do

   //Call init functions
   <% @options[:pagers].each do |p| %>
-    <%= p[:name] %>_init(<%= (p[:options] || {}).to_json %>);
+    <%= p[:name] %>_init("<%= p[:namespace] %>", <%= (p[:options] || {}).to_json %>);
   <% end %>
 }

@@ -55,6 +115,35 @@ service :vm do
 }

 on_disconnect %{
+  //We need to remove all the entries in vm_notify_map, but we only
+  //get an array of bp for each array in vm_notify_map[ns][key]...
+  //So we use the inverted lookup of vm_bp_to_nmap[bp][ns][key] to get a pointer
+  //to vm_notify_map[ns][key] and associated index. We then delete all the
+  //entries out of vm_notify_map
+
+  //Foreach namespace
+  var nss = Object.keys(vm_bp_to_nmap[bp]);
+  for (var i = 0; i < nss.length; ++i) {
+    //Namespace node
+    var nn = vm_bp_to_nmap[bp][nss[i]];
+
+    //Get all keys (which are ids)
+    var nnk = Object.keys(nn);
+
+    for (var x = 0; x < nnk.length; ++x) {
+      //Array contains [node (pointer to vm_notify_map[ns][key]), index] where index points to base pointer of this
+      //controller in the array
+      var arr = nn[nnk[x]][0]
+      var idx = nn[nnk[x]][1]
+
+      //Remove
+      arr.splice(idx, 1);
+    }
+
+  }
+
+  //Now we just clean up vm_bp_to_nmap because it's no longer used
+  delete vm_bp_to_nmap[bp];
 }

 on "read_sync", %{
@@ -76,27 +165,19 @@ service :vm do
   int_event(bp, "read_sync_res", res);
 }

-on "
+on "write", %{
   <% raise "No pagers given in options for vm" unless @options[:pagers] %>

-
-
-  int_event(bp, "read_res", {key: params.key, value: cres});
-}
+  //We are going to fix the _hash on the page
+  vm_rehash_page(params.page);

-  <% @
-
-  <%= p[:name] %>_read(params.ns, bp, params.key);
-  }
+  <% if @debug %>
+  vm_write_list.push(params.page);
   <% end %>
-}
-
-on "write", %{
-  <% raise "No pagers given in options for vm" unless @options[:pagers] %>

   <% @options[:pagers].each do |p| %>
     if (params.ns === "<%= p[:namespace] %>") {
-      <%= p[:name] %>_write(params.
+      <%= p[:name] %>_write(params.page);
     }
   <% end %>
 }
@@ -104,6 +185,9 @@ service :vm do
 on "watch", %{
   <% raise "No pagers given in options for vm" unless @options[:pagers] %>

+  //Cache entry
+  var cache_entry = vm_cache[params.ns][params.id];
+
   //Ensure map exists
   ////////////////////////////////////////////////
   var a = vm_notify_map[params.ns];
@@ -112,18 +196,57 @@ service :vm do
     vm_notify_map[params.ns] = a;
   }

-  var b = a[params.
+  var b = a[params.id];
   if (!b) {
     b = [];
-    a[params.
+    a[params.id] = b;
   }

+  <% if @debug %>
+  var midx = vm_notify_map[params.ns][params.id].indexOf(bp)
+  if (midx != -1) {
+    throw "Multiple calls to watch for the ns: " + params.ns + " and id: " + params.id
+  }
+  <% end %>
   b.push(bp)
   ////////////////////////////////////////////////

+  //Add to vm_bp_to_nmap
+  ////////////////////////////////////////////////
+  //Construct
+  if (vm_bp_to_nmap[bp] === undefined) { vm_bp_to_nmap[bp] = {}; }
+  if (vm_bp_to_nmap[bp][params.ns] === undefined) { vm_bp_to_nmap[bp][params.ns] = {}; }
+
+  //Add reverse mapping, length-1 because it was just pushed
+  vm_bp_to_nmap[bp][params.ns][params.id] = [b, b.length-1];
+
+  //If cache exists, then signal controller *now* while we wait for the pager
+  if (cache_entry) {
+    int_event(bp, "read_res", cache_entry);
+  }
+
+  //Send a request now for disk read for sync
+  if (!cache_entry && params.sync) {
+    SEND("main", "if_per_get", "vm", params.ns, params.id);
+  }
+
+  //Do not signal pager if there is a watch request already in place
+  //as pager already knows; if it's equal to 1, this is the 'first'
+  //watch to go through as we have no info on it but just added it
+  if (vm_notify_map[params.ns][params.id].length > 1) { return; }
+
+  //While we're waiting for the pager try loading from disk, if this
+  //disk request is slower than the pager response, that's ok...
+  //the disk response will double check to see if the cache got set
+  //somewhere and not set it itself.
+  if (!cache_entry && !params.sync) {
+    SEND("disk", "if_per_get", "vm", params.ns, params.id);
+  }
+
+  //Now load the appropriate pager
   <% @options[:pagers].each do |p| %>
     if (params.ns === "<%= p[:namespace] %>") {
-      <%= p[:name] %>_watch(params.
+      <%= p[:name] %>_watch(params.id, cache_entry);
     }
   <% end %>
 }
@@ -131,11 +254,34 @@ service :vm do
 on "unwatch", %{
   <% raise "No pagers given in options for vm" unless @options[:pagers] %>

+  var midx = vm_notify_map[params.ns][params.id].indexOf(bp)
+  vm_notify_map[params.ns][params.id].splice(midx, 1);
+
+  delete vm_bp_to_nmap[bp][params.ns][params.id];
+
   <% @options[:pagers].each do |p| %>
     if (params.ns === "<%= p[:namespace] %>") {
-      <%= p[:name] %>_unwatch(params.
+      <%= p[:name] %>_unwatch(params.id);
     }
   <% end %>
 }

+on "unwatch", %{
+  <% raise "No pagers given in options for vm" unless @options[:pagers] %>
+
+  var midx = vm_notify_map[params.ns][params.id].indexOf(bp)
+  vm_notify_map[params.ns][params.id].splice(midx, 1);
+
+  delete vm_bp_to_nmap[bp][params.ns][params.id];
+
+  <% @options[:pagers].each do |p| %>
+    if (params.ns === "<%= p[:namespace] %>") {
+      <%= p[:name] %>_unwatch(params.id);
+    }
+  <% end %>
+}
+
+every 20.seconds, %{
+  vm_pageout();
+}
 end
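From vm_rehash_page and vm_cache_write above, a page carries _id, optional _head/_next links, entries whose _sig values feed a rolling crc32, and a derived _hash string; vm_cache_write marks changed pages dirty, and the every 20.seconds timer flushes them via vm_pageout. A sketch of that lifecycle with an illustrative page, where "user" is a hypothetical pager namespace, not one defined by flok itself:

    // Illustrative page shape implied by vm_rehash_page.
    var page = {
      _id: "page0",                        // required, folded into the hash
      _head: "entry0",                     // optional link, seeds the hash
      _next: "page1",                      // optional link
      entries: [{_sig: "sig0", value: "hello"}]
    };
    vm_rehash_page(page);                  // sets page._hash from the crc32 roll
    vm_cache_write("user", page);          // caches it and marks vm_dirty["user"]["page0"]
    // ...up to 20 seconds later the timer fires...
    vm_pageout();                          // SENDs if_per_set("user", "page0", page)
                                           // to the persist module, then clears vm_dirty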
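And a distillation of the decision order in the watch handler above; this is a sketch of the ordering, not the handler itself:

    // Order of effects when a controller watches (ns, id), per the handler above:
    function watch_flow(cache_entry, sync, watcher_count) {
      var actions = [];
      if (cache_entry) { actions.push("int_event read_res from cache"); }
      if (!cache_entry && sync) { actions.push('SEND("main", "if_per_get", ...)'); }
      if (watcher_count > 1) { return actions; }   // pager already knows this id
      if (!cache_entry && !sync) { actions.push('SEND("disk", "if_per_get", ...)'); }
      actions.push("pager _watch(id, cache_entry)");
      return actions;
    }
    // watch_flow(null, false, 1)  => disk read raced against the pager response;
    // watch_flow({...}, false, 2) => cache answer only, pager not re-signaled.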