flok 0.0.38 → 0.0.39
This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/app/drivers/chrome/src/dispatch.js +41 -6
- data/app/drivers/chrome/src/persist.js +1 -10
- data/app/kern/dispatch.js +17 -23
- data/app/kern/gen_id.js +8 -0
- data/app/kern/macro.rb +20 -18
- data/app/kern/pagers/pg_spec0.js +20 -0
- data/app/kern/services/vm.rb +176 -30
- data/docs/client_api.md +3 -1
- data/docs/compilation.md +1 -1
- data/docs/dispatch.md +91 -0
- data/docs/kernel_api.md +3 -2
- data/docs/messaging.md +6 -1
- data/docs/mod/persist.md +4 -3
- data/docs/project_layout.md +2 -2
- data/docs/services/vm.md +116 -41
- data/docs/services/vm/pagers.md +38 -46
- data/lib/flok.rb +1 -0
- data/lib/flok/build.rb +3 -4
- data/lib/flok/macro.rb +27 -0
- data/lib/flok/services_compiler.rb +12 -8
- data/lib/flok/user_compiler.rb +131 -4
- data/lib/flok/version.rb +1 -1
- data/spec/env/kern.rb +71 -0
- data/spec/etc/macro_spec.rb +3 -8
- data/spec/etc/service_compiler/service3.rb +27 -0
- data/spec/etc/services_compiler_spec.rb +35 -27
- data/spec/iface/driver/dispatch_spec.rb +20 -0
- data/spec/iface/driver/persist_spec.rb +9 -24
- data/spec/iface/kern/ping_spec.rb +3 -24
- data/spec/kern/assets/vm/config4.rb +12 -0
- data/spec/kern/assets/vm/controller10.rb +26 -0
- data/spec/kern/assets/vm/controller11.rb +33 -0
- data/spec/kern/assets/vm/controller12.rb +45 -0
- data/spec/kern/assets/vm/controller13.rb +40 -0
- data/spec/kern/assets/vm/controller14.rb +14 -0
- data/spec/kern/assets/vm/controller15.rb +15 -0
- data/spec/kern/assets/vm/controller16.rb +29 -0
- data/spec/kern/assets/vm/controller17.rb +30 -0
- data/spec/kern/assets/vm/controller18.rb +28 -0
- data/spec/kern/assets/vm/controller19.rb +14 -0
- data/spec/kern/assets/vm/controller19b.rb +15 -0
- data/spec/kern/assets/vm/controller20.rb +19 -0
- data/spec/kern/assets/vm/controller21.rb +40 -0
- data/spec/kern/assets/vm/controller7.rb +18 -0
- data/spec/kern/assets/vm/controller8.rb +38 -0
- data/spec/kern/assets/vm/controller8b.rb +18 -0
- data/spec/kern/assets/vm/controller9.rb +20 -0
- data/spec/kern/assets/vm/controller_exc_2watch.rb +15 -0
- data/spec/kern/assets/vm/controller_exc_ewatch.rb +14 -0
- data/spec/kern/assets/vm/macros/copy_page_c.rb +23 -0
- data/spec/kern/assets/vm/macros/entry_del_c.rb +18 -0
- data/spec/kern/assets/vm/macros/entry_insert_c.rb +21 -0
- data/spec/kern/assets/vm/macros/entry_mutable_c.rb +33 -0
- data/spec/kern/assets/vm/macros/new_page_c.rb +7 -0
- data/spec/kern/assets/vm/macros/new_page_c2.rb +7 -0
- data/spec/kern/assets/vm/macros/set_page_head_c.rb +18 -0
- data/spec/kern/assets/vm/macros/set_page_next_c.rb +18 -0
- data/spec/kern/controller_macro_spec.rb +186 -0
- data/spec/kern/dispatch_spec.rb +125 -0
- data/spec/kern/functions_spec.rb +15 -0
- data/spec/kern/vm_service_spec.rb +874 -173
- metadata +70 -5
- data/docs/scheduling.md +0 -46
- data/spec/kern/rest_service_spec.rb +0 -45
data/spec/kern/controller_macro_spec.rb
ADDED
@@ -0,0 +1,186 @@
+#This was created later than the controllers, so not all macros may be
+#tested in here. Additionally, some macros may be harder to test, so
+#this contains mostly non-side-effect (functionalish) macros that do not
+#make other function calls. e.g. vm page macros
+
+Dir.chdir File.join File.dirname(__FILE__), '../../'
+require './spec/env/kern.rb'
+require './spec/lib/helpers.rb'
+require './spec/lib/io_extensions.rb'
+require './spec/lib/rspec_extensions.rb'
+
+RSpec.describe "kern:controller_macro_spec" do
+  include_context "kern"
+
+  it "Can use the NewPage macro" do
+    #Compile the controller
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/macros/new_page_c.rb')
+    ctx.eval %{
+      base = _embed("controller", 0, {}, null);
+      int_dispatch([]);
+    }
+
+    #Check the page variable
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"))
+    expect(page).to eq({
+      "_head" => nil,
+      "_next" => nil,
+      "entries" => [],
+      "_id" => nil
+    })
+  end
+
+  it "Can use the NewPage macro with id" do
+    #Compile the controller
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/macros/new_page_c2.rb')
+    ctx.eval %{
+      base = _embed("controller", 0, {}, null);
+      int_dispatch([]);
+    }
+
+    #Check the page variable
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"))
+    expect(page).to eq({
+      "_head" => nil,
+      "_next" => nil,
+      "entries" => [],
+      "_id" => "test"
+    })
+  end
+
+  it "Can use the CopyPage macro" do
+    #Compile the controller
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/macros/copy_page_c.rb')
+    ctx.eval %{
+      base = _embed("controller", 0, {}, null);
+      int_dispatch([]);
+    }
+
+    #not_copied page is just a reference to original_page, checked for sanity
+    original_page = JSON.parse(ctx.eval("JSON.stringify(original_page)"))
+    not_copied_page = JSON.parse(ctx.eval("JSON.stringify(not_copied_page)"))
+    copied_page = JSON.parse(ctx.eval("JSON.stringify(copied_page)"))
+
+    #What the copied page should look like after a copy
+    copied_should_look_like = JSON.parse(original_page.to_json)
+    copied_should_look_like["_next"] = "test" #Set in controller
+    copied_should_look_like.delete "_hash"
+
+    expect(not_copied_page).to eq(original_page)
+    expect(copied_page).to eq(copied_should_look_like)
+  end
+
+  it "Can use the EntryDel macro" do
+    #Compile the controller
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/macros/entry_del_c.rb')
+    ctx.eval %{
+      base = _embed("controller", 0, {}, null);
+      int_dispatch([]);
+    }
+
+    #not_copied page is just a reference to original_page, checked for sanity
+    original_page = JSON.parse(ctx.eval("JSON.stringify(original_page)"))
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"))
+
+    expect(page).to eq({
+      "_head" => "head",
+      "_id" => "id",
+      "_next" => "next",
+      "entries" => []
+    })
+
+    expect(original_page).to eq({
+      "_head" => "head",
+      "_id" => "id",
+      "_next" => "next",
+      "entries" => [{
+        "_id" => "id", "_sig" => "sig"
+      }],
+      "_hash" => "hash"
+    })
+  end
+
+  it "Can use the EntryInsert macro" do
+    #Compile the controller
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/macros/entry_insert_c.rb')
+    ctx.eval %{
+      base = _embed("controller", 0, {}, null);
+      int_dispatch([]);
+    }
+
+    #not_copied page is just a reference to original_page, checked for sanity
+    original_page = JSON.parse(ctx.eval("JSON.stringify(original_page)"))
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"))
+
+    expect(page["entries"].count).to eq(3)
+    expect(page["entries"][0]["hello"]).to eq("world")
+    expect(page["entries"][2]["hello"]).to eq("world2")
+
+    expect(page["entries"][0]["_sig"]).not_to eq(nil)
+    expect(page["entries"][0]["_sig"]).not_to eq(page["entries"][1]["_sig"])
+    expect(page["entries"][0]["_id"]).not_to eq(nil)
+    expect(page["entries"][2]["_id"]).not_to eq(page["entries"][0]["_id"])
+    expect(page["entries"][2]["_id"]).not_to eq(page["entries"][1]["_id"])
+  end
+
+  it "Can use the EntryMutable macro" do
+    #Compile the controller
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/macros/entry_mutable_c.rb')
+    ctx.eval %{
+      base = _embed("controller", 0, {}, null);
+      int_dispatch([]);
+    }
+
+    #not_copied page is just a reference to original_page, checked for sanity
+    original_page = JSON.parse(ctx.eval("JSON.stringify(original_page)"))
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"))
+
+    entries = page["entries"]
+    original_entries = original_page["entries"]
+    expect(entries[0]["hello"]).to eq("world")
+    expect(entries[0]["foo"]).to eq(nil)
+    expect(entries[2]["hello"]).to eq("world")
+    expect(entries[2]["foo"]).to eq(nil)
+
+    #Shared
+    expect(entries[1]["foo"]).to eq("bar")
+    expect(entries[1]["hello"]).to eq(nil)
+
+    #This should remain unchanged
+    expect(original_page["entries"]).to eq([
+      {"_id" => "id", "_sig" => "sig"},
+      {"_id" => "id2", "_sig" => "sig2", "foo" => "bar"},
+      {"_id" => "id3", "_sig" => "sig3"},
+    ])
+  end
+
+  it "Can use the SetPageNext macro" do
+    #Compile the controller
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/macros/set_page_next_c.rb')
+    ctx.eval %{
+      base = _embed("controller", 0, {}, null);
+      int_dispatch([]);
+    }
+
+    #not_copied page is just a reference to original_page, checked for sanity
+    original_page = JSON.parse(ctx.eval("JSON.stringify(original_page)"))
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"))
+
+    expect(page["_next"]).to eq("test")
+  end
+
+  it "Can use the SetPageHead macro" do
+    #Compile the controller
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/macros/set_page_head_c.rb')
+    ctx.eval %{
+      base = _embed("controller", 0, {}, null);
+      int_dispatch([]);
+    }
+
+    #not_copied page is just a reference to original_page, checked for sanity
+    original_page = JSON.parse(ctx.eval("JSON.stringify(original_page)"))
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"))
+
+    expect(page["_head"]).to eq("test")
+  end
+end
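
Taken together, the macro specs above fix a small contract: NewPage produces an empty page (optionally with a preset _id), and CopyPage deep-copies a page, takes a new _next, and drops the now-stale _hash. A minimal Ruby sketch of that contract follows; it is an illustration derived from the expectations above, with hypothetical helper names, not the macro expansion shipped in data/app/kern/macro.rb.

def new_page(id = nil)
  #NewPage: blank page; _id may be preset (new_page_c2 expects "test")
  {"_head" => nil, "_next" => nil, "entries" => [], "_id" => id}
end

def copy_page(page, next_id)
  copy = Marshal.load(Marshal.dump(page)) #deep copy so entries detach
  copy["_next"] = next_id                 #the CopyPage spec sets "test"
  copy.delete "_hash"                     #hash is stale after any change
  copy
end
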
data/spec/kern/dispatch_spec.rb
ADDED
@@ -0,0 +1,125 @@
+Dir.chdir File.join File.dirname(__FILE__), '../../'
+require './spec/env/kern.rb'
+require './spec/lib/helpers.rb'
+require './spec/lib/io_extensions.rb'
+require './spec/lib/rspec_extensions.rb'
+
+RSpec.describe "kern:dispatch_spec" do
+  #Max number of items to be queued in queues other than main
+  #per dispatch (as per specs)
+  MAX_Q = 5
+
+  include_context "kern"
+
+  it "Can call spec_dispatch" do
+    #Compile the controller
+    ctx = flok_new_user File.read('./spec/kern/assets/blank.rb')
+
+    #Register callout
+    ctx.eval %{
+      spec_dispatch_q(main_q, 2);
+    }
+
+    main_q = ctx.dump "main_q"
+    expect(main_q).to eq [[0, "spec"], [0, "spec"]]
+  end
+
+  it "Does disptach an unlimited number of items from the main queue" do
+    #Compile the controller
+    ctx = flok_new_user File.read('./spec/kern/assets/blank.rb')
+
+    #Register callout
+    ctx.eval %{
+      spec_dispatch_q(main_q, 10);
+    }
+
+    ctx.eval("int_dispatch([])")
+    q = @driver.dump_q
+
+    expect(q).to eq([[0, [0, "spec"]*10].flatten])
+  end
+
+  queues = [
+    "main",
+    "net",
+    "disk",
+    "cpu",
+    "gpu"
+  ]
+
+  queues.each_with_index do |qname, qindex|
+    #Don't do main queue
+    next if qname == "main"
+
+    it "Does not disptach an unlimited number of items from the #{qname} queue" do
+      #Compile the controller
+      ctx = flok_new_user File.read('./spec/kern/assets/blank.rb')
+
+      #Register callout
+      ctx.eval %{
+        spec_dispatch_q(#{qname}_q, #{MAX_Q+1});
+      }
+
+      #Get partial queue, should have 'i' because we want more things than the max
+      ctx.eval("int_dispatch([])")
+      q = @driver.dump_q
+      expect(q).to eq(['i', [qindex, [0, "spec"]*MAX_Q].flatten])
+    end
+
+    it "Does dispatch the rest of the items after the first two incomplete disptaches" do
+      #Compile the controller
+      ctx = flok_new_user File.read('./spec/kern/assets/blank.rb')
+
+      #Register callout
+      ctx.eval %{
+        spec_dispatch_q(#{qname}_q, #{MAX_Q*3});
+      }
+
+      #Get partial queue, should have 'i' because we want more things than the max
+      ctx.eval("int_dispatch([])")
+      q = @driver.dump_q
+      expect(q).to eq(['i', [qindex, [0, "spec"]*MAX_Q].flatten])
+
+      #Get partial queue, should have 'i' because we want more things than the max
+      ctx.eval("int_dispatch([])")
+      q = @driver.dump_q
+      expect(q).to eq(['i', [qindex, [0, "spec"]*MAX_Q].flatten])
+
+      #Last piece, should not have an 'i'
+      ctx.eval("int_dispatch([])")
+      q = @driver.dump_q
+      expect(q).to eq([[qindex, [0, "spec"]*MAX_Q].flatten])
+    end
+
+
+    it "Does disptach at MAX_Q number of items from the #{qname} queue" do
+      #Compile the controller
+      ctx = flok_new_user File.read('./spec/kern/assets/blank.rb')
+
+      #Register callout
+      ctx.eval %{
+        spec_dispatch_q(#{qname}_q, #{MAX_Q});
+      }
+
+      ctx.eval("int_dispatch([])")
+      q = @driver.dump_q
+
+      expect(q).to eq([[qindex, [0, "spec"]*MAX_Q].flatten])
+    end
+
+    it "Does disptach all at (MAX_Q-1) number of items from the #{qname} queue" do
+      #Compile the controller
+      ctx = flok_new_user File.read('./spec/kern/assets/blank.rb')
+
+      #Register callout
+      ctx.eval %{
+        spec_dispatch_q(#{qname}_q, #{MAX_Q}-1);
+      }
+
+      ctx.eval("int_dispatch([])")
+      q = @driver.dump_q
+
+      expect(q).to eq([[qindex, [0, "spec"]*(MAX_Q-1)].flatten])
+    end
+  end
+end
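
The dispatch specs above pin down the drain policy: the main queue empties completely on each int_dispatch, every other queue yields at most MAX_Q items per pass, and a leading 'i' marks an incomplete drain. Below is a Ruby sketch of that policy; the function name, queue layout, and item shape are assumptions for illustration, not the actual logic in data/app/kern/dispatch.js.

MAX_Q = 5

def drain_queues(queues)
  out = []
  queues.each_with_index do |q, qindex|
    next if q.empty?
    #Queue 0 (main) drains fully; the rest yield at most MAX_Q items
    #per pass and keep the remainder for the next int_dispatch
    take = qindex.zero? ? q.length : [q.length, MAX_Q].min
    #Items are assumed to be [type, payload] pairs, flattened behind
    #the queue index as in the expectations above
    out << [qindex, *q.shift(take).flatten]
  end
  #'i' signals the driver that items remain queued
  out.unshift("i") if queues.drop(1).any? { |q| !q.empty? }
  out
end
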
data/spec/kern/functions_spec.rb
CHANGED
@@ -26,4 +26,19 @@ RSpec.describe "kern:functions_spec" do
     expect(res).not_to eq(res2)
   end
 
+  it "can use gen_id" do
+    ctx = flok_new_user File.read('./spec/kern/assets/controller0.rb')
+
+    #Run the check
+    res = ctx.eval("gen_id()")
+    res2 = ctx.eval("gen_id()")
+    reset_for_ctx
+
+    ctx2 = flok_new_user File.read('./spec/kern/assets/controller0.rb')
+
+    res3 = ctx2.eval("gen_id()")
+    expect(res.class).to eq(String)
+    expect(res).not_to eq(res2)
+    expect(res3).not_to eq(res)
+  end
 end
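
The gen_id spec above only requires string ids that never repeat, within one context or across a fresh one. A minimal Ruby sketch meeting those constraints is shown below; it is hypothetical (the shipped implementation is data/app/kern/gen_id.js and may differ), and cross-context uniqueness here is probabilistic via a random per-boot prefix.

require 'securerandom'

#Per-boot random prefix plus a monotonic counter: unique within a run,
#and overwhelmingly unlikely to collide across fresh contexts
GEN_ID_PREFIX = SecureRandom.hex(4)
$gen_id_counter = 0

def gen_id
  $gen_id_counter += 1
  "#{GEN_ID_PREFIX}-#{$gen_id_counter}"
end
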
data/spec/kern/vm_service_spec.rb
CHANGED
@@ -5,191 +5,892 @@ require './spec/env/kern.rb'
 require './spec/lib/helpers.rb'
 require './spec/lib/io_extensions.rb'
 require './spec/lib/rspec_extensions.rb'
+require 'zlib'
 
 RSpec.describe "kern:vm_service" do
+  include Zlib
   include_context "kern"
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  #
-
-
-
-
-
-
-
-
-
-
-  ##Verify that the read did return from the spec0 pager
-  #res = JSON.parse(ctx.eval("JSON.stringify(read_res_params)"))
-  #expect(res).to eq({
-  #"key" => 33,
-  #"value" => 22
-  #})
-  #end
-
-  # it "Write then read does will hit the pager for the read, a write is not guaranteed to be 1 to 1 but a read is, additionally, the reads only went through once" do
-  ##Compile the controller
-  #ctx = flok_new_user File.read('./spec/kern/assets/vm/controller4.rb'), File.read("./spec/kern/assets/vm/config1.rb")
-
-  ##Run the embed function
-  #ctx.eval %{
-  #//Call embed on main root view
-  #base = _embed("my_controller", 0, {}, null);
-
-  #//Drain queue
-  #int_dispatch([]);
-  #}
-
-  #expect(ctx.eval("spec0_read_count")).to eq(1)
-  #end
-
-  #it "Can read through and then send another read_res for a change on the page" do
-  ##Compile the controller
-  #ctx = flok_new_user File.read('./spec/kern/assets/vm/controller5.rb'), File.read("./spec/kern/assets/vm/config2.rb")
-
-  ##Run the embed function
-  #ctx.eval %{
-  #//Call embed on main root view
-  #base = _embed("my_controller", 0, {}, null);
-
-  #//Drain queue
-  #int_dispatch([]);
-  #}
-
-  ##read_res from spec is called multiple times and returns an array of the parms
-  #res = JSON.parse(ctx.eval("JSON.stringify(read_res_called_with)"))
-
-  ##Expect 2 responses, first is cache miss, second is cache hit, third is cache updated
-  #expect(res).to eq [
-  #{"key" => "my_key", "value" => "a"},
-  #{"key" => "my_key", "value" => "a"},
-  #{"key" => "my_key", "value" => "b"}
-  #]
-  #end
-
-  it "Can watch a key and then be sent a read_res whenever that key changes" do
-    #Compile the controller
-    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller6.rb'), File.read("./spec/kern/assets/vm/config3.rb")
-
-    #Run the embed function
-    ctx.eval %{
-      //Call embed on main root view
+  it "vm_rehash_page can calculate the hash correctly" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller0.rb'), File.read("./spec/kern/assets/vm/config3.rb")
+
+    #Run the check
+    res = ctx.eval %{
+      //Manually construct a page
+      var page = {
+        _head: null,
+        _next: null,
+        _id: "hello",
+        entries: [
+          {_id: "hello2", _sig: "nohteunth"},
+        ]
+      }
+
+      vm_rehash_page(page);
+    }
+
+    #Calculate hash ourselves
+    hash = crc32("hello")
+    hash = crc32("nohteunth", hash)
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"))
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"))
+
+    #Expect the same hash
+    expect(page).to eq({
+      "_head" => nil,
+      "_next" => nil,
+      "_id" => "hello",
+      "entries" => [
+        {"_id" => "hello2", "_sig" => "nohteunth"}
+      ],
+      "_hash" => hash.to_s
+    })
+  end
+
+  it "vm_rehash_page can calculate the hash correctly with head and next" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller0.rb'), File.read("./spec/kern/assets/vm/config3.rb")
+
+    #Run the check
+    res = ctx.eval %{
+      //Manually construct a page
+      var page = {
+        _head: "a",
+        _next: "b",
+        _id: "hello",
+        entries: [
+          {_id: "hello2", _sig: "nohteunth"},
+        ]
+      }
+
+      vm_rehash_page(page);
+    }
+
+    #Calculate hash ourselves
+    hash = crc32("a")
+    hash = crc32("b", hash)
+    hash = crc32("hello", hash)
+    hash = crc32("nohteunth", hash)
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"))
+
+    #Expect the same hash
+    expect(page).to eq({
+      "_head" => "a",
+      "_next" => "b",
+      "_id" => "hello",
+      "entries" => [
+        {"_id" => "hello2", "_sig" => "nohteunth"}
+      ],
+      "_hash" => hash.to_s
+    })
+  end
+
+  it "Can call vm_cache_write and save it to vm_cache[ns][id]" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller0.rb'), File.read("./spec/kern/assets/vm/config3.rb")
+
+    #Run the check
+    res = ctx.eval %{
+      //Manually construct a page
+      page = {
+        _head: "a",
+        _next: "b",
+        _id: "hello",
+        entries: [
+          {_id: "hello2", _sig: "nohteunth"},
+        ]
+      }
+
+      vm_rehash_page(page);
+
+      //Save page
+      vm_cache_write("user", page);
+    }
+
+    vm_cache = JSON.parse(ctx.eval("JSON.stringify(vm_cache)"))
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"));
+
+    #Expect the same hash
+    expect(vm_cache).to eq({
+      "user" => {
+        page["_id"] => page
+      }
+    })
+  end
+
+  it "Can create a copy of pg_spec0 and receive the correct things in it's initialization" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller0.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+    ctx.eval %{
       base = _embed("my_controller", 0, {}, null);
 
       //Drain queue
       int_dispatch([]);
     }
 
-
-
+    pg_spec0_init_params = JSON.parse(ctx.eval("JSON.stringify(pg_spec0_init_params)"))
+
+    #Expect options and ns to match in config4
+    expect(pg_spec0_init_params).to eq({
+      "ns" => "spec",
+      "options" => {"hello" => "world"}
+    })
+  end
+
+  it "Does call pagers watch function with a undefined page when no page exists in cache" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller7.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 0, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    #We are watching a page that should have been stored in cache at this point
+    pg_spec0_watchlist = JSON.parse(ctx.eval("JSON.stringify(pg_spec0_watchlist)"))
+
+    #Expect options and ns to match in config4
+    expect(pg_spec0_watchlist).to eq([{
+      "id" => "my_key"
+    }])
+  end
+
+  it "Does call pagers watch function with a page when the page requested for a watch is stored in cache" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller7.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    #We are going to manually store a page in cache as this page should be retrieved
+    #for the watch attempt
+    res = ctx.eval %{
+      //Manually construct a page as we are going to test the watch function
+      //which receives a call to watch with the hash of this page so the
+      //watch function can tell if the page has changed (e.g. if you are connecting)
+      //to a remote server
+      page = {
+        _head: "a",
+        _next: "b",
+        _id: "my_key",
+        entries: [
+          {_id: "hello2", _sig: "nohteunth"},
+        ]
+      }
+
+      vm_rehash_page(page);
+
+      //Save page for the spec pager
+      vm_cache_write("spec", page);
+    }
+
+    #This hash was calculated during vm_rehash_page
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"))
+
+    ctx.eval %{
+      base = _embed("my_controller", 0, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    #We are watching a page that should have been stored in cache at this point
+    pg_spec0_watchlist = JSON.parse(ctx.eval("JSON.stringify(pg_spec0_watchlist)"))
+
+    #Expect options and ns to match in config4
+    expect(pg_spec0_watchlist).to eq([{
+      "id" => "my_key",
+      "page" => page
+    }])
+  end
+
+  it "throws an exception if multiple watches are attempted" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller_exc_2watch.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    expect {
+      ctx.eval %{
+        base = _embed("my_controller", 1, {}, null);
+
+        //Drain queue
+        int_dispatch([]);
+      }
+    }.to raise_exception
+  end
+
+  it "throws an exception if unwatch is called before watch" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller_exc_ewatch.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    expect {
+      ctx.eval %{
+        base = _embed("my_controller", 1, {}, null);
+
+        //Drain queue
+        int_dispatch([]);
+      }
+    }.to raise_exception
+  end
+
+  it "multiple sequential watch requests from two controllers for a namespace do not hit the pager multiple times" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller8.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+      base2 = _embed("my_controller", base+2, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    #We are watching a page that should have been stored in cache at this point
+    pg_spec0_watchlist = JSON.parse(ctx.eval("JSON.stringify(pg_spec0_watchlist)"))
+
+    #Expect options and ns to match in config4
+    expect(pg_spec0_watchlist).to eq([{
+      "id" => "my_key"
+    }])
+  end
+
+  it "unwatch request to pager does call the pagers unwatch function" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller9.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    #We are watching a page that should have been stored in cache at this point
+    pg_spec0_unwatchlist = JSON.parse(ctx.eval("JSON.stringify(pg_spec0_unwatchlist)"))
+
+    expect(pg_spec0_unwatchlist).to eq(["my_key"])
+  end
+
+  it "watch unwatch and watch request for a namespace does hit the pager multiple times" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller9.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    #We are watching a page that should have been stored in cache at this point
+    pg_spec0_watchlist = JSON.parse(ctx.eval("JSON.stringify(pg_spec0_watchlist)"))
+
+    #Expect options and ns to match in config4
+    expect(pg_spec0_watchlist).to eq([{
+      "id" => "my_key"
+    }, {
+      "id" => "my_key"
+    }])
+  end
+
+  it "sends write requests to the pager" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller10.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    #Expect the page to be written to cache
+    vm_cache = JSON.parse(ctx.eval("JSON.stringify(vm_cache)"));
+    vm_write_list = JSON.parse(ctx.eval("JSON.stringify(vm_write_list[0])"));
+    expect(vm_cache["spec"]["test"]).to eq(vm_write_list)
+  end
+
+  it "sends watch callback to controller when cache is written to via read_res" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller11.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    read_res_params = JSON.parse(ctx.eval("JSON.stringify(read_res_params)"))
+    vm_write_list = JSON.parse(ctx.eval("JSON.stringify(vm_write_list[0])"));
+    expect(read_res_params).to eq(vm_write_list)
+  end
+
+  it "does send two watch callbacks to a controller if there is cached content" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller12.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    read_res_params = JSON.parse(ctx.eval("JSON.stringify(read_res_params)"))
+    vm_write_list = JSON.parse(ctx.eval("JSON.stringify(vm_write_list)"));
+    expect(read_res_params).to eq(vm_write_list)
+  end
+
+  it "vm_cache_write does not tell controllers an update has occurred if the page requested to cache was already cached" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller13.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    read_res_params = JSON.parse(ctx.eval("JSON.stringify(read_res_params)"))
+    vm_write_list = JSON.parse(ctx.eval("JSON.stringify(vm_write_list)"));
+    expect(read_res_params).to eq([vm_write_list[0]])
+  end
+
+  it "updates vm_notify_map when a watch takes place" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller14.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    base = ctx.eval("base")
+    vm_notify_map = JSON.parse(ctx.eval("JSON.stringify(vm_notify_map)"));
+    expect(vm_notify_map).to eq({
+      "spec" => {
+        "test" => [base]
+      }
+    })
+  end
+
+  it "updates vm_bp_to_nmap when a watch takes place" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller14.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    base = ctx.eval("base")
+    vm_bp_to_nmap = JSON.parse(ctx.eval("JSON.stringify(vm_bp_to_nmap)"));
+    expect(vm_bp_to_nmap).to eq({
+      base.to_s => {
+        "spec" => {
+          "test" => [[3], 0]
+        }
+      }
+    })
+
+    #Removing the element from the given pointer in vm_bp_to_nmap to the array will also alter vm_notify_map's array
+    #if it is a reference
+    ctx.eval %{
+      //Grab the array that contains [node, index] where node is a reference to an array of vm_notify_map[ns][key]
+      var e = vm_bp_to_nmap[base]["spec"]["test"];
+      var node = e[0];
+      var index = e[1];
+
+      //Remove an element from the node
+      node.splice(index, 1);
+    }
+
+    vm_notify_map_after = JSON.parse(ctx.eval("JSON.stringify(vm_notify_map)"))
+    expect(vm_notify_map_after).to eq({
+      "spec" => {
+        "test" => []
+      }
+    })
+  end
 
-
-
+  it "updates vm_notify_map when an unwatch takes place" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller15.rb'), File.read("./spec/kern/assets/vm/config4.rb")
 
-
-
-
-
-
-
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    base = ctx.eval("base")
+    vm_notify_map = JSON.parse(ctx.eval("JSON.stringify(vm_notify_map)"));
+    expect(vm_notify_map).to eq({
+      "spec" => {
+        "test" => []
+      }
+    })
   end
 
+  it "updates vm_bp_to_nmap when an unwatch takes place" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller15.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    base = ctx.eval("base")
+    vm_bp_to_nmap = JSON.parse(ctx.eval("JSON.stringify(vm_bp_to_nmap)"));
+    expect(vm_bp_to_nmap).to eq({
+      base.to_s => {
+        "spec" => {}
+      }
+    })
+  end
+
+  it "Erases entries in vm_bp_to_nmap and vm_notify_map for a controller that disconnects" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller16.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Drain queue
+      int_dispatch([3, "int_event", base, "next", {}]);
+    }
+
+    #vm_bp_To_nmap should be blank
+    base = ctx.eval("base")
+    vm_bp_to_nmap = JSON.parse(ctx.eval("JSON.stringify(vm_bp_to_nmap)"));
+    expect(vm_bp_to_nmap).to eq({})
+
+    #vm_notify_map should not contain the entries for the base address anymore
+    base = ctx.eval("base")
+    vm_notify_map = JSON.parse(ctx.eval("JSON.stringify(vm_notify_map)"));
+    expect(vm_notify_map).to eq({
+      "spec" => {
+        "test" => []
+      }
+    })
+  end
+
+  it "Erases entries in vm_bp_to_nmap and vm_notify_map for a controller that disconnects with two controllers maintaining correct" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller16.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Drain queue
+      int_dispatch([3, "int_event", base, "next", {}]);
+    }
+
+    #vm_bp_To_nmap should be blank
+    base = ctx.eval("base")
+    vm_bp_to_nmap = JSON.parse(ctx.eval("JSON.stringify(vm_bp_to_nmap)"));
+    expect(vm_bp_to_nmap).to eq({})
+
+    #vm_notify_map should not contain the entries for the base address anymore
+    base = ctx.eval("base")
+    vm_notify_map = JSON.parse(ctx.eval("JSON.stringify(vm_notify_map)"));
+    expect(vm_notify_map).to eq({
+      "spec" => {
+        "test" => []
+      }
+    })
+  end
+
+  it "Stores dirty pages written via vm_cache_write in vm_dirty" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller0.rb'), File.read("./spec/kern/assets/vm/config3.rb")
+
+    #Run the check
+    res = ctx.eval %{
+      //Manually construct a page
+      page = {
+        _head: "a",
+        _next: "b",
+        _id: "hello",
+        entries: [
+          {_id: "hello2", _sig: "nohteunth"},
+        ]
+      }
+
+      vm_rehash_page(page);
+
+      //Save page
+      vm_cache_write("user", page);
+    }
+
+    vm_dirty = JSON.parse(ctx.eval("JSON.stringify(vm_dirty)"))
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"));
+
+    #Expect the same hash
+    expect(vm_dirty).to eq({
+      "user" => {
+        page["_id"] => page
+      }
+    })
+  end
+
+  it "Tries to write to disk when the pageout runs" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller18.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Call pageout *now*
+      vm_pageout();
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"))
+
+    @driver.ignore_up_to "if_per_set", 0
+    @driver.mexpect("if_per_set", ["spec", page["_id"], page])
+  end
+
+  it "Does send a read request from disk cache when watching a key for the first time" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller19.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Call pageout *now*
+      vm_pageout();
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    @driver.ignore_up_to "if_per_get", 2
+    @driver.mexpect("if_per_get", ["vm", "spec", "test"], 2)
+  end
+
+  it "Does send a sync read request from disk cache when watching a key for the first time with sync: true" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller19b.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Call pageout *now*
+      vm_pageout();
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    @driver.ignore_up_to "if_per_get", 0
+    @driver.mexpect("if_per_get", ["vm", "spec", "test"], 0)
+  end
+
+  it "Only sends one disk read request when multiple non-sync watches are attempted" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller8.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+      base2 = _embed("my_controller", base+2, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    @driver.ignore_up_to "if_per_get", 2
+    @driver.get "if_per_get", 2
+
+    #There should not be another request for the drive
+    expect {
+      @driver.ignore_up_to "if_per_get"
+    }.to raise_exception
+  end
+
+  it "Only sends one disk read request when multiple watches are attempted, and the first watch is sync: true" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller8.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller_sync", 1, {}, null);
+      base2 = _embed("my_controller", base+2, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    @driver.ignore_up_to "if_per_get", 0
+    @driver.get "if_per_get", 0
+
+    #There should not be another request for the drive
+    expect {
+      @driver.ignore_up_to "if_per_get"
+    }.to raise_exception
+  end
+
+  it "Sends two disk read request when multiple watches are attempted, and the second watch is sync: true but the disk does not read back before it is requested" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller8.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+      base2 = _embed("my_controller_sync", base+2, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    #The inner controller's on_entry is called before, so it's in reverse order
+    @driver.ignore_up_to "if_per_get", 0
+    @driver.get "if_per_get", 0
+    @driver.ignore_up_to "if_per_get", 2
+  end
+
+  it "Sends one disk read request when multiple watches are attempted, and the second watch is sync: true and the disk *does* read back before it is requested" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller8.rb'), File.read("./spec/kern/assets/vm/config4.rb");
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    page0 = JSON.parse(ctx.eval("JSON.stringify(page0)"))
+    @driver.int "int_per_get_res", ["vm", "spec", page0]
+
+    ctx.eval %{
+      base2 = _embed("my_controller_sync", base+2, {}, null);
+    }
+
+    #The inner controller's on_entry is called before, so it's in reverse order
+    @driver.ignore_up_to "if_per_get", 2
+    @driver.get "if_per_get", 2
+
+    #There should not be another request for the drive
+    expect {
+      @driver.ignore_up_to "if_per_get"
+    }.to raise_exception
+  end
+
+  it "Only sends one disk read request when multiple sync watches are attempted" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller8.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller_sync", 1, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    @driver.ignore_up_to "if_per_get", 0
+    @driver.get "if_per_get", 0
+
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"))
+    @driver.int "int_per_get_res", ["vm", "spec", page]
+
+    ctx.eval %{
+      base2 = _embed("my_controller_sync", base+2, {}, null);
+    }
+
+    #There should not be another request for the drive
+    expect {
+      @driver.ignore_up_to "if_per_get"
+    }.to raise_exception
+  end
+
+
+  it "Clears the dirty page when pageout runs" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller18.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Drain queue
+      int_dispatch([3, "int_event", base, "next", {}]);
+    }
+
+    ctx.eval("vm_pageout()");
+
+    vm_dirty = JSON.parse(ctx.eval("JSON.stringify(vm_dirty)"))
+    expect(vm_dirty).to eq({
+      "spec" => {}
+    })
+  end
+
+  it "Responds twice to watch with a missing cache but where the disk has a copy and then the pager responds" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller20.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Manually construct a page
+      page = {
+        _head: null,
+        _next: null,
+        _id: "hello",
+        entries: [
+          {_id: "hello2", _sig: "nohteunth"},
+        ]
+      }
+
+      //Manually construct another page that would normally be written
+      //by a 'pager' to the cache
+      page2 = {
+        _head: null,
+        _next: null,
+        _id: "hello",
+        entries: [
+          {_id: "hello2", _sig: "nohteunth"},
+          {_id: "hello3", _sig: "athoeuntz"}
+        ]
+      }
+
+      //Recalculate hashes
+      vm_rehash_page(page);
+      vm_rehash_page(page2);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    #Copies of JS pages in ruby dictionary format
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"))
+    page2 = JSON.parse(ctx.eval("JSON.stringify(page2)"))
+
+    #At this point, flok should have attempted to grab a page to fill
+    #the *now* blank cache. We are going to send it the first page.
+    @driver.ignore_up_to "if_per_get", 2
+    @driver.get "if_per_get", 2
+    @driver.int "int_per_get_res", ["vm", "spec", page]
+
+    #Now, we pretend that a pager has written to the cache because it has
+    #received data back
+    ctx.eval(%{vm_cache_write("spec", page2)})
+
+    res = JSON.parse(ctx.eval("JSON.stringify(read_res)"))
+    expect(res).to eq([
+      page, page2
+    ])
+  end
+
+  it "Responds once to watch with a missing cache but where the pager responds before the disk" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller20.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Manually construct a page
+      page = {
+        _head: null,
+        _next: null,
+        _id: "hello",
+        entries: [
+          {_id: "hello2", _sig: "nohteunth"},
+        ]
+      }
+
+      //Manually construct another page that would normally be written
+      //by a 'pager' to the cache
+      page2 = {
+        _head: null,
+        _next: null,
+        _id: "hello",
+        entries: [
+          {_id: "hello2", _sig: "nohteunth"},
+          {_id: "hello3", _sig: "athoeuntz"}
+        ]
+      }
+
+      //Recalculate hashes
+      vm_rehash_page(page);
+      vm_rehash_page(page2);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    #Copies of JS pages in ruby dictionary format
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"))
+    page2 = JSON.parse(ctx.eval("JSON.stringify(page2)"))
+
+    #At this point, flok should have attempted to grab a page to fill
+    #the *now* blank cache. We are going to send it the first page.
+    @driver.ignore_up_to "if_per_get", 2
+    @driver.get "if_per_get", 2
+
+    #Now, we pretend that a pager has written to the cache because it has
+    #received data back
+    ctx.eval(%{vm_cache_write("spec", page2)})
+
+    #And then we let the cache from disk reply, which should be ignored
+    #because the cache is already there from the pager
+    @driver.int "int_per_get_res", ["vm", "spec", page]
+
+    res = JSON.parse(ctx.eval("JSON.stringify(read_res)"))
+    expect(res).to eq([
+      page2
+    ])
+  end
+
+  it "Does within 21 seconds of a write on bootup, write to disk" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller18.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"))
+
+    (4*21).times do
+      @driver.int "int_timer", []
+    end
+
+    @driver.ignore_up_to "if_per_set", 0
+    @driver.mexpect("if_per_set", ["spec", page["_id"], page])
+  end
+
+  it "Does not attempt to write twice to disk after 41 seconds if there is no pending data to write" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller21.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"))
+
+    (4*41).times do
+      @driver.int "int_timer", []
+    end
+
+    @driver.ignore_up_to "if_per_set", 0
+    @driver.mexpect("if_per_set", ["spec", page["_id"], page])
+
+    expect {
+      @driver.ignore_up_to "if_per_set"
+    }.to raise_exception
+  end
+
+  it "Does attempt to write twice to disk after 41 seconds if there is pending data to write" do
+    ctx = flok_new_user File.read('./spec/kern/assets/vm/controller21.rb'), File.read("./spec/kern/assets/vm/config4.rb")
+
+    ctx.eval %{
+      base = _embed("my_controller", 1, {}, null);
+
+      //Drain queue
+      int_dispatch([]);
+    }
+
+    base = ctx.eval("base")
+    page = JSON.parse(ctx.eval("JSON.stringify(page)"))
+
+    (4*21).times do
+      @driver.int "int_timer", []
+    end
+
+    @driver.ignore_up_to "if_per_set", 0
+    @driver.mexpect("if_per_set", ["spec", page["_id"], page])
+
+    #Call next on controller which will write an new page
+    ctx.eval %{ int_dispatch([3, "int_event", base, "next", {}]); }
+
+    page2 = JSON.parse(ctx.eval("JSON.stringify(page2)"))
+    (4*21).times do
+      @driver.int "int_timer", []
+    end
+
+    @driver.ignore_up_to "if_per_set", 0
+    @driver.mexpect("if_per_set", ["spec", page2["_id"], page2])
+  end
 end
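
For reference, the vm_rehash_page tests near the top of this spec fix the hashing scheme: a CRC32 chained over _head and _next (when present), then _id, then each entry's _sig, stored back on the page as a string. The checking logic, restated in Ruby below, is a sketch that reproduces the spec's arithmetic, not the kernel's JavaScript implementation.

require 'zlib'

def vm_rehash_page(page)
  #Chain CRC32 over non-null head/next, the id, then every entry _sig;
  #the specs expect the result stringified into page["_hash"]
  hash = 0
  hash = Zlib.crc32(page["_head"], hash) if page["_head"]
  hash = Zlib.crc32(page["_next"], hash) if page["_next"]
  hash = Zlib.crc32(page["_id"], hash)
  page["entries"].each { |e| hash = Zlib.crc32(e["_sig"], hash) }
  page["_hash"] = hash.to_s
end
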