flok 0.0.40 → 0.0.41

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. checksums.yaml +4 -4
  2. data/app/drivers/chrome/src/dispatch.js +7 -2
  3. data/app/kern/dispatch.js +10 -1
  4. data/app/kern/mod/event.js +9 -0
  5. data/app/kern/pagers/pg_mem.js +21 -0
  6. data/app/kern/pagers/pg_net_sim.js +44 -0
  7. data/app/kern/pagers/pg_spec.js +23 -0
  8. data/app/kern/services/vm.rb +60 -64
  9. data/bin/flok +44 -23
  10. data/docs/callout.md +1 -1
  11. data/docs/client_api.md +5 -2
  12. data/docs/config_yml.md +41 -0
  13. data/docs/controllers.md +4 -0
  14. data/docs/datatypes.md +5 -2
  15. data/docs/debug_server.md +2 -0
  16. data/docs/dispatch.md +8 -3
  17. data/docs/known_issues.md +6 -0
  18. data/docs/mod/event.md +25 -20
  19. data/docs/mod/persist.md +1 -1
  20. data/docs/mod/speech.md +12 -0
  21. data/docs/project.md +2 -2
  22. data/docs/services/vm.md +46 -17
  23. data/docs/services/vm/pagers.md +22 -2
  24. data/lib/flok/build.rb +0 -4
  25. data/lib/flok/user_compiler.rb +123 -47
  26. data/lib/flok/user_compiler_templates/ctable.js.erb +39 -1
  27. data/lib/flok/version.rb +1 -1
  28. data/spec/env/global.rb +1 -0
  29. data/spec/env/kern.rb +5 -1
  30. data/spec/etc/cli_spec.rb +337 -322
  31. data/spec/etc/service_compiler/config0.rb +1 -1
  32. data/spec/etc/service_compiler/config0b.rb +1 -0
  33. data/spec/etc/services_compiler_spec.rb +29 -29
  34. data/spec/etc/user_compiler/controller0b.rb +9 -0
  35. data/spec/etc/user_compiler/controller0timer.rb +16 -0
  36. data/spec/etc/user_compiler_spec.rb +24 -1
  37. data/spec/iface/driver/dispatch_spec.rb +8 -2
  38. data/spec/iface/driver/persist_spec.rb +11 -0
  39. data/spec/kern/assets/controller0defer.rb +18 -0
  40. data/spec/kern/assets/controller0defer0.rb +13 -0
  41. data/spec/kern/assets/controller0defer2.rb +17 -0
  42. data/spec/kern/assets/global_on_entry.rb +8 -0
  43. data/spec/kern/assets/global_on_entry2.rb +16 -0
  44. data/spec/kern/assets/global_on_entry3.rb +17 -0
  45. data/spec/kern/assets/global_on_entry4.rb +12 -0
  46. data/spec/kern/assets/interval.rb +29 -0
  47. data/spec/kern/assets/interval2.rb +33 -0
  48. data/spec/kern/assets/interval3.rb +39 -0
  49. data/spec/kern/assets/service1.rb +4 -1
  50. data/spec/kern/assets/service_controller1.rb +11 -0
  51. data/spec/kern/assets/specimin/controller0.rb +74 -0
  52. data/spec/kern/assets/vm/config5.rb +20 -0
  53. data/spec/kern/assets/vm/config6.rb +20 -0
  54. data/spec/kern/assets/vm/controller10.rb +1 -1
  55. data/spec/kern/assets/vm/controller11.rb +1 -1
  56. data/spec/kern/assets/vm/controller12.rb +1 -1
  57. data/spec/kern/assets/vm/controller13.rb +1 -1
  58. data/spec/kern/assets/vm/controller16b.rb +28 -0
  59. data/spec/kern/assets/vm/controller18.rb +1 -1
  60. data/spec/kern/assets/vm/controller21.rb +1 -1
  61. data/spec/kern/assets/vm/controller22.rb +8 -0
  62. data/spec/kern/assets/vm/controller_exc_ewatch.rb +1 -0
  63. data/spec/kern/assets/vm/controller_exc_ewatch2.rb +30 -0
  64. data/spec/kern/assets/vm/controller_exc_ewatch3.rb +16 -0
  65. data/spec/kern/assets/vm/controller_exc_ewatch4.rb +16 -0
  66. data/spec/kern/assets/vm/macros/copy_page_c.rb +1 -0
  67. data/spec/kern/assets/vm/macros/copy_page_ch.rb +25 -0
  68. data/spec/kern/assets/vm/macros/entry_del_c.rb +1 -0
  69. data/spec/kern/assets/vm/macros/entry_del_ch.rb +20 -0
  70. data/spec/kern/assets/vm/macros/entry_insert_c.rb +1 -0
  71. data/spec/kern/assets/vm/macros/entry_insert_ch.rb +23 -0
  72. data/spec/kern/assets/vm/macros/entry_mutable_c.rb +8 -7
  73. data/spec/kern/assets/vm/macros/entry_mutable_ch.rb +34 -0
  74. data/spec/kern/assets/vm/macros/new_page_c.rb +1 -1
  75. data/spec/kern/assets/vm/macros/new_page_c2.rb +1 -1
  76. data/spec/kern/assets/vm/macros/new_page_ch.rb +7 -0
  77. data/spec/kern/assets/vm/pg_mem/config.rb +10 -0
  78. data/spec/kern/assets/vm/pg_mem/config1.rb +10 -0
  79. data/spec/kern/assets/vm/pg_mem/config2.rb +10 -0
  80. data/spec/kern/assets/vm/pg_mem/config3.rb +15 -0
  81. data/spec/kern/assets/vm/pg_mem/write.rb +23 -0
  82. data/spec/kern/assets/vm/pg_mem/write2.rb +38 -0
  83. data/spec/kern/assets/vm/pg_net_sim/config.rb +10 -0
  84. data/spec/kern/assets/vm/pg_net_sim/nothing.rb +12 -0
  85. data/spec/kern/assets/vm/pg_net_sim/pages.json +1 -0
  86. data/spec/kern/assets/vm/pg_net_sim/watch.rb +18 -0
  87. data/spec/kern/callout_spec.rb +1 -1
  88. data/spec/kern/controller_macro_spec.rb +153 -20
  89. data/spec/kern/controller_spec.rb +232 -1
  90. data/spec/kern/debug_ui_spec.rb +235 -235
  91. data/spec/kern/event_spec.rb +112 -0
  92. data/spec/kern/service_controller_spec.rb +14 -2
  93. data/spec/kern/vm_service_mem_pagers_spec.rb +117 -0
  94. data/spec/kern/vm_service_net_sim_pager_spec.rb +97 -0
  95. data/spec/kern/vm_service_spec.rb +304 -17
  96. data/spec/kern/vm_service_spec2.rb +39 -0
  97. metadata +88 -6
  98. data/app/kern/pagers/mem_pager.js +0 -2
  99. data/app/kern/pagers/pg_spec0.js +0 -20
  100. data/lib/flok/project_template/Guardfile +0 -7
  101. data/lib/flok/project_template/config/config.yml +0 -1
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: cc78ec824536368816557ea2429cc882fa3e6ad2
4
- data.tar.gz: 80886a45fa70d64ab8d1d9d1806452636215a834
3
+ metadata.gz: d5f0a382decc21b5e55f99815a7b23ecf1f53009
4
+ data.tar.gz: b5f74e961712e188e97fc94da0b51fbac5e664b2
5
5
  SHA512:
6
- metadata.gz: 0c42dd2f5b49bad2ce1dce1fb1b15f5ace5d20936d01df4f4efb5658c5338332c39a81363091cba50d5d2fb5d1a9a41c15c79198d9f87493e1c93c8b4d60331c
7
- data.tar.gz: cc7ac536f24ce5f3b0501cc1523148c36bc916e4bb8694d1862b24d2ff5b955434c4bf63f24e1b82fc37cb10b5ab671d18e921c0271c268a4fb921b5fb5bfe0e
6
+ metadata.gz: 56c35e867d9766f03239f2ce0343fac6ba34f4af931b6b30e1b69ce3bd5c13e2d3dd8b9a11e9e6d16dbf87247a1757f68d7eedd26fe20da14297a558cd95394c
7
+ data.tar.gz: 48bb0f9c0b18b43582b0960bcd661f233fbe3d471025b41402a32dc32a343a40a23e108741f5fcb9b076e74555c572979828f88cdc1bf3f4cc8d23c254a6d68c
@@ -63,10 +63,15 @@ function if_dispatch(qq) {
63
63
  }
64
64
  <% end %>
65
65
 
66
-
66
+ //If the array was marked incomplete, we need to grab
67
+ //more info so we send a blank array for a request
67
68
  if (if_dispatch_call_int_end) {
68
69
  if_dispatch_call_int_end = false;
69
- int_dispatch([])
70
+
71
+ var call = function() {
72
+ int_dispatch([]);
73
+ };
74
+ setTimeout(call, 0);
70
75
  }
71
76
  }
72
77
 
@@ -9,6 +9,15 @@
9
9
  //Here is an example with two successive calls
10
10
  // [2, 'mul', 3, 4, 1, 'print', 'hello world']
11
11
  function int_dispatch(q) {
12
+ //If there are things on the defer queue, then grab
13
+ //one of them for now and do it
14
+ if (edefer_q.length > 0) {
15
+ var ep = edefer_q.shift();
16
+ var ename = edefer_q.shift();
17
+ var info = edefer_q.shift();
18
+ int_event(ep, ename, info);
19
+ }
20
+
12
21
  //Where there is still things left on the queue
13
22
  while (q.length > 0) {
14
23
  //Grab the first thing off the queue, this is the arg count
@@ -96,7 +105,7 @@ function int_dispatch(q) {
96
105
  dump.push(out);
97
106
  }
98
107
 
99
- if (incomplete) { dump.unshift("i"); }
108
+ if (incomplete || edefer_q.length > 0) { dump.unshift("i"); }
100
109
 
101
110
  if (dump.length != 0) {
102
111
  if_dispatch(dump);
@@ -1,6 +1,9 @@
1
1
  //Event handler table
2
2
  evt = {};
3
3
 
4
+ //Event defer queue
5
+ edefer_q = [];
6
+
4
7
  function int_event(ep, event_name, info) {
5
8
  var f = evt[ep];
6
9
  if (f != undefined) {
@@ -19,6 +22,12 @@ function dereg_evt(ep) {
19
22
  delete evt[ep];
20
23
  }
21
24
 
25
+ function int_event_defer(ep, ename, info) {
26
+ edefer_q.push(ep);
27
+ edefer_q.push(ename);
28
+ edefer_q.push(info);
29
+ }
30
+
22
31
  //Spec helpers
23
32
  ////////////////////////////////////////////////////////////////
24
33
  function spec_event_handler(ep, event_name, info) {
@@ -0,0 +1,21 @@
1
+ //Configure pg_mem0, pg_mem1, pg_mem2
2
+ <% [0, 1, 2].each do |i| %>
3
+ function pg_mem<%= i %>_init(ns, options) {
4
+ pg_mem<%= i %>_init_params = {ns: ns, options: options};
5
+ pg_mem<%= i %>_ns = ns;
6
+
7
+ <% if @debug %>
8
+ pg_mem<%= i %>_spec_did_init = true;
9
+ <% end %>
10
+ }
11
+
12
+ function pg_mem<%= i %>_watch(id, page) {
13
+ }
14
+
15
+ function pg_mem<%= i %>_unwatch(id) {
16
+ }
17
+
18
+ function pg_mem<%= i %>_write(page) {
19
+ vm_cache_write(pg_mem<%= i %>_ns, page);
20
+ }
21
+ <% end %>
@@ -0,0 +1,44 @@
1
+ <% if @debug %>
2
+ function pg_net_sim_init(ns, options) {
3
+ pg_net_sim_spec_did_init = true;
4
+ pg_net_sim_ns = ns;
5
+
6
+ //Set timer to tick every 2 seconds
7
+ reg_evt(-9393, pg_net_sim_tick_handler);
8
+ reg_interval(-9393, "tick", 4*2);
9
+
10
+ pg_net_sim_waiting_for_response = [];
11
+ }
12
+
13
+ function pg_net_sim_tick_handler(ep, ename, info) {
14
+ while (pg_net_sim_waiting_for_response.length > 0) {
15
+ var e = pg_net_sim_waiting_for_response.shift();
16
+ vm_cache_write(pg_net_sim_ns, pg_net_sim_stored_pages[e.id]);
17
+ }
18
+ }
19
+
20
+ function pg_net_sim_watch(id, page) {
21
+ if (pg_net_sim_stored_pages[id] === undefined) {
22
+ throw "Could not get page with id: " + id;
23
+ }
24
+
25
+ pg_net_sim_waiting_for_response.push({id: id});
26
+ }
27
+
28
+ function pg_net_sim_unwatch(id) {
29
+ }
30
+
31
+ function pg_net_sim_write(page) {
32
+ vm_cache_write(pg_net_sim_ns, page);
33
+ }
34
+
35
+ //Special support function to simulate pages stored
36
+ //on a server
37
+ pg_net_sim_stored_pages = {};
38
+ function pg_net_sim_load_pages(pages) {
39
+ for (var i = 0; i < pages.length; ++i) {
40
+ var page = pages[i];
41
+ pg_net_sim_stored_pages[page._id] = page;
42
+ }
43
+ }
44
+ <% end %>
@@ -0,0 +1,23 @@
1
+ //Configure pg_spec0 and pg_spec1
2
+ <% if @debug %>
3
+ <% [0, 1].each do |i| %>
4
+ function pg_spec<%= i %>_init(ns, options) {
5
+ pg_spec<%= i %>_watchlist = [];
6
+ pg_spec<%= i %>_unwatchlist = [];
7
+ pg_spec<%= i %>_init_params = {ns: ns, options: options};
8
+ pg_spec<%= i %>_ns = ns;
9
+ }
10
+
11
+ function pg_spec<%= i %>_watch(id, page) {
12
+ pg_spec<%= i %>_watchlist.push({id: id, page: page});
13
+ }
14
+
15
+ function pg_spec<%= i %>_unwatch(id) {
16
+ pg_spec<%= i %>_unwatchlist.push(id);
17
+ }
18
+
19
+ function pg_spec<%= i %>_write(page) {
20
+ vm_cache_write(pg_spec<%= i %>_ns, page);
21
+ }
22
+ <% end %>
23
+ <% end %>
@@ -16,7 +16,11 @@ service :vm do
16
16
  vm_bp_to_nmap = {};
17
17
 
18
18
  //Notification listeners, converts ns+key to an array of base pointers
19
- vm_notify_map = {};
19
+ vm_notify_map = {
20
+ <% @options[:pagers].each do |p| %>
21
+ <%= p[:namespace] %>: {},
22
+ <% end %>
23
+ };
20
24
 
21
25
  //Cache
22
26
  function vm_cache_write(ns, page) {
@@ -26,14 +30,11 @@ service :vm do
26
30
  vm_dirty[ns][page._id] = page;
27
31
  vm_cache[ns][page._id] = page;
28
32
 
29
- var a = vm_notify_map[ns];
30
- if (a) {
31
- var b = a[page._id];
32
-
33
- if (b) {
34
- for (var i = 0; i < b.length; ++i) {
35
- int_event(b, "read_res", page);
36
- }
33
+ //Try to lookup view controller(s) to notify
34
+ var nbp = vm_notify_map[ns][page._id];
35
+ if (nbp) {
36
+ for (var i = 0; i < nbp.length; ++i) {
37
+ int_event_defer(nbp[i], "read_res", page);
37
38
  }
38
39
  }
39
40
  }
@@ -47,11 +48,27 @@ service :vm do
47
48
 
48
49
  z = crc32(z, page._id)
49
50
 
50
- var e = page.entries;
51
- for (var i = 0; i < e.length; ++i) {
52
- z = crc32(z, e[i]._sig);
51
+ //Hash differently based on type
52
+ if (page._type === "array") {
53
+ var e = page.entries;
54
+ for (var i = 0; i < e.length; ++i) {
55
+ z = crc32(z, e[i]._sig);
56
+ }
57
+ } else if (page._type === "hash") {
58
+ var keys = Object.keys(page.entries);
59
+ var e = page.entries;
60
+ var q = 0;
61
+ for (var i = 0; i < keys.length; ++i) {
62
+ var _sig = e[keys[i]]._sig;
63
+ var r = crc32(0, _sig);
64
+ q = q + r;
65
+ }
66
+ q = +q;
67
+ z = crc32(z, q.toString());
68
+ } <% if @debug %> else {
69
+ throw "vm_rehash_page got an unspported type: "+page._type;
53
70
  }
54
-
71
+ <% end %>
55
72
  page._hash = z.toString();
56
73
  }
57
74
 
@@ -67,10 +84,9 @@ service :vm do
67
84
  var p = id_to_page[ids[i]];
68
85
  SEND("disk", "if_per_set", "<%= p[:namespace] %>", ids[i], p);
69
86
  }
70
- <% end %>
71
87
  }
88
+ <% end %>
72
89
 
73
- //Clear dirty list
74
90
  vm_dirty = {
75
91
  <% @options[:pagers].each do |p| %>
76
92
  <%= p[:namespace] %>: {},
@@ -81,6 +97,9 @@ service :vm do
81
97
  //Part of the persist module
82
98
  //res is page
83
99
  function int_per_get_res(s, ns, res) {
100
+ //If the key didn't exist, ignore it
101
+ if (res === null) { return; }
102
+
84
103
  //If there is already a cached entry, a pager beat us to it
85
104
  //ignore this for now because the pager should be more up to
86
105
  //date
@@ -112,6 +131,7 @@ service :vm do
112
131
  }
113
132
 
114
133
  on_connect %{
134
+ vm_bp_to_nmap[bp] = {};
115
135
  }
116
136
 
117
137
  on_disconnect %{
@@ -146,25 +166,6 @@ service :vm do
146
166
  delete vm_bp_to_nmap[bp];
147
167
  }
148
168
 
149
- on "read_sync", %{
150
- <% raise "No pagers given in options for vm" unless @options[:pagers] %>
151
-
152
- var cres = vm_cache[params.ns][params.key];
153
- if (cres != undefined) {
154
- int_event(bp, "read_res", {key: params.key, value: cres});
155
- return;
156
- }
157
-
158
- <% @options[:pagers].each do |p| %>
159
- if (params.ns === "<%= p[:namespace] %>") {
160
- var res = <%= p[:name] %>_read_sync(params.ns, bp, params.key);
161
- }
162
- <% end %>
163
- vm_read_sync_called = true;
164
-
165
- int_event(bp, "read_sync_res", res);
166
- }
167
-
168
169
  on "write", %{
169
170
  <% raise "No pagers given in options for vm" unless @options[:pagers] %>
170
171
 
@@ -190,31 +191,24 @@ service :vm do
190
191
 
191
192
  //Ensure map exists
192
193
  ////////////////////////////////////////////////
193
- var a = vm_notify_map[params.ns];
194
- if (!a) {
195
- a = {};
196
- vm_notify_map[params.ns] = a;
197
- }
198
-
199
- var b = a[params.id];
194
+ var b = vm_notify_map[params.ns][params.id];
200
195
  if (!b) {
201
196
  b = [];
202
- a[params.id] = b;
197
+ vm_notify_map[params.ns][params.id] = b;
198
+ }
199
+
200
+ //Check if it exists, if it's already being watched, ignore it
201
+ var midx = vm_notify_map[params.ns][params.id].indexOf(bp)
202
+ if (midx != -1) {
203
+ return;
203
204
  }
204
205
 
205
- <% if @debug %>
206
- var midx = vm_notify_map[params.ns][params.id].indexOf(bp)
207
- if (midx != -1) {
208
- throw "Multiple calls to watch for the ns: " + params.ns + " and id: " + params.id
209
- }
210
- <% end %>
211
206
  b.push(bp)
212
207
  ////////////////////////////////////////////////
213
208
 
214
209
  //Add to vm_bp_to_nmap
215
210
  ////////////////////////////////////////////////
216
211
  //Construct
217
- if (vm_bp_to_nmap[bp] === undefined) { vm_bp_to_nmap[bp] = {}; }
218
212
  if (vm_bp_to_nmap[bp][params.ns] === undefined) { vm_bp_to_nmap[bp][params.ns] = {}; }
219
213
 
220
214
  //Add reverse mapping, length-1 because it was just pushed
@@ -222,7 +216,12 @@ service :vm do
222
216
 
223
217
  //If cache exists, then signal controller *now* while we wait for the pager
224
218
  if (cache_entry) {
225
- int_event(bp, "read_res", cache_entry);
219
+ //If sync flag is set, then send the data *now*
220
+ if (params.sync) {
221
+ int_event(bp, "read_res", cache_entry);
222
+ } else {
223
+ int_event_defer(bp, "read_res", cache_entry);
224
+ }
226
225
  }
227
226
 
228
227
  //Send a request now for disk read for sync
@@ -251,25 +250,22 @@ service :vm do
251
250
  <% end %>
252
251
  }
253
252
 
254
- on "unwatch", %{
255
- <% raise "No pagers given in options for vm" unless @options[:pagers] %>
256
-
257
- var midx = vm_notify_map[params.ns][params.id].indexOf(bp)
258
- vm_notify_map[params.ns][params.id].splice(midx, 1);
259
-
260
- delete vm_bp_to_nmap[bp][params.ns][params.id];
261
-
262
- <% @options[:pagers].each do |p| %>
263
- if (params.ns === "<%= p[:namespace] %>") {
264
- <%= p[:name] %>_unwatch(params.id);
265
- }
266
- <% end %>
267
- }
268
253
 
269
254
  on "unwatch", %{
270
255
  <% raise "No pagers given in options for vm" unless @options[:pagers] %>
271
256
 
257
+ //It won't have an array if it was never watched
258
+ if (vm_notify_map[params.ns][params.id] === undefined) {
259
+ return;
260
+ }
261
+
262
+ //Get the position of bp in the watch array, this may not exist, in which case
263
+ //this controller is not actually watching it.
272
264
  var midx = vm_notify_map[params.ns][params.id].indexOf(bp)
265
+ if (midx === -1) {
266
+ return;
267
+ }
268
+
273
269
  vm_notify_map[params.ns][params.id].splice(midx, 1);
274
270
 
275
271
  delete vm_bp_to_nmap[bp][params.ns][params.id];
data/bin/flok CHANGED
@@ -1,6 +1,6 @@
1
1
  #!/usr/bin/env ruby
2
2
 
3
- require 'flok'
3
+ require_relative '../lib/flok'
4
4
  require 'thor'
5
5
  require 'fileutils'
6
6
  require 'webrick'
@@ -45,7 +45,8 @@ class FlokCLI < Thor
45
45
 
46
46
  #3. Build the client's ./app/controllers/*.rb into './products/$PLATFORM/user_compiler.js'
47
47
  controller_glob_path = "#{local_products_path}/#{platform}/glob/controllers.rb"
48
- Flok.src_glob("rb", './app/controllers', controller_glob_path)
48
+ Flok.src_glob_r("rb", './app/controllers', controller_glob_path)
49
+
49
50
  user_compiler_js = Flok::UserCompiler.compile File.read(controller_glob_path)
50
51
  services_config_js = File.read "./config/services.rb"
51
52
  Flok.src_glob("rb", './app/services', File.join(local_products_path, platform, "./services/user_services.rb"))
@@ -95,29 +96,49 @@ class FlokCLI < Thor
95
96
  #Ensure puts does something because it's on another thread
96
97
  $stdout.sync = true
97
98
 
98
- ###########################################################################
99
- #We execute two tasks that run at the same time, (1) the webrick hoster
100
- #and (2) the guard reloader that triggers build
101
- ###########################################################################
102
- #1) Launch webrick server
103
- server = WEBrick::HTTPServer.new :Port => 9992, :DocumentRoot => "./products/#{platform}", :StartCallback => Proc.new {
104
- }
105
- server_started = false
106
-
107
- #2) Wait for initial build to launch (all_on_start)
108
- sh2("guard") do |inp, out|
109
- loop do
110
- res = out.readline
111
- if res =~ /BUILD RAN/
112
- #Start server
113
- Thread.new { server.start } unless server_started
114
- server_started = true
115
-
116
- puts "BUILD RAN"
117
- else
118
- $stderr.puts res
99
+ begin
100
+ @pid = fork do
101
+ server = WEBrick::HTTPServer.new :Port => 9992, :DocumentRoot => ".", :StartCallback => Proc.new {
102
+ puts "SERVER STARTED"
103
+ }
104
+
105
+ @build_q = Queue.new
106
+ @build_q.push 0
107
+ server.mount_proc "/" do |req, res|
108
+ #Semaphore for build; multiple requests execute 1 build
109
+ unless @build_q.empty?
110
+ @build_q.pop
111
+
112
+ _res = system("#{__FILE__} build")
113
+
114
+ @build_q.push 0
115
+ else
116
+ @build_q.pop
117
+ @build_q.push 0
118
+ _res = true
119
+ end
120
+
121
+ $stderr.puts "res = #{_res}"
122
+
123
+ #/products/application_user.js, etc. local path with dot
124
+ path = "./products/#{platform}/#{req.path}"
125
+ if _res == true
126
+ res.body = File.read(path)
127
+ else
128
+ res.body = ""
129
+ end
130
+ res.header["Access-Control-Allow-Origin"] = "*"
131
+ #res.header["Content-Type"] = "json/text"
119
132
  end
133
+
134
+ server.start
135
+ end
136
+
137
+ loop do
138
+ sleep 1
120
139
  end
140
+ ensure
141
+ Process.kill :KILL, @pid
121
142
  end
122
143
  end
123
144