flok 0.0.40 → 0.0.41
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/app/drivers/chrome/src/dispatch.js +7 -2
- data/app/kern/dispatch.js +10 -1
- data/app/kern/mod/event.js +9 -0
- data/app/kern/pagers/pg_mem.js +21 -0
- data/app/kern/pagers/pg_net_sim.js +44 -0
- data/app/kern/pagers/pg_spec.js +23 -0
- data/app/kern/services/vm.rb +60 -64
- data/bin/flok +44 -23
- data/docs/callout.md +1 -1
- data/docs/client_api.md +5 -2
- data/docs/config_yml.md +41 -0
- data/docs/controllers.md +4 -0
- data/docs/datatypes.md +5 -2
- data/docs/debug_server.md +2 -0
- data/docs/dispatch.md +8 -3
- data/docs/known_issues.md +6 -0
- data/docs/mod/event.md +25 -20
- data/docs/mod/persist.md +1 -1
- data/docs/mod/speech.md +12 -0
- data/docs/project.md +2 -2
- data/docs/services/vm.md +46 -17
- data/docs/services/vm/pagers.md +22 -2
- data/lib/flok/build.rb +0 -4
- data/lib/flok/user_compiler.rb +123 -47
- data/lib/flok/user_compiler_templates/ctable.js.erb +39 -1
- data/lib/flok/version.rb +1 -1
- data/spec/env/global.rb +1 -0
- data/spec/env/kern.rb +5 -1
- data/spec/etc/cli_spec.rb +337 -322
- data/spec/etc/service_compiler/config0.rb +1 -1
- data/spec/etc/service_compiler/config0b.rb +1 -0
- data/spec/etc/services_compiler_spec.rb +29 -29
- data/spec/etc/user_compiler/controller0b.rb +9 -0
- data/spec/etc/user_compiler/controller0timer.rb +16 -0
- data/spec/etc/user_compiler_spec.rb +24 -1
- data/spec/iface/driver/dispatch_spec.rb +8 -2
- data/spec/iface/driver/persist_spec.rb +11 -0
- data/spec/kern/assets/controller0defer.rb +18 -0
- data/spec/kern/assets/controller0defer0.rb +13 -0
- data/spec/kern/assets/controller0defer2.rb +17 -0
- data/spec/kern/assets/global_on_entry.rb +8 -0
- data/spec/kern/assets/global_on_entry2.rb +16 -0
- data/spec/kern/assets/global_on_entry3.rb +17 -0
- data/spec/kern/assets/global_on_entry4.rb +12 -0
- data/spec/kern/assets/interval.rb +29 -0
- data/spec/kern/assets/interval2.rb +33 -0
- data/spec/kern/assets/interval3.rb +39 -0
- data/spec/kern/assets/service1.rb +4 -1
- data/spec/kern/assets/service_controller1.rb +11 -0
- data/spec/kern/assets/specimin/controller0.rb +74 -0
- data/spec/kern/assets/vm/config5.rb +20 -0
- data/spec/kern/assets/vm/config6.rb +20 -0
- data/spec/kern/assets/vm/controller10.rb +1 -1
- data/spec/kern/assets/vm/controller11.rb +1 -1
- data/spec/kern/assets/vm/controller12.rb +1 -1
- data/spec/kern/assets/vm/controller13.rb +1 -1
- data/spec/kern/assets/vm/controller16b.rb +28 -0
- data/spec/kern/assets/vm/controller18.rb +1 -1
- data/spec/kern/assets/vm/controller21.rb +1 -1
- data/spec/kern/assets/vm/controller22.rb +8 -0
- data/spec/kern/assets/vm/controller_exc_ewatch.rb +1 -0
- data/spec/kern/assets/vm/controller_exc_ewatch2.rb +30 -0
- data/spec/kern/assets/vm/controller_exc_ewatch3.rb +16 -0
- data/spec/kern/assets/vm/controller_exc_ewatch4.rb +16 -0
- data/spec/kern/assets/vm/macros/copy_page_c.rb +1 -0
- data/spec/kern/assets/vm/macros/copy_page_ch.rb +25 -0
- data/spec/kern/assets/vm/macros/entry_del_c.rb +1 -0
- data/spec/kern/assets/vm/macros/entry_del_ch.rb +20 -0
- data/spec/kern/assets/vm/macros/entry_insert_c.rb +1 -0
- data/spec/kern/assets/vm/macros/entry_insert_ch.rb +23 -0
- data/spec/kern/assets/vm/macros/entry_mutable_c.rb +8 -7
- data/spec/kern/assets/vm/macros/entry_mutable_ch.rb +34 -0
- data/spec/kern/assets/vm/macros/new_page_c.rb +1 -1
- data/spec/kern/assets/vm/macros/new_page_c2.rb +1 -1
- data/spec/kern/assets/vm/macros/new_page_ch.rb +7 -0
- data/spec/kern/assets/vm/pg_mem/config.rb +10 -0
- data/spec/kern/assets/vm/pg_mem/config1.rb +10 -0
- data/spec/kern/assets/vm/pg_mem/config2.rb +10 -0
- data/spec/kern/assets/vm/pg_mem/config3.rb +15 -0
- data/spec/kern/assets/vm/pg_mem/write.rb +23 -0
- data/spec/kern/assets/vm/pg_mem/write2.rb +38 -0
- data/spec/kern/assets/vm/pg_net_sim/config.rb +10 -0
- data/spec/kern/assets/vm/pg_net_sim/nothing.rb +12 -0
- data/spec/kern/assets/vm/pg_net_sim/pages.json +1 -0
- data/spec/kern/assets/vm/pg_net_sim/watch.rb +18 -0
- data/spec/kern/callout_spec.rb +1 -1
- data/spec/kern/controller_macro_spec.rb +153 -20
- data/spec/kern/controller_spec.rb +232 -1
- data/spec/kern/debug_ui_spec.rb +235 -235
- data/spec/kern/event_spec.rb +112 -0
- data/spec/kern/service_controller_spec.rb +14 -2
- data/spec/kern/vm_service_mem_pagers_spec.rb +117 -0
- data/spec/kern/vm_service_net_sim_pager_spec.rb +97 -0
- data/spec/kern/vm_service_spec.rb +304 -17
- data/spec/kern/vm_service_spec2.rb +39 -0
- metadata +88 -6
- data/app/kern/pagers/mem_pager.js +0 -2
- data/app/kern/pagers/pg_spec0.js +0 -20
- data/lib/flok/project_template/Guardfile +0 -7
- data/lib/flok/project_template/config/config.yml +0 -1
data/docs/callout.md
CHANGED
@@ -5,4 +5,4 @@ and interval timers and to send a custom event to the given port.
|
|
5
5
|
##Registration
|
6
6
|
You may register for a timer event via `reg_timeout(ep, ename, ticks)`. This will wait `ticks` before firing.
|
7
7
|
To continually fire, you may use `reg_interval(ep, ename, ticks)` which will continue to fire every `ticks`. If `ep` is no longer in the `evt`, then
|
8
|
-
the entry will no longer exist.
|
8
|
+
the entry will no longer exist. (the timer will automatically be de-registered)
|
data/docs/client_api.md
CHANGED
@@ -17,7 +17,10 @@ Client API covers controller action event handlers.
|
|
17
17
|
* `params` - What was passed in the event
|
18
18
|
* `__base__` - The address of the controller
|
19
19
|
* `__info__` - Holds the `context`, current action, etc. See [Datatypes](./datatypes.md)
|
20
|
-
### Controller on_entry
|
20
|
+
### Controller on_entry (actions)
|
21
|
+
* `context` - The information for the controllers context
|
22
|
+
* `__base__` - The address of the controller
|
23
|
+
* `__info__` - Holds the `context`, current action, etc. See [Datatypes](./datatypes.md)
|
24
|
+
### Controller on_entry (global)
|
21
25
|
* `context` - The information for the controllers context
|
22
26
|
* `__base__` - The address of the controller
|
23
|
-
* `__info__` - Holds the `context`, current action, etc. See [Datatypes](./datatypes.md)
|
data/docs/config_yml.md
ADDED
@@ -0,0 +1,41 @@
|
|
1
|
+
#config.yml
|
2
|
+
Config.yml stores the configuration, per platform, for each project. The `config.yml` file contains information relating to:
|
3
|
+
1. What *modules* to include. A *module* directive (`MODS`) tells flok what files to include from its `$FLOK_GEM/app/kern/mods` directory. The specific platform driver may also read the modules list; but it is common for platform drivers to just have built-in modules that would be working if paired with the correct kernel modules interrupt handlers.
|
4
|
+
2. What `defines` to use. A `define` shows up in the `@defines` array for kernel source code and may enable/disable sections of code. Documentation will indicate whether features need to have a `define` directive to be enabled. An example of this is simulation support of the `speech` module through `speech_sim`.
|
5
|
+
3. `debug_attach` - A special directive used by the specs suite to understand the scheme used for the debugging server.
|
6
|
+
|
7
|
+
##Example
|
8
|
+
```yml
|
9
|
+
DEBUG:
|
10
|
+
debug_attach: socket_io
|
11
|
+
mods:
|
12
|
+
- ui
|
13
|
+
- event
|
14
|
+
- net
|
15
|
+
- segue
|
16
|
+
- controller
|
17
|
+
- debug
|
18
|
+
- sockio
|
19
|
+
- persist
|
20
|
+
- timer
|
21
|
+
defines:
|
22
|
+
- mem_pager
|
23
|
+
- sockio_pager
|
24
|
+
RELEASE:
|
25
|
+
mods:
|
26
|
+
- ui
|
27
|
+
- event
|
28
|
+
- net
|
29
|
+
- segue
|
30
|
+
- controller
|
31
|
+
- sockio
|
32
|
+
- persist
|
33
|
+
- timer
|
34
|
+
defines:
|
35
|
+
- mem_pager
|
36
|
+
- sockio_pager
|
37
|
+
|
38
|
+
```
|
39
|
+
|
40
|
+
##Where does a config.yml come from
|
41
|
+
The `config.yml` starts its life inside the flok gem's `$FLOK_GEM/app/drivers/$PLATFORM/config.yml` where it is later copied into new flok projects when created with `flok new` into your `$PROJECT/config/platforms/$PLATFORM/config.yml` via the `$FLOK_GEM/lib/flok/project_template` directory where an `erb` file called `config.yml` reads straight from the `$FLOK_GEM/app/drivers/$PLATFORM/config.yml` file.
|
data/docs/controllers.md
CHANGED
@@ -26,6 +26,10 @@ controller "tab_controller" do
|
|
26
26
|
spots "content"
|
27
27
|
services "my_service" #See docs on services for what this means
|
28
28
|
|
29
|
+
#Global on_entry, will only be run once on the first action
|
30
|
+
on_entry %{
|
31
|
+
}
|
32
|
+
|
29
33
|
#You can also define macros for shared action traits
|
30
34
|
macro "my_macro" do
|
31
35
|
on "shared_clicked" do
|
data/docs/datatypes.md
CHANGED
@@ -12,7 +12,9 @@ ctable_entry {
|
|
12
12
|
actions, //A dictionary [String:action_info] that corresponds to a dictionary of action_info object's based on the action's name.
|
13
13
|
spots, //An array fo spot names for this controller
|
14
14
|
name, //The name of the controller, useful for certain lookup operations, this is also the ctable key
|
15
|
-
__init__, //A function that is called when this controller is created. Signals service connection
|
15
|
+
__init__, //A function that is called when this controller is created. Signals service connection and the controller on_entry bits.
|
16
|
+
Additionally, all interval timers are configured here based on their unique names. Actions that are not active will not receive these events (they
|
17
|
+
will be ignored).
|
16
18
|
__dealloc__ //A function that is called when this controller is destroyed via parent controller switching actions in Goto. Signals services d/c
|
17
19
|
}
|
18
20
|
```
|
@@ -23,7 +25,8 @@ dictate what happends when events come in, etc.
|
|
23
25
|
```javascript
|
24
26
|
action_info {
|
25
27
|
on_entry //A function that is called when this action is initialized.
|
26
|
-
handlers //A dictionary [String:f(
|
28
|
+
handlers //A dictionary [String:f(base)] of event handlers for events that occur. Timer events are given a unique name and stored here like
|
29
|
+
`3toht_5_sec`
|
27
30
|
}
|
28
31
|
```
|
29
32
|
|
data/docs/debug_server.md
CHANGED
@@ -5,6 +5,8 @@ information. The key `debug_attach` holds a protocol key like `socket_io`. This
|
|
5
5
|
The `debug` module implements the `debug server`. Although, the server is a separate
|
6
6
|
piece of code that doesn't fit into the same `if` and `int` paradigm. The specs in the `debug server` also use various `debug` module helpers.
|
7
7
|
|
8
|
+
======
|
9
|
+
|
8
10
|
## `socket_io` Protocol
|
9
11
|
This protocol states that the driver must repeatedly attempt to connect to the socket.io port located at `localhost:9999`.
|
10
12
|
|
data/docs/dispatch.md
CHANGED
@@ -7,7 +7,7 @@ we will be forced to block anyway, so it makes sense to allow large tranfers (bu
|
|
7
7
|
|
8
8
|
In order to relieve this problem, *flok* restricts the number of pipelined messages **per queue** to 5 with the exception of the `main` queue (the only synchronous queue). That means you
|
9
9
|
can have a total of `(N*5)` messages assuming there are `N` queue types (at the time of this writing, there are 5 not including the `main` queue). It is unlikely that all queues will be used
|
10
|
-
as most requests on the flok client will not use multiple resources in one pipelined stage. The client is responsible for requesting more data until no more data is available.
|
10
|
+
as most requests on the flok client will not use multiple resources in one pipelined stage. The client is responsible for requesting more data until no more data is available. The client knows about more data waiting by receiving the string "i" at the start of the response array (see below).
|
11
11
|
|
12
12
|
##Confusion about synchronous and asynchronous
|
13
13
|
There are various stages of message processing so it can be confusing as to what is exactly synchronous and asynchronous. Flok assumes a few things
|
@@ -66,10 +66,15 @@ And it receives this in `res`:
|
|
66
66
|
```
|
67
67
|
|
68
68
|
Notice how it's the same as the int_dispatch from the server except that queue 1 (`net_q`) is missing 1 message ([1, "download", "..."]). The 'i' at the start
|
69
|
-
indicates that the request is 'incomplete' and the client should request with a blank request array following completion of dequing all these events.
|
70
|
-
So the flok server still
|
69
|
+
indicates that the request is 'incomplete' and the client should request with a blank request array following completion of dequeuing all these events. The second request should be **asynchronous** w.r.t the first request and the third request asynchronous w.r.t the second request. This is because, if there were enough events, the main thread would be blocked until the full queue was finished de-queuing. This raises an issue; what happens if a request comes through before the asynchronous request comes through? Nothing special. Clients should prioritize the request queue to dispatch things that need `int_event` *now* instead of wait until the queue is drained. While the requests will do the same thing either which way; you don't want to pre-empt the higher priority request while it's waiting for a low priority queue drain else you lose the benefits.
|
70
|
+
So the flok server still has the following in it's queues. The `net_q` will be transfered after the next client request which will take place
|
71
71
|
after the `int_dispatch` call as the client should always call `int_dispatch` as many times until it gets a blank que `int_dispatch` as many times until it gets a blank queue.
|
72
72
|
|
73
|
+
Additionally, the `i` flag can be used with no information initially given. This arises when the `event` module en-queues an incomplete request because the `event` module needs to support defering the next event call.
|
74
|
+
|
75
|
+
|
76
|
+
**Again, we cannot stress how important it is to ensure that incompletion de-queueing is asynchronous. Behaviorally, your program will be the same; but it has a much larger opportunity to cause latency as the de-queuing itself (and remember, requests that request incompleteness but are not incomplete themselves are **still** synchronous**
|
77
|
+
|
73
78
|
Note that:
|
74
79
|
While at first you might think we need to test that int_dispatch called intra-respond of our if_event needs to test whether or not we still send
|
75
80
|
out blank [] to int_dispatch; this is not the case. In the real world, flok is supposed to also make any necessary if_disptach calls during all
|
@@ -0,0 +1,6 @@
|
|
1
|
+
#Known issues & Bugs
|
2
|
+
|
3
|
+
0000. A `every` event used in a controller's action will time correctly on the first action, but subsequent actions will be off by at most 3 ticks.
|
4
|
+
This is because we do not have any way (currently) to reset the timing queue in the controller as it registers for all timing events at init. See
|
5
|
+
"Does not call intervals of other actions; and still works when switching back actions" in `spec/kern/controller_spec.rb`
|
6
|
+
0001. `Goto` macro does not recursively 'dealloc' everything.
|
data/docs/mod/event.md
CHANGED
@@ -1,20 +1,25 @@
|
|
1
|
-
#Event (event.js)
|
2
|
-
|
3
|
-
### Functions
|
4
|
-
`if_event(ep, event_name, event)` - Receive an event at some object located at `ep`. This is a platform defined opaque pointer.
|
5
|
-
|
6
|
-
### Interrupts
|
7
|
-
`int_event(ep,
|
8
|
-
For example, the `vc` (view controller) subsystem will receive any events sent when the `ep` is an opaque pointer to a
|
9
|
-
./app/driver/$PLATFORM/config.yml`)
|
10
|
-
file is used to compile only the modules into the flok kernel that the driver supports.rface controller from `ui`.
|
11
|
-
|
12
|
-
|
13
|
-
|
14
|
-
|
15
|
-
|
16
|
-
|
17
|
-
|
18
|
-
|
19
|
-
|
20
|
-
|
1
|
+
#Event (event.js)
|
2
|
+
|
3
|
+
### Functions
|
4
|
+
`if_event(ep, event_name, event)` - Receive an event at some object located at `ep`. This is a platform defined opaque pointer.
|
5
|
+
|
6
|
+
### Interrupts
|
7
|
+
* `int_event(ep, ename, event)` - Send an event back to *Flok* through an event. The `ep` in this case is dependent on the sub-system. Dispatching is provided through the `evt` (event vector table). On the flok kernel, using `reg_evt` and `dereg_evt` will determine what happens post int_event. If `ep` is no longer valid, the event is ignored. Returns `false` if the destination does not exist and `true` otherwise.
|
8
|
+
For example, the `vc` (view controller) subsystem will receive any events sent when the `ep` is an opaque pointer to a
|
9
|
+
./app/driver/$PLATFORM/config.yml`)
|
10
|
+
file is used to compile only the modules into the flok kernel that the driver supports.rface controller from `ui`.
|
11
|
+
|
12
|
+
* `int_event_defer(ep, ename, event)` - Same as `int_event` except that the event will be sent to the appropriate receiver at some point in the future, and guaranteed not in the current thread of execution. This is used internally by the flok kernel; so the fact that it's an external interface doesn't make a lot of sense.
|
13
|
+
|
14
|
+
#Deferred (asynchronous) events
|
15
|
+
When you call `int_event_defer`, as you should be calling it since it's not really meant to be used outside the kernel, you enqueue the event on the `edefer_q` array as a array containing [`ep`, `ename`, `event`]. Shifting this array will yield the oldest `ep`, followed by `ename`, and then the oldest `event` until nothing remains. At this point, nothing happens. When `int_dispatch` is called, it first checks to see if there is anything on the `edefer_q`. If there is, it takes one thing off the queue and executes it. If there are things remaining on `edefer_q` after completion of `int_dispatch`, the next request going out is marked `incomplete` with a leading `i` (see [dispatch](./dispatch.md)) so that another request will be made to `int_dispatch` in the near future.
|
16
|
+
|
17
|
+
### Helper function
|
18
|
+
`reg_evt(ep, f)` - Register a function to be called when `ep` is sent a message, function looks like function f(ep, ename, info)
|
19
|
+
`dereg_evt(ep)` - Disable notifications to a function
|
20
|
+
|
21
|
+
### Kernel spec related
|
22
|
+
* `spec_event_handler(ep, event_name, event)` - This function should send the message `spec_event_handler_res(ep, event_name, event)` if called
|
23
|
+
* `int_spec_event_dereg` - This function should de-register 3848392 from being an event
|
24
|
+
|
25
|
+
Additionally, you should register the event pointer `3848392` to call the spec_event_handler_res.
|
data/docs/mod/persist.md
CHANGED
@@ -3,7 +3,7 @@ Persistance management. Loosely based on redis.
|
|
3
3
|
|
4
4
|
###Driver messages
|
5
5
|
`if_per_set(ns, key, value)` - Set a key and value
|
6
|
-
`if_per_get(s, ns, key)` - Get a key's value, a message `int_get_res` will be sent back, `s` is the session key that will also be sent back
|
6
|
+
`if_per_get(s, ns, key)` - Get a key's value, a message `int_get_res` will be sent back, `s` is the session key that will also be sent back. If there is no key, `null` will be sent back.
|
7
7
|
`if_per_del(ns, key)` - Delete a particular key
|
8
8
|
`if_per_del_ns(ns)` - Delete an entire namespace
|
9
9
|
|
data/docs/mod/speech.md
ADDED
@@ -0,0 +1,12 @@
|
|
1
|
+
#Speech (speech.js)
|
2
|
+
There is a software-simulation of the `speech` module. To enable it, put `speech_sim` in the `enable` section of your config file. The speech sim will replicate the rate at which speech progresses but will not produce audio (obviously).
|
3
|
+
|
4
|
+
###Client interface
|
5
|
+
`if_speech_say(text)` - Start speaking this text. Multiple things will never be queued.
|
6
|
+
`if_speech_cancel()` - Stop any speaking in progress; if there is no speaking then nothing should happen
|
7
|
+
|
8
|
+
###Kernel interrupts
|
9
|
+
`int_speech_cancelled()` - The speech was cancelled
|
10
|
+
`int_speech_finished()` - The speech completed successfully
|
11
|
+
`int_speech_started()` - The speech has started
|
12
|
+
`int_will_speak_range(offset, count)` - The speech is in the process of talking over a range of the text
|
data/docs/project.md
CHANGED
@@ -6,7 +6,7 @@ You create and build projects via the `flok` command. You must set the `$FLOK_EN
|
|
6
6
|
|
7
7
|
* `flok new <path>` - Create a new flok project
|
8
8
|
* `flok build` - Build a flok project. Generates files in `./products`
|
9
|
-
* `flok server` - Trigger auto-rebuild when a file is
|
9
|
+
* `flok server` - Trigger auto-rebuild when a file is requested in the `./app` folder and hosts the `products/$PLATFORM` folder on `http://localhost:9992/`. e.g. `http://localhost:9992/application_user.js`. Outputs `SERVER STARTED` when server is fully launched.
|
10
10
|
|
11
11
|
###Folder structure
|
12
12
|
* `app/`
|
@@ -29,7 +29,7 @@ You create and build projects via the `flok` command. You must set the `$FLOK_EN
|
|
29
29
|
###User build process
|
30
30
|
1. The gem is build via `build:world` using the platform given in build.
|
31
31
|
2. Copy everything in the gems ./flok/products/$PLATFORM -> $PROJECT/products/$PLATFORM and ./flok/app/kern/services/*.rb -> $PROJECT/products/$PLATFORM/services/kern_services.rb
|
32
|
-
3. The controllers in `./app/controllers
|
32
|
+
3. The controllers in `./app/controllers/**/*.rb` are run through the `user_compiler` in `./lib/flok/user_compiler.rb` and then saved to the projects
|
33
33
|
`./products/$PLATFORM/glob/user_compiler.js` and the `./app/services/*.rb` are globbed into `./products/$PLATFORM/services/user_services.rb`
|
34
34
|
4. The `./products/$PLATFORM/services/*.rb` file are globbed into `./products/$PLATFORM/services/combined_services.rb`
|
35
35
|
5. The service configuration in `./config/services.rb` is read and run through `services_compiler` and files from
|
data/docs/services/vm.md
CHANGED
@@ -8,10 +8,13 @@ Each pager belongs to a *namespace*; page faults hit a namespace and then the pa
|
|
8
8
|
Fun aside; Because of the hashing semantics; this paging system solves the age old problem of ... how do you show that data has changed *now* when to be assured that you have perfectly synchronized data with the server?;... you need to do a 3-way handshake with the updates. You could have a network server pager that supports writes but doesn't forward those to the network. That way, you can locally modify the page and then if the modifications were guessed correctly, the server would not even send back a page modification update! (Locally, the page would have been propagated as well). In the meantime, after modifying the local page, you would send a real network request to the server which would in turn update its own paging system but at that point, the server would check in with you about your pages, but miraculously, because you guessed the updated page correctly, no modifications will need to be made. You could even purposefully put a 'not_synced' key in and actually show the user when the page was correctly synchronized.
|
9
9
|
|
10
10
|
##Pages
|
11
|
-
Each page is a
|
11
|
+
Each page is either of a `array` type or `hash` type.
|
12
|
+
|
13
|
+
###Array type
|
12
14
|
```ruby
|
13
15
|
page_example = {
|
14
16
|
_head: <<uuid STR or NULL>>,
|
17
|
+
_type: "array",
|
15
18
|
_next: <<uuid STR or NULL>,
|
16
19
|
_id: <<uuid STR>,
|
17
20
|
entries: [
|
@@ -20,12 +23,31 @@ page_example = {
|
|
20
23
|
],
|
21
24
|
_hash: <<CRC32 >
|
22
25
|
}
|
26
|
+
```
|
27
|
+
|
28
|
+
###Hash type
|
29
|
+
```ruby
|
30
|
+
page_example = {
|
31
|
+
_head: <<uuid STR or NULL>>,
|
32
|
+
_type: "hash",
|
33
|
+
_next: <<uuid STR or NULL>,
|
34
|
+
_id: <<uuid STR>
|
35
|
+
entries: {
|
36
|
+
"my_id0" => {_sig: <<random_signature for inserts and modifies STR>>},
|
37
|
+
...
|
38
|
+
},
|
39
|
+
_hash: <<CRC32 >
|
40
|
+
}
|
23
41
|
```
|
24
42
|
|
25
43
|
* `_head (string or null)` - An optional pointer that indicates a *head* page. The head pages are special pages that contain 0 elements in the entries array, no `_head` key, and `_next` points to the *head* of the list. A head page might be used to pull down the latest news where the head will tell you whether or not there is anything left for you to receive.
|
26
44
|
* `_next (string or null)` - The next element on this list. If `_next` is non-existant, then this page is the endpoint of the list.
|
27
45
|
* `_id (string)` - The name of this page. Even if every key changed, the `_id` will not change. This is supposed to indicate, semantically, that this page still *means* the same thing. For example, imagine a page. If all entries were to be **removed** from this page and new entries were **inserted** on this page, then it would be semantically sound to say that the entries were **changed**.
|
28
|
-
* `entries
|
46
|
+
* `entries`
|
47
|
+
* `_type == 'array'`
|
48
|
+
* An array of dictionaries. Each element contains a `_id` that is analogous to the page `_id`. (These are not the same, but carry the same semantics). Entries also have a `_sig` which should be a generated hash value that changes when the entry changes.
|
49
|
+
* `_type == 'hash'`
|
50
|
+
* A dictionary of dictionaries. Entries have a `_sig` which should be a generated hash value that changes when the entry changes.
|
29
51
|
* `_hash (string)` - All entry `_id's`, `_next`, the page `_id`, and `head` are hashed together. Any changes to this page will cause this `_hash` to change which makes it a useful way to check if a page is modified and needs to be updated. The hash function is an ordered CRC32 function run in the following order. See [Calculating Page Hash](#calculating_page_hash).
|
30
52
|
|
31
53
|
------
|
@@ -36,7 +58,12 @@ The `_hash` value of a page is calculated in the following way:
|
|
36
58
|
1. `z = crc32(z, _head) if _head`
|
37
59
|
2. `z = crc32(z, _next) if _next`
|
38
60
|
3. `z = crc32(z, _id)`
|
39
|
-
4. `
|
61
|
+
4. `_type` dependent
|
62
|
+
* For `_type == 'array'`
|
63
|
+
* `z = crc32(z, entriesN._sig)` where N goes through all entries in order.
|
64
|
+
* For `_type == 'hash'`
|
65
|
+
* `R = crc32(0, entries[key]._sig)` is calculated for each entry; R is an array.
|
66
|
+
* `z = crc32(z, r0+r1+r2+...)` where `r0, r1, ...` are the elements of the array R we just calculated. This makes order not important.
|
40
67
|
|
41
68
|
If a key is null, then the crc step is skipped for that key. e.g. if `_head` was null, then `z = crc32(0, _head)` would be skipped
|
42
69
|
|
@@ -103,12 +130,10 @@ if (page is not resident in memory && not_synchronous) {
|
|
103
130
|
* Parameters
|
104
131
|
* `ns` - The namespace of the page, e.g. 'user'
|
105
132
|
* `id` - Watching the page that contains this in the `_id` field
|
106
|
-
* `sync (optional)` - If set to `true` then the disk read will be performed synchronously.
|
133
|
+
* `sync (optional)` - If set to `true` then the disk read and cache read will be performed synchronously. Additionally, all future cache reads / updates will be performed synchronously.
|
107
134
|
* Event Responses
|
108
135
|
* `read_res` - Whenever a change occurs to a page or the first read.
|
109
136
|
* Returns an immutable page in params
|
110
|
-
* Debug mode
|
111
|
-
* When `@debug`, an exception will be thrown if you attempt to watch the same key from one controller multiple times.
|
112
137
|
|
113
138
|
###`unwatch`
|
114
139
|
This is how you **unwatch** a page. For view controllers that are destroyed, it is not necessary to manually `unwatch` as the `vm` service will be notified on its disconnection and automatically remove any watched pages for its base pointer. This should be used for things like scroll lists where the view controller is no longer interested in part of a page-list.
|
@@ -127,7 +152,7 @@ use the modification helpers. These modification helpers implement copy on write
|
|
127
152
|
* If in `@debug` mode, the variable `vm_write_list` contains an array dictionary of the last page passed to the pager (tail is latest).
|
128
153
|
|
129
154
|
##Cache
|
130
|
-
See below with `vm_cache_write` for how to write to the cache. Each pager can choose whether or not to cache; some pagers may cache only reads while others will cache writes. Failure to write to the cache at all will cause `watch` to never trigger. Some pagers may use a trick where writes are allowed, and go directly to the cache but nowhere else. This is to allow things like *pending* transactions where you can locally fake data until a server response is received which will both wipe the fake write and insert the new one. Cache writes will trigger `watch`; if you write to cache with `vm_cache_write` with a page that has the same `_hash` as a page that already exists in cache, no `watch` events will be triggered. Additionally, calling `vm_cache_write` with a non-modified page will result in no performance penalty.
|
155
|
+
See below with `vm_cache_write` for how to write to the cache. Each pager can choose whether or not to cache; some pagers may cache only reads while others will cache writes. Failure to write to the cache at all will cause `watch` to never trigger. Some pagers may use a trick where writes are allowed, and go directly to the cache but nowhere else. This is to allow things like *pending* transactions where you can locally fake data until a server response is received which will both wipe the fake write and insert the new one. Cache writes will trigger `watch`; if you write to cache with `vm_cache_write` with a page that has the same `_hash` as a page that already exists in cache, no `watch` events will be triggered. Additionally, calling `vm_cache_write` with a non-modified page will result in no performance penalty. `vm_cache_write` notifies controllers asynchronously and is not affected by the `watch` flag on controllers.
|
131
156
|
|
132
157
|
###Pageout & Cache Synchronization
|
133
158
|
Cache will periodically be synchronized to disk via the `pageout` service. When flok reloads itself, and the `vm` service gets a `watch` or `watch_sync` request, the `vm` service will attempt to read from the `vm_cache` first and then read the page from disk (write that disk read to cache). The only difference between `watch_sync` and `watch` is that `watch_sync` will synchronously pull from disk and panic if there is no cache available for the page). (Both `watch` and `watch_sync` will always call the pager's after the cache read as well.)
|
@@ -135,8 +160,8 @@ Cache will periodically be synchronized to disk via the `pageout` service. When
|
|
135
160
|
Pageout is embodied in the function named `vm_pageout()`. This will asynchronously write `vm_dirty` to disk and clear `vm_dirty` once the write has been commited. `vm_pageout()` is called every minute by the interval timer in this service.
|
136
161
|
|
137
162
|
###Datatypes & Structures (Opaque, do not directly modify)
|
138
|
-
* `vm_cache` - The main area for storing the cache. Stored in `vm_cache[ns][key]
|
139
|
-
* `vm_dirty` - Pages recently written to cache go on the dirty list so that they may be written when the pageout handler runs. Dictionary contains map for `vm_dirty[ns][page._id] => page` for all dirty pages. Pages are removed from the dictionary when they are written in the pageout.
|
163
|
+
* `vm_cache` - The main area for storing the cache. Stored in `vm_cache[ns][key]`. Contains all namespaces by default with blank hashes.
|
164
|
+
* `vm_dirty` - Pages recently written to cache go on the dirty list so that they may be written when the pageout handler runs. Dictionary contains map for `vm_dirty[ns][page._id] => page` for all dirty pages. Pages are removed from the dictionary when they are written in the pageout. Contains all namespaces by default with blank hashes.
|
140
165
|
* `vm_notify_map` - The dictionary used to lookup what controllers need to be notified about changes. Stored in `vm_notify_map[ns][id]` which yields an array of controller base pointers.
|
141
166
|
* `vm_bp_to_nmap` - A dictionary that maps a `bp` key (usually from a controller) to a dictionary. This dictionary contains a mapping of `bp => ns => id` to an array that contains `[node, index]` where `node` is a reference to `vm_notify_map[ns][id]`. This inverted map must (a) provide a way for `unwatch` to quickly remove entries from itself and (b) provide a way for all entries in `vm_notify_map` to be removed when something (usually a controller) disconnects.
|
142
167
|
must support `unwatch` removal which we only receive the `bp`, `ns`, and `key`.
|
@@ -155,18 +180,22 @@ will not receive the notifications of the page modifications. Once using these m
|
|
155
180
|
|
156
181
|
Aside, modifying a page goes against the semantics of the vm system; you're thinking of it wrong if you think that's ok. The VM system lets the pager decide what the semantics of a `write` actually means. That may mean it does not directly modify the page; maybe it sends the write request to a server which then validates the request, and then the response on the watched page that was modified will then update your controller.
|
157
182
|
|
158
|
-
If you're creating a new page, please use these macros as well; just switch out `CopyPage` for `NewPage`.
|
183
|
+
If you're creating a new page, please use these macros as well; just switch out `CopyPage` for `NewPage`.
|
159
184
|
|
160
185
|
####Per entry
|
161
|
-
* `NewPage(id)` - Returns a new blank page; internally creates a page that has a null `_next`, `_head`, and `entries` array with 0 elements.
|
162
|
-
`_id` is generated if it is not passed.
|
186
|
+
* `NewPage(type, id)` - Returns a new blank page; internally creates a page that has a null `_next`, `_head`, and `entries` array with 0 elements. type can either be `array` or `hash`. `_id` is generated if it is not passed.
|
163
187
|
* `CopyPage(page)` - Copies a page and returns the new page. Internally this copies the entire page with the exception of the
|
164
188
|
`_hash` field.
|
165
|
-
|
166
|
-
* `
|
167
|
-
|
168
|
-
|
169
|
-
|
189
|
+
|
190
|
+
* For both `array` and `hashes`, the following functions work (albeit different semantics). For array types, the `eindex` is an integer in the array, For hash types, the `eindex` is a key inside the dictionary.
|
191
|
+
* `EntryDel(page, eindex)` - Remove a single entry from a page. (Internally this deletes the array entry).
|
192
|
+
* `EntryInsert(page, eindex, entry)` - Insert an entry, entry should be a dictionary value.
|
193
|
+
* For arrays, this generates the `_sig` and `_id` for you.
|
194
|
+
* For hashes, this generates the `_sig` for you.
|
195
|
+
* `EntryMutable(page, eindex)` - Set a mutable entry at a specific index which you can then modify. The signature is changed for you. You can not
|
196
|
+
use this with dot syntax like `EntryMutable(page, eindex).id = 'foo'`, you may only get a variable.
|
197
|
+
* `SetPageNext(page, id)` - Sets the `_next` id for the page
|
198
|
+
* `SetPageHead(page, id)` - Sets the `_head` id for the page
|
170
199
|
|
171
200
|
Here is an example of a page being modified inside a controller after a `read_res`
|
172
201
|
```js
|
data/docs/services/vm/pagers.md
CHANGED
@@ -28,11 +28,31 @@ Please name your pagers `pg_XXXX` to help make it clear that you are writing a p
|
|
28
28
|
####Default memory pager | `pg_mem0`
|
29
29
|
The *default memory pager* does not do anything on `watch` or `unwatch`. It depends on the cache to reply to `watch` and `watch_sync` requests created by controllers. Controllers may write to this pager via `write` which this pager will then send directly to `vm_cache_write`. This pager is always compiled into the kernel.
|
30
30
|
|
31
|
-
####Spec pager | `pg_spec0`
|
31
|
+
####Spec pager | `pg_spec0`, `pg_spec1`
|
32
32
|
This pager does the following when calls are made to it's functions, it's designed to assist with `vm` kernel specs.
|
33
33
|
* `init` - Sets `pg_spec0_init_params` to `{ns: ns, options: options}`
|
34
34
|
* `watch` - Appends `{id: id, hash: hash}` to `pg_spec0_watchlist`
|
35
35
|
* `unwatch` - appends id to `pg_spec0_unwatchlist`
|
36
36
|
* `write` - Writes the given page to `vm_cache_write`
|
37
37
|
|
38
|
-
|
38
|
+
These pagers only exists if the environment is in `DEBUG` mode (`@debug` is enabled).
|
39
|
+
|
40
|
+
####Net Sim pager | `pg_net_sim`
|
41
|
+
This pager is designed to simulate a slowly loading data like a network interface. It does not allow writes.
|
42
|
+
* `init` - Sets `pg_net_sim_spec_did_init` to `true`
|
43
|
+
* `watch` - Set a timer that will elapse after 2 seconds at which point the data will be loaded into the cache.
|
44
|
+
* `unwatch` - Does nothing
|
45
|
+
* `write` - Does nothing
|
46
|
+
* Functions
|
47
|
+
* `pg_net_sim_load_pages(arr)` - Given an array of pages, the net sim will load these pages and then when `watch` is called,
|
48
|
+
it will load those pages, after 2 seconds, into the `vm_cache`
|
49
|
+
|
50
|
+
These pagers only exists if the environment is in `DEBUG` mode (`@debug` is enabled).
|
51
|
+
|
52
|
+
####Mem pager | `pg_mem0`, `pg_mem1`, `pg_mem2`
|
53
|
+
This pager provides you with local memory that will be automatically cached to disk. It has 3 copies
|
54
|
+
(`pg_mem0`, `pg_mem'`) etc. because you can use each one in a different namespace.
|
55
|
+
* `init` - Sets `pg_mem0_spec_did_init` to `true` if `@debug`
|
56
|
+
* `watch` - Does nothing
|
57
|
+
* `unwatch` - Does nothing
|
58
|
+
* `write` - Writes the given page to `vm_cache_write`
|
data/lib/flok/build.rb
CHANGED
@@ -76,10 +76,6 @@ module Flok
|
|
76
76
|
################################################################################################################
|
77
77
|
#MODS - List mods listed in config.yml
|
78
78
|
#---------------------------------------------------------------------------------------
|
79
|
-
#Load the driver config.yml
|
80
|
-
driver_config = YAML.load_file("./app/drivers/#{platform}/config.yml")
|
81
|
-
raise "No config.yml found in your 'platform: #{platform}' driver" unless driver_config
|
82
|
-
|
83
79
|
#Create array that looks like a javascript array with single quotes
|
84
80
|
mods = Flok::Platform.mods(environment)
|
85
81
|
mods_js_arr = "[" + mods.map{|e| "'#{e}'"}.join(", ") + "]"
|
data/lib/flok/user_compiler.rb
CHANGED
@@ -1,5 +1,7 @@
|
|
1
1
|
#Compile a controller ruby file into a javascript string
|
2
2
|
|
3
|
+
require 'active_support'
|
4
|
+
require 'active_support/core_ext/numeric'
|
3
5
|
require 'erb'
|
4
6
|
module Flok
|
5
7
|
module UserCompiler
|
@@ -60,36 +62,8 @@ module Flok
|
|
60
62
|
attr_accessor :controller_name, :action_name, :name
|
61
63
|
end
|
62
64
|
|
63
|
-
|
64
|
-
|
65
|
-
|
66
|
-
def initialize controller, name, ctx, &block
|
67
|
-
@controller = controller
|
68
|
-
@name = name
|
69
|
-
@ctx = ctx
|
70
|
-
@ons = [] #Event handlers
|
71
|
-
|
72
|
-
self.instance_eval(&block)
|
73
|
-
end
|
74
|
-
|
75
|
-
def on_entry js_src
|
76
|
-
#returns a string
|
77
|
-
@on_entry_src = macro(js_src)
|
78
|
-
end
|
79
|
-
|
80
|
-
def on name, js_src
|
81
|
-
@ons << {:name => name, :src => macro(js_src)}
|
82
|
-
end
|
83
|
-
|
84
|
-
def macro js_src
|
85
|
-
lines = js_src.split("\n").map do |line|
|
86
|
-
|
87
|
-
end
|
88
|
-
|
89
|
-
return lines.join("\n")
|
90
|
-
end
|
91
|
-
|
92
|
-
def macro text
|
65
|
+
module UserCompilerMacro
|
66
|
+
def _macro text
|
93
67
|
out = StringIO.new
|
94
68
|
|
95
69
|
text.split("\n").each do |l|
|
@@ -184,7 +158,8 @@ module Flok
|
|
184
158
|
var old_action = __info__.action;
|
185
159
|
__info__.action = "#{action_name}";
|
186
160
|
|
187
|
-
//Remove all views
|
161
|
+
//Remove all views, we don't have to recurse because removal of a view
|
162
|
+
//is supposed to remove *all* view controllers of that tree as well.
|
188
163
|
var embeds = __info__.embeds;
|
189
164
|
for (var i = 0; i < __info__.embeds.length; ++i) {
|
190
165
|
for (var j = 0; j < __info__.embeds[i].length; ++j) {
|
@@ -251,21 +226,33 @@ module Flok
|
|
251
226
|
|
252
227
|
#For CopyPage(original_page), page_var is original_page
|
253
228
|
#This only supports variable names at this time
|
254
|
-
exp.match /\((
|
229
|
+
exp.match /\((.*?),(.*?)\);?/
|
230
|
+
exp.match /\((.*)\)/ if $1 == nil
|
231
|
+
|
255
232
|
|
256
233
|
#Get the id value the user wants, but we have to be careful
|
257
234
|
#because if nothing is passed, then we need to set it to null
|
258
|
-
|
259
|
-
|
260
|
-
|
261
|
-
|
235
|
+
type_var = $1
|
236
|
+
id_var = $2
|
237
|
+
|
238
|
+
type_var = type_var.gsub(/"/, "").strip
|
239
|
+
id_var = (id_var || "null").strip
|
240
|
+
|
241
|
+
raise "NewPage was not given a type" if type_var == ""
|
242
|
+
raise "NewPage type is not valid #{type_var.inspect}" unless ["array", "hash"].include? type_var
|
243
|
+
|
244
|
+
type_var_to_entries = {
|
245
|
+
"array" => "[]",
|
246
|
+
"hash" => "{}",
|
247
|
+
}
|
262
248
|
|
263
249
|
out << %{
|
264
250
|
#{lvar} {
|
265
251
|
_head: null,
|
266
252
|
_next: null,
|
267
|
-
entries: [],
|
253
|
+
entries: #{type_var_to_entries[type_var]},
|
268
254
|
_id: #{id_var},
|
255
|
+
_type: "#{type_var}",
|
269
256
|
}
|
270
257
|
}
|
271
258
|
elsif l =~ /CopyPage/
|
@@ -279,18 +266,29 @@ module Flok
|
|
279
266
|
page_var = $1
|
280
267
|
|
281
268
|
out << %{
|
269
|
+
|
282
270
|
var __page__ = {
|
283
271
|
_head: #{page_var}._head,
|
284
272
|
_next: #{page_var}._next,
|
285
273
|
_id: #{page_var}._id,
|
286
|
-
|
274
|
+
_type: #{page_var}._type,
|
287
275
|
}
|
288
276
|
|
289
277
|
//This is a shallow clone, but we own this array
|
290
278
|
//When a mutable entry needs to be created, an entry will be cloned
|
291
279
|
//and swappend out
|
292
|
-
|
293
|
-
__page__.entries
|
280
|
+
if (#{page_var}._type === "array") {
|
281
|
+
__page__.entries = [];
|
282
|
+
for (var i = 0; i < #{page_var}.entries.length; ++i) {
|
283
|
+
__page__.entries.push(#{page_var}.entries[i]);
|
284
|
+
}
|
285
|
+
} else if (#{page_var}._type === "hash") {
|
286
|
+
__page__.entries = {};
|
287
|
+
var keys = Object.keys(#{page_var}.entries);
|
288
|
+
for (var i = 0; i < keys.length; ++i) {
|
289
|
+
var key = keys[i];
|
290
|
+
__page__.entries[key] = #{page_var}.entries[key];
|
291
|
+
}
|
294
292
|
}
|
295
293
|
|
296
294
|
#{lvar} __page__;
|
@@ -307,7 +305,11 @@ module Flok
|
|
307
305
|
index_var = $2
|
308
306
|
|
309
307
|
out << %{
|
310
|
-
#{page_var}.
|
308
|
+
if (#{page_var}._type === "array") {
|
309
|
+
#{page_var}.entries.splice(#{index_var}, 1);
|
310
|
+
} else if (#{page_var}._type === "hash") {
|
311
|
+
delete #{page_var}.entries[#{index_var}];
|
312
|
+
}
|
311
313
|
}
|
312
314
|
|
313
315
|
elsif l =~ /EntryInsert/
|
@@ -322,10 +324,21 @@ module Flok
|
|
322
324
|
index_var = $2
|
323
325
|
entry_var = $3
|
324
326
|
|
327
|
+
page_var.strip!
|
328
|
+
index_var.strip!
|
329
|
+
entry_var.strip!
|
330
|
+
|
325
331
|
out << %{
|
326
|
-
|
327
|
-
#{
|
328
|
-
|
332
|
+
|
333
|
+
if (#{page_var}._type === "array") {
|
334
|
+
#{entry_var}._id = gen_id();
|
335
|
+
#{entry_var}._sig = gen_id();
|
336
|
+
#{page_var}.entries.splice(#{index_var}, 0, #{entry_var});
|
337
|
+
} else if (#{page_var}._type === "hash") {
|
338
|
+
#{entry_var}._sig = gen_id();
|
339
|
+
#{page_var}.entries[#{index_var}] = #{entry_var};
|
340
|
+
}
|
341
|
+
|
329
342
|
}
|
330
343
|
|
331
344
|
elsif l =~ /SetPageNext/
|
@@ -369,9 +382,28 @@ module Flok
|
|
369
382
|
page_var = $1
|
370
383
|
index_var = $2
|
371
384
|
|
385
|
+
|
372
386
|
out << %{
|
373
|
-
|
374
|
-
|
387
|
+
if (#{page_var}._type === "array") {
|
388
|
+
//Duplicate entry
|
389
|
+
#{page_var}.entries.splice(#{index_var}, 1, JSON.parse(JSON.stringify(#{page_var}.entries[#{index_var}])));
|
390
|
+
|
391
|
+
//Here's our new entry
|
392
|
+
var ne = #{page_var}.entries[#{index_var}];
|
393
|
+
ne._sig = gen_id();
|
394
|
+
|
395
|
+
#{lvar} #{page_var}.entries[#{index_var}];
|
396
|
+
} else if (#{page_var}._type === "hash") {
|
397
|
+
//Duplicate entry
|
398
|
+
#{page_var}.entries[#{index_var}] = JSON.parse(JSON.stringify(#{page_var}.entries[#{index_var}]));
|
399
|
+
|
400
|
+
//Here's our new entry
|
401
|
+
var ne = #{page_var}.entries[#{index_var}];
|
402
|
+
ne._sig = gen_id();
|
403
|
+
|
404
|
+
#{lvar} #{page_var}.entries[#{index_var}];
|
405
|
+
|
406
|
+
}
|
375
407
|
}
|
376
408
|
else
|
377
409
|
out.puts l
|
@@ -381,6 +413,44 @@ module Flok
|
|
381
413
|
return out.string
|
382
414
|
end
|
383
415
|
|
416
|
+
end
|
417
|
+
|
418
|
+
class UserCompilerAction
|
419
|
+
attr_accessor :controller, :name, :ons, :every_handlers
|
420
|
+
include UserCompilerMacro
|
421
|
+
|
422
|
+
def initialize controller, name, ctx, &block
|
423
|
+
@controller = controller
|
424
|
+
@name = name
|
425
|
+
@ctx = ctx
|
426
|
+
@_on_entry_src = ""
|
427
|
+
@ons = [] #Event handlers
|
428
|
+
@every_handlers = []
|
429
|
+
|
430
|
+
self.instance_eval(&block)
|
431
|
+
end
|
432
|
+
|
433
|
+
def on_entry js_src
|
434
|
+
#returns a string
|
435
|
+
@_on_entry_src = _macro(js_src)
|
436
|
+
end
|
437
|
+
|
438
|
+
def on_entry_src
|
439
|
+
return @_on_entry_src
|
440
|
+
end
|
441
|
+
|
442
|
+
def on name, js_src
|
443
|
+
@ons << {:name => name, :src => _macro(js_src)}
|
444
|
+
end
|
445
|
+
|
446
|
+
def every seconds, str
|
447
|
+
@every_handlers << {
|
448
|
+
:name => "#{seconds}_sec_#{SecureRandom.hex[0..6]}",
|
449
|
+
:ticks => seconds*4,
|
450
|
+
:src => _macro(str)
|
451
|
+
}
|
452
|
+
end
|
453
|
+
|
384
454
|
#You can def things in controller and use them as macros inside actions
|
385
455
|
#But these defs. live in the UserCompilerController instance and we need
|
386
456
|
#to delegate these calls to the controller that are not available in the action
|
@@ -395,7 +465,9 @@ module Flok
|
|
395
465
|
end
|
396
466
|
|
397
467
|
class UserCompilerController
|
398
|
-
|
468
|
+
include UserCompilerMacro
|
469
|
+
|
470
|
+
attr_accessor :name, :spots, :macros, :_services, :_on_entry
|
399
471
|
def initialize name, ctx, &block
|
400
472
|
@name = name
|
401
473
|
@ctx = ctx
|
@@ -411,6 +483,10 @@ module Flok
|
|
411
483
|
@macros[name] = block
|
412
484
|
end
|
413
485
|
|
486
|
+
def on_entry str
|
487
|
+
@_on_entry = _macro(str)
|
488
|
+
end
|
489
|
+
|
414
490
|
#Names of spots
|
415
491
|
def spots *spots
|
416
492
|
@spots += spots
|