taskinator 0.2.0 → 0.3.0

Files changed (40)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +10 -0
  3. data/Gemfile +17 -2
  4. data/Gemfile.lock +57 -18
  5. data/README.md +20 -16
  6. data/lib/taskinator/definition.rb +2 -2
  7. data/lib/taskinator/instrumentation.rb +77 -0
  8. data/lib/taskinator/persistence.rb +72 -61
  9. data/lib/taskinator/process.rb +118 -99
  10. data/lib/taskinator/queues/delayed_job.rb +0 -14
  11. data/lib/taskinator/queues/resque.rb +0 -18
  12. data/lib/taskinator/queues/sidekiq.rb +0 -14
  13. data/lib/taskinator/queues.rb +0 -5
  14. data/lib/taskinator/task.rb +113 -70
  15. data/lib/taskinator/version.rb +1 -1
  16. data/lib/taskinator/visitor.rb +6 -0
  17. data/lib/taskinator/workflow.rb +36 -0
  18. data/lib/taskinator.rb +3 -2
  19. data/spec/examples/process_examples.rb +6 -9
  20. data/spec/examples/queue_adapter_examples.rb +2 -12
  21. data/spec/examples/task_examples.rb +5 -8
  22. data/spec/support/process_methods.rb +25 -0
  23. data/spec/support/task_methods.rb +13 -0
  24. data/spec/support/test_flows.rb +1 -3
  25. data/spec/support/test_instrumenter.rb +39 -0
  26. data/spec/support/test_queue.rb +0 -12
  27. data/spec/taskinator/definition_spec.rb +3 -5
  28. data/spec/taskinator/instrumentation_spec.rb +98 -0
  29. data/spec/taskinator/persistence_spec.rb +3 -41
  30. data/spec/taskinator/process_spec.rb +36 -34
  31. data/spec/taskinator/queues/delayed_job_spec.rb +0 -41
  32. data/spec/taskinator/queues/resque_spec.rb +0 -51
  33. data/spec/taskinator/queues/sidekiq_spec.rb +0 -50
  34. data/spec/taskinator/queues_spec.rb +1 -1
  35. data/spec/taskinator/task_spec.rb +96 -64
  36. data/spec/taskinator/test_flows_spec.rb +266 -1
  37. data/taskinator.gemspec +0 -21
  38. metadata +12 -173
  39. data/lib/taskinator/job_worker.rb +0 -17
  40. data/spec/taskinator/job_worker_spec.rb +0 -62
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 1905dc0aad01e4bd348ad38a40100df9649e3a18
- data.tar.gz: d1e4aa938c9d3a0d56fc4090f71024dec84e32fd
+ metadata.gz: 683a1ffedb432057d353d7ebbc31dff766bce83c
+ data.tar.gz: 92c78c8a6e2c87769269147557c9c2cb13be52fa
  SHA512:
- metadata.gz: 6cd0c7d2fa8d1f133ddce568ae969fe488401ba9f6311c39143456749b4eea1429edbc63ec080a5a8b60fb84bf312bdcd5758084269b16ae83b7b437877e15d1
- data.tar.gz: dfcbeced7346f8c0bdcc70e5814e2b2e943ecd10182ac0285a88e8e0bdfc0eb1a227c900390328861bb4012e4feffbf8fe7d41033583d68eb1f25d198f1ef027
+ metadata.gz: d9449a313b53581d06109e16d6f1a268a5f786848fc4d725e0852b773dbe99b1a68898a88c0d077bfb48590b976dbc4ca821e9207f20b50343f924493c35d9aa
+ data.tar.gz: 5c621a66e20dfef0f7800482ea5dd308c7470393537022791d3398a4f12c9350ca40a89267de82a149adcaf5c8561ae8b6aa732826ab85843e0f330cb701396f
data/CHANGELOG.md CHANGED
@@ -1,3 +1,13 @@
+ v0.3.0 - 28 Aug 2015
+ ---
+ Added created_at and updated_at to process and task as attributes.
+ Improved serialization visitor to include an optional converter block for deserialization of attribute values.
+ Corrections to lazy loader logic and speed improvements.
+ Removed JobWorker as it's no longer necessary.
+ Improvements to instrumentation.
+ Removed workflow gem, and refactored process and task to implement the basics instead.
+ Several bug fixes.
+
  v0.2.0 - 31 Jul 2015
  ---
  Bug fix for `create_process_remotely` so that it returns the process uuid instead of nil.
data/Gemfile CHANGED
@@ -3,5 +3,20 @@ source 'https://rubygems.org'
  # Specify your gem's dependencies in Taskinator.gemspec
  gemspec
 
- # add as a git gem dependency until version 1.3.0 is released to RubyGems.
- gem 'workflow', :git => 'https://github.com/virtualstaticvoid/workflow.git', :branch => :master
+ # queues
+ gem 'sidekiq' , '>= 3.5.0', :github => "mperham/sidekiq"
+ gem 'rspec-sidekiq' , '>= 2.1.0'
+
+ gem 'delayed_job' , '>= 4.0.0'
+
+ gem 'resque' , '>= 1.25.2'
+ gem 'resque_spec' , '>= 0.16.0'
+
+ # other
+ gem 'bundler' , '>= 1.6.0'
+ gem 'rake' , '>= 10.3.0'
+ gem 'activesupport' , '>= 4.0.0'
+ gem 'rspec'
+ gem 'coveralls' , '>= 0.7.0'
+ gem 'pry' , '>= 0.9.0'
+ gem 'pry-byebug' , '>= 1.3.0'
data/Gemfile.lock CHANGED
@@ -1,14 +1,18 @@
  GIT
- remote: https://github.com/virtualstaticvoid/workflow.git
- revision: 403a9e44bf49f4d154156d5701e3d67b115ed6da
- branch: master
+ remote: git://github.com/mperham/sidekiq.git
+ revision: b0cdd4cc868e7564b78ee34965067e025ec41689
  specs:
- workflow (1.3.0)
+ sidekiq (3.5.0)
+ celluloid (~> 0.17.0)
+ connection_pool (~> 2.2, >= 2.2.0)
+ json (~> 1.0)
+ redis (~> 3.2, >= 3.2.1)
+ redis-namespace (~> 1.5, >= 1.5.2)
 
  PATH
  remote: .
  specs:
- taskinator (0.2.0)
+ taskinator (0.3.0)
  connection_pool (>= 2.2.0)
  json (>= 1.8.2)
  redis (>= 3.2.1)
@@ -25,7 +29,46 @@ GEM
  tzinfo (~> 1.1)
  byebug (5.0.0)
  columnize (= 0.9.0)
- celluloid (0.16.0)
+ celluloid (0.17.1.1)
+ bundler
+ celluloid-essentials
+ celluloid-extras
+ celluloid-fsm
+ celluloid-pool
+ celluloid-supervision
+ dotenv
+ nenv
+ rspec-logsplit (>= 0.1.2)
+ timers (~> 4.0.0)
+ celluloid-essentials (0.20.2)
+ bundler
+ dotenv
+ nenv
+ rspec-logsplit (>= 0.1.2)
+ timers (~> 4.0.0)
+ celluloid-extras (0.20.0)
+ bundler
+ dotenv
+ nenv
+ rspec-logsplit (>= 0.1.2)
+ timers (~> 4.0.0)
+ celluloid-fsm (0.20.0)
+ bundler
+ dotenv
+ nenv
+ rspec-logsplit (>= 0.1.2)
+ timers (~> 4.0.0)
+ celluloid-pool (0.20.0)
+ bundler
+ dotenv
+ nenv
+ rspec-logsplit (>= 0.1.2)
+ timers (~> 4.0.0)
+ celluloid-supervision (0.20.1)
+ bundler
+ dotenv
+ nenv
+ rspec-logsplit (>= 0.1.2)
  timers (~> 4.0.0)
  coderay (1.1.0)
  columnize (0.9.0)
@@ -42,6 +85,7 @@ GEM
  docile (1.1.5)
  domain_name (0.5.24)
  unf (>= 0.0.5, < 1.0.0)
+ dotenv (2.0.2)
  hitimes (1.2.2)
  http-cookie (1.0.2)
  domain_name (~> 0.5)
@@ -49,9 +93,10 @@ GEM
  json (1.8.3)
  method_source (0.8.2)
  mime-types (2.6.1)
- minitest (5.7.0)
+ minitest (5.8.0)
  mono_logger (1.1.0)
  multi_json (1.11.2)
+ nenv (0.2.0)
  netrc (0.10.3)
  pry (0.10.1)
  coderay (~> 1.1.0)
@@ -91,19 +136,14 @@ GEM
  rspec-expectations (3.3.1)
  diff-lcs (>= 1.2.0, < 2.0)
  rspec-support (~> 3.3.0)
+ rspec-logsplit (0.1.3)
  rspec-mocks (3.3.2)
  diff-lcs (>= 1.2.0, < 2.0)
  rspec-support (~> 3.3.0)
- rspec-sidekiq (2.0.0)
+ rspec-sidekiq (2.1.0)
  rspec (~> 3.0, >= 3.0.0)
  sidekiq (>= 2.4.0)
  rspec-support (3.3.0)
- sidekiq (3.4.2)
- celluloid (~> 0.16.0)
- connection_pool (~> 2.2, >= 2.2.0)
- json (~> 1.0)
- redis (~> 3.2, >= 3.2.1)
- redis-namespace (~> 1.5, >= 1.5.2)
  simplecov (0.10.0)
  docile (~> 1.1.0)
  json (~> 1.8)
@@ -121,7 +161,7 @@ GEM
  tilt (2.0.1)
  timers (4.0.1)
  hitimes
- tins (1.5.4)
+ tins (1.6.0)
  tzinfo (1.2.2)
  thread_safe (~> 0.1)
  unf (0.1.4)
@@ -144,7 +184,6 @@ DEPENDENCIES
  resque (>= 1.25.2)
  resque_spec (>= 0.16.0)
  rspec
- rspec-sidekiq (>= 2.0.0)
- sidekiq (>= 3.0.0)
+ rspec-sidekiq (>= 2.1.0)
+ sidekiq (>= 3.5.0)!
  taskinator!
- workflow!
data/README.md CHANGED
@@ -546,7 +546,7 @@ To monitor the state of the processes, use the `Taskinator::Api::Processes` clas
  processes = Taskinator::Api::Processes.new
  processes.each do |process|
  # => output the unique process identifier and current state
- puts [:process, process.uuid, process.current_state.name]
+ puts [:process, process.uuid, process.current_state]
  end
  ```
 
@@ -608,29 +608,33 @@ end
 
  The following instrumentation events are issued:
 
- | Event | When |
- |------------------------------------|---------------------------------------------|
- | `taskinator.process.created` | After a process gets created |
- | `taskinator.process.saved` | After a process has been persisted to Redis |
- | `taskinator.process.enqueued` | After a process is enqueued for processing |
- | `taskinator.process.completed` | After a process has completed processing |
- | `taskinator.process.cancelled` | After a process has been cancelled |
- | `taskinator.process.failed` | After a process has failed |
- | `taskinator.task.enqueued` | After a task has been enqueued |
- | `taskinator.task.executed` | After a task has executed |
- | `taskinator.job.enqueued` | After a job has been enqueued |
- | `taskinator.job.executed` | After a job has executed |
- | `taskinator.subprocess.enqueued` | After a sub process has been enqueued |
- | `taskinator.subprocess.executed` | After a sub process has executed |
+ | Event | When |
+ |------------------------------------|-----------------------------------------------------------|
+ | `taskinator.process.created` | After a root process gets created |
+ | `taskinator.process.saved` | After a root process has been persisted to Redis |
+ | `taskinator.process.enqueued` | After a process or subprocess is enqueued for processing |
+ | `taskinator.process.processing` | When a process or subprocess is processing |
+ | `taskinator.process.paused` | When a process or subprocess is paused |
+ | `taskinator.process.resumed` | When a process or subprocess is resumed |
+ | `taskinator.process.completed` | After a process or subprocess has completed processing |
+ | `taskinator.process.cancelled` | After a process or subprocess has been cancelled |
+ | `taskinator.process.failed` | After a process or subprocess has failed |
+ | `taskinator.task.enqueued` | After a task has been enqueued |
+ | `taskinator.task.processing` | When a task is processing |
+ | `taskinator.task.completed` | After a task has completed |
+ | `taskinator.task.cancelled` | After a task has been cancelled |
+ | `taskinator.task.failed` | After a task has failed |
 
  For all events, the data included contains the following information:
 
  | Key | Value |
  |--------------------------|-------------------------------------------------------|
+ | `:type` | The type name of the component reporting the event |
  | `:process_uuid` | The UUID of the root process |
  | `:process_options` | Options hash of the root process |
  | `:uuid` | The UUID of the respective task, job or sub process |
- | `:tasks_count` | The total count of tasks for the given process |
+ | `:options` | Options hash of the component |
+ | `:state` | State of the component |
  | `:percentage_completed` | The percentage of completed tasks |
  | `:percentage_failed` | The percentage of failed tasks |
  | `:percentage_cancelled` | The percentage of cancelled tasks |
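
An aside for readers consuming the expanded event list above (not part of the diff): the sketch below shows one way to listen for these events, assuming the Taskinator instrumenter has been configured to be `ActiveSupport::Notifications`. The event names and payload keys come from the tables above; everything else is illustrative.

```ruby
require 'active_support/notifications'

# Subscribe to every taskinator.* event; the payload exposes the keys from
# the table above (e.g. :process_uuid, :uuid, :state, :percentage_completed).
ActiveSupport::Notifications.subscribe(/^taskinator\./) do |name, _start, _finish, _id, payload|
  puts format('%-34s %s %s %.1f%%',
              name,
              payload[:process_uuid],
              payload[:state],
              payload[:percentage_completed].to_f)
end
```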
data/lib/taskinator/definition.rb CHANGED
@@ -48,7 +48,7 @@ module Taskinator
  process = factory.call(self, options)
 
  # this may take long... up to users definition
- Taskinator.instrumenter.instrument('taskinator.process.created', :uuid => process.uuid) do
+ Taskinator.instrumenter.instrument('taskinator.process.created', :uuid => process.uuid, :state => :initial) do
  Builder.new(process, self, *args).instance_eval(&block)
  end
 
@@ -56,7 +56,7 @@ module Taskinator
  unless subprocess
 
  # instrument separately
- Taskinator.instrumenter.instrument('taskinator.process.saved', :uuid => process.uuid) do
+ Taskinator.instrumenter.instrument('taskinator.process.saved', :uuid => process.uuid, :state => :initial) do
 
  # this will visit "sub processes" and persist them too
  process.save
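
As an aside (not part of the diff): the calls above only assume that `Taskinator.instrumenter` returns an object responding to `instrument(event, payload)` that yields to its block. Taskinator supplies its own default instrumenter; the stand-in below, with a made-up class name, just illustrates that contract.

```ruby
# Minimal stand-in matching the instrumenter contract used above
# (class name is hypothetical, for illustration only).
class NullInstrumenter
  def instrument(event, payload = {})
    yield if block_given?
  end
end
```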
data/lib/taskinator/instrumentation.rb ADDED
@@ -0,0 +1,77 @@
+ module Taskinator
+ module Instrumentation
+
+ def instrument(event, payload={})
+ Taskinator.instrumenter.instrument(event, payload) do
+ yield
+ end
+ end
+
+ # helper methods for instrumentation payloads
+
+ def enqueued_payload(additional={})
+ payload_for(:enqueued, additional)
+ end
+
+ def processing_payload(additional={})
+ payload_for(:processing, additional)
+ end
+
+ def paused_payload(additional={})
+ payload_for(:paused, additional)
+ end
+
+ def resumed_payload(additional={})
+ payload_for(:resumed, additional)
+ end
+
+ def completed_payload(additional={})
+ payload_for(:completed, additional)
+ end
+
+ def cancelled_payload(additional={})
+ payload_for(:cancelled, additional)
+ end
+
+ def failed_payload(exception, additional={})
+ payload_for(:failed, { :exception => exception.to_s, :backtrace => exception.backtrace }.merge(additional))
+ end
+
+ private
+
+ def payload_for(state, additional={})
+
+ # need to cache here, since this method hits redis, so can't be part of multi statement following
+ process_key = self.process_key
+
+ tasks_count, processing_count, completed_count, cancelled_count, failed_count = Taskinator.redis do |conn|
+ conn.hmget process_key,
+ :tasks_count,
+ :tasks_processing,
+ :tasks_completed,
+ :tasks_cancelled,
+ :tasks_failed
+ end
+
+ tasks_count = tasks_count.to_f
+
+ return OpenStruct.new(
+ {
+ :type => self.class,
+ :process_uuid => process_uuid,
+ :process_options => process_options,
+ :uuid => uuid,
+ :options => options,
+ :state => state,
+ :percentage_failed => (tasks_count > 0) ? (failed_count.to_i / tasks_count) * 100.0 : 0.0,
+ :percentage_cancelled => (tasks_count > 0) ? (cancelled_count.to_i / tasks_count) * 100.0 : 0.0,
+ :percentage_processing => (tasks_count > 0) ? (processing_count.to_i / tasks_count) * 100.0 : 0.0,
+ :percentage_completed => (tasks_count > 0) ? (completed_count.to_i / tasks_count) * 100.0 : 0.0,
+ :instance => self
+ }.merge(additional)
+ ).freeze
+
+ end
+
+ end
+ end
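
A usage sketch (not part of the diff): `Taskinator::Instrumentation` is mixed into the process and task classes, and `payload_for` expects the including class to provide `process_key`, `process_uuid`, `process_options`, `uuid` and `options` (as the persistence layer does) plus a reachable Redis. The class and method names below are hypothetical.

```ruby
# Hypothetical including class, showing how an event would be emitted with
# one of the payload helpers above. Actually running it requires the
# persistence-provided readers mentioned above and a Redis connection.
class MyTask
  include Taskinator::Instrumentation

  def complete!
    instrument('taskinator.task.completed', completed_payload) do
      # ... perform the completion work here ...
    end
  end
end
```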
data/lib/taskinator/persistence.rb CHANGED
@@ -35,17 +35,6 @@ module Taskinator
  "taskinator:#{base_key}:#{uuid}"
  end
 
- # retrieves the workflow state for the given identifier
- # this prevents to need to load the entire object when
- # querying for the status of an instance
- def state_for(uuid)
- key = key_for(uuid)
- state = Taskinator.redis do |conn|
- conn.hget(key, :state) || 'initial'
- end
- state.to_sym
- end
-
  # fetches the instance for given identifier
  # optionally, provide a hash to use for the instance cache
  # this argument is defaulted, so top level callers don't
@@ -68,10 +57,11 @@
  visitor = RedisSerializationVisitor.new(conn, self).visit
  conn.hmset(
  Taskinator::Process.key_for(uuid),
- :tasks_count, visitor.task_count,
- :tasks_failed, 0,
- :tasks_completed, 0,
- :tasks_cancelled, 0,
+ :tasks_count, visitor.task_count,
+ :tasks_failed, 0,
+ :tasks_processing, 0,
+ :tasks_completed, 0,
+ :tasks_cancelled, 0,
  )
  true
  end
@@ -99,17 +89,32 @@
  # this method is called from the workflow gem
  def load_workflow_state
  state = Taskinator.redis do |conn|
- conn.hget(self.key, :state)
+ conn.hget(self.key, :state) || 'initial'
  end
- (state || 'initial').to_sym
+ state.to_sym
  end
 
  # persists the workflow state
  # this method is called from the workflow gem
  def persist_workflow_state(new_state)
+ @updated_at = Time.now.utc
  Taskinator.redis do |conn|
- conn.hset(self.key, :state, new_state)
+ process_key = self.process_key
+ conn.multi do
+ conn.hmset(
+ self.key,
+ :state, new_state,
+ :updated_at, @updated_at
+ )
+
+ # also update the "root" process
+ conn.hset(
+ process_key,
+ :updated_at, @updated_at
+ )
+ end
  end
+ new_state
  end
 
  # persists the error information
@@ -121,7 +126,8 @@
  self.key,
  :error_type, error.class.name,
  :error_message, error.message,
- :error_backtrace, JSON.generate(error.backtrace || [])
+ :error_backtrace, JSON.generate(error.backtrace || []),
+ :updated_at, Time.now.utc
  )
  end
  end
@@ -133,7 +139,7 @@
  error_type, error_message, error_backtrace =
  conn.hmget(self.key, :error_type, :error_message, :error_backtrace)
 
- [error_type, error_message, JSON.parse(error_backtrace)]
+ [error_type, error_message, JSON.parse(error_backtrace || '[]')]
  end
  end
 
@@ -149,19 +155,24 @@
  %w(
  failed
  cancelled
+ processing
  completed
  ).each do |status|
 
  define_method "count_#{status}" do
  count = Taskinator.redis do |conn|
- conn.hget self.process_key, status
+ conn.hget self.process_key, "tasks_#{status}"
  end
  count.to_i
  end
 
  define_method "incr_#{status}" do
  Taskinator.redis do |conn|
- conn.hincrby self.process_key, status, 1
+ process_key = self.process_key
+ conn.multi do
+ conn.hincrby process_key, "tasks_#{status}", 1
+ conn.hset process_key, :updated_at, Time.now.utc
+ end
  end
  end
 
@@ -183,34 +194,6 @@
  end
  end
 
- # prepairs the meta data for instrumentation events
- def instrumentation_payload(additional={})
-
- # need to cache here, since this method hits redis, so can't be part of multi statement following
- process_key = self.process_key
-
- tasks_count, completed_count, cancelled_count, failed_count = Taskinator.redis do |conn|
- conn.hmget process_key, :tasks_count, :completed, :cancelled, :failed
- end
-
- tasks_count = tasks_count.to_f
- completed_percent = tasks_count > 0 ? (completed_count.to_i / tasks_count) * 100.0 : 0.0
- cancelled_percent = tasks_count > 0 ? (cancelled_count.to_i / tasks_count) * 100.0 : 0.0
- failed_percent = tasks_count > 0 ? (failed_count.to_i / tasks_count) * 100.0 : 0.0
-
- return {
- :type => self.class.name,
- :process_uuid => process_uuid,
- :process_options => process_options,
- :uuid => uuid,
- :percentage_failed => failed_percent,
- :percentage_cancelled => cancelled_percent,
- :percentage_completed => completed_percent,
- :tasks_count => tasks_count
- }.merge(additional)
-
- end
-
  end
 
  class RedisSerializationVisitor < Visitor::Base
@@ -244,6 +227,9 @@
  # add the process uuid and root key, for easy access later!
  @hmset += [:process_uuid, @root.uuid]
 
+ # add the default state
+ @hmset += [:state, :initial]
+
  # NB: splat args
  @conn.hmset(*@hmset)
 
@@ -259,7 +245,6 @@
  end
 
  def visit_tasks(tasks)
- @hmset += [:task_count, tasks.count] # not used currently, but for informational purposes
  tasks.each do |task|
  RedisSerializationVisitor.new(@conn, task, @base_visitor).visit
  @conn.rpush "#{@key}:tasks", task.uuid
@@ -272,6 +257,14 @@
  @hmset += [attribute, value] if value
  end
 
+ def visit_attribute_time(attribute)
+ visit_attribute(attribute)
+ end
+
+ def visit_attribute_enum(attribute, type)
+ visit_attribute(attribute)
+ end
+
  def visit_process_reference(attribute)
  process = @instance.send(attribute)
  @hmset += [attribute, process.uuid] if process
@@ -374,7 +367,30 @@
 
  def visit_attribute(attribute)
  value = @attribute_values[attribute]
- @instance.instance_variable_set("@#{attribute}", value) if value
+ if value
+ # converted block given?
+ if block_given?
+ @instance.instance_variable_set("@#{attribute}", yield(value))
+ else
+ @instance.instance_variable_set("@#{attribute}", value)
+ end
+ end
+ end
+
+ def visit_attribute_time(attribute)
+ visit_attribute(attribute) do |value|
+ Time.parse(value)
+ end
+ end
+
+ # NB: assumes the enum type's members have integer values!
+ def visit_attribute_enum(attribute, type)
+ visit_attribute(attribute) do |value|
+ const_value = type.constants.select {|c| type.const_get(c) == value.to_i }.first
+ const_value ?
+ type.const_get(const_value) :
+ (defined?(type::Default) ? type::Default : nil)
+ end
  end
 
  def visit_type(attribute)
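
To make the enum convention in the hunk above concrete: `visit_attribute_enum` expects a module whose constants hold integer values, optionally with a `Default` member used for unknown values. The module below is invented for illustration; the lookup simply mirrors the code in the hunk.

```ruby
# Invented enum module for illustration (not from taskinator itself).
module Priority
  Low     = 0
  Medium  = 1
  High    = 2
  Default = Low
end

# Mirrors the deserializer's lookup for a value read back from Redis as a string.
value  = "2"
member = Priority.constants.select { |c| Priority.const_get(c) == value.to_i }.first
puts member ? Priority.const_get(member) : Priority::Default   # => 2
```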
@@ -405,11 +421,11 @@
  # arbitrary instance to perform it's work
  #
  def lazy_instance_for(base, uuid)
- type, process_uuid = Taskinator.redis do |conn|
- conn.hmget(base.key_for(uuid), :type, :process_uuid)
+ type = Taskinator.redis do |conn|
+ conn.hget(base.key_for(uuid), :type)
  end
  klass = Kernel.const_get(type)
- LazyLoader.new(klass, uuid, process_uuid, @instance_cache)
+ LazyLoader.new(klass, uuid, @instance_cache)
  end
  end
 
@@ -424,17 +440,12 @@
  # E.g. this is useful for tasks which refer to their parent processes
  #
 
- def initialize(type, uuid, process_uuid, instance_cache={})
+ def initialize(type, uuid, instance_cache={})
  @type = type
  @uuid = uuid
- @process_uuid = process_uuid
  @instance_cache = instance_cache
  end
 
- # shadows the real methods, but will be the same!
- attr_reader :process_uuid
- attr_reader :uuid
-
  def __getobj__
  # only fetch the object as needed
  # and memoize for subsequent calls