solidflow 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,466 @@
+ # frozen_string_literal: true
+
+ require "ostruct"
+
+ module SolidFlow
+   class Runner
+     def initialize(store: SolidFlow.store, time_provider: SolidFlow.configuration.time_provider)
+       @store = store
+       @time_provider = time_provider
+     end
+
+     def run(execution_id)
+       store.with_execution(execution_id) do |execution_record|
+         workflow_class = SolidFlow.configuration.workflow_registry.fetch(execution_record[:workflow])
+         events = store.load_history(execution_id)
+
+         state = Replay.new(workflow_class:, events:, execution_record:).call
+
+         Determinism.assert_graph!(workflow_class, state.execution_state.graph_signature)
+
+         return state if state.finished?
+
+         step_index = state.execution_state.cursor_index
+         step_definition = workflow_class.steps[step_index]
+
+         unless step_definition
+           complete_execution(execution_id, workflow_class, state, state.ctx)
+           return state
+         end
+
+         workflow = workflow_class.new(
+           ctx: state.ctx.deep_dup,
+           execution: build_execution_struct(state.execution_state),
+           history: state.history
+         )
+
+         consume_pending_signals(execution_id, workflow, state)
+
+         if step_definition.task?
+           handle_task_step(execution_id, workflow, step_definition, state)
+         else
+           handle_inline_step(execution_id, workflow, step_definition, state)
+         end
+       end
+     end
+
+     private
+
+     attr_reader :store, :time_provider
+
+     def build_execution_struct(execution_state)
+       OpenStruct.new(
+         id: execution_state.id,
+         workflow: execution_state.workflow,
+         state: execution_state.state,
+         cursor_step: execution_state.cursor_step,
+         cursor_index: execution_state.cursor_index,
+         graph_signature: execution_state.graph_signature,
+         metadata: execution_state.metadata
+       )
+     end
+
+     def consume_pending_signals(execution_id, workflow, state)
+       state.signal_buffer.to_a.each do |message|
+         next if message.consumed?
+         next unless workflow.signal_defined?(message.name)
+
+         SolidFlow.instrument(
+           "solidflow.signal.consumed",
+           execution_id:,
+           workflow: workflow.class.workflow_name,
+           signal: message.name,
+           payload: message.payload
+         )
+
+         workflow.apply_signal(message.name, message.payload)
+         persist_context(execution_id, workflow.ctx)
+         store.persist_signal_consumed(execution_id:, signal_name: message.name)
+       end
+     end
+
+     def handle_inline_step(execution_id, workflow, step_definition, state)
+       SolidFlow.instrument(
+         "solidflow.step.started",
+         execution_id:,
+         workflow: workflow.class.workflow_name,
+         step: step_definition.name
+       )
+
+       workflow.reset_wait_context!
+       result = nil
+
+       begin
+         result =
+           if step_definition.block?
+             workflow.instance_exec(&step_definition.block)
+           else
+             nil
+           end
+       rescue Errors::Cancelled => cancellation
+         handle_cancellation(execution_id, workflow, step_definition, state, cancellation)
+         return
+       rescue StandardError => e
+         fail_execution(
+           execution_id,
+           workflow,
+           step_definition,
+           state,
+           {
+             message: e.message,
+             class: e.class.name,
+             backtrace: e.backtrace
+           }
+         )
+         return
+       end
+
+       wait_instructions = workflow.consume_wait_instructions
+
+       if wait_instructions.any?
+         handle_waiting_step(execution_id, workflow, step_definition, wait_instructions)
+         return
+       end
+
+       ctx_snapshot = workflow.ctx.deep_dup
+       persist_context(execution_id, ctx_snapshot)
+
+       store.append_event(
+         execution_id:,
+         type: :step_completed,
+         payload: {
+           step: step_definition.name,
+           result: result,
+           ctx_snapshot: ctx_snapshot
+         }
+       )
+
+       advance_cursor(execution_id, state.execution_state, workflow.class, step_definition)
+
+       SolidFlow.instrument(
+         "solidflow.step.completed",
+         execution_id:,
+         workflow: workflow.class.workflow_name,
+         step: step_definition.name,
+         result:
+       )
+     end
+
+     def handle_waiting_step(execution_id, workflow, step_definition, wait_instructions)
+       instruction_payloads = wait_instructions.map { |instr| instr.to_h }
+
+       store.append_event(
+         execution_id:,
+         type: :step_waiting,
+         payload: {
+           step: step_definition.name,
+           instructions: instruction_payloads
+         }
+       )
+
+       wait_instructions.each do |instruction|
+         case instruction.type.to_sym
+         when :timer
+           schedule_timer(execution_id, workflow, step_definition, instruction)
+         when :signal
+           # No-op; signals are appended when received.
+         end
+       end
+
+       SolidFlow.instrument(
+         "solidflow.step.waiting",
+         execution_id:,
+         workflow: workflow.class.workflow_name,
+         step: step_definition.name,
+         instructions: instruction_payloads
+       )
+     end
+
+     def schedule_timer(execution_id, workflow, step_definition, instruction)
+       run_at =
+         if instruction.options[:delay_seconds]
+           time_provider.call + instruction.options[:delay_seconds]
+         elsif instruction.options[:run_at]
+           instruction.options[:run_at]
+         else
+           raise Errors::WaitInstructionError, "Timer instruction missing scheduling data"
+         end
+
+       store.schedule_timer(
+         execution_id:,
+         step: step_definition.name,
+         run_at: run_at,
+         instruction: instruction.to_h,
+         metadata: instruction.options[:metadata]
+       )
+     end
+
+     def handle_task_step(execution_id, workflow, step_definition, state)
+       task_state = state.task_states[step_definition.name]
+       next_attempt = task_state ? task_state.attempt + 1 : 1
+
+       retry_policy = default_retry_policy.merge(step_definition.retry_policy || {})
+       max_attempts = retry_policy[:max_attempts] || 1
+
+       if task_state&.finished? && task_state.status == :failed && next_attempt > max_attempts
+         fail_execution(execution_id, workflow, step_definition, state, task_state.last_error)
+         return
+       end
+
+       return if task_state && !task_state.failed?
+
+       idempotency_key = Idempotency.evaluate(
+         step_definition.idempotency_key,
+         workflow: workflow,
+         step: step_definition
+       )
+
+       arguments = resolve_task_arguments(step_definition, workflow)
+
+       schedule_at = compute_backoff_timestamp(retry_policy, next_attempt)
+
+       store.append_event(
+         execution_id:,
+         type: :task_scheduled,
+         payload: {
+           step: step_definition.name,
+           attempt: next_attempt,
+           arguments: arguments,
+           idempotency_key: idempotency_key
+         }
+       )
+
+       if next_attempt > 1
+         SolidFlow.instrument(
+           "solidflow.task.retried",
+           execution_id:,
+           workflow: workflow.class.workflow_name,
+           step: step_definition.name,
+           task: step_definition.task,
+           attempt: next_attempt
+         )
+       end
+
+       store.schedule_task(
+         execution_id:,
+         step: step_definition.name,
+         task: step_definition.task,
+         arguments:,
+         headers: {
+           execution_id: execution_id,
+           step_name: step_definition.name,
+           attempt: next_attempt,
+           idempotency_key: idempotency_key,
+           workflow_name: workflow.class.workflow_name,
+           metadata: workflow.execution.metadata
+         },
+         run_at: schedule_at
+       )
+
+       SolidFlow.instrument(
+         "solidflow.task.scheduled",
+         execution_id:,
+         workflow: workflow.class.workflow_name,
+         step: step_definition.name,
+         task: step_definition.task,
+         attempt: next_attempt,
+         idempotency_key: idempotency_key,
+         schedule_at: schedule_at
+       )
+     end
+
+     def resolve_task_arguments(step_definition, workflow)
+       arguments = step_definition.options[:arguments]
+
+       case arguments
+       when Proc
+         workflow.instance_exec(&arguments)
+       when Symbol
+         workflow.public_send(arguments)
+       when Hash
+         arguments.deep_dup
+       when nil
+         workflow.ctx.deep_dup
+       else
+         arguments
+       end
+     end
+
+     def compute_backoff_timestamp(retry_policy, attempt)
+       return nil if attempt <= 1
+
+       initial_delay = retry_policy[:initial_delay] || 0
+       backoff = retry_policy[:backoff]&.to_sym || :constant
+
+       delay =
+         case backoff
+         when :constant
+           initial_delay
+         when :exponential
+           initial_delay * (2**(attempt - 2))
+         else
+           initial_delay
+         end
+
+       delay = delay.to_f
+       delay = 0 if delay.negative?
+
+       time_provider.call + delay
+     end
+
+     def default_retry_policy
+       {
+         max_attempts: 1,
+         initial_delay: 0,
+         backoff: :constant
+       }
+     end
+
+     def persist_context(execution_id, ctx)
+       store.persist_context(
+         execution_id:,
+         ctx:
+       )
+     end
+
+     def advance_cursor(execution_id, execution_state, workflow_class, step_definition)
+       next_index = execution_state.cursor_index + 1
+       next_step = workflow_class.steps[next_index]&.name
+
+       new_state = if next_index >= workflow_class.steps.size
+         "completed"
+       else
+         "running"
+       end
+
+       store.update_execution(
+         execution_id:,
+         attributes: {
+           cursor_index: next_index,
+           cursor_step: next_step,
+           state: new_state
+         }
+       )
+
+       if new_state == "completed"
+         store.append_event(
+           execution_id:,
+           type: :workflow_completed,
+           payload: {}
+         )
+         SolidFlow.instrument(
+           "solidflow.execution.completed",
+           execution_id:,
+           workflow: workflow_class.workflow_name
+         )
+       else
+         store.enqueue_execution(
+           execution_id:,
+           reason: :step_advanced
+         )
+       end
+     end
+
+     def complete_execution(execution_id, workflow_class, state, ctx)
+       persist_context(execution_id, ctx)
+       store.update_execution(
+         execution_id:,
+         attributes: {
+           state: "completed"
+         }
+       )
+       store.append_event(
+         execution_id:,
+         type: :workflow_completed,
+         payload: {}
+       )
+       SolidFlow.instrument(
+         "solidflow.execution.completed",
+         execution_id:,
+         workflow: workflow_class.workflow_name
+       )
+     end
+
+     def fail_execution(execution_id, workflow, step_definition, state, error)
+       store.update_execution(
+         execution_id:,
+         attributes: {
+           state: "failed",
+           last_error: error
+         }
+       )
+
+       unless state.history.any? { |event| event.type.to_sym == :workflow_failed }
+         store.append_event(
+           execution_id:,
+           type: :workflow_failed,
+           payload: {
+             step: step_definition.name,
+             error: error
+           }
+         )
+       end
+
+       schedule_compensations(execution_id, workflow, state)
+
+       SolidFlow.instrument(
+         "solidflow.execution.failed",
+         execution_id:,
+         workflow: workflow.class.workflow_name,
+         step: step_definition.name,
+         error: error
+       )
+     end
+
+     def handle_cancellation(execution_id, workflow, step_definition, state, exception)
+       store.update_execution(
+         execution_id:,
+         attributes: {
+           state: "cancelled",
+           last_error: { message: exception.message }
+         }
+       )
+
+       store.append_event(
+         execution_id:,
+         type: :workflow_cancelled,
+         payload: {
+           step: step_definition.name,
+           reason: exception.message
+         }
+       )
+
+       SolidFlow.instrument(
+         "solidflow.execution.cancelled",
+         execution_id:,
+         workflow: workflow.class.workflow_name,
+         step: step_definition.name,
+         reason: exception.message
+       )
+
+       schedule_compensations(execution_id, workflow, state)
+     end
+
+     def schedule_compensations(execution_id, workflow, state)
+       compensation_map = workflow.class.compensations
+       return if compensation_map.empty?
+
+       completed_steps = state.step_states.values
+         .select(&:completed?)
+         .map(&:name)
+
+       completed_steps.reverse_each do |step_name|
+         task = compensation_map[step_name]
+         next unless task
+
+         store.schedule_compensation(
+           execution_id:,
+           workflow_class: workflow.class,
+           step: step_name,
+           compensation_task: task,
+           context: workflow.ctx.deep_dup
+         )
+       end
+     end
+   end
+ end
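
For orientation, here is a minimal usage sketch of the runner above. It is not part of the released gem: the workflow name, execution id, and retry-policy values are hypothetical, while `SolidFlow.store`, `SolidFlow.configuration.time_provider`, and the `Runner#run` signature come straight from the code shown.

# Hypothetical driver code, assuming an execution row already exists in the store
# and its workflow class is registered in SolidFlow.configuration.workflow_registry.
runner = SolidFlow::Runner.new(
  store: SolidFlow.store,
  time_provider: SolidFlow.configuration.time_provider
)

# Each call replays the event history, then either runs the next inline step,
# schedules the next task, records wait instructions, or completes the execution.
runner.run("wf_exec_123")

# Retry timing follows compute_backoff_timestamp: with a step retry policy of
# { max_attempts: 3, initial_delay: 5, backoff: :exponential }, attempt 2 is
# delayed by 5 seconds and attempt 3 by 10 seconds (5 * 2**(3 - 2)).
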
@@ -0,0 +1,24 @@
+ # frozen_string_literal: true
+
+ require "oj"
+
+ module SolidFlow
+   module Serializers
+     # Wrapper for Oj to provide consistent serialization configuration.
+     class Oj
+       def initialize(mode: :strict)
+         @mode = mode
+       end
+
+       def dump(object)
+         ::Oj.dump(object, mode: @mode)
+       end
+
+       def load(json)
+         return {} if json.nil? || json.empty?
+
+         ::Oj.load(json, mode: @mode, symbol_keys: false)
+       end
+     end
+   end
+ end
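
A quick illustration of the serializer wrapper above, round-tripping a plain hash (the keys and values are only examples):

serializer = SolidFlow::Serializers::Oj.new(mode: :strict)

json = serializer.dump({ "step" => "charge_card", "attempt" => 1 })
# => "{\"step\":\"charge_card\",\"attempt\":1}"

serializer.load(json) # => { "step" => "charge_card", "attempt" => 1 } (string keys, since symbol_keys: false)
serializer.load(nil)  # => {} (nil or empty input falls back to an empty hash)
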
@@ -0,0 +1,47 @@
+ # frozen_string_literal: true
+
+ module SolidFlow
+   module Signals
+     Message = Struct.new(
+       :name,
+       :payload,
+       :metadata,
+       :received_at,
+       :consumed,
+       keyword_init: true
+     ) do
+       def consumed?
+         consumed
+       end
+
+       def consume!
+         self.consumed = true
+       end
+     end
+
+     # In-memory signal buffer used during replay to determine if waits can proceed.
+     class Buffer
+       def initialize(messages = [])
+         @messages = messages
+       end
+
+       def push(message)
+         @messages << message
+       end
+
+       def consume(name)
+         entry = @messages.find { |m| m.name == name.to_sym && !m.consumed? }
+         entry&.consume!
+         entry
+       end
+
+       def pending?(name)
+         @messages.any? { |m| m.name == name.to_sym && !m.consumed? }
+       end
+
+       def to_a
+         @messages.map(&:dup)
+       end
+     end
+   end
+ end
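
Finally, a short sketch of how the signal buffer above behaves; the signal name and payload are illustrative:

buffer = SolidFlow::Signals::Buffer.new

buffer.push(
  SolidFlow::Signals::Message.new(
    name: :payment_confirmed,   # hypothetical signal name
    payload: { "amount" => 100 },
    metadata: {},
    received_at: Time.now,
    consumed: false
  )
)

buffer.pending?(:payment_confirmed) # => true
message = buffer.consume(:payment_confirmed)
message.consumed?                   # => true
buffer.pending?(:payment_confirmed) # => false

# to_a returns duplicated messages, so callers that iterate it
# (as Runner#consume_pending_signals does) work on copies.
buffer.to_a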