taski 0.5.0 → 0.7.1
- checksums.yaml +4 -4
- data/CHANGELOG.md +50 -0
- data/README.md +168 -21
- data/docs/GUIDE.md +394 -0
- data/examples/README.md +65 -17
- data/examples/{context_demo.rb → args_demo.rb} +27 -27
- data/examples/clean_demo.rb +204 -0
- data/examples/data_pipeline_demo.rb +1 -1
- data/examples/group_demo.rb +113 -0
- data/examples/large_tree_demo.rb +519 -0
- data/examples/reexecution_demo.rb +93 -80
- data/examples/simple_progress_demo.rb +80 -0
- data/examples/system_call_demo.rb +56 -0
- data/lib/taski/{context.rb → args.rb} +3 -3
- data/lib/taski/execution/base_progress_display.rb +348 -0
- data/lib/taski/execution/execution_context.rb +383 -0
- data/lib/taski/execution/executor.rb +405 -134
- data/lib/taski/execution/plain_progress_display.rb +76 -0
- data/lib/taski/execution/registry.rb +17 -1
- data/lib/taski/execution/scheduler.rb +308 -0
- data/lib/taski/execution/simple_progress_display.rb +173 -0
- data/lib/taski/execution/task_output_pipe.rb +42 -0
- data/lib/taski/execution/task_output_router.rb +287 -0
- data/lib/taski/execution/task_wrapper.rb +215 -52
- data/lib/taski/execution/tree_progress_display.rb +349 -212
- data/lib/taski/execution/worker_pool.rb +104 -0
- data/lib/taski/section.rb +16 -3
- data/lib/taski/static_analysis/visitor.rb +3 -0
- data/lib/taski/task.rb +218 -37
- data/lib/taski/test_helper/errors.rb +13 -0
- data/lib/taski/test_helper/minitest.rb +38 -0
- data/lib/taski/test_helper/mock_registry.rb +51 -0
- data/lib/taski/test_helper/mock_wrapper.rb +46 -0
- data/lib/taski/test_helper/rspec.rb +38 -0
- data/lib/taski/test_helper.rb +214 -0
- data/lib/taski/version.rb +1 -1
- data/lib/taski.rb +211 -23
- data/sig/taski.rbs +207 -27
- metadata +25 -8
- data/docs/advanced-features.md +0 -625
- data/docs/api-guide.md +0 -509
- data/docs/error-handling.md +0 -684
- data/examples/section_progress_demo.rb +0 -78
data/lib/taski/execution/executor.rb

@@ -1,129 +1,204 @@
 # frozen_string_literal: true
 
 require "monitor"
-require "etc"
 
 module Taski
   module Execution
     # Producer-Consumer pattern executor for parallel task execution.
     #
-    #
-    #
-    #
+    # Executor is the orchestrator that coordinates all execution components.
+    #
+    # == Architecture
+    #
+    # Executor
+    # ├── Scheduler: Dependency management and execution order
+    # ├── WorkerPool: Thread management and task distribution
+    # └── ExecutionContext: Observer notifications and output capture
+    #       └── Observers (e.g., TreeProgressDisplay)
+    #
+    # == Execution Flow
     #
-    #
-    #
+    # 1. Build dependency graph via Scheduler
+    # 2. Set up progress display via ExecutionContext
+    # 3. Start WorkerPool threads
+    # 4. Enqueue ready tasks (no dependencies) to WorkerPool
+    # 5. Run event loop:
+    #    - Pop completion events from workers
+    #    - Mark completed in Scheduler
+    #    - Enqueue newly ready tasks to WorkerPool
+    # 6. Shutdown WorkerPool when root task completes
+    # 7. Teardown progress display
+    #
+    # == Communication Queues
+    #
+    # - Execution Queue (Main -> Worker): Tasks ready to execute (via WorkerPool)
     # - Completion Queue (Worker -> Main): Events from workers
+    #
+    # == Thread Safety
+    #
+    # - Main Thread: Manages all state, coordinates execution, handles events
+    # - Worker Threads: Execute tasks and send completion events (via WorkerPool)
     class Executor
-      # Task execution states for the executor's internal tracking
-      STATE_PENDING = :pending
-      STATE_ENQUEUED = :enqueued
-      STATE_COMPLETED = :completed
-
       class << self
         # Execute a task and all its dependencies
         # @param root_task_class [Class] The root task class to execute
         # @param registry [Registry] The task registry
-
-
+        ##
+        # Create a new Executor and run execution for the specified root task class.
+        # @param root_task_class [Class] The top-level task class to execute.
+        # @param registry [Taski::Registry] Registry providing task definitions and state.
+        # @param execution_context [ExecutionContext, nil] Optional execution context to use; when nil a default context is created.
+        # @return [Object] The result returned by the execution of the root task.
+        def execute(root_task_class, registry:, execution_context: nil)
+          new(registry: registry, execution_context: execution_context).execute(root_task_class)
+        end
+
+        # Execute clean for a task and all its dependencies (in reverse order)
+        # @param root_task_class [Class] The root task class to clean
+        # @param registry [Registry] The task registry
+        ##
+        # Runs reverse-order clean execution beginning at the given root task class.
+        # @param [Class] root_task_class - The root task class whose dependency graph will drive the clean run.
+        # @param [Object] registry - Task registry used to resolve and track tasks during execution.
+        # @param [ExecutionContext, nil] execution_context - Optional execution context for observers and output capture; if `nil`, a default context is created.
+        def execute_clean(root_task_class, registry:, execution_context: nil)
+          new(registry: registry, execution_context: execution_context).execute_clean(root_task_class)
         end
       end
 
-
+      ##
+      # Initialize an Executor and its internal coordination components.
+      # @param [Object] registry - Task registry used to look up task definitions and state.
+      # @param [Integer, nil] worker_count - Optional number of worker threads to use; when `nil`,
+      #   uses Taski.args_worker_count which retrieves the worker count from the runtime args.
+      # @param [Taski::Execution::ExecutionContext, nil] execution_context - Optional execution context for observers and output capture; when `nil` a default context (with progress observer and execution trigger) is created.
+      def initialize(registry:, worker_count: nil, execution_context: nil)
         @registry = registry
-        @worker_count = worker_count || default_worker_count
-        @execution_queue = Queue.new
         @completion_queue = Queue.new
-        @workers = []
 
-        #
-        @
-
-
+        # ExecutionContext for observer pattern and output capture
+        @execution_context = execution_context || create_default_execution_context
+
+        # Scheduler for dependency management
+        @scheduler = Scheduler.new
+
+        # Determine effective worker count: explicit param > args > default
+        # Store as instance variable for consistent use in both run and clean phases
+        @effective_worker_count = worker_count || Taski.args_worker_count
+
+        # WorkerPool for thread management
+        @worker_pool = WorkerPool.new(
+          registry: @registry,
+          worker_count: @effective_worker_count
+        ) { |task_class, wrapper| execute_task(task_class, wrapper) }
      end
 
       # Execute root task and all dependencies
-
+      ##
+      # Execute the task graph rooted at the given task class.
+      #
+      # Builds the dependency graph, starts progress reporting and worker threads,
+      # enqueues tasks that are ready (no unmet dependencies), and processes worker
+      # completion events until the root task finishes. After completion or abort,
+      # shuts down workers, stops progress reporting, and restores stdout capture if
+      # this executor configured it.
+      # @param root_task_class [Class] The root task class to execute.
       def execute(root_task_class)
         # Build dependency graph from static analysis
-        build_dependency_graph(root_task_class)
-
-        # Set up tree progress display with root task (before start)
-        setup_tree_progress(root_task_class)
+        @scheduler.build_dependency_graph(root_task_class)
 
-
-
+        with_display_lifecycle(root_task_class) do
+          # Start worker threads
+          @worker_pool.start
 
-
-
+          # Enqueue tasks with no dependencies
+          enqueue_ready_tasks
 
-
-
+          # Main event loop - continues until root task completes
+          run_main_loop(root_task_class)
 
-
-
-
-        # Shutdown workers
-        shutdown_workers
+          # Shutdown workers
+          @worker_pool.shutdown
+        end
 
-        #
-
+        # Raise aggregated errors if any tasks failed
+        raise_if_any_failures
       end
 
-
+      # Execute clean for root task and all dependencies (in reverse dependency order)
+      # Clean operations run in reverse: root task cleans first, then dependencies
+      ##
+      # Executes the clean workflow for the given root task in reverse dependency order.
+      # Sets up progress display and optional output capture, starts a dedicated clean worker pool,
+      # enqueues ready-to-clean tasks, processes completion events until all tasks are cleaned,
+      # then shuts down workers and tears down progress and output capture as needed.
+      # @param [Class] root_task_class - The root task class to clean
+      def execute_clean(root_task_class)
+        # Build reverse dependency graph for clean order
+        # This must happen first to ensure root task and all static dependencies are included
+        @scheduler.build_reverse_dependency_graph(root_task_class)
+
+        # Merge runtime dependencies (e.g., Section's dynamically selected implementations)
+        # This allows clean to include tasks that were selected at runtime during run phase
+        runtime_deps = @execution_context.runtime_dependencies
+        @scheduler.merge_runtime_dependencies(runtime_deps)
+
+        with_display_lifecycle(root_task_class) do
+          # Create a new worker pool for clean operations
+          # Uses the same worker count as the run phase
+          @clean_worker_pool = WorkerPool.new(
+            registry: @registry,
+            worker_count: @effective_worker_count
+          ) { |task_class, wrapper| execute_clean_task(task_class, wrapper) }
+
+          # Start worker threads
+          @clean_worker_pool.start
+
+          # Enqueue tasks ready for clean (no reverse dependencies)
+          enqueue_ready_clean_tasks
+
+          # Main event loop - continues until all tasks are cleaned
+          run_clean_main_loop(root_task_class)
+
+          # Shutdown workers
+          @clean_worker_pool.shutdown
+        end
 
-
-
+        # Raise aggregated errors if any clean tasks failed
+        raise_if_any_clean_failures
       end
 
-
-      # Populates @dependencies and @task_states
-      def build_dependency_graph(root_task_class)
-        # @type var queue: Array[singleton(Taski::Task)]
-        queue = [root_task_class]
-
-        while (task_class = queue.shift)
-          next if @task_states.key?(task_class)
-
-          deps = task_class.cached_dependencies
-          @dependencies[task_class] = deps.dup
-          @task_states[task_class] = STATE_PENDING
-
-          deps.each { |dep| queue << dep }
-        end
-      end
+      private
 
-      # Enqueue tasks that
+      # Enqueue all tasks that are ready to execute
       def enqueue_ready_tasks
-        @
-          next unless @task_states[task_class] == STATE_PENDING
-          next unless ready_to_execute?(task_class)
-
+        @scheduler.next_ready_tasks.each do |task_class|
           enqueue_task(task_class)
         end
       end
 
-      # Check if a task is ready to execute
-      def ready_to_execute?(task_class)
-        task_deps = @dependencies[task_class] || Set.new
-        task_deps.subset?(@completed_tasks)
-      end
-
       # Enqueue a single task for execution
       def enqueue_task(task_class)
         return if @registry.abort_requested?
 
-        @
+        @scheduler.mark_enqueued(task_class)
 
         wrapper = get_or_create_wrapper(task_class)
-
-
-
-
+        unless wrapper.mark_running
+          # Task is either already running or completed in another context (e.g., parent Executor)
+          # Wait for the task to complete if it's running elsewhere
+          wrapper.wait_for_completion
+
+          # Now mark it as completed in the scheduler and enqueue newly ready tasks
+          @scheduler.mark_completed(task_class)
+          enqueue_ready_tasks
+          return
+        end
 
-        @
+        @execution_context.notify_task_registered(task_class)
+        @execution_context.notify_task_started(task_class)
 
-
+        @worker_pool.enqueue(task_class, wrapper)
       end
 
       # Get or create a task wrapper via Registry
@@ -131,40 +206,16 @@ module Taski
         @registry.get_or_create(task_class) do
           task_instance = task_class.allocate
           task_instance.send(:initialize)
-          TaskWrapper.new(task_instance, registry: @registry)
-        end
-      end
-
-      # Start worker threads
-      def start_workers
-        @worker_count.times do
-          worker = Thread.new { worker_loop }
-          @workers << worker
-          @registry.register_thread(worker)
-        end
-      end
-
-      # Worker thread main loop
-      def worker_loop
-        loop do
-          work_item = @execution_queue.pop
-          break if work_item == :shutdown
-
-          task_class = work_item[:task_class]
-          wrapper = work_item[:wrapper]
-
-          debug_log("Worker executing: #{task_class}")
-
-          execute_task(task_class, wrapper)
+          TaskWrapper.new(task_instance, registry: @registry, execution_context: @execution_context)
         end
       end
 
-      # Execute a task and send completion event
+      # Execute a task and send completion event (called by WorkerPool)
       def execute_task(task_class, wrapper)
         return if @registry.abort_requested?
 
-
-        result =
+        with_task_context(task_class) do
+          result = wrapper.task.run
          wrapper.mark_completed(result)
          @completion_queue.push({task_class: task_class, wrapper: wrapper})
        rescue Taski::TaskAbortException => e
@@ -177,71 +228,291 @@ module Taski
        end
      end
 
-      # Execute task run method
-      # Note: Previously captured stdout for progress display, but this was removed
-      # due to thread-safety concerns with global $stdout mutation.
-      def execute_task_run(wrapper)
-        wrapper.task.run
-      end
-
       # Main thread event loop - continues until root task completes
       def run_main_loop(root_task_class)
-        until @
-          break if @registry.abort_requested? &&
+        until @scheduler.completed?(root_task_class)
+          break if @registry.abort_requested? && !@scheduler.running_tasks?
 
           event = @completion_queue.pop
           handle_completion(event)
         end
      end
 
-
-
-
-
-      # Handle task completion event
+      ##
+      # Marks the given task as completed in the scheduler and enqueues any tasks that become ready as a result.
+      # @param [Hash] event - Completion event containing information about the finished task.
+      # @param [Class] event[:task_class] - The task class that completed.
       def handle_completion(event)
         task_class = event[:task_class]
 
         debug_log("Completed: #{task_class}")
 
-        @
-        @completed_tasks.add(task_class)
+        @scheduler.mark_completed(task_class)
 
         # Enqueue newly ready tasks
         enqueue_ready_tasks
       end
 
-      #
-
-
-
+      # ========================================
+      # Clean Execution Methods
+      # ========================================
+
+      ##
+      # Enqueues all tasks that are currently ready to be cleaned.
+      def enqueue_ready_clean_tasks
+        @scheduler.next_ready_clean_tasks.each do |task_class|
+          enqueue_clean_task(task_class)
+        end
       end
 
-
-
-
+      ##
+      # Enqueues a single task for reverse-order (clean) execution.
+      # If execution has been aborted, does nothing. Marks the task as clean-enqueued,
+      # skips if the task is not registered or not eligible to run, notifies the
+      # execution context that cleaning has started, and schedules the task on the
+      # clean worker pool.
+      # @param [Class] task_class - The task class to enqueue for clean execution.
+      def enqueue_clean_task(task_class)
+        return if @registry.abort_requested?
 
-
+        @scheduler.mark_clean_enqueued(task_class)
+
+        wrapper = get_or_create_wrapper(task_class)
+        return unless wrapper.mark_clean_running
+
+        @execution_context.notify_clean_started(task_class)
+
+        @clean_worker_pool.enqueue(task_class, wrapper)
       end
 
-
-
-
+      ##
+      # Executes the clean lifecycle for a task and emits a completion event.
+      #
+      # Runs the task's `clean` method, updates the provided wrapper with success or failure
+      # (which handles timing and observer notification), and pushes a completion event onto
+      # the executor's completion queue.
+      # This method respects an abort requested state from the registry (no-op if abort already requested)
+      # and triggers a registry abort when a `Taski::TaskAbortException` is raised.
+      # It also starts and stops per-task output capture when available and sets the thread-local
+      # `ExecutionContext.current` for the duration of the clean.
+      # @param [Class] task_class - The task class being cleaned.
+      # @param [Taski::Execution::TaskWrapper] wrapper - The wrapper instance for the task, used to record clean success or failure.
+      def execute_clean_task(task_class, wrapper)
+        return if @registry.abort_requested?
+
+        with_task_context(task_class) do
+          result = wrapper.task.clean
+          wrapper.mark_clean_completed(result)
+          @completion_queue.push({task_class: task_class, wrapper: wrapper, clean: true})
+        rescue Taski::TaskAbortException => e
+          @registry.request_abort!
+          wrapper.mark_clean_failed(e)
+          @completion_queue.push({task_class: task_class, wrapper: wrapper, error: e, clean: true})
+        rescue => e
+          wrapper.mark_clean_failed(e)
+          @completion_queue.push({task_class: task_class, wrapper: wrapper, error: e, clean: true})
+        end
+      end
+
+      ##
+      # Runs the main event loop that processes clean completion events until all tasks have been cleaned.
+      # Continuously pops events from the internal completion queue and delegates them to the clean completion handler,
+      # stopping early if an abort is requested and no clean tasks are running.
+      # @param [Class] root_task_class - The root task class that defines the overall clean lifecycle.
+      def run_clean_main_loop(root_task_class)
+        # Find all tasks in the dependency graph
+        # Continue until all tasks have been cleaned
+        until all_tasks_cleaned?
+          break if @registry.abort_requested? && !@scheduler.running_clean_tasks?
+
+          event = @completion_queue.pop
+          handle_clean_completion(event)
+        end
+      end
+
+      ##
+      # Processes a clean completion event and advances the cleaning workflow.
+      # Marks the completed task in the scheduler and enqueues any tasks that become ready to clean.
+      # @param [Hash] event - A completion event hash containing the `:task_class` key for the task that finished cleaning.
+      def handle_clean_completion(event)
+        task_class = event[:task_class]
 
-
+        debug_log("Clean completed: #{task_class}")
+
+        @scheduler.mark_clean_completed(task_class)
+
+        # Enqueue newly ready clean tasks
+        enqueue_ready_clean_tasks
+      end
+
+      ##
+      # Determines whether all tasks have finished their clean phase.
+      # @return [Boolean] `true` if there are no ready-to-clean tasks and no running clean tasks, `false` otherwise.
+      def all_tasks_cleaned?
+        @scheduler.next_ready_clean_tasks.empty? && !@scheduler.running_clean_tasks?
+      end
+
+      # Notify observers about the root task
+      # @param root_task_class [Class] The root task class
+      # @return [void]
+      def setup_progress_display(root_task_class)
+        @execution_context.notify_set_root_task(root_task_class)
+      end
+
+      # Set up output capture if progress display is active and not already set up
+      # @return [Boolean] true if this executor set up the capture
+      def setup_output_capture_if_needed
+        return false unless Taski.progress_display
+        return false if @execution_context.output_capture_active?
+
+        @execution_context.setup_output_capture($stdout)
+        true
+      end
+
+      # Tear down output capture and restore original $stdout
+      # @return [void]
+      def teardown_output_capture
+        @execution_context.teardown_output_capture
+      end
+
+      def start_progress_display
+        @execution_context.notify_start
       end
 
       def stop_progress_display
+        @execution_context.notify_stop
+      end
+
+      # Execute a block with task-local context set up.
+      # Sets ExecutionContext.current, Taski.current_registry, and output capture.
+      # Cleans up all context in ensure block.
+      #
+      # @param task_class [Class] The task class being executed
+      # @yield The block to execute with context set up
+      def with_task_context(task_class)
+        output_capture = @execution_context.output_capture
+        output_capture&.start_capture(task_class)
+
+        ExecutionContext.current = @execution_context
+        Taski.set_current_registry(@registry)
+
+        yield
+      ensure
+        output_capture&.stop_capture
+        ExecutionContext.current = nil
+        Taski.clear_current_registry
+      end
+
+      # Execute a block with progress display and output capture lifecycle.
+      # Sets up progress display, output capture, starts display, then yields.
+      # Ensures proper cleanup even on interrupt.
+      #
+      # @param root_task_class [Class] The root task class
+      # @yield The block to execute
+      def with_display_lifecycle(root_task_class)
+        setup_progress_display(root_task_class)
+        should_teardown_capture = setup_output_capture_if_needed
+        start_progress_display
+
+        yield
+      ensure
+        stop_progress_display
+        @saved_output_capture = @execution_context.output_capture
+        teardown_output_capture if should_teardown_capture
+      end
+
+      def create_default_execution_context
+        context = ExecutionContext.new
         progress = Taski.progress_display
-
+        context.add_observer(progress) if progress
+
+        # Set execution trigger to break circular dependency with TaskWrapper
+        context.execution_trigger = ->(task_class, registry) do
+          Executor.execute(task_class, registry: registry, execution_context: context)
+        end
 
-
+        context
       end
 
       def debug_log(message)
         return unless ENV["TASKI_DEBUG"]
         puts "[Executor] #{message}"
       end
+
+      # Raise error(s) if any tasks failed during execution
+      # TaskAbortException: raised directly (abort takes priority)
+      # All other errors: raises AggregateError containing all failures
+      def raise_if_any_failures
+        raise_if_any_failures_from(
+          @registry.failed_wrappers,
+          error_accessor: ->(w) { w.error }
+        )
+      end
+
+      # Raise error(s) if any tasks failed during clean execution
+      # TaskAbortException: raised directly (abort takes priority)
+      # All other errors: raises AggregateError containing all failures
+      def raise_if_any_clean_failures
+        raise_if_any_failures_from(
+          @registry.failed_clean_wrappers,
+          error_accessor: ->(w) { w.clean_error }
+        )
+      end
+
+      # Generic method to raise errors from failed wrappers
+      # @param failed_wrappers [Array<TaskWrapper>] Failed wrappers
+      # @param error_accessor [Proc] Lambda to extract error from wrapper
+      def raise_if_any_failures_from(failed_wrappers, error_accessor:)
+        return if failed_wrappers.empty?
+
+        # TaskAbortException takes priority - raise the first one directly
+        abort_wrapper = failed_wrappers.find { |w| error_accessor.call(w).is_a?(TaskAbortException) }
+        raise error_accessor.call(abort_wrapper) if abort_wrapper
+
+        # Flatten nested AggregateErrors and deduplicate by original error object_id
+        failures = flatten_failures_from(failed_wrappers, error_accessor: error_accessor)
+        unique_failures = failures.uniq { |f| error_identity(f.error) }
+
+        raise AggregateError.new(unique_failures)
+      end
+
+      # Flatten AggregateErrors into individual TaskFailure objects
+      # Wraps original errors with task-specific Error class for rescue matching
+      # @param failed_wrappers [Array<TaskWrapper>] Failed wrappers
+      # @param error_accessor [Proc] Lambda to extract error from wrapper
+      def flatten_failures_from(failed_wrappers, error_accessor:)
+        output_capture = @saved_output_capture
+
+        failed_wrappers.flat_map do |wrapper|
+          error = error_accessor.call(wrapper)
+          case error
+          when AggregateError
+            error.errors
+          else
+            wrapped_error = wrap_with_task_error(wrapper.task.class, error)
+            output_lines = output_capture&.recent_lines_for(wrapper.task.class) || []
+            [TaskFailure.new(task_class: wrapper.task.class, error: wrapped_error, output_lines: output_lines)]
+          end
+        end
+      end
+
+      # Wraps an error with the task-specific Error class
+      # @param task_class [Class] The task class
+      # @param error [Exception] The original error
+      # @return [TaskError] The wrapped error
+      def wrap_with_task_error(task_class, error)
+        # Don't double-wrap if already a TaskError
+        return error if error.is_a?(TaskError)
+
+        error_class = task_class.const_get(:Error)
+        error_class.new(error, task_class: task_class)
+      end
+
+      # Returns a unique identifier for an error, used for deduplication
+      # For TaskError, uses the wrapped cause's object_id
+      def error_identity(error)
+        error.is_a?(TaskError) ? error.cause&.object_id || error.object_id : error.object_id
+      end
     end
   end
 end