taski 0.8.3 → 0.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +39 -0
- data/README.md +65 -50
- data/docs/GUIDE.md +41 -56
- data/examples/README.md +10 -29
- data/examples/clean_demo.rb +25 -65
- data/examples/large_tree_demo.rb +356 -0
- data/examples/message_demo.rb +0 -1
- data/examples/progress_demo.rb +13 -24
- data/examples/reexecution_demo.rb +8 -44
- data/lib/taski/execution/execution_facade.rb +150 -0
- data/lib/taski/execution/executor.rb +156 -357
- data/lib/taski/execution/registry.rb +15 -19
- data/lib/taski/execution/scheduler.rb +161 -140
- data/lib/taski/execution/task_observer.rb +41 -0
- data/lib/taski/execution/task_output_router.rb +41 -58
- data/lib/taski/execution/task_wrapper.rb +123 -219
- data/lib/taski/execution/worker_pool.rb +238 -64
- data/lib/taski/logging.rb +105 -0
- data/lib/taski/progress/layout/base.rb +600 -0
- data/lib/taski/progress/layout/filters.rb +126 -0
- data/lib/taski/progress/layout/log.rb +27 -0
- data/lib/taski/progress/layout/simple.rb +166 -0
- data/lib/taski/progress/layout/tags.rb +76 -0
- data/lib/taski/progress/layout/theme_drop.rb +84 -0
- data/lib/taski/progress/layout/tree.rb +300 -0
- data/lib/taski/progress/theme/base.rb +224 -0
- data/lib/taski/progress/theme/compact.rb +58 -0
- data/lib/taski/progress/theme/default.rb +25 -0
- data/lib/taski/progress/theme/detail.rb +48 -0
- data/lib/taski/progress/theme/plain.rb +40 -0
- data/lib/taski/static_analysis/analyzer.rb +5 -17
- data/lib/taski/static_analysis/dependency_graph.rb +19 -1
- data/lib/taski/static_analysis/visitor.rb +1 -39
- data/lib/taski/task.rb +44 -58
- data/lib/taski/test_helper/errors.rb +1 -1
- data/lib/taski/test_helper.rb +21 -35
- data/lib/taski/version.rb +1 -1
- data/lib/taski.rb +60 -61
- data/sig/taski.rbs +194 -203
- metadata +31 -8
- data/examples/section_demo.rb +0 -195
- data/lib/taski/execution/base_progress_display.rb +0 -393
- data/lib/taski/execution/execution_context.rb +0 -390
- data/lib/taski/execution/plain_progress_display.rb +0 -76
- data/lib/taski/execution/simple_progress_display.rb +0 -247
- data/lib/taski/execution/tree_progress_display.rb +0 -643
- data/lib/taski/section.rb +0 -74
--- data/lib/taski/execution/worker_pool.rb
+++ data/lib/taski/execution/worker_pool.rb
@@ -4,100 +4,274 @@ require "etc"
 
 module Taski
   module Execution
-
-
-
-
-    #
-    #
-    #
-    #
-    #
-    # == API
-    #
-    # - {#start} - Start all worker threads
-    # - {#enqueue} - Add a task to the execution queue
-    # - {#shutdown} - Gracefully shutdown all worker threads
-    # - {#execution_queue} - Access the underlying Queue (for testing)
+    def self.default_worker_count
+      Etc.nprocessors.clamp(2, 8)
+    end
+
+    # WorkerPool manages N threads, each with its own command Queue.
+    # Tasks are executed within Fibers on worker threads.
+    # When a Fiber yields [:need_dep, dep_class, method], the worker
+    # resolves the dependency via TaskWrapper#request_value:
     #
-    #
+    # - :completed → resume Fiber immediately with the value
+    # - :wait → park the Fiber (it will be resumed later via the thread's queue)
+    # - :start → start the dependency as a nested Fiber on the same thread
     #
-    #
-    #
-    #
+    # Worker threads process these commands:
+    # - [:execute, task_class, wrapper] → create and drive a new Fiber
+    # - [:execute_clean, task_class, wrapper] → run clean directly (no Fiber)
+    # - [:resume, fiber, value] → resume a parked Fiber with a value
+    # - [:resume_error, fiber, error] → resume a parked Fiber with an error
+    # - :shutdown → exit the worker loop
     class WorkerPool
-      attr_reader :
+      attr_reader :worker_count
 
-
-      # @param worker_count [Integer, nil] Number of worker threads (defaults to CPU count)
-      # @param on_execute [Proc] Callback to execute a task, receives (task_class, wrapper)
-      def initialize(registry:, worker_count: nil, &on_execute)
-        @worker_count = worker_count || default_worker_count
+      def initialize(registry:, execution_facade:, completion_queue:, worker_count: nil)
         @registry = registry
-        @
-        @
-        @
+        @execution_facade = execution_facade
+        @worker_count = worker_count || Execution.default_worker_count
+        @completion_queue = completion_queue
+        @threads = []
+        @thread_queues = []
+        @next_thread_index = 0
+        @fiber_contexts_mutex = Mutex.new
+        @fiber_contexts = {}
+        @task_start_times_mutex = Mutex.new
+        @task_start_times = {}
       end
 
-      # Start all worker threads.
       def start
         @worker_count.times do
-
-          @
-
+          queue = Queue.new
+          @thread_queues << queue
+          thread = Thread.new(queue) { |q| worker_loop(q) }
+          @threads << thread
+          @registry.register_thread(thread)
        end
      end
 
-      #
-      #
-      # @param task_class [Class] The task class to execute
-      # @param wrapper [TaskWrapper] The task wrapper
+      # Round-robins across worker threads.
      def enqueue(task_class, wrapper)
-        @
-
+        queue = @thread_queues[@next_thread_index % @worker_count]
+        @next_thread_index += 1
+        queue.push([:execute, task_class, wrapper])
+        Taski::Logging.debug(Taski::Logging::Events::WORKER_POOL_ENQUEUED, task: task_class.name, thread_index: (@next_thread_index - 1) % @worker_count)
      end
 
-      #
-      def
-
-        @
+      # Clean tasks run directly without Fiber wrapping.
+      def enqueue_clean(task_class, wrapper)
+        queue = @thread_queues[@next_thread_index % @worker_count]
+        @next_thread_index += 1
+        queue.push([:execute_clean, task_class, wrapper])
      end
 
-
-
-        @
+      def shutdown
+        @thread_queues.each { |q| q.push(:shutdown) }
+        @threads.each(&:join)
      end
 
      private
 
-      def
-
+      def worker_loop(queue)
+        loop do
+          cmd = queue.pop
+          break if cmd == :shutdown
+
+          case cmd[0]
+          when :execute
+            _, task_class, wrapper = cmd
+            drive_fiber(task_class, wrapper, queue)
+          when :resume
+            _, fiber, value = cmd
+            resume_fiber(fiber, value, queue)
+          when :resume_error
+            _, fiber, error = cmd
+            resume_fiber_with_error(fiber, error, queue)
+          when :execute_clean
+            _, task_class, wrapper = cmd
+            execute_clean_task(task_class, wrapper)
+          end
+        end
      end
 
-
-
-
-
+      # Drive a new Fiber for a task. The caller MUST have already called
+      # wrapper.mark_running before enqueueing — drive_fiber never calls it.
+      def drive_fiber(task_class, wrapper, queue)
+        return if @registry.abort_requested?
 
-
-
+        fiber = Fiber.new do
+          setup_run_thread_locals
+          wrapper.task.run
+        end
 
-
+        now = Time.now
+        @task_start_times_mutex.synchronize { @task_start_times[task_class] = now }
+        Taski::Logging.info(Taski::Logging::Events::TASK_STARTED, task: task_class.name)
+        @execution_facade.notify_task_updated(task_class, previous_state: :pending, current_state: :running, phase: :run, timestamp: now)
 
-
-
-
-
-
-
-
+        start_output_capture(task_class)
+        drive_fiber_loop(fiber, task_class, wrapper, queue)
+      end
+
+      # Drive a Fiber forward by resuming it with resume_value.
+      # fiber.resume is called INSIDE this method so that exceptions
+      # are caught by the rescue and routed to fail_task.
+      def drive_fiber_loop(fiber, task_class, wrapper, queue, resume_value = nil)
+        result = fiber.resume(resume_value)
+
+        while fiber.alive?
+          if result.is_a?(Array) && result[0] == :need_dep
+            _, dep_class, method = result
+            handle_dependency(dep_class, method, fiber, task_class, wrapper, queue)
+            return # Fiber is either continuing or parked
+          else
+            break
          end
        end
+
+        complete_task(task_class, wrapper, result)
+      rescue => e
+        fail_task(task_class, wrapper, e)
+      end
+
+      def handle_dependency(dep_class, method, fiber, task_class, wrapper, queue)
+        dep_wrapper = @registry.create_wrapper(dep_class, execution_facade: @execution_facade)
+        status = dep_wrapper.request_value(method, queue, fiber)
+
+        case status[0]
+        when :completed
+          drive_fiber_loop(fiber, task_class, wrapper, queue, status[1])
+        when :failed
+          drive_fiber_loop(fiber, task_class, wrapper, queue, [:_taski_error, status[1]])
+        when :wait
+          store_fiber_context(fiber, task_class, wrapper)
+        when :start
+          store_fiber_context(fiber, task_class, wrapper)
+          start_dependency(dep_class, dep_wrapper, queue)
+        end
+      end
+
+      # Resume a parked Fiber from the thread queue.
+      # Restores fiber context before resuming since teardown_thread_locals
+      # cleared thread-local state when the fiber was parked.
+      def resume_fiber(fiber, value, queue)
+        context = get_fiber_context(fiber)
+        return unless context
+
+        task_class, wrapper = context
+        setup_run_thread_locals
+        start_output_capture(task_class)
+        drive_fiber_loop(fiber, task_class, wrapper, queue, value)
+      end
+
+      def resume_fiber_with_error(fiber, error, queue)
+        context = get_fiber_context(fiber)
+        return unless context
+
+        task_class, wrapper = context
+        setup_run_thread_locals
+        start_output_capture(task_class)
+        drive_fiber_loop(fiber, task_class, wrapper, queue, [:_taski_error, error])
+      end
+
+      # Start a dependency task as a new Fiber on this thread.
+      # The wrapper is already RUNNING (set atomically by request_value).
+      def start_dependency(dep_class, dep_wrapper, queue)
+        drive_fiber(dep_class, dep_wrapper, queue)
+      end
+
+      def complete_task(task_class, wrapper, result)
+        stop_output_capture
+        duration = task_duration_ms(task_class)
+        Taski::Logging.info(Taski::Logging::Events::TASK_COMPLETED, task: task_class.name, duration_ms: duration)
+        wrapper.mark_completed(result)
+        @completion_queue.push({task_class: task_class, wrapper: wrapper})
+        teardown_thread_locals
+      end
+
+      def fail_task(task_class, wrapper, error)
+        stop_output_capture
+        @registry.request_abort! if error.is_a?(Taski::TaskAbortException)
+        duration = task_duration_ms(task_class)
+        Taski::Logging.error(Taski::Logging::Events::TASK_FAILED, task: task_class.name, duration_ms: duration)
+        wrapper.mark_failed(error)
+        @completion_queue.push({task_class: task_class, wrapper: wrapper, error: error})
+        teardown_thread_locals
+      end
+
+      # Execute a clean task directly (no Fiber needed).
+      def execute_clean_task(task_class, wrapper)
+        return if @registry.abort_requested?
+
+        setup_clean_thread_locals
+        start_output_capture(task_class)
+        clean_start = Time.now
+        @execution_facade.notify_task_updated(task_class, previous_state: :pending, current_state: :running, phase: :clean, timestamp: clean_start)
+        Taski::Logging.debug(Taski::Logging::Events::TASK_CLEAN_STARTED, task: task_class.name)
+
+        result = wrapper.task.clean
+        duration = ((Time.now - clean_start) * 1000).round(1)
+        Taski::Logging.debug(Taski::Logging::Events::TASK_CLEAN_COMPLETED, task: task_class.name, duration_ms: duration)
+        wrapper.mark_clean_completed(result)
+        @completion_queue.push({task_class: task_class, wrapper: wrapper, clean: true})
+      rescue => e
+        @registry.request_abort! if e.is_a?(Taski::TaskAbortException)
+        duration = ((Time.now - clean_start) * 1000).round(1) if clean_start
+        Taski::Logging.warn(Taski::Logging::Events::TASK_CLEAN_FAILED, task: task_class.name, duration_ms: duration)
+        wrapper.mark_clean_failed(e)
+        @completion_queue.push({task_class: task_class, wrapper: wrapper, error: e, clean: true})
+      ensure
+        stop_output_capture
+        teardown_thread_locals
+      end
+
+      # Set up context for clean execution (no Fiber flag).
+      def setup_clean_thread_locals
+        Thread.current[:taski_current_phase] = :clean
+        ExecutionFacade.current = @execution_facade
+        Taski.set_current_registry(@registry)
+      end
+
+      def setup_run_thread_locals
+        Thread.current[:taski_fiber_context] = true
+        Thread.current[:taski_current_phase] = :run
+        ExecutionFacade.current = @execution_facade
+        Taski.set_current_registry(@registry)
+      end
+
+      def teardown_thread_locals
+        Thread.current[:taski_fiber_context] = nil
+        Thread.current[:taski_current_phase] = nil
+        ExecutionFacade.current = nil
+        Taski.clear_current_registry
+      end
+
+      def task_duration_ms(task_class)
+        start = @task_start_times_mutex.synchronize { @task_start_times.delete(task_class) }
+        return nil unless start
+        ((Time.now - start) * 1000).round(1)
+      end
+
+      def start_output_capture(task_class)
+        output_capture = @execution_facade.output_capture
+        output_capture&.start_capture(task_class)
+      end
+
+      def stop_output_capture
+        output_capture = @execution_facade.output_capture
+        output_capture&.stop_capture
+      end
+
+      def store_fiber_context(fiber, task_class, wrapper)
+        @fiber_contexts_mutex.synchronize do
+          @fiber_contexts[fiber.object_id] = [task_class, wrapper]
+        end
      end
 
-      def
-
-
+      def get_fiber_context(fiber)
+        @fiber_contexts_mutex.synchronize do
+          @fiber_contexts.delete(fiber.object_id)
+        end
      end
    end
  end
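For orientation, the command protocol in this hunk amounts to a small fiber scheduler. The sketch below is not taski code; it is a minimal, self-contained Ruby illustration of the [:need_dep, ...] yield/resume cycle, with a hypothetical resolve_dep lambda standing in for TaskWrapper#request_value and a bare lambda standing in for a dependency task:

    # Standalone sketch of the yield/resume protocol described above.
    # Assumptions: resolve_dep is a stand-in for request_value, and every
    # dependency is treated as already :completed.
    resolve_dep = ->(dep) { dep.call }

    task_fiber = Fiber.new do
      # A task body asks the scheduler for a dependency by yielding a command.
      value = Fiber.yield([:need_dep, -> { 21 }, :build])
      value * 2
    end

    # The worker drives the fiber: on [:need_dep, ...] it resolves the
    # dependency and resumes the fiber with the resulting value.
    result = task_fiber.resume
    while task_fiber.alive?
      if result.is_a?(Array) && result[0] == :need_dep
        _, dep, _method = result
        result = task_fiber.resume(resolve_dep.call(dep))
      else
        break
      end
    end

    puts result # => 42

The real implementation adds the :wait and :start branches so a parked fiber can be handed back to its worker thread's queue instead of blocking it.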
--- /dev/null
+++ data/lib/taski/logging.rb
@@ -0,0 +1,105 @@
+# frozen_string_literal: true
+
+require "json"
+require "monitor"
+require "time"
+module Taski
+  # Logging module provides structured logging support for debugging and monitoring.
+  # Logging is disabled by default and has zero overhead when not configured.
+  #
+  # @example Basic setup
+  #   require 'logger'
+  #   Taski.logger = Logger.new($stderr, level: Logger::INFO)
+  #
+  # @example JSON output for monitoring systems
+  #   Taski.logger = Logger.new('/var/log/taski.log')
+  module Logging
+    # Event type constants
+    module Events
+      # Execution lifecycle
+      EXECUTION_STARTED = "execution.started"
+      EXECUTION_COMPLETED = "execution.completed"
+
+      # Task lifecycle
+      TASK_STARTED = "task.started"
+      TASK_COMPLETED = "task.completed"
+      TASK_FAILED = "task.failed"
+      TASK_SKIPPED = "task.skipped"
+      TASK_ERROR_DETAIL = "task.error_detail"
+      TASK_OUTPUT = "task.output"
+
+      # Clean lifecycle
+      TASK_CLEAN_STARTED = "task.clean_started"
+      TASK_CLEAN_COMPLETED = "task.clean_completed"
+      TASK_CLEAN_FAILED = "task.clean_failed"
+
+      # Dependency resolution
+      DEPENDENCY_RESOLVED = "dependency.resolved"
+
+      # Internal components (debug-level)
+      WORKER_POOL_ENQUEUED = "worker_pool.enqueued"
+      EXECUTOR_TASK_COMPLETED = "executor.task_completed"
+      EXECUTOR_CLEAN_COMPLETED = "executor.clean_completed"
+      OUTPUT_ROUTER_START_CAPTURE = "output_router.start_capture"
+      OUTPUT_ROUTER_STOP_CAPTURE = "output_router.stop_capture"
+      OUTPUT_ROUTER_STOP_CAPTURE_UNREGISTERED = "output_router.stop_capture_unregistered"
+      OUTPUT_ROUTER_DRAIN_PIPE = "output_router.drain_pipe"
+      OUTPUT_ROUTER_STORE_LINES = "output_router.store_lines"
+      OBSERVER_ERROR = "observer.error"
+    end
+
+    # Log severity levels matching Ruby Logger
+    module Levels
+      DEBUG = 0
+      INFO = 1
+      WARN = 2
+      ERROR = 3
+    end
+
+    class << self
+      # Log a structured event. No-op if logger is nil.
+      #
+      # @param level [Integer] Log level (DEBUG, INFO, WARN, ERROR)
+      # @param event [String] Event type constant
+      # @param task [String, nil] Task class name
+      # @param data [Hash] Additional event data
+      def log(level, event, task: nil, **data)
+        logger = Taski.logger
+        return unless logger
+
+        entry = build_entry(event, task, data)
+        message = entry.to_json
+
+        case level
+        when Levels::DEBUG
+          logger.debug(message)
+        when Levels::INFO
+          logger.info(message)
+        when Levels::WARN
+          logger.warn(message)
+        when Levels::ERROR
+          logger.error(message)
+        end
+      end
+
+      # Convenience methods for each log level
+      def debug(event, **kwargs) = log(Levels::DEBUG, event, **kwargs)
+      def info(event, **kwargs) = log(Levels::INFO, event, **kwargs)
+      def warn(event, **kwargs) = log(Levels::WARN, event, **kwargs)
+      def error(event, **kwargs) = log(Levels::ERROR, event, **kwargs)
+
+      private
+
+      def build_entry(event, task, data)
+        entry = {
+          timestamp: Time.now.utc.iso8601(3),
+          event: event,
+          thread_id: Thread.current.object_id
+        }
+        entry[:task] = task if task
+        entry[:data] = data unless data.empty?
+        entry
+      end
+    end
+  end
+end
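Going by the doc comment and build_entry above, wiring up the new structured logging should look roughly like this. The sample output line is reconstructed from build_entry's fields, not captured from a real run, and the field values are invented for the example:

    require "logger"
    require "taski"

    # Logging is off by default; assigning any Logger-compatible object enables it.
    Taski.logger = Logger.new($stderr, level: Logger::INFO)

    # Each event is emitted as one JSON object per log call, e.g. (illustrative):
    # {"timestamp":"2025-06-01T12:00:00.000Z","event":"task.started","thread_id":1340,"task":"Build"}

Because log returns immediately when Taski.logger is nil, the instrumentation calls scattered through WorkerPool cost little when logging is disabled.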