ocak 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. checksums.yaml +7 -0
  2. data/LICENSE.txt +21 -0
  3. data/README.md +268 -0
  4. data/bin/ocak +7 -0
  5. data/lib/ocak/agent_generator.rb +171 -0
  6. data/lib/ocak/claude_runner.rb +169 -0
  7. data/lib/ocak/cli.rb +28 -0
  8. data/lib/ocak/commands/audit.rb +25 -0
  9. data/lib/ocak/commands/clean.rb +30 -0
  10. data/lib/ocak/commands/debt.rb +21 -0
  11. data/lib/ocak/commands/design.rb +34 -0
  12. data/lib/ocak/commands/init.rb +212 -0
  13. data/lib/ocak/commands/resume.rb +128 -0
  14. data/lib/ocak/commands/run.rb +60 -0
  15. data/lib/ocak/commands/status.rb +102 -0
  16. data/lib/ocak/config.rb +109 -0
  17. data/lib/ocak/issue_fetcher.rb +137 -0
  18. data/lib/ocak/logger.rb +192 -0
  19. data/lib/ocak/merge_manager.rb +158 -0
  20. data/lib/ocak/pipeline_runner.rb +389 -0
  21. data/lib/ocak/pipeline_state.rb +51 -0
  22. data/lib/ocak/planner.rb +68 -0
  23. data/lib/ocak/process_runner.rb +82 -0
  24. data/lib/ocak/stack_detector.rb +333 -0
  25. data/lib/ocak/stream_parser.rb +189 -0
  26. data/lib/ocak/templates/agents/auditor.md.erb +87 -0
  27. data/lib/ocak/templates/agents/documenter.md.erb +67 -0
  28. data/lib/ocak/templates/agents/implementer.md.erb +154 -0
  29. data/lib/ocak/templates/agents/merger.md.erb +97 -0
  30. data/lib/ocak/templates/agents/pipeline.md.erb +126 -0
  31. data/lib/ocak/templates/agents/planner.md.erb +86 -0
  32. data/lib/ocak/templates/agents/reviewer.md.erb +98 -0
  33. data/lib/ocak/templates/agents/security_reviewer.md.erb +112 -0
  34. data/lib/ocak/templates/gitignore_additions.txt +10 -0
  35. data/lib/ocak/templates/hooks/post_edit_lint.sh.erb +57 -0
  36. data/lib/ocak/templates/hooks/task_completed_test.sh.erb +34 -0
  37. data/lib/ocak/templates/ocak.yml.erb +99 -0
  38. data/lib/ocak/templates/skills/audit/SKILL.md.erb +132 -0
  39. data/lib/ocak/templates/skills/debt/SKILL.md.erb +128 -0
  40. data/lib/ocak/templates/skills/design/SKILL.md.erb +131 -0
  41. data/lib/ocak/templates/skills/scan_file/SKILL.md.erb +113 -0
  42. data/lib/ocak/verification.rb +83 -0
  43. data/lib/ocak/worktree_manager.rb +92 -0
  44. data/lib/ocak.rb +13 -0
  45. metadata +115 -0
@@ -0,0 +1,389 @@
1
# frozen_string_literal: true

require 'json'
require 'fileutils'
require 'shellwords'
require 'open3' # FIX: Open3.capture3 is used in #current_branch but was never required
require_relative 'pipeline_state'
require_relative 'verification'
require_relative 'planner'

module Ocak
  # Orchestrates the automated issue pipeline: polls GitHub for ready issues,
  # plans them into batches, runs the configured agent steps per issue (in
  # parallel git worktrees), merges successful work sequentially, and persists
  # per-issue progress so interrupted runs can be resumed.
  class PipelineRunner
    include Verification
    include Planner

    # Carries per-step values between the step bookkeeping helpers below.
    StepContext = Struct.new(:issue_number, :idx, :role, :result, :state, :logger, :chdir)

    # config:  Ocak::Config (labels, step list, budgets, paths, commands).
    # options: :single (issue number), :once, :dry_run, :watch.
    def initialize(config:, options: {})
      @config = config
      @options = options
      @watch_formatter = options[:watch] ? WatchFormatter.new : nil
      @shutting_down = false
      @active_issues = []        # issue numbers currently being worked on
      @active_mutex = Mutex.new  # guards @active_issues across worker threads
    end

    # Entry point: single-issue mode when options[:single] is set, else the poll loop.
    def run
      if @options[:single]
        run_single(@options[:single])
      else
        run_loop
      end
    end

    # Graceful shutdown: stop the poll loop after the current iteration and
    # return any in-progress issues to the ready queue so a later run can
    # pick them up. Each transition failure is logged but does not abort the rest.
    def shutdown!
      @shutting_down = true
      logger = build_logger
      logger.info('Graceful shutdown initiated...')

      # Transition any in-progress issues back to ready
      issues = IssueFetcher.new(config: @config, logger: logger)
      @active_mutex.synchronize do
        @active_issues.each do |issue_number|
          logger.info("Returning issue ##{issue_number} to ready queue")
          issues.transition(issue_number, from: @config.label_in_progress, to: @config.label_ready)
        rescue StandardError => e
          logger.warn("Failed to reset issue ##{issue_number}: #{e.message}")
        end
      end
    end

    private

    # --- Single Issue Mode ---

    # Run the whole pipeline for one issue directly in the project directory
    # (no worktree), then merge on success or label/comment on failure.
    def run_single(issue_number)
      logger = build_logger(issue_number: issue_number)
      claude = build_claude(logger)
      # CONSISTENCY FIX: pass the logger like every other IssueFetcher call site.
      issues = IssueFetcher.new(config: @config, logger: logger)

      logger.info("Running single issue mode for ##{issue_number}")

      if @options[:dry_run]
        logger.info("[DRY RUN] Would run pipeline for issue ##{issue_number}")
        return
      end

      issues.transition(issue_number, from: @config.label_ready, to: @config.label_in_progress)

      result = run_pipeline(issue_number, logger: logger, claude: claude)

      if result[:success]
        claude.run_agent('merger', "Create a PR, merge it, and close issue ##{issue_number}",
                         chdir: @config.project_dir)
        issues.transition(issue_number, from: @config.label_in_progress, to: @config.label_completed)
        logger.info("Issue ##{issue_number} completed successfully")
      else
        issues.transition(issue_number, from: @config.label_in_progress, to: @config.label_failed)
        issues.comment(issue_number,
                       "Pipeline failed at phase: #{result[:phase]}\n\n```\n#{result[:output][0..1000]}\n```")
        logger.error("Issue ##{issue_number} failed at phase: #{result[:phase]}")
      end
    end

    # --- Poll Loop ---

    # Poll for ready issues until shutdown (or a single pass when options[:once]).
    def run_loop
      logger = build_logger
      issues = IssueFetcher.new(config: @config, logger: logger)

      # Clean up stale worktrees from previous runs
      cleanup_stale_worktrees(logger)

      loop do
        break if @shutting_down

        logger.info("Checking for #{@config.label_ready} issues...")
        ready = issues.fetch_ready

        if ready.empty?
          logger.info('No ready issues found')
        else
          logger.info("Found #{ready.size} ready issue(s): #{ready.map { |i| "##{i['number']}" }.join(', ')}")
          process_issues(ready, logger: logger, issues: issues)
        end

        break if @options[:once]

        logger.info("Sleeping #{@config.poll_interval}s...")
        sleep @config.poll_interval
      end
    end

    # --- Batch Processing ---

    # Cap the ready list to max_issues_per_run, ask the planner to group
    # issues into batches, and run each batch (respecting max_parallel
    # and dry-run mode).
    def process_issues(ready_issues, logger:, issues:)
      if ready_issues.size > @config.max_issues_per_run
        logger.warn("Capping to #{@config.max_issues_per_run} issues (found #{ready_issues.size})")
        ready_issues = ready_issues.first(@config.max_issues_per_run)
      end

      claude = build_claude(logger)
      batches = plan_batches(ready_issues, logger: logger, claude: claude)

      batches.each_with_index do |batch, idx|
        batch_issues = batch['issues'][0...@config.max_parallel]
        logger.info("Running batch #{idx + 1}/#{batches.size} (#{batch_issues.size} issues)")

        if @options[:dry_run]
          batch_issues.each { |i| logger.info("[DRY RUN] Would process issue ##{i['number']}: #{i['title']}") }
          next
        end

        run_batch(batch_issues, logger: logger, issues: issues)
      end
    end

    # Run one batch: issues execute in parallel threads (one worktree each),
    # successful results are merged sequentially, and all worktrees are
    # cleaned up regardless of outcome.
    def run_batch(batch_issues, logger:, issues:)
      worktrees = WorktreeManager.new(config: @config)

      # Process issues in parallel
      threads = batch_issues.map do |issue|
        Thread.new { process_one_issue(issue, worktrees: worktrees, issues: issues) }
      end

      results = threads.map(&:value)

      # Merge successful issues sequentially (parallel merges would race on main)
      results.select { |r| r[:success] }.each do |result|
        merger = MergeManager.new(
          config: @config, claude: build_claude(logger), logger: logger, watch: @watch_formatter
        )

        if merger.merge(result[:issue_number], result[:worktree])
          issues.transition(result[:issue_number], from: @config.label_in_progress, to: @config.label_completed)
          logger.info("Issue ##{result[:issue_number]} merged successfully")
        else
          issues.transition(result[:issue_number], from: @config.label_in_progress, to: @config.label_failed)
          logger.error("Issue ##{result[:issue_number]} merge failed")
        end
      end

      # Clean up all worktrees
      results.each do |result|
        next unless result[:worktree]

        worktrees.remove(result[:worktree])
      rescue StandardError => e
        logger.warn("Failed to clean worktree for ##{result[:issue_number]}: #{e.message}")
      end
    end

    # Thread body for one issue: create a worktree, run the pipeline there,
    # and return a result hash ({ issue_number:, success:, worktree:, ... }).
    # Always removes the issue from @active_issues, even on error.
    def process_one_issue(issue, worktrees:, issues:)
      issue_number = issue['number']
      logger = build_logger(issue_number: issue_number)
      claude = build_claude(logger)
      worktree = nil

      @active_mutex.synchronize { @active_issues << issue_number }
      issues.transition(issue_number, from: @config.label_ready, to: @config.label_in_progress)
      worktree = worktrees.create(issue_number, setup_command: @config.setup_command)
      logger.info("Created worktree at #{worktree.path} (branch: #{worktree.branch})")

      result = run_pipeline(issue_number, logger: logger, claude: claude, chdir: worktree.path,
                            complexity: issue.fetch('complexity', 'full'))

      build_issue_result(result, issue_number: issue_number, worktree: worktree, issues: issues)
    rescue StandardError => e
      logger.error("Unexpected error: #{e.message}\n#{e.backtrace.first(5).join("\n")}")
      issues.transition(issue_number, from: @config.label_in_progress, to: @config.label_failed)
      { issue_number: issue_number, success: false, worktree: worktree, error: e.message }
    ensure
      @active_mutex.synchronize { @active_issues.delete(issue_number) }
    end

    # Translate a pipeline result into the batch result hash; on failure also
    # label the issue failed and leave an explanatory comment.
    def build_issue_result(result, issue_number:, worktree:, issues:)
      if result[:success]
        { issue_number: issue_number, success: true, worktree: worktree }
      else
        issues.transition(issue_number, from: @config.label_in_progress, to: @config.label_failed)
        issues.comment(issue_number,
                       "Pipeline failed at phase: #{result[:phase]}\n\n```\n#{result[:output][0..1000]}\n```")
        { issue_number: issue_number, success: false, worktree: worktree }
      end
    end

    # --- Pipeline Execution ---

    # Run the configured steps plus final verification for one issue.
    # Returns { success: true, ... } or { success: false, phase:, output: }.
    # The persisted state file is deleted only on success, so failed runs
    # remain resumable.
    def run_pipeline(issue_number, logger:, claude:, chdir: nil, skip_steps: [], complexity: 'full')
      chdir ||= @config.project_dir
      logger.info("=== Starting pipeline for issue ##{issue_number} (#{complexity}) ===")

      state = { last_review_output: nil, had_fixes: false, completed_steps: [], total_cost: 0.0,
                complexity: complexity }

      failure = run_pipeline_steps(issue_number, state, logger: logger, claude: claude, chdir: chdir,
                                   skip_steps: skip_steps)
      log_cost_summary(state[:total_cost], logger)
      return failure if failure

      failure = run_final_verification(logger: logger, claude: claude, chdir: chdir)
      return failure if failure

      # Clean up state file on success
      pipeline_state.delete(issue_number)

      logger.info("=== Pipeline complete for issue ##{issue_number} ===")
      { success: true, output: 'Pipeline completed successfully' }
    end

    # Iterate the configured steps, honoring resume (skip_steps indexes) and
    # conditional skips; returns a failure hash, or nil when all steps pass.
    def run_pipeline_steps(issue_number, state, logger:, claude:, chdir:, skip_steps: [])
      @config.steps.each_with_index do |step, idx|
        step = symbolize(step)
        role = step[:role].to_s

        if skip_steps.include?(idx)
          logger.info("Skipping #{role} (already completed)")
          next
        end

        next if skip_step?(step, state, logger)

        result = execute_step(step, issue_number, state[:last_review_output], logger: logger, claude: claude,
                              chdir: chdir)
        ctx = StepContext.new(issue_number, idx, role, result, state, logger, chdir)
        failure = record_step_result(ctx)
        return failure if failure
      end
      nil
    end

    # Book-keep a finished step: update rolling state, cost, and persisted
    # progress; returns a failure hash when the step or cost budget fails.
    def record_step_result(ctx)
      update_pipeline_state(ctx.role, ctx.result, ctx.state)
      ctx.state[:completed_steps] << ctx.idx
      ctx.state[:total_cost] += ctx.result.cost_usd.to_f
      save_step_progress(ctx)

      check_step_failure(ctx) || check_cost_budget(ctx.state, ctx.logger)
    end

    # Persist progress so `resume` can continue after an interruption.
    def save_step_progress(ctx)
      pipeline_state.save(ctx.issue_number,
                          completed_steps: ctx.state[:completed_steps],
                          worktree_path: ctx.chdir,
                          branch: current_branch(ctx.chdir))
    end

    # Only implement/merge failures abort the pipeline; other roles are
    # advisory (their findings flow through state to conditional fix steps).
    def check_step_failure(ctx)
      return nil if ctx.result.success? || !%w[implement merge].include?(ctx.role)

      ctx.logger.error("#{ctx.role} failed")
      { success: false, phase: ctx.role, output: ctx.result.output }
    end

    # Abort when accumulated cost exceeds the configured budget (if any).
    def check_cost_budget(state, logger)
      return nil unless @config.cost_budget && state[:total_cost] > @config.cost_budget

      cost = format('%.2f', state[:total_cost])
      budget = format('%.2f', @config.cost_budget)
      logger.error("Cost budget exceeded ($#{cost}/$#{budget})")
      { success: false, phase: 'budget', output: "Cost budget exceeded: $#{cost}" }
    end

    # Decide whether a step should be skipped based on issue complexity and
    # the step's declared :condition.
    def skip_step?(step, state, logger)
      role = step[:role].to_s
      condition = step[:condition]

      if step[:complexity] == 'full' && state[:complexity] == 'simple'
        logger.info("Skipping #{role} — fast-track issue")
        return true
      end
      # "\u{1F534}" is the red-circle emoji used to mark blocking findings.
      if condition == 'has_findings' && !state[:last_review_output]&.include?("\u{1F534}")
        logger.info("Skipping #{role} — no blocking findings")
        return true
      end
      if condition == 'had_fixes' && !state[:had_fixes]
        logger.info("Skipping #{role} — no fixes were made")
        return true
      end
      false
    end

    # Run one step's agent with the role-appropriate prompt.
    # Agent names use hyphens on disk, underscores in config — hence tr.
    def execute_step(step, issue_number, review_output, logger:, claude:, chdir:)
      agent = step[:agent].to_s
      role = step[:role].to_s
      logger.info("--- Phase: #{role} (#{agent}) ---")
      prompt = build_step_prompt(role, issue_number, review_output)
      claude.run_agent(agent.tr('_', '-'), prompt, chdir: chdir)
    end

    # Roll a step's output into the cross-step state read by skip_step?.
    def update_pipeline_state(role, result, state)
      case role
      when 'review', 'verify', 'security', 'audit'
        state[:last_review_output] = result.output
      when 'fix'
        state[:had_fixes] = true
        state[:last_review_output] = nil
      when 'implement'
        state[:last_review_output] = nil
      end
    end

    # Run the project's test/lint commands; on failure, give the implementer
    # one chance to fix, then re-check. Returns nil on success, otherwise a
    # failure hash with phase 'final-verify'.
    def run_final_verification(logger:, claude:, chdir:)
      return nil unless @config.test_command || @config.lint_check_command

      logger.info('--- Final verification ---')
      result = run_final_checks(logger, chdir: chdir)
      return nil if result[:success]

      logger.warn('Final checks failed, attempting fix...')
      claude.run_agent('implementer',
                       "Fix these test/lint failures:\n\n#{result[:output]}",
                       chdir: chdir)
      result = run_final_checks(logger, chdir: chdir)
      return nil if result[:success]

      { success: false, phase: 'final-verify', output: result[:output] }
    end

    # --- Cost ---

    # Log the total pipeline cost (and budget when one is configured).
    def log_cost_summary(total_cost, logger)
      return if total_cost.zero?

      budget = @config.cost_budget
      budget_str = budget ? " / $#{format('%.2f', budget)} budget" : ''
      logger.info("Pipeline cost: $#{format('%.4f', total_cost)}#{budget_str}")
    end

    # --- Cleanup ---

    # Best-effort removal of worktrees left behind by previous/crashed runs.
    def cleanup_stale_worktrees(logger)
      worktrees = WorktreeManager.new(config: @config)
      removed = worktrees.clean_stale
      removed.each { |path| logger.info("Cleaned stale worktree: #{path}") }
    rescue StandardError => e
      logger.warn("Stale worktree cleanup failed: #{e.message}")
    end

    # --- Helpers ---

    def build_logger(issue_number: nil)
      PipelineLogger.new(
        log_dir: File.join(@config.project_dir, @config.log_dir),
        issue_number: issue_number
      )
    end

    def build_claude(logger)
      ClaudeRunner.new(config: @config, logger: logger, watch: @watch_formatter)
    end

    # Memoized state store shared by all pipeline runs in this process.
    def pipeline_state
      @pipeline_state ||= PipelineState.new(log_dir: File.join(@config.project_dir, @config.log_dir))
    end

    # Current git branch in chdir; nil when git is unavailable or fails.
    def current_branch(chdir)
      stdout, = Open3.capture3('git', 'rev-parse', '--abbrev-ref', 'HEAD', chdir: chdir)
      stdout.strip
    rescue StandardError
      nil
    end

    # Config steps may come from YAML with string keys; normalize to symbols.
    def symbolize(hash)
      return hash unless hash.is_a?(Hash)

      hash.transform_keys(&:to_sym)
    end
  end
end
@@ -0,0 +1,51 @@
1
# frozen_string_literal: true

require 'json'
require 'fileutils'
require 'time' # FIX: Time#iso8601 (used in #save) is defined by the 'time' stdlib extension

module Ocak
  # Persists per-issue pipeline progress as JSON files under the log
  # directory so an interrupted pipeline can be resumed or inspected.
  class PipelineState
    # log_dir: directory where issue-<n>-state.json files live.
    def initialize(log_dir:)
      @log_dir = log_dir
    end

    # Write (or overwrite) the state file for an issue, creating the log
    # directory if needed. updated_at records when progress was last saved.
    def save(issue_number, completed_steps:, worktree_path: nil, branch: nil)
      FileUtils.mkdir_p(@log_dir)
      File.write(state_path(issue_number), JSON.pretty_generate({
        issue_number: issue_number,
        completed_steps: completed_steps,
        worktree_path: worktree_path,
        branch: branch,
        updated_at: Time.now.iso8601
      }))
    end

    # Read an issue's state hash (symbol keys); nil when the file is
    # missing or unparsable.
    def load(issue_number)
      path = state_path(issue_number)
      return nil unless File.exist?(path)

      JSON.parse(File.read(path), symbolize_names: true)
    rescue JSON::ParserError
      nil
    end

    # Remove an issue's state file (no-op when it does not exist).
    def delete(issue_number)
      FileUtils.rm_f(state_path(issue_number))
    end

    # All parsable state files in the log dir; corrupt files are skipped.
    def list
      Dir.glob(File.join(@log_dir, 'issue-*-state.json')).filter_map do |path|
        JSON.parse(File.read(path), symbolize_names: true)
      rescue JSON::ParserError
        nil
      end
    end

    private

    def state_path(issue_number)
      File.join(@log_dir, "issue-#{issue_number}-state.json")
    end
  end
end
@@ -0,0 +1,68 @@
1
# frozen_string_literal: true

require 'json'

module Ocak
  # Batch planning logic extracted from PipelineRunner.
  module Planner
    # Prompt templates keyed by pipeline role; %<issue>s is the issue number.
    STEP_PROMPTS = {
      'implement' => 'Implement GitHub issue #%<issue>s',
      'review' => 'Review the changes for GitHub issue #%<issue>s. Run: git diff main',
      'verify' => 'Review the changes for GitHub issue #%<issue>s. Run: git diff main',
      'security' => 'Security review changes for GitHub issue #%<issue>s. Run: git diff main',
      'document' => 'Add documentation for changes in GitHub issue #%<issue>s',
      'audit' => 'Audit the changed files for issue #%<issue>s. Run: git diff main --name-only',
      'merge' => 'Create a PR, merge it, and close issue #%<issue>s',
      'create_pr' => 'Create a PR, merge it, and close issue #%<issue>s'
    }.freeze

    # Build the agent prompt for a pipeline role. 'fix' embeds the previous
    # review output; unknown roles get a generic prompt.
    def build_step_prompt(role, issue_number, review_output)
      if role == 'fix'
        "Fix these review findings for issue ##{issue_number}:\n\n#{review_output}"
      elsif STEP_PROMPTS.key?(role)
        format(STEP_PROMPTS[role], issue: issue_number)
      else
        "Run #{role} for GitHub issue ##{issue_number}"
      end
    end

    # Ask the planner agent to group issues into parallelizable batches.
    # Falls back to one-issue-per-batch whenever planning fails.
    def plan_batches(issues, logger:, claude:)
      return sequential_batches(issues) if issues.size <= 1

      issue_json = JSON.generate(issues.map { |i| { number: i['number'], title: i['title'] } })
      result = claude.run_agent(
        'planner',
        "Analyze these issues and output parallelization batches as JSON:\n\n#{issue_json}"
      )

      unless result.success?
        logger.warn('Planner failed, falling back to sequential')
        return sequential_batches(issues)
      end

      parse_planner_output(result.output, issues, logger)
    end

    # Extract the batches array from free-form planner output. Always returns
    # an Array of batch hashes — never nil (callers iterate the result).
    def parse_planner_output(output, issues, logger)
      json_match = output.match(/\{[\s\S]*"batches"[\s\S]*\}/)
      unless json_match
        logger.warn('Could not parse planner output, falling back to sequential')
        return sequential_batches(issues)
      end

      batches = JSON.parse(json_match[0])['batches']
      # FIX: guard against JSON that parses but has a null/non-array "batches"
      # value — returning nil here would crash the caller's batches.each.
      return batches if batches.is_a?(Array)

      logger.warn('Could not parse planner output, falling back to sequential')
      sequential_batches(issues)
    rescue JSON::ParserError => e
      logger.warn("JSON parse error from planner: #{e.message}")
      sequential_batches(issues)
    end

    # One batch per issue, in the given order, defaulting complexity to 'full'.
    def sequential_batches(issues)
      issues.map.with_index do |i, idx|
        issue = i.dup
        issue['complexity'] ||= 'full'
        { 'batch' => idx + 1, 'issues' => [issue] }
      end
    end
  end
end
@@ -0,0 +1,82 @@
1
# frozen_string_literal: true

require 'open3'

module Ocak
  # Runs a subprocess with streaming line output and timeout support.
  module ProcessRunner
    module_function

    # Execute cmd (an argv array) in chdir.
    #
    # Returns [stdout, stderr, status]; status is a Process::Status, or
    # ClaudeRunner::FailedStatus when the binary does not exist.
    # on_line, when given, is invoked with each complete stdout line (chomped);
    # a trailing partial line is flushed after the process exits.
    # timeout, when given, kills the process and reports a timeout message
    # in the stderr slot.
    def run(cmd, chdir:, timeout: nil, on_line: nil)
      Open3.popen3(*cmd, chdir: chdir) do |stdin, out, err, wait_thr|
        stdin.close
        ctx = {
          stdout: +'', stderr: +'', line_buf: +'',
          deadline: timeout ? Process.clock_gettime(Process::CLOCK_MONOTONIC) + timeout : nil,
          timeout: timeout, wait_thr: wait_thr, on_line: on_line,
          out: out # identifies the stdout IO so chunks are attributed correctly
        }

        stdout, stderr, line_buf = read_streams(out, err, ctx)
        on_line&.call(line_buf.chomp) unless line_buf.empty?
        [stdout, stderr, wait_thr.value]
      end
    rescue Errno::ENOENT => e
      ['', e.message, ClaudeRunner::FailedStatus.instance]
    end

    # Drain both pipes until EOF or deadline; returns [stdout, stderr, line_buf].
    def read_streams(out, err, ctx)
      ctx[:out] ||= out # tolerate callers that did not pre-set :out
      readers = [out, err]

      until readers.empty?
        remaining = ctx[:deadline] ? ctx[:deadline] - Process.clock_gettime(Process::CLOCK_MONOTONIC) : 5

        if ctx[:deadline] && remaining <= 0
          kill_process(ctx[:wait_thr].pid)
          return ['', "Timed out after #{ctx[:timeout]}s", +'']
        end

        read_available(readers, remaining, ctx)
      end

      [ctx[:stdout], ctx[:stderr], ctx[:line_buf]]
    end

    # TERM, grace period, then KILL; ignores an already-exited process.
    def kill_process(pid)
      Process.kill('TERM', pid)
      sleep 2
      Process.kill('KILL', pid)
    rescue Errno::ESRCH
      nil
    end

    # Read whatever is ready on either pipe, routing stdout chunks through
    # the line splitter and dropping a pipe from the reader set at EOF.
    def read_available(readers, remaining, ctx)
      ready = IO.select(readers, nil, nil, [remaining, 1].min)
      return unless ready

      ready[0].each do |io|
        chunk = io.read_nonblock(8192)
        # FIX: compare against the actual stdout IO. The old `io == readers[0]`
        # check misattributed stderr to stdout once stdout hit EOF and was
        # removed from readers (stderr then became readers[0]).
        if io.equal?(ctx[:out])
          ctx[:stdout] << chunk
          process_lines(ctx[:line_buf], chunk, ctx[:on_line])
        else
          ctx[:stderr] << chunk
        end
      rescue IO::WaitReadable
        # select can report readiness spuriously; just retry on the next pass
        next
      rescue EOFError
        readers.delete(io)
      end
    end

    # Append chunk to the pending buffer and emit each complete line.
    def process_lines(line_buf, chunk, on_line)
      return unless on_line

      line_buf << chunk
      while (idx = line_buf.index("\n"))
        on_line.call(line_buf.slice!(0, idx + 1).chomp)
      end
    end
  end
end