ares-runtime 2.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +10 -0
- data/LICENSE.txt +21 -0
- data/README.md +110 -0
- data/config/models.yml +25 -0
- data/config/ollama.yml +4 -0
- data/config/workspaces.yml +6 -0
- data/exe/ares +75 -0
- data/lib/ares/cli.rb +37 -0
- data/lib/ares/runtime/adapters/base_adapter.rb +68 -0
- data/lib/ares/runtime/adapters/claude_adapter.rb +35 -0
- data/lib/ares/runtime/adapters/codex_adapter.rb +35 -0
- data/lib/ares/runtime/adapters/cursor_adapter.rb +32 -0
- data/lib/ares/runtime/adapters/ollama_adapter.rb +37 -0
- data/lib/ares/runtime/config_cli.rb +18 -0
- data/lib/ares/runtime/config_manager.rb +137 -0
- data/lib/ares/runtime/context_loader.rb +45 -0
- data/lib/ares/runtime/core_subsystem.rb +36 -0
- data/lib/ares/runtime/diagnostic_parser.rb +159 -0
- data/lib/ares/runtime/doctor.rb +34 -0
- data/lib/ares/runtime/engine_chain.rb +108 -0
- data/lib/ares/runtime/git_manager.rb +26 -0
- data/lib/ares/runtime/initializer.rb +30 -0
- data/lib/ares/runtime/logs_cli.rb +35 -0
- data/lib/ares/runtime/model_selector.rb +36 -0
- data/lib/ares/runtime/ollama_client_factory.rb +43 -0
- data/lib/ares/runtime/planner/ollama_planner.rb +51 -0
- data/lib/ares/runtime/planner/tiny_task_processor.rb +129 -0
- data/lib/ares/runtime/prompt_builder.rb +52 -0
- data/lib/ares/runtime/quota_manager.rb +48 -0
- data/lib/ares/runtime/router.rb +285 -0
- data/lib/ares/runtime/task_logger.rb +37 -0
- data/lib/ares/runtime/task_manager.rb +9 -0
- data/lib/ares/runtime/terminal_runner.rb +37 -0
- data/lib/ares/runtime/tui.rb +211 -0
- data/lib/ares/runtime/version.rb +7 -0
- data/lib/ares/runtime.rb +5 -0
- data/lib/ares_runtime.rb +63 -0
- metadata +240 -0
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Ares
  module Runtime
    # Builds the workspace context string handed to AI engines: the resolved
    # workspace root, the root AGENTS.md contents, and every
    # .skills/**/SKILL.md file found under the root.
    class ContextLoader
      # Returns a single context string for the current working directory's
      # workspace. A missing AGENTS.md or .skills directory contributes
      # empty text rather than raising.
      def self.load
        root = find_workspace_root(Dir.pwd)

        agents_file = File.join(root, 'AGENTS.md')
        skills_dir = File.join(root, '.skills')

        agents = File.exist?(agents_file) ? File.read(agents_file) : ''
        skills = +'' # unary + yields a mutable string under frozen_string_literal

        if Dir.exist?(skills_dir)
          Dir.glob(File.join(skills_dir, '**/SKILL.md')).each do |file|
            skills << "#{File.read(file)}\n"
          end
        end

        "Workspace Root: #{root}\n#{agents}\n#{skills}"
      end

      # Resolves the workspace root for +path+:
      # 1. an explicitly registered workspace that contains +path+, else
      # 2. the nearest ancestor directory containing AGENTS.md (stopping at
      #    the home directory or filesystem root), else
      # 3. +path+ itself.
      def self.find_workspace_root(path)
        # Check explicitly registered workspaces first
        registered = ConfigManager.load_merged('workspaces.yml')[:workspaces] || []
        registered.each do |workspace|
          # Require a path-segment boundary: a bare start_with? would let
          # '/a/foo-bar' wrongly match a registered workspace of '/a/foo'.
          return workspace if path == workspace || path.start_with?(workspace + File::SEPARATOR)
        end

        # Walk up the tree looking for AGENTS.md
        current = path
        while current != '/'
          # Stop at home directory or root
          break if current == Dir.home || current == '/'
          return current if File.exist?(File.join(current, 'AGENTS.md'))

          current = File.expand_path('..', current)
        end

        path # Default to CWD if no root found
      end
    end
  end
end
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
# frozen_string_literal: true

require_relative 'task_logger'
require_relative 'planner/ollama_planner'
require_relative 'planner/tiny_task_processor'
require_relative 'ollama_client_factory'

module Ares
  module Runtime
    # Subsystem Facade that initializes and bundles core dependencies for the Router.
    class CoreSubsystem
      attr_reader :logger, :planner, :selector, :tiny_processor, :ollama_healthy

      def initialize
        @logger = TaskLogger.new
        @ollama_healthy = initialize_ollama

        @planner = OllamaPlanner.new(healthy: @ollama_healthy)
        @selector = ModelSelector.new
        @tiny_processor = TinyTaskProcessor.new(healthy: @ollama_healthy)
      end

      private

      # Probes the local Ollama daemon once, reports the outcome to the
      # console, and returns true when the engine responded.
      def initialize_ollama
        OllamaClientFactory.health_check?.tap do |available|
          notice =
            if available
              "✅ Local AI Engine (Ollama) is available."
            else
              "⚠️ Local AI (Ollama) unavailable. Running in Safe Mode for planning/diagnostics."
            end
          puts notice
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,159 @@
|
|
|
1
|
+
# frozen_string_literal: true

require 'json'

module Ares
  module Runtime
    # Parses RuboCop and RSpec JSON output into a unified diagnostic structure.
    # Avoids sending raw logs to LLMs for summarization.
    #
    # Every parser returns a Hash with string keys:
    #   'failed_items'  => Array<String> of "path:line: message" entries
    #   'error_summary' => String one-line count summary
    #   'files'         => Array<{'path'=>String,'line'=>Integer}>, de-duplicated
    class DiagnosticParser
      # Entry point. +type+ selects the parser: :lint (RuboCop), :syntax
      # ("path:line: message" lines, e.g. ruby -c), anything else RSpec.
      # Blank/nil output falls back to a raw-text result.
      def self.parse(output, type:)
        return fallback_parse(output, type) if output.nil? || output.strip.empty?

        clean_output = strip_ansi(output)
        case type
        when :lint then parse_rubocop(clean_output)
        when :syntax then parse_syntax(clean_output)
        else parse_rspec(clean_output)
        end
      end

      # Removes ANSI color escape sequences so the regex parsers see plain text.
      def self.strip_ansi(text)
        text.to_s.gsub(/\e\[([;\d]+)?m/, '')
      end

      # Parses `rubocop --format json`; falls back to text parsing when the
      # output is not valid JSON.
      def self.parse_rubocop(output)
        data = JSON.parse(output)
        failed_items = []
        files = []

        data['files']&.each do |file|
          path = file['path']
          file['offenses']&.each do |offense|
            # RuboCop versions differ on the location key name.
            line = offense.dig('location', 'line') || offense.dig('location', 'start_line')
            failed_items << "#{path}:#{line}: #{offense['message']}"
            files << { 'path' => path, 'line' => line.to_i }
          end
        end

        build_result(failed_items, files, 'RuboCop')
      rescue JSON::ParserError
        parse_text_rubocop(output)
      end

      # Text fallback for RuboCop's default (non-JSON) formatter output.
      def self.parse_text_rubocop(output)
        failed_items = []
        files = []
        output.each_line do |line|
          # Match standard RuboCop line: path:line:col: C: Message
          # Using match instead of match? to capture data safely
          m = line.match(/([^:\s]+):(\d+):(\d+): ([A-Z]): (.+)/)
          next unless m

          path = m[1]&.strip
          line_num = m[2].to_i
          letter = m[4]
          msg = m[5]&.strip

          next unless path && msg

          # Only collect if it looks like a real RuboCop offense level
          if %w[C W E F].include?(letter)
            failed_items << "#{path}:#{line_num}: #{msg}"
            files << { 'path' => path, 'line' => line_num }
          end
        end

        build_result(failed_items, files, 'RuboCop')
      end

      # Parses `rspec --format json`; falls back to text parsing when the
      # output is not valid JSON.
      def self.parse_rspec(output)
        data = JSON.parse(output)
        failed_items = []
        files = []

        data['examples']&.each do |ex|
          next unless ex['status'] == 'failed'

          path = ex['file_path']&.delete_prefix('./')
          line = ex['line_number']
          msg = ex.dig('exception', 'message') || ex['full_description']
          failed_items << "#{path}:#{line}: #{msg}"
          files << { 'path' => path, 'line' => line.to_i }
        end

        build_result(failed_items, files, 'RSpec')
      rescue JSON::ParserError
        parse_text_rspec(output)
      end

      # Text fallback for RSpec's human-readable formatter output.
      def self.parse_text_rspec(output)
        failed_items = []
        files = []
        # Match typical RSpec failure location: # ./path/to/spec.rb:123:in `...'
        output.scan(%r{#\s+\./(.+?):(\d+):in}).each do |path, line|
          failed_items << "Failure at #{path}:#{line}"
          files << { 'path' => path, 'line' => line.to_i }
        end

        build_result(failed_items, files, 'RSpec')
      end

      # Parses "path:line: message" lines (e.g. `ruby -c` syntax errors).
      def self.parse_syntax(output)
        failed_items = []
        files = []
        output.each_line do |line|
          # chomp first: each_line keeps the trailing "\n", which made the
          # original \z-anchored match fail and silently drop every line.
          m = line.chomp.match(/\A(.+?):(\d+):\s*(.+)\z/)
          next unless m

          path = m[1].strip
          line_num = m[2].to_i
          msg = m[3].strip
          failed_items << "#{path}:#{line_num}: #{msg}"
          files << { 'path' => path, 'line' => line_num }
        end

        build_result(failed_items, files, 'syntax')
      end

      # One-line human summary of the failure count for +source+.
      def self.build_error_summary(failed_items, source)
        count = failed_items.size
        return "No #{source} issues found." if count.zero?

        "There are #{count} failed #{source.downcase} item(s)."
      end

      # Drops entries that repeat the same path/line pair.
      def self.remove_duplicates(files)
        files.uniq { |f| [f['path'], f['line']] }
      end

      # Assembles the unified result Hash shared by every parser.
      def self.build_result(failed_items, files, source)
        {
          'failed_items' => failed_items,
          'error_summary' => build_error_summary(failed_items, source),
          'files' => remove_duplicates(files)
        }
      end

      # Last resort for blank/unparseable output: ship the first 50 raw
      # lines as a single failed item with no file locations.
      def self.fallback_parse(output, type)
        {
          'failed_items' => [output.to_s.lines.first(50).join],
          'error_summary' => "Parsed #{type} output via fallback.",
          'files' => []
        }
      end
    end
  end
end
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Ares
  module Runtime
    # Environment diagnostics: reports which AI engine CLIs resolve on PATH.
    class Doctor
      # Runs all checks and prints one status line per engine CLI.
      def self.run
        puts "Running Ares diagnostics...\n\n"

        check_ollama
        check_claude
        check_codex
        check_cursor

        puts "\nDiagnostics complete."
      end

      # Prints "<label>: OK" when +binary+ resolves via `which`, else
      # "<label>: Missing". Shared by all per-engine checks.
      def self.check_binary(label, binary)
        puts "#{label}: #{system("which #{binary} > /dev/null") ? 'OK' : 'Missing'}"
      end

      def self.check_ollama
        check_binary('Ollama', 'ollama')
      end

      def self.check_claude
        check_binary('Claude CLI', 'claude')
      end

      def self.check_codex
        check_binary('Codex CLI', 'codex')
      end

      def self.check_cursor
        check_binary('Cursor CLI', 'cursor')
      end
    end
  end
end
|
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
# frozen_string_literal: true

require_relative 'adapters/claude_adapter'
require_relative 'adapters/codex_adapter'
require_relative 'adapters/cursor_adapter'
require_relative 'adapters/ollama_adapter'

module Ares
  module Runtime
    # Chain of Responsibility Handler linking CLI engines for automated fallback.
    # Each handler wraps one engine adapter; on failure it delegates to
    # +next_handler+ until the chain is exhausted.
    class EngineChain
      attr_accessor :next_handler
      attr_reader :engine_name

      def initialize(engine_name)
        @engine_name = engine_name
        @next_handler = nil
        @adapter = get_adapter(engine_name)
      end

      # Standard execution chain
      def call(prompt, options, attempt: 1, total: 1)
        if attempt > 1
          puts "Falling back to #{@engine_name} (attempt #{attempt}/#{total})..."
        else
          puts "Executing task via #{@engine_name} (attempt #{attempt}/#{total})..."
        end

        begin
          run_adapter(prompt, options)
        rescue StandardError => e
          puts "\n⚠️ #{@engine_name} failed: #{e.message.split("\n").first}"

          raise 'All available AI engines failed to execute the task.' unless @next_handler

          @next_handler.call(prompt, options, attempt: attempt + 1, total: total)
        end
      end

      # Specialized call for fix escalation. +checkpoint_block+, when given,
      # receives the engine name before each attempt so callers can snapshot
      # state prior to the fix being applied.
      def call_fix(prompt, options, attempt: 1, total: 1, &checkpoint_block)
        if attempt > 1
          puts "Falling back to #{@engine_name} for fix (attempt #{attempt}/#{total})..."
        else
          puts "Applying fix via #{@engine_name} (attempt #{attempt}/#{total})..."
        end

        checkpoint_block&.call(@engine_name)

        begin
          run_adapter(prompt, options)
        rescue StandardError => e
          puts "\n⚠️ #{@engine_name} failed during fix: #{e.message.split("\n").first}"

          raise 'All available AI engines failed to apply the fix.' unless @next_handler

          @next_handler.call_fix(prompt, options, attempt: attempt + 1, total: total, &checkpoint_block)
        end
      end

      # Factory method to build the chain from an ordered list of engine
      # names. Returns the head handler, or nil when the list is empty.
      def self.build(engine_names)
        return nil if engine_names.empty?

        first_handler = new(engine_names.first)
        current = first_handler

        engine_names[1..].each do |name|
          handler = new(name)
          current.next_handler = handler
          current = handler
        end

        first_handler
      end

      private

      # Shared attempt body (was duplicated in call/call_fix): record quota
      # usage, then dispatch to the adapter with engine-appropriate options.
      def run_adapter(prompt, options)
        QuotaManager.increment_usage(@engine_name)

        @adapter.call(prompt, options[:model], **adapter_options(options))
      end

      # Maps engine names to their adapter instances.
      def get_adapter(engine)
        case engine
        when 'claude' then Ares::Runtime::ClaudeAdapter.new
        when 'cursor' then Ares::Runtime::CursorAdapter.new
        when 'codex' then Ares::Runtime::CodexAdapter.new
        when 'ollama' then Ares::Runtime::OllamaAdapter.new
        else raise "Unknown engine: #{engine}"
        end
      end

      # Translates router options into the keyword options each adapter accepts.
      def adapter_options(options)
        opts = {}
        case @engine_name
        when 'claude'
          opts[:fork_session] = options[:fork_session] if options.key?(:fork_session)
        when 'cursor'
          opts[:resume] = options.fetch(:resume, true)
          opts[:cloud] = options[:cloud] if options.key?(:cloud)
        when 'codex'
          opts[:resume] = options.fetch(:resume, true)
        end
        opts
      end
    end
  end
end
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
# frozen_string_literal: true

require 'shellwords'

module Ares
  module Runtime
    # Manages Git operations for task-based branching and committing.
    class GitManager
      # Creates and checks out "task-<id>[-<slug>]" branched off main (or
      # master when main is absent). +task_description+, when given, is
      # slugified into the branch name.
      def self.create_branch(task_id, task_description = nil)
        # Default to main or master as base branch
        base = `git rev-parse --verify main >/dev/null 2>&1 && echo main || echo master`.strip

        slug = if task_description
                 # \A/\z anchor the whole string (the original ^/$ were
                 # per-line anchors) when trimming leading/trailing dashes.
                 task_description.downcase.gsub(/[^a-z0-9]/, '-').squeeze('-').slice(0, 40).gsub(/\A-|-\z/, '')
               end
        slug = nil if slug && slug.empty? # avoid a dangling "task-N-" branch
        branch_name = slug ? "task-#{task_id}-#{slug}" : "task-#{task_id}"

        # Branch from the base branch to keep a flat history. Escape the
        # interpolated values so task input cannot inject shell commands.
        `git checkout -b #{Shellwords.escape(branch_name)} #{Shellwords.escape(base)}`
      end

      # Stages everything and commits with a standardized task message.
      # The message is shell-escaped: task descriptions are untrusted input.
      def self.commit_changes(task_id, task_description)
        `git add .`
        `git commit -m #{Shellwords.escape("ares: task-#{task_id} #{task_description}")}`
      end
    end
  end
end
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
# frozen_string_literal: true

require 'fileutils'

module Ares
  module Runtime
    # One-time project setup: seeds config/ares with the gem's default YAML files.
    class Initializer
      # Creates <project_root>/config/ares and copies the bundled default
      # configs into it. Prints a notice and returns early when the
      # directory already exists.
      def self.run
        config_dir = File.join(ConfigManager.project_root, 'config', 'ares')

        if Dir.exist?(config_dir)
          puts 'Ares already initialized.'
          return
        end

        FileUtils.mkdir_p(config_dir)
        %w[models.yml ollama.yml].each { |name| copy_default(name, config_dir) }

        puts "Initialized Ares in #{config_dir}"
      end

      # Copies one bundled default config file into +target+, silently
      # skipping files the gem does not ship.
      def self.copy_default(file, target)
        source = ConfigManager.gem_default_path(file)
        return unless File.exist?(source)

        FileUtils.cp(source, File.join(target, file))
      end
    end
  end
end
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
# frozen_string_literal: true

require 'json'

module Ares
  module Runtime
    # `ares logs` command: prints a short summary of the ten most recent
    # task logs, preferring the project log directory over the global one.
    class LogsCLI
      def self.run
        project_log_dir = File.join(ConfigManager.project_root, 'logs')
        global_log_dir = File.expand_path('~/.ares/logs')

        log_dir = Dir.exist?(project_log_dir) ? project_log_dir : global_log_dir

        unless Dir.exist?(log_dir)
          puts "No logs found in #{log_dir}."
          return
        end

        # Newest-first, capped at the ten most recently modified logs.
        logs = Dir.glob("#{log_dir}/*.json").sort_by { |f| File.mtime(f) }.last(10).reverse

        if logs.empty?
          puts "No JSON logs found in #{log_dir}."
          return
        end

        logs.each do |file|
          puts "\n--- Task: #{File.basename(file, '.json')} ---"
          begin
            data = JSON.parse(File.read(file))
          rescue JSON::ParserError
            # One corrupt log file must not abort the whole listing.
            puts 'Log file is not valid JSON; skipping.'
            next
          end
          puts "Timestamp: #{data['timestamp']}"
          puts "Task: #{data['task']}"
          puts "Engine: #{data.dig('selection', 'engine')}"
          puts "Result: #{data['result']&.slice(0, 100)}..."
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Ares
  module Runtime
    # Maps a planner output (task type / risk / confidence) to the engine
    # and model that should execute it, per the models.yml rules.
    class ModelSelector
      # Plans below this confidence are escalated to the strongest engine.
      CONFIDENCE_THRESHOLD = 0.7

      def self.select(plan)
        @config = ConfigManager.load_models

        task_type = plan['task_type'] || 'refactor'
        confidence = plan['confidence'] || 1.0

        # Escalate if confidence is low or risk is explicitly high
        return { engine: :claude, model: 'opus' } if confidence < CONFIDENCE_THRESHOLD || plan['risk_level'] == 'high'

        # ConfigManager sometimes returns keys as strings, so try both key
        # forms before falling back to the generic refactor rule. (The
        # original only did symbol lookup despite the comment saying so.)
        rule = @config[task_type.to_sym] || @config[task_type.to_s] ||
               @config[:refactor] || @config['refactor']
        engine = (rule[:engine] || rule['engine']).to_sym

        # Safety: restrict Ollama from code-modifying tasks if configured incorrectly
        if engine == :ollama && %w[refactor architecture bulk_patch test_generation].include?(task_type.to_s)
          engine = :claude
          model = 'sonnet'
        else
          model = rule[:model] || rule['model']
        end

        {
          engine: engine,
          model: model
        }
      end
    end
  end
end
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
# frozen_string_literal: true

require 'ollama_client'
require 'timeout'
require_relative 'config_manager'

module Ares
  module Runtime
    # Centralizes Ollama client creation with strict timeouts and fail-fast logic.
    class OllamaClientFactory
      # Builds a configured Ollama::Client from config/ollama.yml values.
      # Retries are disabled so Ares' own fallback chain handles failures.
      def self.build(timeout_seconds: 10)
        config_data = ConfigManager.load_ollama
        config = Ollama::Config.new
        config.base_url = config_data[:base_url]
        config.num_ctx = config_data[:num_ctx]
        config.timeout = timeout_seconds
        config.retries = 0 # Fail fast, let Ares handle fallbacks

        Ollama::Client.new(config: config)
      end

      # 5s strict health check to determine if local AI is available.
      def self.health_check?
        Timeout.timeout(5) do
          client = build(timeout_seconds: 4)
          # 'tags' call is a lightweight way to check connectivity
          client.tags
          true
        end
      rescue StandardError
        # Timeout::Error is a StandardError subclass, so this single clause
        # also covers the hard 5s deadline above.
        false
      end

      # Executes an Ollama call with a hard timeout and a provided fallback block.
      # Returns the block's value, or +fallback_value+ on any error/timeout.
      def self.with_resilience(hard_timeout: 15, fallback_value: nil, &block)
        Timeout.timeout(hard_timeout, &block)
      rescue StandardError => e
        puts "\n⚠️ Local AI (Ollama) Failure: #{e.message.split("\n").first}. Triggering Safe Mode fallback."
        fallback_value
      end
    end
  end
end
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
# frozen_string_literal: true

require 'ollama_client'
require 'timeout'
require_relative '../../../../config/planner_schema'
require_relative '../ollama_client_factory'

module Ares
  module Runtime
    # Decomposes an engineering task into structured work slices via the
    # local Ollama model, degrading to a conservative default plan whenever
    # Ollama is unhealthy or the call fails/times out.
    class OllamaPlanner
      PLANNER_TIMEOUT = 30 # Longer once health is verified
      HARD_TIMEOUT = 35

      def initialize(healthy: true)
        @healthy = healthy
        @client = nil
        @client = OllamaClientFactory.build(timeout_seconds: PLANNER_TIMEOUT) if healthy
      end

      # Returns the structured plan Hash for +task+ — either the model's
      # schema-constrained answer or the Safe Mode default.
      def plan(task)
        return safe_default_plan(task) unless @healthy

        fallback = safe_default_plan(task)
        OllamaClientFactory.with_resilience(
          hard_timeout: HARD_TIMEOUT,
          fallback_value: fallback
        ) do
          @client.generate(prompt: build_prompt(task), schema: PLANNER_SCHEMA)
        end
      end

      private

      # Conservative plan used whenever the local model cannot be consulted.
      def safe_default_plan(task)
        {
          'task_type' => 'refactor',
          'risk_level' => 'medium',
          'confidence' => 1.0,
          'slices' => [task],
          'explanation' => 'Safe Mode Default: Proceeding with high-level refactor plan.'
        }
      end

      # Assembles the planning prompt: analysis instructions, the task
      # itself, then the strict-JSON output directive.
      def build_prompt(task)
        header = "Analyze the following engineering task.\nDecompose it into discrete, executable work units (slices).\nAssign a task type, risk level, and confidence score."
        builder = PromptBuilder.new.add_instruction(header)
        builder
          .add_task(task)
          .add_instruction('Respond strictly as JSON. Slices should be a clean array of strings.')
          .build
      end
    end
  end
end
|