ace-llm-providers-cli 0.27.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.ace-defaults/llm/providers/claude.yml +24 -0
- data/.ace-defaults/llm/providers/codex.yml +22 -0
- data/.ace-defaults/llm/providers/codexoss.yml +13 -0
- data/.ace-defaults/llm/providers/gemini.yml +32 -0
- data/.ace-defaults/llm/providers/opencode.yml +26 -0
- data/.ace-defaults/llm/providers/pi.yml +43 -0
- data/CHANGELOG.md +457 -0
- data/LICENSE +21 -0
- data/README.md +36 -0
- data/Rakefile +14 -0
- data/exe/ace-llm-providers-cli-check +76 -0
- data/lib/ace/llm/providers/cli/atoms/args_normalizer.rb +82 -0
- data/lib/ace/llm/providers/cli/atoms/auth_checker.rb +74 -0
- data/lib/ace/llm/providers/cli/atoms/command_formatters.rb +19 -0
- data/lib/ace/llm/providers/cli/atoms/command_rewriter.rb +75 -0
- data/lib/ace/llm/providers/cli/atoms/execution_context.rb +28 -0
- data/lib/ace/llm/providers/cli/atoms/provider_detector.rb +48 -0
- data/lib/ace/llm/providers/cli/atoms/session_finders/claude_session_finder.rb +79 -0
- data/lib/ace/llm/providers/cli/atoms/session_finders/codex_session_finder.rb +84 -0
- data/lib/ace/llm/providers/cli/atoms/session_finders/gemini_session_finder.rb +66 -0
- data/lib/ace/llm/providers/cli/atoms/session_finders/open_code_session_finder.rb +119 -0
- data/lib/ace/llm/providers/cli/atoms/session_finders/pi_session_finder.rb +87 -0
- data/lib/ace/llm/providers/cli/atoms/skill_command_rewriter.rb +30 -0
- data/lib/ace/llm/providers/cli/atoms/worktree_dir_resolver.rb +56 -0
- data/lib/ace/llm/providers/cli/claude_code_client.rb +358 -0
- data/lib/ace/llm/providers/cli/claude_oai_client.rb +322 -0
- data/lib/ace/llm/providers/cli/cli_args_support.rb +19 -0
- data/lib/ace/llm/providers/cli/codex_client.rb +291 -0
- data/lib/ace/llm/providers/cli/codex_oai_client.rb +274 -0
- data/lib/ace/llm/providers/cli/gemini_client.rb +346 -0
- data/lib/ace/llm/providers/cli/molecules/health_checker.rb +80 -0
- data/lib/ace/llm/providers/cli/molecules/safe_capture.rb +153 -0
- data/lib/ace/llm/providers/cli/molecules/session_finder.rb +44 -0
- data/lib/ace/llm/providers/cli/molecules/skill_name_reader.rb +64 -0
- data/lib/ace/llm/providers/cli/open_code_client.rb +271 -0
- data/lib/ace/llm/providers/cli/pi_client.rb +331 -0
- data/lib/ace/llm/providers/cli/version.rb +11 -0
- data/lib/ace/llm/providers/cli.rb +47 -0
- metadata +139 -0
|
@@ -0,0 +1,153 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "open3"
|
|
4
|
+
|
|
5
|
+
module Ace
  module LLM
    module Providers
      module CLI
        module Molecules
          # Thread-safe command execution with process-level timeout.
          #
          # Replaces the unsafe Timeout.timeout { Open3.capture3(...) } pattern
          # which causes "stream closed in another thread (IOError)" when the
          # timeout fires while Open3's internal reader threads hold pipe handles.
          #
          # Uses Open3.popen3 + Process.kill so the child process is terminated
          # directly — no thread interruption, no IOError.
          class SafeCapture
            # @param cmd [Array<String>] Command arguments
            # @param timeout [Integer, Float, String] Timeout in seconds
            # @param stdin_data [String, nil] Data to write to stdin
            # @param chdir [String, nil] Working directory
            # @param env [Hash, nil] Environment variables (merged with current env)
            # @param provider_name [String] Provider name for error messages
            # @param isolate_process_group [Boolean] Spawn subprocess in isolated process group
            # @param cleanup_group_on_exit [Boolean] Best-effort cleanup of descendants on success
            # @return [Array(String, String, Process::Status)] [stdout, stderr, status]
            # @raise [Ace::LLM::ProviderError] on timeout
            # @raise [ArgumentError] when timeout is not a positive finite number
            def self.call(cmd, timeout:, stdin_data: nil, chdir: nil, env: nil, provider_name: "CLI",
                          isolate_process_group: true, cleanup_group_on_exit: true)
              normalized_timeout = normalize_timeout(timeout)
              opts = {}
              opts[:chdir] = chdir if chdir
              opts[:pgroup] = true if isolate_process_group

              args = env ? [env, *cmd] : cmd

              Open3.popen3(*args, **opts) do |stdin, stdout, stderr, wait_thr|
                pid = wait_thr.pid
                pgid = safe_getpgid(pid)
                debug_log(provider_name, "spawn pid=#{pid} pgid=#{pgid || "n/a"}")

                begin
                  stdin.write(stdin_data) if stdin_data
                rescue Errno::EPIPE
                  # Subprocess exited before consuming stdin — continue to capture stderr for the real error
                end
                stdin.close

                # Dedicated reader threads drain the pipes so a chatty child can
                # never deadlock on a full pipe buffer while we wait on it.
                out_reader = Thread.new { safe_read_stream(stdout) }
                err_reader = Thread.new { safe_read_stream(stderr) }
                out_reader.report_on_exception = false
                err_reader.report_on_exception = false

                unless wait_thr.join(normalized_timeout)
                  # Timeout: kill subprocess group (and descendants), then clean up
                  terminate_subprocess_tree(pid: pid, pgid: pgid, provider_name: provider_name)
                  wait_thr.join(5)

                  stdout.close unless stdout.closed?
                  stderr.close unless stderr.closed?
                  out_reader.join(1)
                  err_reader.join(1)
                  out_reader.kill if out_reader.alive?
                  err_reader.kill if err_reader.alive?
                  raise Ace::LLM::ProviderError,
                    "#{provider_name} CLI execution timed out after #{normalized_timeout} seconds"
                end

                status = wait_thr.value
                if isolate_process_group && cleanup_group_on_exit
                  terminate_descendants_after_success(pid: pid, pgid: pgid, provider_name: provider_name)
                end

                [out_reader.value, err_reader.value, status]
              end
            end

            class << self
              private

              # Read an IO fully; a pipe closed during timeout cleanup yields "".
              def safe_read_stream(io)
                io.read
              rescue IOError
                ""
              end

              # Coerce +value+ to a positive, finite number of seconds.
              #
              # FIX: previously any finite Numeric was returned without a
              # positivity check, so timeout: 0 or timeout: -5 slipped through
              # to wait_thr.join and produced an immediate spurious timeout.
              # Numeric and String inputs are now validated identically.
              #
              # @param value [Numeric, String, #to_s]
              # @return [Numeric]
              # @raise [ArgumentError] unless value is positive and finite
              def normalize_timeout(value)
                numeric = value.is_a?(Numeric) ? value : Float(value.to_s.strip)
                unless numeric.finite? && numeric.positive?
                  raise ArgumentError, "timeout must be a positive numeric value, got #{value.inspect}"
                end

                numeric
              rescue ArgumentError, TypeError
                # Float() failures and the positivity raise above funnel into one
                # uniform, caller-friendly message.
                raise ArgumentError, "timeout must be a positive numeric value, got #{value.inspect}"
              end

              # TERM then (after a short grace period) KILL the timed-out child
              # and, when a pgid is known, its whole process group.
              def terminate_subprocess_tree(pid:, pgid:, provider_name:)
                debug_log(provider_name, "timeout cleanup pid=#{pid} pgid=#{pgid || "n/a"}")
                terminate_group_or_pid("TERM", pid, pgid)
                sleep(0.1)
                terminate_group_or_pid("KILL", pid, pgid)
              end

              # After a successful exit, reap any descendants the child left
              # running in its isolated process group (best effort).
              def terminate_descendants_after_success(pid:, pgid:, provider_name:)
                return unless pgid
                return unless group_alive?(pgid)

                debug_log(provider_name, "post-exit cleanup pgid=#{pgid}")
                terminate_group_or_pid("TERM", pid, pgid)
                sleep(0.05)
                terminate_group_or_pid("KILL", pid, pgid) if group_alive?(pgid)
              end

              # Signal the whole group (negative pgid) when known, else the pid.
              # Already-gone or not-ours processes are silently ignored.
              def terminate_group_or_pid(signal, pid, pgid)
                if pgid
                  Process.kill(signal, -pgid)
                else
                  Process.kill(signal, pid)
                end
              rescue Errno::ESRCH, Errno::EPERM
                nil
              end

              # @return [Integer, nil] the child's process group id, or nil if
              #   the child already exited before we could query it
              def safe_getpgid(pid)
                Process.getpgid(pid)
              rescue Errno::ESRCH
                nil
              end

              # Probe the group with signal 0. EPERM means "exists but not
              # ours", which still counts as alive.
              def group_alive?(pgid)
                Process.kill(0, -pgid)
                true
              rescue Errno::ESRCH
                false
              rescue Errno::EPERM
                true
              end

              # Diagnostics gated behind ACE_LLM_DEBUG_SUBPROCESS=1.
              def debug_log(provider_name, message)
                return unless ENV["ACE_LLM_DEBUG_SUBPROCESS"] == "1"

                warn("[SafeCapture][#{provider_name}] #{message}")
              end
            end
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require_relative "../atoms/session_finders/claude_session_finder"
|
|
4
|
+
require_relative "../atoms/session_finders/codex_session_finder"
|
|
5
|
+
require_relative "../atoms/session_finders/pi_session_finder"
|
|
6
|
+
require_relative "../atoms/session_finders/gemini_session_finder"
|
|
7
|
+
require_relative "../atoms/session_finders/open_code_session_finder"
|
|
8
|
+
|
|
9
|
+
module Ace
  module LLM
    module Providers
      module CLI
        module Molecules
          # Dispatches session detection to the appropriate provider-specific finder.
          #
          # Acts as a fallback when a provider does not natively report a
          # session_id: each registered finder scans that provider's local
          # session storage and matches sessions by prompt.
          class SessionFinder
            FINDERS = {
              "claude" => Atoms::SessionFinders::ClaudeSessionFinder,
              "codex" => Atoms::SessionFinders::CodexSessionFinder,
              "pi" => Atoms::SessionFinders::PiSessionFinder,
              "gemini" => Atoms::SessionFinders::GeminiSessionFinder,
              "opencode" => Atoms::SessionFinders::OpenCodeSessionFinder
            }.freeze

            # @param provider [String] provider name
            # @param working_dir [String] project directory
            # @param prompt [String] the prompt sent to the provider
            # @return [Hash, nil] { session_id:, session_path: } or nil
            def self.call(provider:, working_dir:, prompt:)
              finder_class = FINDERS[provider]
              finder_class&.call(working_dir: working_dir, prompt: prompt)
            rescue
              # Session detection is best-effort; any failure degrades to "no session".
              nil
            end
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "date"
|
|
4
|
+
require "yaml"
|
|
5
|
+
|
|
6
|
+
module Ace
  module LLM
    module Providers
      module CLI
        module Molecules
          # Reads skill names from SKILL.md frontmatter in a skills directory.
          #
          # Scans `#{skills_dir}/*/SKILL.md` for YAML frontmatter with a `name:` field.
          # Results are cached per directory path since skills don't change during a session.
          class SkillNameReader
            def initialize
              # path => Array<String> memo; populated lazily by #call
              @cache = {}
            end

            # Read skill names from a skills directory.
            #
            # @param skills_dir [String] Path to the skills directory
            # @return [Array<String>] Array of skill names (e.g. ["ace-onboard", "ace-git-commit"])
            def call(skills_dir)
              return [] unless skills_dir && Dir.exist?(skills_dir)

              @cache[skills_dir] ||= read_skill_names(skills_dir)
            end

            private

            # Collect every parseable skill name under skills_dir, sorted.
            # Unparseable SKILL.md files are skipped rather than aborting the scan.
            def read_skill_names(skills_dir)
              skill_files = Dir.glob(File.join(skills_dir, "*", "SKILL.md"))
              names = []

              skill_files.each do |path|
                name = extract_skill_name(path)
                names << name if name
              end

              names.sort
            end

            # Extract the `name:` field from a SKILL.md's YAML frontmatter.
            #
            # @param path [String] path to a SKILL.md file
            # @return [String, nil] the skill name, or nil when the file has no
            #   frontmatter, the YAML is invalid/forbidden, or name is not a String
            def extract_skill_name(path)
              content = File.read(path, encoding: "utf-8")

              # Parse YAML frontmatter (between --- delimiters)
              return nil unless content.start_with?("---")

              end_index = content.index("---", 3)
              return nil unless end_index

              frontmatter = content[3...end_index].strip
              data = YAML.safe_load(frontmatter, permitted_classes: [Date])
              return nil unless data.is_a?(Hash)

              # FIX: only accept String names — a non-String value (e.g.
              # `name: 123`) previously entered the list and made the mixed-type
              # `names.sort` raise ArgumentError.
              name = data["name"]
              name if name.is_a?(String)
            rescue Errno::ENOENT, Psych::Exception
              # FIX: rescue Psych::Exception (covers Psych::SyntaxError AND
              # Psych::DisallowedClass) — previously a frontmatter with an
              # unpermitted YAML tag crashed the entire directory scan.
              nil
            end
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,271 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "json"
require "open3"
require "shellwords"
require "time" # Time#iso8601 (used by OpenCodeClient#build_metadata) lives in the "time" stdlib extension

require_relative "cli_args_support"
require_relative "atoms/execution_context"
|
|
9
|
+
|
|
10
|
+
module Ace
  module LLM
    module Providers
      module CLI
        # Client for interacting with OpenCode CLI
        # Provides access to multiple AI providers through OpenCode's unified platform
        class OpenCodeClient < Ace::LLM::Organisms::BaseClient
          include CliArgsSupport

          # Not used for CLI interaction but required by BaseClient
          API_BASE_URL = "https://models.dev"
          DEFAULT_GENERATION_CONFIG = {}.freeze

          # Provider registration - auto-registers as "opencode"
          def self.provider_name
            "opencode"
          end

          # Default model (can be overridden by config)
          DEFAULT_MODEL = "google/gemini-2.5-flash"

          def initialize(model: nil, **options)
            @model = model || DEFAULT_MODEL
            # Skip normal BaseClient initialization that requires API key
            @options = options
            @generation_config = options[:generation_config] || {}
          end

          # Override to indicate this client doesn't need API credentials
          def needs_credentials?
            false
          end

          # Generate a response from the LLM
          # @param messages [Array<Hash>, String] Conversation messages (or a raw prompt string)
          # @param options [Hash] Generation options
          # @return [Hash] Response with :text and :metadata
          # @raise [Ace::LLM::ProviderError] when the CLI is missing or the run fails
          # @raise [Ace::LLM::AuthenticationError] when OpenCode is not authenticated
          def generate(messages, **options)
            validate_opencode_availability!

            # Convert messages to prompt format
            prompt = format_messages_as_prompt(messages)

            # Build full prompt with system instruction for accurate token accounting
            full_prompt = build_full_prompt(prompt, options)

            cmd = build_opencode_command_with_prompt(full_prompt, options)
            stdout, stderr, status = execute_opencode_command(cmd, options: options)

            parse_opencode_response(stdout, stderr, status, full_prompt, options)
          rescue => e
            handle_opencode_error(e)
          end

          # List available OpenCode models
          # @return [Array<Hash>] static catalog entries (id, name, description, context_size)
          def list_models
            # Return a standard set of models that OpenCode typically supports
            # Actual models come from YAML config
            [
              {id: "google/gemini-2.5-flash", name: "Gemini 2.5 Flash", description: "Fast Google model", context_size: 1_000_000},
              {id: "google/gemini-2.0-flash-experimental", name: "Gemini 2.0 Flash", description: "Experimental Google model", context_size: 1_000_000},
              {id: "google/gemini-1.5-pro", name: "Gemini 1.5 Pro", description: "Advanced Google model", context_size: 2_000_000},
              {id: "anthropic/claude-3-5-sonnet", name: "Claude 3.5 Sonnet", description: "Anthropic model", context_size: 200_000},
              {id: "anthropic/claude-3-5-haiku", name: "Claude 3.5 Haiku", description: "Fast Anthropic model", context_size: 200_000},
              {id: "openai/gpt-4o", name: "GPT-4 Omni", description: "OpenAI model", context_size: 128_000},
              {id: "openai/gpt-4o-mini", name: "GPT-4 Omni Mini", description: "Small OpenAI model", context_size: 128_000}
            ]
          end

          private

          # Flatten chat messages into a single "Role: content" transcript.
          # A String passes through unchanged.
          def format_messages_as_prompt(messages)
            # Handle both array of message hashes and string prompt
            return messages if messages.is_a?(String)

            # Convert array of messages to formatted prompt
            formatted = messages.map do |msg|
              role = msg[:role] || msg["role"]
              content = msg[:content] || msg["content"]

              case role
              when "system"
                "System: #{content}"
              when "user"
                "User: #{content}"
              when "assistant"
                "Assistant: #{content}"
              else
                content
              end
            end

            formatted.join("\n\n")
          end

          # Whether the `opencode` executable is on PATH.
          def opencode_available?
            # FIX: argument-vector form of Kernel#system — no shell is spawned,
            # output is suppressed via redirection options instead of "> /dev/null".
            system("which", "opencode", out: File::NULL, err: File::NULL)
          end

          # Raise early with actionable messages if the CLI is missing or unauthenticated.
          def validate_opencode_availability!
            unless opencode_available?
              raise Ace::LLM::ProviderError, "OpenCode CLI not found. Install with: npm install -g opencode-cli or visit https://opencode.dev"
            end

            # Check if OpenCode is authenticated (quick check)
            unless opencode_authenticated?
              raise Ace::LLM::AuthenticationError, "OpenCode authentication required. Run 'opencode auth' to configure"
            end
          end

          def opencode_authenticated?
            # Quick check if OpenCode can execute (will fail fast if not authenticated)

            cmd = ["opencode", "--version"]
            _, _, status = Open3.capture3(*cmd)
            status.success?
          rescue
            false
          end

          # Build command array with pre-built full prompt
          # @param full_prompt [String] The complete prompt (already includes system instruction if any)
          # @param options [Hash] Generation options (unused for command flags, kept for API compatibility)
          # @return [Array<String>] Command array ready for execution
          def build_opencode_command_with_prompt(full_prompt, options)
            cmd = ["opencode", "run"]

            # Add model selection with fallback chain
            model_to_use = @model || @generation_config[:model] || DEFAULT_MODEL
            cmd << "--model" << model_to_use

            # Add JSON format for structured output (less likely to prompt interactively)
            cmd << "--format" << "json"

            # User CLI args after generated flags so they take precedence (last-wins),
            # but before positional prompt arg
            cmd.concat(normalized_cli_args(options))

            # Prompt is passed as positional argument (not via --prompt flag)
            # NOTE: OpenCode CLI does not support --temperature, --max-tokens, or --system flags
            # Coerce to string to handle nil or non-string inputs gracefully
            cmd << full_prompt.to_s

            cmd
          end

          # Legacy method for backward compatibility and tests
          # @deprecated Use build_full_prompt + build_opencode_command_with_prompt instead
          def build_opencode_command(prompt, options)
            full_prompt = build_full_prompt(prompt, options)
            build_opencode_command_with_prompt(full_prompt, options)
          end

          # Build full prompt by prepending system instruction if provided
          #
          # OpenCode CLI does not support a --system flag, so we prepend system
          # instructions to the main prompt using the "System: " prefix format.
          #
          # @param prompt [String] The main user prompt (may already contain "System:" from message formatting)
          # @param options [Hash] Options that may contain system instruction keys
          # @return [String] Full prompt with system instruction prepended if provided
          # @note System instruction priority order (first match wins):
          #   1. options[:system_instruction]
          #   2. options[:system]
          #   3. options[:system_prompt]
          #   4. @generation_config[:system_prompt]
          # @note If the prompt already starts with "System:" (from format_messages_as_prompt),
          #   the options-based system instruction is skipped to avoid duplication.
          def build_full_prompt(prompt, options)
            prompt_str = prompt.to_s

            # Skip prepending if prompt already has a system instruction from message formatting
            # This prevents double "System:" prefixes when messages contain role: "system"
            return prompt_str if prompt_str.start_with?("System:")

            system_content = options[:system_instruction] ||
              options[:system] ||
              options[:system_prompt] ||
              @generation_config[:system_prompt]

            if system_content
              "System: #{system_content}\n\n#{prompt_str}"
            else
              prompt_str
            end
          end

          # Run the opencode command through SafeCapture with a resolved working dir.
          # @return [Array(String, String, Process::Status)]
          def execute_opencode_command(cmd, timeout: nil, options: {})
            timeout_val = timeout || @options[:timeout] || 120
            working_dir = Atoms::ExecutionContext.resolve_working_dir(
              working_dir: options[:working_dir],
              subprocess_env: options[:subprocess_env]
            )
            Molecules::SafeCapture.call(
              cmd,
              timeout: timeout_val,
              stdin_data: "",
              chdir: working_dir,
              provider_name: "OpenCode"
            )
          end

          # Turn CLI output into the { text:, metadata: } response hash.
          # @raise [Ace::LLM::ProviderError] when the CLI exited non-zero
          def parse_opencode_response(stdout, stderr, status, prompt, options)
            unless status.success?
              error_msg = stderr.empty? ? stdout : stderr

              # Detect common error patterns for better error messages
              if error_msg.include?("400") || error_msg.include?("Bad Request")
                raise Ace::LLM::ProviderError, "OpenCode API request failed (400 Bad Request). The model or prompt may be invalid."
              end

              raise Ace::LLM::ProviderError, "OpenCode CLI failed: #{error_msg}"
            end

            begin
              # Try to parse as JSON first
              response = JSON.parse(stdout)
              text = response["result"] || response["text"] || response["response"] || ""
            rescue JSON::ParserError
              # Fall back to treating entire output as text
              text = stdout.strip
              response = {}
            end

            # Build metadata
            metadata = build_metadata(response, text, prompt, options)

            # Return hash compatible with ace-llm format
            {
              text: text,
              metadata: metadata
            }
          end

          # Standard metadata hash; token counts fall back to a rough
          # length/4 estimate when the CLI did not report usage.
          def build_metadata(response, text, prompt, options)
            # Build standard metadata structure
            usage = response["usage"] || {}

            # Rough token estimation if not provided
            prompt_tokens = usage["input_tokens"] || (prompt.to_s.length / 4).round
            output_tokens = usage["output_tokens"] || (text.length / 4).round

            {
              provider: "opencode",
              model: @model || DEFAULT_MODEL,
              input_tokens: prompt_tokens,
              output_tokens: output_tokens,
              total_tokens: prompt_tokens + output_tokens,
              finish_reason: response["finish_reason"] || "success",
              # Time#iso8601 requires the "time" stdlib extension (see file requires)
              timestamp: Time.now.utc.iso8601
            }
          end

          def handle_opencode_error(error)
            # Re-raise the error for proper handling by the base client error flow
            raise error
          end
        end
      end
    end
  end
end
|