ace-llm-providers-cli 0.27.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. checksums.yaml +7 -0
  2. data/.ace-defaults/llm/providers/claude.yml +24 -0
  3. data/.ace-defaults/llm/providers/codex.yml +22 -0
  4. data/.ace-defaults/llm/providers/codexoss.yml +13 -0
  5. data/.ace-defaults/llm/providers/gemini.yml +32 -0
  6. data/.ace-defaults/llm/providers/opencode.yml +26 -0
  7. data/.ace-defaults/llm/providers/pi.yml +43 -0
  8. data/CHANGELOG.md +457 -0
  9. data/LICENSE +21 -0
  10. data/README.md +36 -0
  11. data/Rakefile +14 -0
  12. data/exe/ace-llm-providers-cli-check +76 -0
  13. data/lib/ace/llm/providers/cli/atoms/args_normalizer.rb +82 -0
  14. data/lib/ace/llm/providers/cli/atoms/auth_checker.rb +74 -0
  15. data/lib/ace/llm/providers/cli/atoms/command_formatters.rb +19 -0
  16. data/lib/ace/llm/providers/cli/atoms/command_rewriter.rb +75 -0
  17. data/lib/ace/llm/providers/cli/atoms/execution_context.rb +28 -0
  18. data/lib/ace/llm/providers/cli/atoms/provider_detector.rb +48 -0
  19. data/lib/ace/llm/providers/cli/atoms/session_finders/claude_session_finder.rb +79 -0
  20. data/lib/ace/llm/providers/cli/atoms/session_finders/codex_session_finder.rb +84 -0
  21. data/lib/ace/llm/providers/cli/atoms/session_finders/gemini_session_finder.rb +66 -0
  22. data/lib/ace/llm/providers/cli/atoms/session_finders/open_code_session_finder.rb +119 -0
  23. data/lib/ace/llm/providers/cli/atoms/session_finders/pi_session_finder.rb +87 -0
  24. data/lib/ace/llm/providers/cli/atoms/skill_command_rewriter.rb +30 -0
  25. data/lib/ace/llm/providers/cli/atoms/worktree_dir_resolver.rb +56 -0
  26. data/lib/ace/llm/providers/cli/claude_code_client.rb +358 -0
  27. data/lib/ace/llm/providers/cli/claude_oai_client.rb +322 -0
  28. data/lib/ace/llm/providers/cli/cli_args_support.rb +19 -0
  29. data/lib/ace/llm/providers/cli/codex_client.rb +291 -0
  30. data/lib/ace/llm/providers/cli/codex_oai_client.rb +274 -0
  31. data/lib/ace/llm/providers/cli/gemini_client.rb +346 -0
  32. data/lib/ace/llm/providers/cli/molecules/health_checker.rb +80 -0
  33. data/lib/ace/llm/providers/cli/molecules/safe_capture.rb +153 -0
  34. data/lib/ace/llm/providers/cli/molecules/session_finder.rb +44 -0
  35. data/lib/ace/llm/providers/cli/molecules/skill_name_reader.rb +64 -0
  36. data/lib/ace/llm/providers/cli/open_code_client.rb +271 -0
  37. data/lib/ace/llm/providers/cli/pi_client.rb +331 -0
  38. data/lib/ace/llm/providers/cli/version.rb +11 -0
  39. data/lib/ace/llm/providers/cli.rb +47 -0
  40. metadata +139 -0
@@ -0,0 +1,274 @@
1
# frozen_string_literal: true

require "json"
require "open3"
require "shellwords"
require "time" # for Time#iso8601 used in metadata timestamps

require_relative "cli_args_support"
require_relative "atoms/execution_context"
require_relative "atoms/command_rewriter"
require_relative "atoms/command_formatters"
require_relative "atoms/worktree_dir_resolver"
require_relative "molecules/skill_name_reader"

module Ace
  module LLM
    module Providers
      module CLI
        # Client for interacting with Codex CLI targeting OpenAI-compatible providers.
        # Dynamically configures codex to use any backend (Z.ai, DeepSeek, etc.)
        # via -c flag overrides for model_provider and model_providers config.
        class CodexOaiClient < Ace::LLM::Organisms::BaseClient
          include CliArgsSupport

          # Not used for CLI interaction but required by BaseClient
          API_BASE_URL = "https://api.openai.com"
          DEFAULT_GENERATION_CONFIG = {}.freeze

          # Provider registration - auto-registers as "codexoai"
          def self.provider_name
            "codexoai"
          end

          # Default "backend/model" spec (can be overridden by config)
          DEFAULT_MODEL = "zai/glm-5"

          # @param model [String, nil] "backend/model" spec, e.g. "zai/glm-5"
          # @param options [Hash] client options (:generation_config, :backends, :timeout, ...)
          def initialize(model: nil, **options)
            @model = model || DEFAULT_MODEL
            @options = options
            @generation_config = options[:generation_config] || {}
            # Backend definitions keyed by name, e.g. {"zai" => {"base_url" => ..., "env_key" => ...}}
            @backends = options[:backends] || {}
            @skill_name_reader = Molecules::SkillNameReader.new
          end

          # Credentials are managed by the codex CLI itself, not this client.
          def needs_credentials?
            false
          end

          # Generate a response from the LLM
          # @param messages [Array<Hash>, String] Conversation messages or a raw prompt
          # @param options [Hash] Generation options
          # @return [Hash] Response with :text and :metadata
          # @raise [Ace::LLM::ProviderError] when the CLI is missing or exits non-zero
          def generate(messages, **options)
            validate_codex_availability!

            prompt = format_messages_as_prompt(messages)
            subprocess_env = options[:subprocess_env]
            working_dir = Atoms::ExecutionContext.resolve_working_dir(
              working_dir: options[:working_dir],
              subprocess_env: subprocess_env
            )
            prompt = rewrite_skill_commands(prompt, working_dir: working_dir)

            cmd = build_codex_oai_command(prompt, options, working_dir: working_dir)
            stdout, stderr, status = execute_codex_command(cmd, prompt, options)

            parse_codex_response(stdout, stderr, status, prompt, options)
          rescue => e
            handle_codex_error(e)
          end

          # List available models (static; project config cascade can add more)
          def list_models
            [
              {id: "zai/glm-5", name: "GLM-5", description: "Z.ai flagship model", context_size: 128_000},
              {id: "zai/glm-4.7", name: "GLM-4.7", description: "Z.ai balanced model", context_size: 128_000},
              {id: "zai/glm-4.6", name: "GLM-4.6", description: "Z.ai fast model", context_size: 128_000}
            ]
          end

          # Split "backend/model" into ["backend", "model"]
          # @param model_string [String, nil] e.g. "zai/glm-5"
          # @return [Array<String>, Array<nil>] e.g. ["zai", "glm-5"], or [nil, nil] when unparseable
          def split_backend_model(model_string)
            return [nil, nil] unless model_string

            parts = model_string.split("/", 2)
            return [nil, nil] unless parts.length == 2

            parts
          end

          private

          # Flatten chat messages into one "System:/User:/Assistant:" prompt, since
          # the codex CLI takes a single prompt on stdin rather than a message list.
          def format_messages_as_prompt(messages)
            return messages if messages.is_a?(String)

            system_msg = messages.find { |m| (m[:role] || m["role"]) == "system" }
            other_msgs = messages.reject { |m| (m[:role] || m["role"]) == "system" }

            formatted = other_msgs.map do |msg|
              role = msg[:role] || msg["role"]
              content = msg[:content] || msg["content"]

              case role
              when "user"
                "User: #{content}"
              when "assistant"
                "Assistant: #{content}"
              else
                content
              end
            end

            # System message (if any) always leads the prompt
            if system_msg
              system_content = system_msg[:content] || system_msg["content"]
              formatted.unshift("System: #{system_content}")
            end

            formatted.join("\n\n")
          end

          def codex_available?
            # Multi-arg form avoids spawning a shell (consistent with GeminiClient)
            system("which", "codex", out: File::NULL, err: File::NULL)
          end

          def validate_codex_availability!
            unless codex_available?
              raise Ace::LLM::ProviderError, "Codex CLI not found. Install with: npm install -g @openai/codex or visit https://codex.ai"
            end
          end

          # Build the `codex exec` argv, wiring the chosen backend via -c overrides.
          def build_codex_oai_command(prompt, options, working_dir: nil)
            working_dir ||= Atoms::ExecutionContext.resolve_working_dir(
              working_dir: options[:working_dir],
              subprocess_env: options[:subprocess_env]
            )
            cmd = ["codex", "exec"]

            # Add sandbox mode if specified
            cmd << "--sandbox" << options[:sandbox].to_s if options[:sandbox]

            # Parse backend/model from the model string
            backend_name, model_name = split_backend_model(@model)

            if backend_name && model_name
              backend_config = @backends[backend_name] || @backends[backend_name.to_sym] || {}

              # Set the model provider
              cmd << "-c" << "model_provider=#{toml_string(backend_name)}"

              # Provider name (required by codex)
              provider_display = backend_config["name"] || backend_config[:name] || backend_name
              cmd << "-c" << "model_providers.#{backend_name}.name=#{toml_string(provider_display)}"

              # Configure backend-specific settings
              if (base_url = backend_config["base_url"] || backend_config[:base_url])
                cmd << "-c" << "model_providers.#{backend_name}.base_url=#{toml_string(base_url)}"
              end

              if (env_key = backend_config["env_key"] || backend_config[:env_key])
                cmd << "-c" << "model_providers.#{backend_name}.env_key=#{toml_string(env_key)}"
              end

              # Set the model
              cmd << "-m" << model_name
            end

            # Add writable dir for git worktree metadata
            if (git_dir = Atoms::WorktreeDirResolver.call(working_dir: working_dir))
              cmd << "--add-dir" << git_dir
            end

            # User CLI args last so they take precedence
            cmd.concat(normalized_cli_args(options))

            cmd
          end

          # Quote a value as a TOML basic string for a -c override, escaping
          # backslashes and double quotes so config values cannot break the
          # override syntax.
          def toml_string(value)
            escaped = value.to_s.gsub(/(["\\])/) { |match| "\\#{match}" }
            "\"#{escaped}\""
          end

          def execute_codex_command(cmd, prompt, options)
            input = prompt.to_s

            # A system prompt may arrive under several option keys or via config
            system_content = options[:system_instruction] ||
              options[:system] ||
              options[:system_prompt] ||
              @generation_config[:system_prompt]

            # Only prepend when the prompt doesn't already carry a System: section
            if system_content && !prompt.include?("System:")
              input = "System: #{system_content}\n\nUser: #{input}"
            end

            timeout_val = @options[:timeout] || 120
            working_dir = Atoms::ExecutionContext.resolve_working_dir(
              working_dir: options[:working_dir],
              subprocess_env: options[:subprocess_env]
            )
            Molecules::SafeCapture.call(
              cmd,
              timeout: timeout_val,
              stdin_data: input,
              chdir: working_dir,
              provider_name: "Codex OAI"
            )
          end

          # Strip codex CLI chrome (banner line, token-usage footer) from stdout
          # and package the remaining text with synthetic metadata.
          def parse_codex_response(stdout, stderr, status, prompt, options)
            unless status.success?
              error_msg = stderr.empty? ? stdout : stderr
              raise Ace::LLM::ProviderError, "Codex OAI CLI failed: #{error_msg}"
            end

            lines = stdout.split("\n")
            # Output after the line mentioning "codex" is treated as the response
            response_start = lines.find_index { |line| line.include?("codex") }

            if response_start && response_start < lines.length - 1
              response_lines = lines[(response_start + 1)..-1]
              response_lines = response_lines.reject { |line| line.include?("tokens used") }
              text = response_lines.join("\n").strip
            else
              text = stdout.strip
            end

            metadata = build_synthetic_metadata(text, prompt)

            {
              text: text,
              metadata: metadata
            }
          end

          # Codex CLI doesn't report token counts; estimate at ~4 chars/token.
          def build_synthetic_metadata(response_text, prompt)
            prompt_tokens = (prompt.to_s.length / 4).round
            response_tokens = (response_text.length / 4).round

            {
              provider: "codexoai",
              model: @model || DEFAULT_MODEL,
              input_tokens: prompt_tokens,
              output_tokens: response_tokens,
              total_tokens: prompt_tokens + response_tokens,
              finish_reason: "success",
              timestamp: Time.now.utc.iso8601
            }
          end

          # Hook for provider-specific error translation; currently re-raises.
          def handle_codex_error(error)
            raise error
          end

          # Rewrite skill-invoking commands in the prompt into codex-compatible form.
          def rewrite_skill_commands(prompt, working_dir: nil)
            skills_dir = resolve_skills_dir(working_dir: working_dir)
            return prompt unless skills_dir

            skill_names = @skill_name_reader.call(skills_dir)
            return prompt if skill_names.empty?

            Atoms::CommandRewriter.call(prompt, skill_names: skill_names, formatter: Atoms::CommandFormatters::CODEX_FORMATTER)
          end

          # Prefer an explicitly configured skills dir, else probe conventional
          # locations under the working directory.
          def resolve_skills_dir(working_dir: nil)
            configured = @options[:skills_dir] || @generation_config[:skills_dir]
            return configured if configured && Dir.exist?(configured)

            working_dir ||= Atoms::ExecutionContext.resolve_working_dir
            candidate_dirs = [
              File.join(working_dir, ".codex", "skills"),
              File.join(working_dir, ".agent", "skills"),
              File.join(working_dir, ".claude", "skills")
            ]
            candidate_dirs.find { |dir| Dir.exist?(dir) }
          end
        end
      end
    end
  end
end
@@ -0,0 +1,346 @@
1
# frozen_string_literal: true

require "fileutils"
require "json"
require "time" # for Time#iso8601 used in metadata timestamps

require_relative "cli_args_support"
require_relative "atoms/execution_context"

module Ace
  module LLM
    module Providers
      module CLI
        # Client for interacting with Google Gemini CLI
        # Provides access to Gemini models through subprocess execution
        class GeminiClient < Ace::LLM::Organisms::BaseClient
          include CliArgsSupport

          # Not used for CLI interaction but required by BaseClient
          API_BASE_URL = "https://generativelanguage.googleapis.com"
          DEFAULT_GENERATION_CONFIG = {}.freeze

          # Default maximum prompt length before switching to file-based prompts (100K characters)
          # This can be overridden via config: default_options.max_prompt_length
          # Gemini's actual token limit is much higher (~1M tokens), but this provides
          # a reasonable safeguard for accidental misuse
          DEFAULT_MAX_PROMPT_LENGTH = 100_000

          # Provider registration - auto-registers as "gemini"
          def self.provider_name
            "gemini"
          end

          # Default model (can be overridden by config)
          DEFAULT_MODEL = "gemini-2.5-flash"

          # @param model [String, nil] Gemini model id
          # @param options [Hash] client options (:generation_config, :timeout, ...)
          def initialize(model: nil, **options)
            @model = model || DEFAULT_MODEL
            # Skip normal BaseClient initialization that requires API key
            @options = options
            @generation_config = options[:generation_config] || {}
          end

          # Override to indicate this client doesn't need API credentials
          def needs_credentials?
            false
          end

          # Generate a response from the LLM
          # @param messages [Array<Hash>, String] Conversation messages or a raw prompt
          # @param options [Hash] Generation options
          # @return [Hash] Response with :text and :metadata
          # @raise [Ace::LLM::ProviderError] when the CLI is missing or exits non-zero
          def generate(messages, **options)
            validate_gemini_availability!

            # Convert messages to prompt format
            prompt = format_messages_as_prompt(messages)

            cmd = build_gemini_command(prompt, options)
            stdout, stderr, status = execute_gemini_command(cmd, prompt, options)

            parse_gemini_response(stdout, stderr, status, prompt, options)
          end

          # List available Gemini models
          # Note: This list should stay in sync with .ace-defaults/llm/providers/gemini.yml
          # Project-level additions (like preview models) are handled by the config cascade
          def list_models
            [
              {id: "gemini-2.5-flash", name: "Gemini 2.5 Flash", description: "Fast, efficient Gemini model", context_size: 1_048_576},
              {id: "gemini-2.5-pro", name: "Gemini 2.5 Pro", description: "Advanced Gemini model", context_size: 1_048_576},
              {id: "gemini-2.0-flash", name: "Gemini 2.0 Flash", description: "Fast Gemini model", context_size: 1_048_576},
              {id: "gemini-1.5-pro-latest", name: "Gemini 1.5 Pro", description: "Previous generation Pro model", context_size: 2_097_152}
            ]
          end

          private

          # Flatten chat messages into one prompt string; Gemini CLI has no native
          # system-prompt support, so a system message is prepended as "System: ...".
          def format_messages_as_prompt(messages)
            # Handle both array of message hashes and string prompt
            return messages if messages.is_a?(String)

            # Extract system message if present
            system_msg = messages.find { |m| (m[:role] || m["role"]) == "system" }
            other_msgs = messages.reject { |m| (m[:role] || m["role"]) == "system" }

            # Format remaining messages
            formatted = other_msgs.map do |msg|
              role = msg[:role] || msg["role"]
              content = msg[:content] || msg["content"]

              case role
              when "user"
                "User: #{content}"
              when "assistant"
                "Assistant: #{content}"
              else
                content
              end
            end

            prompt = formatted.join("\n\n")

            # Prepend system message unless one is already embedded
            if system_msg
              system_content = system_msg[:content] || system_msg["content"]
              if prompt.include?("System:")
                prompt
              else
                "System: #{system_content}\n\n#{prompt}"
              end
            else
              prompt
            end
          end

          def gemini_available?
            # Multi-arg form avoids spawning a shell
            system("which", "gemini", out: File::NULL, err: File::NULL)
          end

          def validate_gemini_availability!
            unless gemini_available?
              raise Ace::LLM::ProviderError, "Gemini CLI not found. Install with: npm install -g @google/gemini-cli or visit https://geminicli.com"
            end
            # Authentication is handled by the CLI itself - no pre-check needed
          end

          # Choose a command strategy: caller-provided files, file references for
          # oversized prompts, or a plain positional prompt.
          def build_gemini_command(prompt, options)
            # If caller provided file paths, use them directly (e.g., ace-review session files)
            # This avoids creating duplicate temp files and conflicting system prompts
            if options[:system_file] && options[:prompt_file]
              return build_command_with_existing_files(options[:system_file], options[:prompt_file], options)
            end

            # Calculate total prompt length with system prompt
            system_prompt = @generation_config[:system_prompt]
            total_length = prompt.to_s.length + (system_prompt&.length || 0) + "System: \n\n".length

            # Check if we need to use file references for large prompts
            # max_prompt_length is configurable via default_options in provider config
            max_length = @generation_config[:max_prompt_length] || @options[:max_prompt_length] || DEFAULT_MAX_PROMPT_LENGTH
            if total_length > max_length
              build_command_with_file_references(prompt, system_prompt, options)
            else
              build_standard_command(prompt, system_prompt, options)
            end
          end

          # Common argv prefix shared by all command strategies:
          # JSON output plus an explicit model flag when not using the default.
          def base_json_command
            cmd = ["gemini"]
            cmd << "--output-format" << "json"
            cmd << "--model" << @model if @model && @model != DEFAULT_MODEL
            cmd
          end

          # Append user CLI args (last-wins over generated flags) followed by the
          # positional prompt argument, and return the completed argv.
          def finalize_command(cmd, options, prompt)
            append_cli_args(cmd, options)
            cmd << prompt
            cmd
          end

          def build_command_with_existing_files(system_file, prompt_file, options)
            # Build instruction prompt referencing existing files
            # No temp file creation - caller has already saved prompts to files
            file_refs = []
            file_refs << "Read the system instructions: #{system_file}"
            file_refs << "Read the user context: #{prompt_file}"
            file_refs << "Follow the instructions in the files."

            finalize_command(base_json_command, options, file_refs.join("\n"))
          end

          def build_standard_command(prompt, system_prompt, options)
            # Prepend default system prompt if no system message exists
            unless prompt.include?("System:")
              prompt = "System: #{system_prompt}\n\n#{prompt}" if system_prompt
            end

            # Prompt is passed as positional argument for one-shot mode
            finalize_command(base_json_command, options, prompt)
          end

          def build_command_with_file_references(prompt, system_prompt, options)
            # Use project .ace-local directory so Gemini CLI can access the files
            # (system temp /var/folders is outside Gemini's workspace)
            cache_dir = create_prompt_cache_dir(options[:working_dir], subprocess_env: options[:subprocess_env])
            timestamp = Time.now.strftime("%Y%m%d-%H%M%S-%L")

            # Write system prompt to cache file
            system_file_path = nil
            if system_prompt
              system_file_path = File.join(cache_dir, "system-#{timestamp}.txt")
              File.write(system_file_path, system_prompt)
            end

            # Write user prompt to cache file
            user_file_path = File.join(cache_dir, "user-#{timestamp}.txt")
            File.write(user_file_path, prompt)

            # Build instruction prompt with file references
            file_refs = []
            file_refs << "Read this system instruction: #{system_file_path}" if system_file_path
            file_refs << "Read the user instructions: #{user_file_path}"
            file_refs << "Follow the instructions in the file#{"s" if system_file_path}."

            # Prompt is passed as positional argument (not -i which conflicts with stdin)
            finalize_command(base_json_command, options, file_refs.join("\n"))
          end

          def create_prompt_cache_dir(working_dir = nil, subprocess_env: nil)
            cache_dir = File.join(find_project_root(working_dir, subprocess_env: subprocess_env), ".ace-local", "llm", "prompts")
            # mkdir_p is a no-op when the directory already exists
            FileUtils.mkdir_p(cache_dir)
            cache_dir
          end

          def append_cli_args(cmd, options)
            cmd.concat(normalized_cli_args(options))
          end

          def execute_gemini_command(cmd, prompt, options)
            timeout_val = options[:timeout] || @options[:timeout] || 120
            project_root = find_project_root(options[:working_dir], subprocess_env: options[:subprocess_env])
            Molecules::SafeCapture.call(cmd, timeout: timeout_val, chdir: project_root, provider_name: "Gemini")
          end

          # Resolve the project root from the working dir (falls back to the
          # resolved working dir itself when no project root marker is found).
          def find_project_root(working_dir = nil, subprocess_env: nil)
            require "ace/support/fs"
            start_path = Atoms::ExecutionContext.resolve_working_dir(
              working_dir: working_dir,
              subprocess_env: subprocess_env
            )
            Ace::Support::Fs::Molecules::ProjectRootFinder.find(start_path: start_path) || start_path
          end

          def parse_gemini_response(stdout, stderr, status, prompt, options)
            unless status.success?
              error_msg = stderr.empty? ? stdout : stderr
              raise Ace::LLM::ProviderError, "Gemini CLI failed: #{error_msg}"
            end

            # Try to parse JSON output first
            begin
              parsed = JSON.parse(stdout)

              # Extract response text from parsed JSON
              # Gemini CLI JSON format: { "response": "...", "stats": { ... } }
              text = if parsed["response"]
                parsed["response"]
              elsif parsed["candidates"] && parsed["candidates"].first
                parsed["candidates"].first["content"] || parsed["candidates"].first["text"]
              else
                # Fallback to raw output if JSON structure unexpected
                stdout.strip
              end

              # Extract metadata from stats if available
              metadata = extract_metadata_from_json(parsed, prompt)
            rescue JSON::ParserError
              # Fallback to raw text output if JSON parsing fails
              text = stdout.strip
              metadata = build_synthetic_metadata(text, prompt)
            end

            # Return hash compatible with ace-llm format
            {
              text: text,
              metadata: metadata
            }
          end

          def extract_metadata_from_json(parsed, prompt)
            # Try to extract metadata from Gemini CLI JSON response
            stats = parsed["stats"] || {}
            tokens = stats["tokens"] || {}

            {
              provider: "gemini",
              model: @model || DEFAULT_MODEL,
              input_tokens: tokens["promptTokens"] || tokens["input"] || 0,
              output_tokens: tokens["candidatesTokens"] || tokens["output"] || 0,
              total_tokens: tokens["totalTokens"] || tokens["total"] || 0,
              finish_reason: "success",
              timestamp: Time.now.utc.iso8601
            }
          end

          def build_synthetic_metadata(response_text, prompt)
            # Create synthetic metadata if JSON metadata not available
            # Token estimation: ~4 characters per token is a reasonable approximation for
            # English text with Gemini's tokenizer. This varies by language and content type
            # but provides useful estimates when actual token counts aren't available.
            prompt_tokens = (prompt.to_s.length / 4).round
            response_tokens = (response_text.length / 4).round

            {
              provider: "gemini",
              model: @model || DEFAULT_MODEL,
              input_tokens: prompt_tokens,
              output_tokens: response_tokens,
              total_tokens: prompt_tokens + response_tokens,
              finish_reason: "success",
              timestamp: Time.now.utc.iso8601
            }
          end
        end
      end
    end
  end
end
@@ -0,0 +1,80 @@
1
# frozen_string_literal: true

require_relative "../atoms/provider_detector"
require_relative "../atoms/auth_checker"

module Ace
  module LLM
    module Providers
      module CLI
        module Molecules
          # Orchestrates provider detection and authentication checking.
          #
          # NOTE: the namespace is Ace::LLM::Providers::CLI to match the rest of
          # the gem (atoms, clients); the mis-cased Ace::Llm::Providers::Cli
          # nesting would leave Atoms::ProviderDetector / Atoms::AuthChecker
          # unresolvable from this module.
          class HealthChecker
            # Known CLI providers: display name, registered provider id, the
            # command used to probe the installed version, and install hints.
            PROVIDERS = {
              "claude" => {
                name: "Claude Code",
                provider: "claude",
                check_cmd: ["claude", "--version"],
                install_cmd: "npm install -g @anthropic-ai/claude-cli"
              },
              "codex" => {
                name: "Codex",
                provider: "codex",
                check_cmd: ["codex", "--version"],
                install_cmd: "npm install -g @openai/codex",
                install_url: "https://codex.ai"
              },
              "opencode" => {
                name: "OpenCode",
                provider: "opencode",
                check_cmd: ["opencode", "--version"],
                install_cmd: "npm install -g opencode-cli",
                install_url: "https://opencode.dev"
              },
              "codex-oss" => {
                name: "Codex OSS",
                provider: "codexoss",
                check_cmd: ["codex-oss", "--version"],
                install_cmd: "pip install codex-oss",
                install_url: "https://github.com/codex-oss/codex"
              }
            }.freeze

            # Check all providers and return results
            # @return [Array<Hash>] Results for each provider (see #check_provider)
            def check_all
              PROVIDERS.map do |cli_name, config|
                check_provider(cli_name, config)
              end
            end

            private

            # Probe a single provider: availability, version, then auth status.
            # Auth is only checked when the CLI binary is actually present.
            # @return [Hash] keys: :name, :provider, :config, :available,
            #   :authenticated, :version, :auth_status
            def check_provider(cli_name, config)
              result = {
                name: config[:name],
                provider: config[:provider],
                config: config,
                available: false,
                authenticated: false,
                version: nil,
                auth_status: "Not checked"
              }

              if Atoms::ProviderDetector.available?(cli_name)
                result[:available] = true
                result[:version] = Atoms::ProviderDetector.version(config[:check_cmd])

                auth_result = Atoms::AuthChecker.check(config[:provider])
                result[:authenticated] = auth_result[:authenticated]
                result[:auth_status] = auth_result[:message]
              end

              result
            end
          end
        end
      end
    end
  end
end