ace-llm-providers-cli 0.27.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. checksums.yaml +7 -0
  2. data/.ace-defaults/llm/providers/claude.yml +24 -0
  3. data/.ace-defaults/llm/providers/codex.yml +22 -0
  4. data/.ace-defaults/llm/providers/codexoss.yml +13 -0
  5. data/.ace-defaults/llm/providers/gemini.yml +32 -0
  6. data/.ace-defaults/llm/providers/opencode.yml +26 -0
  7. data/.ace-defaults/llm/providers/pi.yml +43 -0
  8. data/CHANGELOG.md +457 -0
  9. data/LICENSE +21 -0
  10. data/README.md +36 -0
  11. data/Rakefile +14 -0
  12. data/exe/ace-llm-providers-cli-check +76 -0
  13. data/lib/ace/llm/providers/cli/atoms/args_normalizer.rb +82 -0
  14. data/lib/ace/llm/providers/cli/atoms/auth_checker.rb +74 -0
  15. data/lib/ace/llm/providers/cli/atoms/command_formatters.rb +19 -0
  16. data/lib/ace/llm/providers/cli/atoms/command_rewriter.rb +75 -0
  17. data/lib/ace/llm/providers/cli/atoms/execution_context.rb +28 -0
  18. data/lib/ace/llm/providers/cli/atoms/provider_detector.rb +48 -0
  19. data/lib/ace/llm/providers/cli/atoms/session_finders/claude_session_finder.rb +79 -0
  20. data/lib/ace/llm/providers/cli/atoms/session_finders/codex_session_finder.rb +84 -0
  21. data/lib/ace/llm/providers/cli/atoms/session_finders/gemini_session_finder.rb +66 -0
  22. data/lib/ace/llm/providers/cli/atoms/session_finders/open_code_session_finder.rb +119 -0
  23. data/lib/ace/llm/providers/cli/atoms/session_finders/pi_session_finder.rb +87 -0
  24. data/lib/ace/llm/providers/cli/atoms/skill_command_rewriter.rb +30 -0
  25. data/lib/ace/llm/providers/cli/atoms/worktree_dir_resolver.rb +56 -0
  26. data/lib/ace/llm/providers/cli/claude_code_client.rb +358 -0
  27. data/lib/ace/llm/providers/cli/claude_oai_client.rb +322 -0
  28. data/lib/ace/llm/providers/cli/cli_args_support.rb +19 -0
  29. data/lib/ace/llm/providers/cli/codex_client.rb +291 -0
  30. data/lib/ace/llm/providers/cli/codex_oai_client.rb +274 -0
  31. data/lib/ace/llm/providers/cli/gemini_client.rb +346 -0
  32. data/lib/ace/llm/providers/cli/molecules/health_checker.rb +80 -0
  33. data/lib/ace/llm/providers/cli/molecules/safe_capture.rb +153 -0
  34. data/lib/ace/llm/providers/cli/molecules/session_finder.rb +44 -0
  35. data/lib/ace/llm/providers/cli/molecules/skill_name_reader.rb +64 -0
  36. data/lib/ace/llm/providers/cli/open_code_client.rb +271 -0
  37. data/lib/ace/llm/providers/cli/pi_client.rb +331 -0
  38. data/lib/ace/llm/providers/cli/version.rb +11 -0
  39. data/lib/ace/llm/providers/cli.rb +47 -0
  40. metadata +139 -0
@@ -0,0 +1,322 @@
1
+ # frozen_string_literal: true
2
+
3
require "json"
require "open3"
require "shellwords"
require "time" # Time#iso8601 (used when building response metadata)

require_relative "cli_args_support"
require_relative "atoms/execution_context"
require_relative "atoms/command_rewriter"
require_relative "atoms/command_formatters"
require_relative "molecules/skill_name_reader"
12
+
13
module Ace
  module LLM
    module Providers
      module CLI
        # Client for Claude over Anthropic-compatible APIs (Z.ai, OpenRouter, etc.)
        #
        # Shells out to the `claude` CLI and routes requests through alternative
        # Anthropic-compatible endpoints by injecting backend-specific env vars
        # (ANTHROPIC_BASE_URL / ANTHROPIC_AUTH_TOKEN) into the subprocess.
        class ClaudeOaiClient < Ace::LLM::Organisms::BaseClient
          include CliArgsSupport

          # Required by BaseClient; not used directly for CLI interaction.
          API_BASE_URL = "https://api.z.ai"
          DEFAULT_GENERATION_CONFIG = {}.freeze

          # Provider registration key - auto-registers as "claudeoai".
          def self.provider_name
            "claudeoai"
          end

          # "backend/model" pair; the backend half selects the env-var config
          # looked up in @backends.
          DEFAULT_MODEL = "zai/glm-5"

          # @param model [String, nil] "backend/model" string, e.g. "zai/glm-5"
          # @param options [Hash] supports :generation_config, :backends,
          #   :timeout, :skills_dir among others
          def initialize(model: nil, **options)
            @model = model || DEFAULT_MODEL
            # Deliberately does not call BaseClient#initialize, which would
            # demand an API key this CLI-backed client does not need.
            @options = options
            @generation_config = options[:generation_config] || {}
            @backends = options[:backends] || {}
            @skill_name_reader = Molecules::SkillNameReader.new
          end

          # Credentials are handled by the claude CLI / backend env vars.
          def needs_credentials?
            false
          end

          # Generate a response from the LLM
          # @param messages [Array<Hash>, String] Conversation messages or a raw prompt
          # @param options [Hash] Generation options (:max_tokens, :working_dir,
          #   :subprocess_env, :cli_args, ...)
          # @return [Hash] Response with :text and :metadata
          # @raise [Ace::LLM::ProviderError] when the CLI is missing or fails
          def generate(messages, **options)
            validate_claude_availability!

            prompt = format_messages_as_prompt(messages)
            # Deleted (not just read) so it is never treated as a CLI option.
            subprocess_env = options.delete(:subprocess_env)
            working_dir = Atoms::ExecutionContext.resolve_working_dir(
              working_dir: options[:working_dir],
              subprocess_env: subprocess_env
            )
            prompt = rewrite_skill_commands(prompt, working_dir: working_dir)

            cmd = build_claude_command(options)
            stdout, stderr, status = execute_claude_command(
              cmd,
              prompt,
              subprocess_env: subprocess_env,
              working_dir: working_dir
            )

            parse_claude_response(stdout, stderr, status, prompt, options)
          rescue => e
            handle_claude_error(e)
          end

          # List available models (static catalogue; actual models come from
          # YAML config shipped with the gem).
          def list_models
            [
              {id: "zai/glm-5", name: "GLM-5", description: "Z.ai flagship model (Anthropic-compatible)", context_size: 128_000},
              {id: "zai/glm-4.7", name: "GLM-4.7", description: "Z.ai balanced model (Anthropic-compatible)", context_size: 128_000},
              {id: "zai/glm-4.6", name: "GLM-4.6", description: "Z.ai fast model (Anthropic-compatible)", context_size: 128_000}
            ]
          end

          # Split "backend/model" into ["backend", "model"]
          # @param model_string [String, nil] e.g. "zai/glm-5"
          # @return [Array<String>, Array<nil>] e.g. ["zai", "glm-5"], or
          #   [nil, nil] when the string is missing or has no "/" separator
          def split_backend_model(model_string)
            return [nil, nil] unless model_string

            parts = model_string.split("/", 2)
            return [nil, nil] unless parts.length == 2

            parts
          end

          private

          # Flatten chat messages into a single "System:/User:/Assistant:"
          # prompt. Accepts a raw String (returned unchanged) or an array of
          # hashes with symbol or string keys.
          def format_messages_as_prompt(messages)
            return messages if messages.is_a?(String)

            system_msg = messages.find { |m| (m[:role] || m["role"]) == "system" }
            other_msgs = messages.reject { |m| (m[:role] || m["role"]) == "system" }

            formatted = other_msgs.map do |msg|
              role = msg[:role] || msg["role"]
              content = msg[:content] || msg["content"]

              case role
              when "user"
                "User: #{content}"
              when "assistant"
                "Assistant: #{content}"
              else
                content
              end
            end

            # The system message always leads the prompt, regardless of its
            # original position in the array.
            if system_msg
              system_content = system_msg[:content] || system_msg["content"]
              formatted.unshift("System: #{system_content}")
            end

            formatted.join("\n\n")
          end

          # True when a `claude` executable is on PATH. Scans PATH directly so
          # the check also works on platforms without `which` (e.g. Windows,
          # where PATHEXT supplies the executable suffixes).
          def claude_available?
            extensions = ENV["PATHEXT"] ? ENV["PATHEXT"].split(";") : [""]
            ENV.fetch("PATH", "").split(File::PATH_SEPARATOR).any? do |dir|
              extensions.any? do |ext|
                candidate = File.join(dir, "claude#{ext}")
                File.file?(candidate) && File.executable?(candidate)
              end
            end
          end

          def validate_claude_availability!
            unless claude_available?
              raise Ace::LLM::ProviderError, "Claude CLI not found. Install with: npm install -g @anthropic-ai/claude-cli"
            end
          end

          # Assemble argv for the claude CLI. User-supplied :cli_args go last
          # so they win on conflicting flags (last-wins in most CLIs).
          def build_claude_command(options)
            cmd = ["claude"]
            cmd << "-p" # print mode (non-interactive)

            # Always use JSON output for consistent parsing
            cmd << "--output-format" << "json"

            # Use a tier alias (sonnet/opus/haiku) that claude CLI recognizes,
            # rather than the backend model name (e.g. glm-5) which it doesn't.
            tier = resolve_model_tier
            cmd << "--model" << tier if tier

            # Add max tokens if provided
            # NOTE(review): confirm the installed claude CLI actually accepts
            # a --max-tokens flag; unknown flags would make the subprocess fail.
            max_tokens = options[:max_tokens] || @generation_config[:max_tokens]
            if max_tokens
              cmd << "--max-tokens" << max_tokens.to_s
            end

            # User CLI args last so they take precedence
            cmd.concat(normalized_cli_args(options))

            cmd
          end

          # Run the claude CLI with the prompt on stdin.
          # @return [Array(String, String, Process::Status)] stdout, stderr, status
          def execute_claude_command(cmd, prompt, subprocess_env: nil, working_dir: nil)
            timeout_val = @options[:timeout] || 120

            # Build env with backend-specific vars for Anthropic-compatible
            # routing. CLAUDECODE => nil unsets that variable in the child.
            env = {"CLAUDECODE" => nil}
            env.merge!(backend_env_vars)
            env.merge!(subprocess_env) if subprocess_env

            debug_subprocess("spawn timeout=#{timeout_val}s cmd=#{cmd.join(" ")} prompt_bytes=#{prompt.to_s.bytesize}")
            Molecules::SafeCapture.call(
              cmd,
              timeout: timeout_val,
              stdin_data: prompt.to_s,
              chdir: working_dir,
              env: env,
              provider_name: "Claude OAI"
            )
          end

          # Build env vars hash for the current backend.
          # Sets ANTHROPIC_BASE_URL, ANTHROPIC_AUTH_TOKEN, and clears
          # ANTHROPIC_API_KEY. Returns {} when no backend config is present.
          def backend_env_vars
            backend_name, model_name = split_backend_model(@model)
            return {} unless backend_name

            backend_config = @backends[backend_name] || @backends[backend_name.to_sym] || {}
            return {} if backend_config.empty?

            env = {}

            # Set the base URL for the Anthropic-compatible endpoint
            if (base_url = backend_config["base_url"] || backend_config[:base_url])
              env["ANTHROPIC_BASE_URL"] = base_url
            end

            # Read the API key from the env var specified in config.
            # NOTE(review): if ENV[env_key] is unset this stores nil, which
            # unsets ANTHROPIC_AUTH_TOKEN in the child — confirm that is
            # the intended behavior rather than an error.
            if (env_key = backend_config["env_key"] || backend_config[:env_key])
              env["ANTHROPIC_AUTH_TOKEN"] = ENV[env_key]
            end

            # Clear ANTHROPIC_API_KEY so claude doesn't use cached Anthropic creds
            env["ANTHROPIC_API_KEY"] = ""

            # Map the tier alias to the backend's actual model name so
            # `--model sonnet` resolves to e.g. "glm-5" at the backend
            tier = resolve_model_tier
            if tier && model_name
              env["ANTHROPIC_DEFAULT_#{tier.upcase}_MODEL"] = model_name
            end

            env
          end

          # Resolve which Claude CLI tier alias to use for --model.
          # Looks up model_tiers in backend config; falls back to "sonnet".
          def resolve_model_tier
            backend_name, model_name = split_backend_model(@model)
            return "sonnet" unless backend_name && model_name

            backend_config = @backends[backend_name] || @backends[backend_name.to_sym] || {}
            tiers = backend_config["model_tiers"] || backend_config[:model_tiers] || {}

            # Find the tier whose value matches the requested model.
            # Note: first matching tier wins when multiple tiers map to the same model.
            matched = tiers.find { |_tier, m| m.to_s == model_name }
            return matched[0].to_s if matched

            # No explicit tier mapping — default to sonnet
            "sonnet"
          end

          # Turn raw CLI output into the {text:, metadata:} response hash.
          # @raise [Ace::LLM::ProviderError] on non-zero exit or unparseable JSON
          def parse_claude_response(stdout, stderr, status, prompt, options)
            unless status.success?
              error_msg = stderr.empty? ? stdout : stderr
              raise Ace::LLM::ProviderError, "Claude OAI CLI failed: #{error_msg}"
            end

            begin
              # allow_duplicate_key keeps json >= 2.10 from raising on
              # duplicate keys; older json versions ignore the option.
              response = JSON.parse(stdout, allow_duplicate_key: true)
            rescue JSON::ParserError => e
              raise Ace::LLM::ProviderError, "Failed to parse Claude OAI response: #{e.message}"
            end

            text = response["result"] || response["response"] || ""
            metadata = build_metadata(response, prompt, options)

            {
              text: text,
              metadata: metadata
            }
          end

          # Assemble metadata (token usage, cost, timing, session id) from the
          # CLI's JSON response.
          def build_metadata(response, prompt, options)
            usage = response["usage"] || {}

            metadata = {
              provider: "claudeoai",
              model: @model || DEFAULT_MODEL,
              input_tokens: usage["input_tokens"] || 0,
              output_tokens: usage["output_tokens"] || 0,
              total_tokens: (usage["input_tokens"] || 0) + (usage["output_tokens"] || 0),
              cached_tokens: usage["cache_read_input_tokens"] || 0,
              finish_reason: response["subtype"] || "success",
              took: (response["duration_ms"] || 0) / 1000.0, # ms -> seconds
              timestamp: Time.now.utc.iso8601
            }

            # Only the total cost is reported by the CLI; input/output split
            # is not available, so both are zeroed.
            if response["total_cost_usd"]
              metadata[:cost] = {
                input_cost: 0.0,
                output_cost: 0.0,
                total_cost: response["total_cost_usd"],
                currency: "USD"
              }
            end

            metadata[:session_id] = response["session_id"] if response["session_id"]

            metadata[:provider_specific] = {
              uuid: response["uuid"],
              service_tier: usage["service_tier"],
              duration_api_ms: response["duration_api_ms"],
              cache_creation_tokens: usage["cache_creation_input_tokens"]
            }.compact

            metadata
          end

          # Hook for provider-specific error translation; currently re-raises
          # so BaseClient's error flow handles it.
          def handle_claude_error(error)
            raise error
          end

          # Rewrite "/skill"-style commands in the prompt when a skills dir
          # with known skill names is available; otherwise the prompt is
          # returned untouched.
          def rewrite_skill_commands(prompt, working_dir: nil)
            skills_dir = resolve_skills_dir(working_dir: working_dir)
            return prompt unless skills_dir

            skill_names = @skill_name_reader.call(skills_dir)
            return prompt if skill_names.empty?

            Atoms::CommandRewriter.call(prompt, skill_names: skill_names, formatter: Atoms::CommandFormatters::CODEX_FORMATTER)
          end

          # Locate the skills directory: explicit config wins, then
          # .claude/skills and .agent/skills under the working dir.
          def resolve_skills_dir(working_dir: nil)
            configured = @options[:skills_dir] || @generation_config[:skills_dir]
            return configured if configured && Dir.exist?(configured)

            working_dir ||= Atoms::ExecutionContext.resolve_working_dir
            candidate_dirs = [
              File.join(working_dir, ".claude", "skills"),
              File.join(working_dir, ".agent", "skills")
            ]
            candidate_dirs.find { |dir| Dir.exist?(dir) }
          end

          # Opt-in subprocess debug logging via ACE_LLM_DEBUG_SUBPROCESS=1.
          def debug_subprocess(message)
            return unless ENV["ACE_LLM_DEBUG_SUBPROCESS"] == "1"

            warn("[ClaudeOaiClient] #{message}")
          end
        end
      end
    end
  end
end
@@ -0,0 +1,19 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative "atoms/args_normalizer"
4
+
5
module Ace
  module LLM
    module Providers
      module CLI
        # Shared mixin for CLI-backed clients: converts the caller-supplied
        # :cli_args option into a normalized argv fragment ready to append to
        # a subprocess command line.
        module CliArgsSupport
          private

          # Normalize user-provided CLI args found under options[:cli_args].
          # @param options [Hash] generation options (may omit :cli_args)
          # @return [Array<String>] normalized argument list
          def normalized_cli_args(options)
            normalizer = Atoms::ArgsNormalizer.new
            normalizer.normalize_cli_args(options[:cli_args])
          end
        end
      end
    end
  end
end
@@ -0,0 +1,291 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "json"
4
+ require "open3"
5
+ require "shellwords"
6
+
7
+ require_relative "cli_args_support"
8
+ require_relative "atoms/execution_context"
9
+ require_relative "atoms/command_rewriter"
10
+ require_relative "atoms/command_formatters"
11
+ require_relative "atoms/worktree_dir_resolver"
12
+ require_relative "molecules/skill_name_reader"
13
+
14
module Ace
  module LLM
    module Providers
      module CLI
        # Client for interacting with Codex CLI (OpenAI)
        # Provides access to Codex models through subprocess execution of
        # `codex exec` with the prompt delivered on stdin.
        class CodexClient < Ace::LLM::Organisms::BaseClient
          include CliArgsSupport

          # Not used for CLI interaction but required by BaseClient
          API_BASE_URL = "https://api.openai.com"
          DEFAULT_GENERATION_CONFIG = {}.freeze

          # Provider registration - auto-registers as "codex"
          def self.provider_name
            "codex"
          end

          # Default model (can be overridden by config)
          DEFAULT_MODEL = "gpt-5"

          # @param model [String, nil] model id, defaults to DEFAULT_MODEL
          # @param options [Hash] supports :generation_config, :timeout,
          #   :skills_dir among others
          def initialize(model: nil, **options)
            @model = model || DEFAULT_MODEL
            # Skip normal BaseClient initialization that requires an API key
            @options = options
            @generation_config = options[:generation_config] || {}
            @skill_name_reader = Molecules::SkillNameReader.new
          end

          # Override to indicate this client doesn't need API credentials
          def needs_credentials?
            false
          end

          # Generate a response from the LLM
          # @param messages [Array<Hash>, String] Conversation messages or raw prompt
          # @param options [Hash] Generation options (:sandbox, :working_dir,
          #   :subprocess_env, :last_message_file, :cli_args, ...)
          # @return [Hash] Response with :text and :metadata
          # @raise [Ace::LLM::ProviderError] when the CLI is missing or fails
          # @raise [Ace::LLM::AuthenticationError] when codex is not authenticated
          def generate(messages, **options)
            validate_codex_availability!

            # Convert messages to prompt format
            prompt = format_messages_as_prompt(messages)
            subprocess_env = options[:subprocess_env]
            working_dir = Atoms::ExecutionContext.resolve_working_dir(
              working_dir: options[:working_dir],
              subprocess_env: subprocess_env
            )
            prompt = rewrite_skill_commands(prompt, working_dir: working_dir)

            cmd = build_codex_command(prompt, options, working_dir: working_dir)
            # Thread the already-resolved working_dir through so command
            # construction and execution agree instead of re-resolving it.
            stdout, stderr, status = execute_codex_command(cmd, prompt, options, working_dir: working_dir)

            parse_codex_response(stdout, stderr, status, prompt, options)
          rescue => e
            handle_codex_error(e)
          end

          # List available Codex models
          def list_models
            # Return models based on what the CLI supports
            # Actual models come from YAML config
            [
              {id: "gpt-5", name: "GPT-5", description: "Advanced Codex model", context_size: 128_000},
              {id: "gpt-5-mini", name: "GPT-5 Mini", description: "Smaller, faster model", context_size: 128_000}
            ]
          end

          private

          # Flatten chat messages into a single "System:/User:/Assistant:"
          # prompt. Accepts a raw String (returned unchanged) or an array of
          # hashes with symbol or string keys.
          def format_messages_as_prompt(messages)
            return messages if messages.is_a?(String)

            # Extract system message if present
            system_msg = messages.find { |m| (m[:role] || m["role"]) == "system" }
            other_msgs = messages.reject { |m| (m[:role] || m["role"]) == "system" }

            # Format remaining messages
            formatted = other_msgs.map do |msg|
              role = msg[:role] || msg["role"]
              content = msg[:content] || msg["content"]

              case role
              when "user"
                "User: #{content}"
              when "assistant"
                "Assistant: #{content}"
              else
                content
              end
            end

            # Prepend system message if present
            if system_msg
              system_content = system_msg[:content] || system_msg["content"]
              formatted.unshift("System: #{system_content}")
            end

            formatted.join("\n\n")
          end

          # True when a `codex` executable is on PATH. Scans PATH directly so
          # the check also works on platforms without `which` (e.g. Windows,
          # where PATHEXT supplies the executable suffixes).
          def codex_available?
            extensions = ENV["PATHEXT"] ? ENV["PATHEXT"].split(";") : [""]
            ENV.fetch("PATH", "").split(File::PATH_SEPARATOR).any? do |dir|
              extensions.any? do |ext|
                candidate = File.join(dir, "codex#{ext}")
                File.file?(candidate) && File.executable?(candidate)
              end
            end
          end

          def validate_codex_availability!
            unless codex_available?
              raise Ace::LLM::ProviderError, "Codex CLI not found. Install with: npm install -g @openai/codex or visit https://codex.ai"
            end

            # Check if Codex is authenticated
            unless codex_authenticated?
              raise Ace::LLM::AuthenticationError, "Codex authentication required. Run 'codex login' or configure API key"
            end
          end

          # Best-effort auth probe: a working `codex --version` is treated as
          # authenticated; falls back to `codex --help` if that fails.
          def codex_authenticated?
            # Quick check if Codex can execute (will fail fast if not authenticated)
            stdout, _, status = Open3.capture3("codex", "--version")
            status.success? && (stdout.include?("codex") || stdout.include?("Codex"))
          rescue
            # If version check fails, try help command
            begin
              _, _, status = Open3.capture3("codex", "--help")
              status.success?
            rescue
              false
            end
          end

          # Assemble argv for `codex exec`. User-supplied :cli_args go last so
          # they win on conflicting flags (last-wins in most CLIs).
          def build_codex_command(prompt, options, working_dir: nil)
            working_dir ||= Atoms::ExecutionContext.resolve_working_dir(
              working_dir: options[:working_dir],
              subprocess_env: options[:subprocess_env]
            )
            # Use codex exec for non-interactive execution
            cmd = ["codex", "exec"]

            # Add sandbox mode if specified by caller
            if options[:sandbox]
              cmd << "--sandbox" << options[:sandbox].to_s
            end

            # Add model selection if not default
            if @model && @model != DEFAULT_MODEL
              cmd << "--model" << @model
            end

            # Note: Codex exec doesn't support direct system prompts or temperature/max_tokens
            # These would need to be incorporated into the prompt itself

            # Add writable dir for git worktree metadata
            if (git_dir = Atoms::WorktreeDirResolver.call(working_dir: working_dir))
              cmd << "--add-dir" << git_dir
            end

            # Capture last message progressively for timeout resilience
            if options[:last_message_file]
              cmd << "--output-last-message" << options[:last_message_file]
            end

            # User CLI args last so they take precedence (last-wins in most CLIs)
            cmd.concat(normalized_cli_args(options))

            cmd
          end

          # Run the codex CLI with the prompt on stdin. working_dir is the
          # pre-resolved execution dir; re-resolved only if not supplied.
          # @return [Array(String, String, Process::Status)] stdout, stderr, status
          def execute_codex_command(cmd, prompt, options, working_dir: nil)
            # Prepare the input - combine system prompt with user prompt if needed
            input = prompt.to_s

            # Check for system prompt in options or generation config
            system_content = options[:system_instruction] ||
              options[:system] ||
              options[:system_prompt] ||
              @generation_config[:system_prompt]

            # Inline the system prompt unless the prompt already carries one.
            if system_content && !prompt.include?("System:")
              input = "System: #{system_content}\n\nUser: #{input}"
            end

            timeout_val = @options[:timeout] || 120
            working_dir ||= Atoms::ExecutionContext.resolve_working_dir(
              working_dir: options[:working_dir],
              subprocess_env: options[:subprocess_env]
            )
            Molecules::SafeCapture.call(
              cmd,
              timeout: timeout_val,
              stdin_data: input,
              chdir: working_dir,
              provider_name: "Codex"
            )
          end

          # Turn raw CLI output into the {text:, metadata:} response hash.
          # @raise [Ace::LLM::ProviderError] on non-zero exit
          def parse_codex_response(stdout, stderr, status, prompt, options)
            unless status.success?
              error_msg = stderr.empty? ? stdout : stderr
              raise Ace::LLM::ProviderError, "Codex CLI failed: #{error_msg}"
            end

            # Parse Codex output format to extract the actual response:
            # everything after the first line containing "codex", minus
            # trailing "tokens used" footer lines.
            lines = stdout.split("\n")

            # Find where the actual response starts (after "codex" header)
            response_start = lines.find_index { |line| line.include?("codex") }

            if response_start && response_start < lines.length - 1
              # Extract text after the "codex" line, skipping empty lines
              response_lines = lines[(response_start + 1)..-1]
              # Remove token usage lines at the end
              response_lines = response_lines.reject { |line| line.include?("tokens used") }
              text = response_lines.join("\n").strip
            else
              # Fallback: use entire output if we can't parse the format
              text = stdout.strip
            end

            # Build metadata
            metadata = build_synthetic_metadata(text, prompt)

            # Return hash compatible with ace-llm format
            {
              text: text,
              metadata: metadata
            }
          end

          # Create synthetic metadata since Codex CLI might not provide
          # detailed usage info. Token counts are rough estimates.
          def build_synthetic_metadata(response_text, prompt)
            require "time" # Time#iso8601 lives in the stdlib "time" extension

            # Rough token estimation: ~4 chars per token
            prompt_tokens = (prompt.to_s.length / 4).round
            response_tokens = (response_text.length / 4).round

            {
              provider: "codex",
              model: @model || DEFAULT_MODEL,
              input_tokens: prompt_tokens,
              output_tokens: response_tokens,
              total_tokens: prompt_tokens + response_tokens,
              finish_reason: "success",
              timestamp: Time.now.utc.iso8601
            }
          end

          # Hook for provider-specific error translation; currently re-raises
          # for proper handling by the base client error flow.
          def handle_codex_error(error)
            raise error
          end

          # Rewrite "/skill"-style commands in the prompt when a skills dir
          # with known skill names is available; otherwise return it untouched.
          def rewrite_skill_commands(prompt, working_dir: nil)
            skills_dir = resolve_skills_dir(working_dir: working_dir)
            return prompt unless skills_dir

            skill_names = @skill_name_reader.call(skills_dir)
            return prompt if skill_names.empty?

            Atoms::CommandRewriter.call(prompt, skill_names: skill_names, formatter: Atoms::CommandFormatters::CODEX_FORMATTER)
          end

          # Locate the skills directory: explicit config wins, then
          # .codex/skills under the working dir.
          def resolve_skills_dir(working_dir: nil)
            configured = @options[:skills_dir] || @generation_config[:skills_dir]
            return configured if configured && Dir.exist?(configured)

            working_dir ||= Atoms::ExecutionContext.resolve_working_dir
            candidate_dir = File.join(working_dir, ".codex", "skills")
            candidate_dir if Dir.exist?(candidate_dir)
          end
        end
      end
    end
  end
end