pdd-cli 0.0.90__py3-none-any.whl → 0.0.118__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pdd/__init__.py +38 -6
- pdd/agentic_bug.py +323 -0
- pdd/agentic_bug_orchestrator.py +497 -0
- pdd/agentic_change.py +231 -0
- pdd/agentic_change_orchestrator.py +526 -0
- pdd/agentic_common.py +521 -786
- pdd/agentic_e2e_fix.py +319 -0
- pdd/agentic_e2e_fix_orchestrator.py +426 -0
- pdd/agentic_fix.py +118 -3
- pdd/agentic_update.py +25 -8
- pdd/architecture_sync.py +565 -0
- pdd/auth_service.py +210 -0
- pdd/auto_deps_main.py +63 -53
- pdd/auto_include.py +185 -3
- pdd/auto_update.py +125 -47
- pdd/bug_main.py +195 -23
- pdd/cmd_test_main.py +345 -197
- pdd/code_generator.py +4 -2
- pdd/code_generator_main.py +118 -32
- pdd/commands/__init__.py +6 -0
- pdd/commands/analysis.py +87 -29
- pdd/commands/auth.py +309 -0
- pdd/commands/connect.py +290 -0
- pdd/commands/fix.py +136 -113
- pdd/commands/maintenance.py +3 -2
- pdd/commands/misc.py +8 -0
- pdd/commands/modify.py +190 -164
- pdd/commands/sessions.py +284 -0
- pdd/construct_paths.py +334 -32
- pdd/context_generator_main.py +167 -170
- pdd/continue_generation.py +6 -3
- pdd/core/__init__.py +33 -0
- pdd/core/cli.py +27 -3
- pdd/core/cloud.py +237 -0
- pdd/core/errors.py +4 -0
- pdd/core/remote_session.py +61 -0
- pdd/crash_main.py +219 -23
- pdd/data/llm_model.csv +4 -4
- pdd/docs/prompting_guide.md +864 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/benchmark_analysis.py +495 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/creation_compare.py +528 -0
- pdd/fix_code_loop.py +208 -34
- pdd/fix_code_module_errors.py +6 -2
- pdd/fix_error_loop.py +291 -38
- pdd/fix_main.py +204 -4
- pdd/fix_verification_errors_loop.py +235 -26
- pdd/fix_verification_main.py +269 -83
- pdd/frontend/dist/assets/index-B5DZHykP.css +1 -0
- pdd/frontend/dist/assets/index-DQ3wkeQ2.js +449 -0
- pdd/frontend/dist/index.html +376 -0
- pdd/frontend/dist/logo.svg +33 -0
- pdd/generate_output_paths.py +46 -5
- pdd/generate_test.py +212 -151
- pdd/get_comment.py +19 -44
- pdd/get_extension.py +8 -9
- pdd/get_jwt_token.py +309 -20
- pdd/get_language.py +8 -7
- pdd/get_run_command.py +7 -5
- pdd/insert_includes.py +2 -1
- pdd/llm_invoke.py +459 -95
- pdd/load_prompt_template.py +15 -34
- pdd/path_resolution.py +140 -0
- pdd/postprocess.py +4 -1
- pdd/preprocess.py +68 -12
- pdd/preprocess_main.py +33 -1
- pdd/prompts/agentic_bug_step10_pr_LLM.prompt +182 -0
- pdd/prompts/agentic_bug_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_bug_step2_docs_LLM.prompt +129 -0
- pdd/prompts/agentic_bug_step3_triage_LLM.prompt +95 -0
- pdd/prompts/agentic_bug_step4_reproduce_LLM.prompt +97 -0
- pdd/prompts/agentic_bug_step5_root_cause_LLM.prompt +123 -0
- pdd/prompts/agentic_bug_step6_test_plan_LLM.prompt +107 -0
- pdd/prompts/agentic_bug_step7_generate_LLM.prompt +172 -0
- pdd/prompts/agentic_bug_step8_verify_LLM.prompt +119 -0
- pdd/prompts/agentic_bug_step9_e2e_test_LLM.prompt +289 -0
- pdd/prompts/agentic_change_step10_identify_issues_LLM.prompt +1006 -0
- pdd/prompts/agentic_change_step11_fix_issues_LLM.prompt +984 -0
- pdd/prompts/agentic_change_step12_create_pr_LLM.prompt +131 -0
- pdd/prompts/agentic_change_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_change_step2_docs_LLM.prompt +101 -0
- pdd/prompts/agentic_change_step3_research_LLM.prompt +126 -0
- pdd/prompts/agentic_change_step4_clarify_LLM.prompt +164 -0
- pdd/prompts/agentic_change_step5_docs_change_LLM.prompt +981 -0
- pdd/prompts/agentic_change_step6_devunits_LLM.prompt +1005 -0
- pdd/prompts/agentic_change_step7_architecture_LLM.prompt +1044 -0
- pdd/prompts/agentic_change_step8_analyze_LLM.prompt +1027 -0
- pdd/prompts/agentic_change_step9_implement_LLM.prompt +1077 -0
- pdd/prompts/agentic_e2e_fix_step1_unit_tests_LLM.prompt +90 -0
- pdd/prompts/agentic_e2e_fix_step2_e2e_tests_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step3_root_cause_LLM.prompt +89 -0
- pdd/prompts/agentic_e2e_fix_step4_fix_e2e_tests_LLM.prompt +96 -0
- pdd/prompts/agentic_e2e_fix_step5_identify_devunits_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step6_create_unit_tests_LLM.prompt +106 -0
- pdd/prompts/agentic_e2e_fix_step7_verify_tests_LLM.prompt +116 -0
- pdd/prompts/agentic_e2e_fix_step8_run_pdd_fix_LLM.prompt +120 -0
- pdd/prompts/agentic_e2e_fix_step9_verify_all_LLM.prompt +146 -0
- pdd/prompts/agentic_fix_primary_LLM.prompt +2 -2
- pdd/prompts/agentic_update_LLM.prompt +192 -338
- pdd/prompts/auto_include_LLM.prompt +22 -0
- pdd/prompts/change_LLM.prompt +3093 -1
- pdd/prompts/detect_change_LLM.prompt +571 -14
- pdd/prompts/fix_code_module_errors_LLM.prompt +8 -0
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +1 -0
- pdd/prompts/generate_test_LLM.prompt +20 -1
- pdd/prompts/generate_test_from_example_LLM.prompt +115 -0
- pdd/prompts/insert_includes_LLM.prompt +262 -252
- pdd/prompts/prompt_code_diff_LLM.prompt +119 -0
- pdd/prompts/prompt_diff_LLM.prompt +82 -0
- pdd/remote_session.py +876 -0
- pdd/server/__init__.py +52 -0
- pdd/server/app.py +335 -0
- pdd/server/click_executor.py +587 -0
- pdd/server/executor.py +338 -0
- pdd/server/jobs.py +661 -0
- pdd/server/models.py +241 -0
- pdd/server/routes/__init__.py +31 -0
- pdd/server/routes/architecture.py +451 -0
- pdd/server/routes/auth.py +364 -0
- pdd/server/routes/commands.py +929 -0
- pdd/server/routes/config.py +42 -0
- pdd/server/routes/files.py +603 -0
- pdd/server/routes/prompts.py +1322 -0
- pdd/server/routes/websocket.py +473 -0
- pdd/server/security.py +243 -0
- pdd/server/terminal_spawner.py +209 -0
- pdd/server/token_counter.py +222 -0
- pdd/summarize_directory.py +236 -237
- pdd/sync_animation.py +8 -4
- pdd/sync_determine_operation.py +329 -47
- pdd/sync_main.py +272 -28
- pdd/sync_orchestration.py +136 -75
- pdd/template_expander.py +161 -0
- pdd/templates/architecture/architecture_json.prompt +41 -46
- pdd/trace.py +1 -1
- pdd/track_cost.py +0 -13
- pdd/unfinished_prompt.py +2 -1
- pdd/update_main.py +23 -5
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/METADATA +15 -10
- pdd_cli-0.0.118.dist-info/RECORD +227 -0
- pdd_cli-0.0.90.dist-info/RECORD +0 -153
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/WHEEL +0 -0
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/licenses/LICENSE +0 -0
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.118.dist-info}/top_level.txt +0 -0
pdd/agentic_common.py
CHANGED

@@ -1,863 +1,598 @@
-# pdd/agentic_common.py
 from __future__ import annotations

-import json
 import os
-import
+import sys
+import json
 import shutil
 import subprocess
+import tempfile
+import uuid
+import re
 from pathlib import Path
-from typing import
+from typing import List, Optional, Tuple, Dict, Any, Union
+from dataclasses import dataclass

 from rich.console import Console

+try:
+    from pdd.llm_invoke import _load_model_data
+except ImportError:
+    def _load_model_data(*args, **kwargs):
+        return None

+# Constants
 AGENT_PROVIDER_PREFERENCE: List[str] = ["anthropic", "google", "openai"]

-# CLI command mapping for each provider
-CLI_COMMANDS: Dict[str, str] = {
-    "anthropic": "claude",
-    "google": "gemini",
-    "openai": "codex",
-}
-
-# Timeouts
 DEFAULT_TIMEOUT_SECONDS: float = 240.0
+MIN_VALID_OUTPUT_LENGTH: int = 50

+# GitHub State Markers
+GITHUB_STATE_MARKER_START = "<!-- PDD_WORKFLOW_STATE:"
+GITHUB_STATE_MARKER_END = "-->"

-@dataclass
-class TokenPricing:
-    """
-    Simple per-token pricing descriptor.
-
-    Prices are expressed in USD per 1,000,000 tokens.
-    cached_input_multiplier is the fraction of full input price charged
-    for cached tokens (e.g. 0.25 == 75% discount).
-    """
+@dataclass
+class Pricing:
     input_per_million: float
     output_per_million: float
     cached_input_multiplier: float = 1.0

-    "flash": TokenPricing(input_per_million=0.35, output_per_million=1.05, cached_input_multiplier=0.5),
-    "pro": TokenPricing(input_per_million=3.50, output_per_million=10.50, cached_input_multiplier=0.5),
-    "default": TokenPricing(input_per_million=0.35, output_per_million=1.05, cached_input_multiplier=0.5),
+# Pricing Configuration
+# Gemini: Based on test expectations (Flash: $0.35/$1.05, Cached 50%)
+GEMINI_PRICING_BY_FAMILY = {
+    "flash": Pricing(0.35, 1.05, 0.5),
+    "pro": Pricing(3.50, 10.50, 0.5),  # Placeholder for Pro
 }

-# Codex
-CODEX_PRICING
-    input_per_million=1.50,
-    output_per_million=6.00,
-    cached_input_multiplier=0.25,  # 75% discount for cached tokens
-)
-
-# ---------------------------------------------------------------------------
-# Logging utilities (Rich-based, respect verbose/quiet flags)
-# ---------------------------------------------------------------------------
-
-def _format_label(label: str) -> str:
-    return f"[{label}] " if label else ""
-
-def log_info(message: str, *, verbose: bool, quiet: bool, label: str = "") -> None:
-    """
-    Log an informational message.
-
-    Skips output when quiet=True.
-    """
-    if quiet:
-        return
-    prefix = _format_label(label)
-    console.print(f"{prefix}{message}")
-
-def log_debug(message: str, *, verbose: bool, quiet: bool, label: str = "") -> None:
-    """
-    Log a debug message.
-
-    Only emits output when verbose=True and quiet=False.
-    """
-    if quiet or not verbose:
-        return
-    prefix = _format_label(label)
-    console.log(f"{prefix}{message}")
-
-def log_error(message: str, *, verbose: bool, quiet: bool, label: str = "") -> None:
-    """
-    Log an error message.
-
-    Errors are always printed, even in quiet mode.
-    """
-    prefix = _format_label(label)
-    console.print(f"[red]{prefix}{message}[/red]")
-
-# ---------------------------------------------------------------------------
-# Internal helpers
-# ---------------------------------------------------------------------------
-
-def _safe_load_model_data() -> Any | None:
-    """
-    Best-effort wrapper around _load_model_data.
-
-    This is used as part of provider availability checks so that we
-    respect whatever configuration llm_invoke is using (including
-    any API-key related metadata encoded in the model CSV).
-    """
-    try:
-        return _load_model_data(LLM_MODEL_CSV_PATH)
-    except Exception:
-        return None
-
-def _provider_has_api_key(provider: str, model_data: Any | None) -> bool:
-    """
-    Determine whether the given provider has an API key or CLI auth configured.
-
-    This function:
-    - For Anthropic: Also checks if Claude CLI is available (subscription auth)
-      which doesn't require an API key.
-    - Attempts to infer API-key environment variable names from the
-      llm_invoke model data (if it is a DataFrame-like object).
-    - Falls back to well-known default environment variable names.
-
-    The actual presence of API keys is checked via os.environ.
-    """
-    env = os.environ
-
-    # For Anthropic: Check if Claude CLI is available for subscription auth
-    # This is more robust as it uses the user's Claude subscription instead of API credits
-    if provider == "anthropic":
-        if shutil.which("claude"):
-            # Claude CLI is available - we can use subscription auth
-            # even without an API key
-            return True
-
-    # Try to extract env var hints from model_data, if it looks like a DataFrame.
-    inferred_env_vars: List[str] = []
-    if model_data is not None:
-        try:
-            columns = list(getattr(model_data, "columns", []))
-            if "provider" in columns:
-                # DataFrame-like path
-                try:
-                    df = model_data  # type: ignore[assignment]
-                    # Filter rows matching provider name (case-insensitive)
-                    provider_mask = df["provider"].str.lower() == provider.lower()  # type: ignore[index]
-                    provider_rows = df[provider_mask]
-                    # Look for any column that might specify an API-key env var
-                    candidate_cols = [
-                        c
-                        for c in columns
-                        if "api" in c.lower() and "key" in c.lower() or "env" in c.lower()
-                    ]
-                    for _, row in provider_rows.iterrows():  # type: ignore[attr-defined]
-                        for col in candidate_cols:
-                            value = str(row.get(col, "")).strip()
-                            # Heuristic: looks like an env var name (upper & contains underscore)
-                            if value and value.upper() == value and "_" in value:
-                                inferred_env_vars.append(value)
-                except Exception:
-                    # If anything above fails, we silently fall back to defaults.
-                    pass
-        except Exception:
-            pass
-
-    default_env_map: Dict[str, List[str]] = {
-        "anthropic": ["ANTHROPIC_API_KEY"],
-        "google": ["GEMINI_API_KEY", "GOOGLE_API_KEY"],
-        "openai": ["OPENAI_API_KEY"],
-    }
-
-    env_candidates = inferred_env_vars or default_env_map.get(provider, [])
-    return any(env.get(name) for name in env_candidates)
-
-def _get_agent_timeout() -> float:
-    """
-    Resolve the agentic subprocess timeout from environment, with a sane default.
-    """
-    raw = os.getenv(TIMEOUT_ENV_VAR)
-    if not raw:
-        return DEFAULT_TIMEOUT_SECONDS
-    try:
-        value = float(raw)
-        if value <= 0:
-            raise ValueError
-        return value
-    except ValueError:
-        return DEFAULT_TIMEOUT_SECONDS
-
-def _build_subprocess_env(
-    base: Optional[Mapping[str, str]] = None,
-    *,
-    use_cli_auth: bool = False,
-) -> Dict[str, str]:
-    """
-    Build a sanitized environment for non-interactive subprocess execution.
-
-    Ensures:
-    - TERM=dumb
-    - NO_COLOR=1
-    - CI=1
-    while preserving existing variables (including API keys).
-
-    Args:
-        base: Optional base environment mapping (defaults to os.environ).
-        use_cli_auth: If True, remove ANTHROPIC_API_KEY to force Claude CLI
-                      subscription auth instead of API key auth. This is more
-                      robust as it uses the user's Claude subscription.
-    """
-    env: Dict[str, str] = dict(base or os.environ)
-    env.setdefault("TERM", "dumb")
-    env.setdefault("NO_COLOR", "1")
-    env.setdefault("CI", "1")
-
-    if use_cli_auth:
-        # Remove API key to force Claude CLI subscription auth
-        env.pop("ANTHROPIC_API_KEY", None)
-
-    return env
-
-def _build_provider_command(
-    provider: str,
-    instruction: str,
-    *,
-    use_interactive_mode: bool = False,
-) -> List[str]:
-    """
-    Build the CLI command line for the given provider.
-
-    Provider commands:
-
-    - Anthropic (Claude Code):
-      Normal: ["claude", "-p", <instruction>, "--dangerously-skip-permissions", "--output-format", "json"]
-      Interactive (more robust, uses subscription auth):
-      ["claude", "--dangerously-skip-permissions", "--output-format", "json", <instruction>]
-
-    - Google (Gemini CLI):
-      Normal: ["gemini", "-p", <instruction>, "--yolo", "--output-format", "json"]
-      Interactive: ["gemini", "--yolo", "--output-format", "json", <instruction>]
-
-    - OpenAI (Codex CLI):
-      ["codex", "exec", "--full-auto", "--json", <instruction>]
-
-    Args:
-        provider: The provider name ("anthropic", "google", "openai").
-        instruction: The instruction to pass to the CLI.
-        use_interactive_mode: If True, use interactive mode instead of -p flag.
-                              This is more robust for Anthropic as it uses
-                              subscription auth and allows full file access.
-    """
-    if provider == "anthropic":
-        if use_interactive_mode:
-            # Interactive mode: no -p flag, uses subscription auth
-            # This allows full file access and is more robust
-            return [
-                "claude",
-                "--dangerously-skip-permissions",
-                "--output-format",
-                "json",
-                instruction,
-            ]
-        else:
-            return [
-                "claude",
-                "-p",
-                instruction,
-                "--dangerously-skip-permissions",
-                "--output-format",
-                "json",
-            ]
-    if provider == "google":
-        if use_interactive_mode:
-            # Interactive mode for Gemini
-            return [
-                "gemini",
-                "--yolo",
-                "--output-format",
-                "json",
-                instruction,
-            ]
-        else:
-            return [
-                "gemini",
-                "-p",
-                instruction,
-                "--yolo",
-                "--output-format",
-                "json",
-            ]
-    if provider == "openai":
-        return [
-            "codex",
-            "exec",
-            "--full-auto",
-            "--json",
-            instruction,
-        ]
-    raise ValueError(f"Unknown provider: {provider}")
+# Codex: Based on test expectations ($1.50/$6.00, Cached 25%)
+CODEX_PRICING = Pricing(1.50, 6.00, 0.25)

+console = Console()

-def
+def get_available_agents() -> List[str]:
     """
+    Returns list of available provider names based on CLI existence and API key configuration.
     """
-    if "flash" in lower:
-        return "flash"
-    if "pro" in lower:
-        return "pro"
-    return "default"
-
-def _safe_int(value: Any) -> int:
-    try:
-        return int(value)
-    except (TypeError, ValueError):
-        return 0
+    available = []

+    # 1. Anthropic (Claude)
+    # Available if 'claude' CLI exists. API key not strictly required (subscription auth).
+    if shutil.which("claude"):
+        available.append("anthropic")

+    # 2. Google (Gemini)
+    # Available if 'gemini' CLI exists AND (API key is set OR Vertex AI auth is configured)
+    has_gemini_cli = shutil.which("gemini") is not None
+    has_google_key = os.environ.get("GOOGLE_API_KEY") or os.environ.get("GEMINI_API_KEY")
+    has_vertex_auth = (
+        os.environ.get("GOOGLE_APPLICATION_CREDENTIALS") and
+        os.environ.get("GOOGLE_GENAI_USE_VERTEXAI") == "true"
+    )
+
+    if has_gemini_cli and (has_google_key or has_vertex_auth):
+        available.append("google")

+    # 3. OpenAI (Codex)
+    # Available if 'codex' CLI exists AND OPENAI_API_KEY is set
+    if shutil.which("codex") and os.environ.get("OPENAI_API_KEY"):
+        available.append("openai")

-    Cached tokens are charged at a discounted rate.
-    """
-    models = stats.get("models") or {}
-    if not isinstance(models, Mapping):
-        return 0.0
+    return available

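A minimal usage sketch of the new availability check (not part of the diff; it assumes the module is importable as pdd.agentic_common, and run_agentic_task performs the same preference filtering internally):

    from pdd.agentic_common import AGENT_PROVIDER_PREFERENCE, get_available_agents

    agents = get_available_agents()  # e.g. ["anthropic", "google"]
    # Providers are tried in preference order, restricted to what is available.
    candidates = [p for p in AGENT_PROVIDER_PREFERENCE if p in agents]
    print(candidates)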
+def _calculate_gemini_cost(stats: Dict[str, Any]) -> float:
+    """Calculates cost for Gemini based on token stats."""
     total_cost = 0.0
-    tokens =
-        * pricing.input_per_million
-        * pricing.cached_input_multiplier
-        / 1_000_000
-    )
-    cost_output = output_tokens * pricing.output_per_million / 1_000_000
-
-    total_cost += cost_input_new + cost_input_cached + cost_output
+    models = stats.get("models", {})
+
+    for model_name, data in models.items():
+        tokens = data.get("tokens", {})
+        prompt = tokens.get("prompt", 0)
+        candidates = tokens.get("candidates", 0)
+        cached = tokens.get("cached", 0)
+
+        # Determine pricing family
+        family = "flash" if "flash" in model_name.lower() else "pro"
+        pricing = GEMINI_PRICING_BY_FAMILY.get(family, GEMINI_PRICING_BY_FAMILY["flash"])
+
+        # Logic: new_input = max(0, prompt - cached)
+        # Assuming 'prompt' is total input tokens
+        new_input = max(0, prompt - cached)
+
+        input_cost = (new_input / 1_000_000) * pricing.input_per_million
+        cached_cost = (cached / 1_000_000) * pricing.input_per_million * pricing.cached_input_multiplier
+        output_cost = (candidates / 1_000_000) * pricing.output_per_million
+
+        total_cost += input_cost + cached_cost + output_cost
+
     return total_cost

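A worked example of the Flash rates above, with invented token counts (10,000 prompt tokens of which 4,000 were cached, plus 2,000 candidate tokens):

    prompt, cached, candidates = 10_000, 4_000, 2_000
    new_input = max(0, prompt - cached)              # 6,000 uncached input tokens

    input_cost = (new_input / 1_000_000) * 0.35      # $0.00210
    cached_cost = (cached / 1_000_000) * 0.35 * 0.5  # $0.00070 (50% cache discount)
    output_cost = (candidates / 1_000_000) * 1.05    # $0.00210
    total = input_cost + cached_cost + output_cost   # $0.00490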
-    - output_tokens
-    - cached_input_tokens
-
-    Cached tokens are charged at a 75% discount (i.e. 25% of full price).
-    """
-    input_tokens = _safe_int(usage.get("input_tokens"))
-    output_tokens = _safe_int(usage.get("output_tokens"))
-    cached_input_tokens = _safe_int(usage.get("cached_input_tokens"))
-
-    new_input_tokens = max(input_tokens - cached_input_tokens, 0)
-    effective_cached_tokens = min(cached_input_tokens, input_tokens)
+def _calculate_codex_cost(usage: Dict[str, Any]) -> float:
+    """Calculates cost for Codex based on usage stats."""
+    input_tokens = usage.get("input_tokens", 0)
+    output_tokens = usage.get("output_tokens", 0)
+    cached_tokens = usage.get("cached_input_tokens", 0)
+
     pricing = CODEX_PRICING
+
+    # Logic: new_input = max(0, input - cached)
+    new_input = max(0, input_tokens - cached_tokens)
+
+    input_cost = (new_input / 1_000_000) * pricing.input_per_million
+    cached_cost = (cached_tokens / 1_000_000) * pricing.input_per_million * pricing.cached_input_multiplier
+    output_cost = (output_tokens / 1_000_000) * pricing.output_per_million
+
+    return input_cost + cached_cost + output_cost

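The Codex path follows the same shape with the $1.50/$6.00 rates and a 25% cached multiplier (i.e. a 75% discount). A parallel worked example with invented counts of 8,000 input tokens (2,000 cached) and 1,500 output tokens:

    new_input_cost = (6_000 / 1_000_000) * 1.50         # $0.00900
    cached_cost = (2_000 / 1_000_000) * 1.50 * 0.25     # $0.00075
    output_cost = (1_500 / 1_000_000) * 6.00            # $0.00900
    total = new_input_cost + cached_cost + output_cost  # $0.01875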
-    return cost_input_new + cost_input_cached + cost_output
-
-def _parse_anthropic_result(data: Mapping[str, Any]) -> Tuple[bool, str, float]:
-    """
-    Parse Claude Code (Anthropic) JSON result.
-
-    Expected:
-    - data["response"]: main content
-    - data["error"]: optional error block
-    - data["total_cost_usd"]: total cost in USD (if available)
+def run_agentic_task(
+    instruction: str,
+    cwd: Path,
+    *,
+    verbose: bool = False,
+    quiet: bool = False,
+    label: str = "",
+    timeout: Optional[float] = None
+) -> Tuple[bool, str, float, str]:
     """
+    Runs an agentic task using available providers in preference order.
+
+    Returns:
+        (success, output_text, cost_usd, provider_used)
+    """
+    agents = get_available_agents()
+
+    # Filter agents based on preference order
+    candidates = [p for p in AGENT_PROVIDER_PREFERENCE if p in agents]
+
+    if not candidates:
+        msg = "No agent providers are available (check CLI installation and API keys)"
+        if not quiet:
+            console.print(f"[bold red]{msg}[/bold red]")
+        return False, msg, 0.0, ""
+
+    effective_timeout = timeout if timeout is not None else DEFAULT_TIMEOUT_SECONDS
+
+    # Create a unique temp file for the prompt
+    prompt_filename = f".agentic_prompt_{uuid.uuid4().hex[:8]}.txt"
+    prompt_path = cwd / prompt_filename
+
+    full_instruction = (
+        f"{instruction}\n\n"
+        f"Read the file {prompt_filename} for instructions. "
+        "You have full file access to explore and modify files as needed."
+    )

-    cost_raw = data.get("total_cost_usd")
     try:
+        # Write prompt to file
+        with open(prompt_path, "w", encoding="utf-8") as f:
+            f.write(full_instruction)

-def _parse_gemini_result(data: Mapping[str, Any]) -> Tuple[bool, str, float]:
-    """
-    Parse Gemini CLI JSON result.
-
-    Expected high-level structure:
-    {
-      "response": "string",
-      "stats": { ... per-model token usage ... },
-      "error": { ... }  # optional
-    }
-    """
-    error_info = data.get("error")
-    has_error = bool(error_info)
+        for provider in candidates:
+            if verbose:
+                console.print(f"[dim]Attempting provider: {provider} for task '{label}'[/dim]")

-        error_msg = str(error_info)
-    else:
-        error_msg = ""
-
-    response_text = str(data.get("response") or "")
-    if not response_text and error_msg:
-        response_text = error_msg
-
-    stats = data.get("stats") or {}
-    cost = 0.0
-    if isinstance(stats, Mapping):
-        try:
-            cost = _calculate_gemini_cost(stats)
-        except Exception:
-            cost = 0.0
-
-    return (not has_error, response_text, cost)
-
-def _extract_codex_usage(stdout: str) -> Optional[Mapping[str, Any]]:
-    """
-    Extract the latest `usage` object from Codex JSONL output.
-
-    The `codex exec --json` command emits newline-delimited JSON events.
-    We scan all lines and keep the most recent event containing a `usage` key.
-    """
-    last_usage: Optional[Mapping[str, Any]] = None
-    for line in stdout.splitlines():
-        line = line.strip()
-        if not line:
-            continue
-        try:
-            event = json.loads(line)
-        except json.JSONDecodeError:
-            continue
-        usage = event.get("usage")
-        if isinstance(usage, Mapping):
-            last_usage = usage
-    return last_usage
-
-def _extract_codex_output(stdout: str) -> str:
-    """
-    Extract assistant-visible output text from Codex JSONL output.
+            success, output, cost = _run_with_provider(
+                provider, prompt_path, cwd, effective_timeout, verbose, quiet
+            )

-            assistant_messages.append("".join(parts))
-
-    if assistant_messages:
-        return "\n".join(assistant_messages)
-
-    return stdout.strip()
+            # False Positive Detection
+            if success:
+                is_false_positive = (cost == 0.0 and len(output.strip()) < MIN_VALID_OUTPUT_LENGTH)
+
+                if is_false_positive:
+                    if not quiet:
+                        console.print(f"[bold red]Provider '{provider}' returned success but appears to be a false positive (Cost: {cost}, Len: {len(output)})[/bold red]")
+                    # Treat as failure, try next provider
+                    continue
+
+                # Check for suspicious files (C, E, T)
+                suspicious = []
+                for name in ["C", "E", "T"]:
+                    if (cwd / name).exists():
+                        suspicious.append(name)
+
+                if suspicious:
+                    console.print(f"[bold red]SUSPICIOUS FILES DETECTED: {', '.join(['- ' + s for s in suspicious])}[/bold red]")
+
+                # Real success
+                return True, output, cost, provider
+            else:
+                if verbose:
+                    console.print(f"[yellow]Provider {provider} failed: {output}[/yellow]")
+
+        return False, "All agent providers failed", 0.0, ""

+    finally:
+        # Cleanup prompt file
+        if prompt_path.exists():
+            try:
+                os.remove(prompt_path)
+            except OSError:
+                pass

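A caller sketch for the new entry point, using the signature added above (the instruction text and label are invented):

    from pathlib import Path
    from pdd.agentic_common import run_agentic_task

    success, output, cost, provider = run_agentic_task(
        "Fix the failing unit test in tests/test_example.py",
        Path("."),
        verbose=True,
        label="agentic-fix",
    )
    if success:
        print(f"{provider} completed the task for ${cost:.4f}")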
 def _run_with_provider(
-    provider: str,
-    cwd: Path,
-    verbose: bool,
-    quiet: bool
-    label: str = "",
+    provider: str,
+    prompt_path: Path,
+    cwd: Path,
+    timeout: float = DEFAULT_TIMEOUT_SECONDS,
+    verbose: bool = False,
+    quiet: bool = False
 ) -> Tuple[bool, str, float]:
     """
-    For Anthropic (Claude), uses subscription auth (removes API key from env)
-    and interactive mode (no -p flag) for more robust authentication that
-    doesn't require API credits.
-
-    Returns:
-        (success, message, cost)
-
-        - success: True if the CLI completed successfully without reported errors
-        - message: natural-language output on success, or error description on failure
-        - cost: estimated USD cost for this attempt
+    Internal helper to run a specific provider's CLI.
+    Returns (success, output_or_error, cost).
     """
+
+    # Prepare Environment
+    env = os.environ.copy()
+    env["TERM"] = "dumb"
+    env["NO_COLOR"] = "1"
+    env["CI"] = "1"
+
+    cmd: List[str] = []
+
+    # Construct Command
+    if provider == "anthropic":
+        # Remove API key to force subscription auth if configured that way
+        env.pop("ANTHROPIC_API_KEY", None)
+        # Note: Tests expect NO -p flag for Anthropic, and prompt path as last arg
+        cmd = [
+            "claude",
+            "--dangerously-skip-permissions",
+            "--output-format", "json",
+            str(prompt_path)
+        ]
+    elif provider == "google":
+        cmd = [
+            "gemini",
+            "-p", str(prompt_path),
+            "--yolo",
+            "--output-format", "json"
+        ]
+    elif provider == "openai":
+        cmd = [
+            "codex",
+            "exec",
+            "--full-auto",
+            "--json",
+            str(prompt_path)
+        ]
+    else:
+        return False, f"Unknown provider {provider}", 0.0

     try:
+        result = subprocess.run(
             cmd,
-            cwd=
+            cwd=cwd,
             env=env,
             capture_output=True,
             text=True,
-            timeout=timeout
-            check=False,
+            timeout=timeout
         )
-    except FileNotFoundError:
-        message = f"CLI command for provider '{provider}' was not found."
-        log_error(message, verbose=verbose, quiet=quiet, label=label)
-        return False, message, 0.0
     except subprocess.TimeoutExpired:
-        return False,
-    except Exception as exc:
-        message = f"Error invoking provider '{provider}': {exc}"
-        log_error(message, verbose=verbose, quiet=quiet, label=label)
-        return False, message, 0.0
-
-    stdout = completed.stdout or ""
-    stderr = completed.stderr or ""
-    if verbose and stdout:
-        log_debug(f"{provider} stdout:\n{stdout}", verbose=verbose, quiet=quiet, label=label)
-    if verbose and stderr:
-        log_debug(f"{provider} stderr:\n{stderr}", verbose=verbose, quiet=quiet, label=label)
-
-    # Default assumptions
-    success = completed.returncode == 0
-    cost = 0.0
-    message: str
+        return False, "Timeout expired", 0.0
+    except Exception as e:
+        return False, str(e), 0.0

-    raw_json = stdout.strip() or stderr.strip()
-    if not raw_json:
-        message = f"Provider '{provider}' produced no JSON output."
-        log_error(message, verbose=verbose, quiet=quiet, label=label)
-        return False, message, 0.0
+    if result.returncode != 0:
+        return False, f"Exit code {result.returncode}: {result.stderr}", 0.0

-    usage = _extract_codex_usage(stdout)
-    if usage is not None:
-        try:
-            cost = _calculate_codex_cost(usage)
-        except Exception:
-            cost = 0.0
+    # Parse JSON Output
+    try:
+        # Handle JSONL output (Codex sometimes streams)
+        output_str = result.stdout.strip()
+        data = {}
+
+        if provider == "openai" and "\n" in output_str:
+            # Parse JSONL, look for result type
+            lines = output_str.splitlines()
+            for line in lines:
+                try:
+                    item = json.loads(line)
+                    if item.get("type") == "result":
+                        data = item
+                        break
+                except json.JSONDecodeError:
+                    continue
+            # If no result block found, try parsing last line
+            if not data and lines:
+                try:
+                    data = json.loads(lines[-1])
+                except:
+                    pass
+        else:
+            data = json.loads(output_str)
+
+        return _parse_provider_json(provider, data)
+    except json.JSONDecodeError:
+        # Fallback if CLI didn't output valid JSON (sometimes happens on crash)
+        return False, f"Invalid JSON output: {result.stdout[:200]}...", 0.0

-        else f"Codex CLI failed with exit code {completed.returncode}.\n\nstderr:\n{stderr.strip()}"
-    )
-    elif not message:
-        message = f"Codex CLI failed with exit code {completed.returncode}."
+def _parse_provider_json(provider: str, data: Dict[str, Any]) -> Tuple[bool, str, float]:
+    """
+    Extracts (success, text_response, cost_usd) from provider JSON.
+    """
+    cost = 0.0
+    output_text = ""

+    try:
+        if provider == "anthropic":
+            # Anthropic usually provides direct cost
+            cost = float(data.get("total_cost_usd", 0.0))
+            # Result might be in 'result' or 'response'
+            output_text = data.get("result") or data.get("response") or ""
+
+        elif provider == "google":
+            stats = data.get("stats", {})
+            cost = _calculate_gemini_cost(stats)
+            output_text = data.get("result") or data.get("response") or data.get("output") or ""

+        elif provider == "openai":
+            usage = data.get("usage", {})
+            cost = _calculate_codex_cost(usage)
+            output_text = data.get("result") or data.get("output") or ""

+        return True, str(output_text), cost

-# ---------------------------------------------------------------------------
+    except Exception as e:
+        return False, f"Error parsing {provider} JSON: {e}", 0.0

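Hypothetical payloads illustrating the three shapes this parser handles; the field values are invented, and only the keys mirror the code above:

    anthropic_payload = {"result": "Done.", "total_cost_usd": 0.0123}
    gemini_payload = {
        "response": "Done.",
        "stats": {"models": {"gemini-flash": {
            "tokens": {"prompt": 10_000, "candidates": 2_000, "cached": 4_000}}}},
    }
    codex_payload = {
        "type": "result",
        "result": "Done.",
        "usage": {"input_tokens": 8_000, "output_tokens": 1_500, "cached_input_tokens": 2_000},
    }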
-    """
-    Return a list of available agent providers, e.g. ["anthropic", "google"].
+# --- GitHub State Persistence ---

-    - Its API key appears configured (using llm_invoke's model data plus
-      well-known environment variables)
-    """
-    model_data = _safe_load_model_data()
-    available: List[str] = []
-
-    for provider in AGENT_PROVIDER_PREFERENCE:
-        cli = CLI_COMMANDS.get(provider)
-        if not cli:
-            continue
-        if shutil.which(cli) is None:
-            continue
-        if not _provider_has_api_key(provider, model_data):
-            continue
-        available.append(provider)
+def _build_state_marker(workflow_type: str, issue_number: int) -> str:
+    return f"{GITHUB_STATE_MARKER_START}{workflow_type}:issue-{issue_number}"

+def _serialize_state_comment(workflow_type: str, issue_number: int, state: Dict) -> str:
+    marker = _build_state_marker(workflow_type, issue_number)
+    json_str = json.dumps(state, indent=2)
+    return f"{marker}\n{json_str}\n{GITHUB_STATE_MARKER_END}"

+def _parse_state_from_comment(body: str, workflow_type: str, issue_number: int) -> Optional[Dict]:
+    marker = _build_state_marker(workflow_type, issue_number)
+    if marker not in body:
+        return None
+
+    try:
+        # Extract content between marker and end marker
+        start_idx = body.find(marker) + len(marker)
+        end_idx = body.find(GITHUB_STATE_MARKER_END, start_idx)
+
+        if end_idx == -1:
+            return None
+
+        json_str = body[start_idx:end_idx].strip()
+        return json.loads(json_str)
+    except (json.JSONDecodeError, ValueError):
+        return None

-def
-) -> Tuple[bool, str, float, str]:
+def _find_state_comment(
+    repo_owner: str,
+    repo_name: str,
+    issue_number: int,
+    workflow_type: str,
+    cwd: Path
+) -> Optional[Tuple[int, Dict]]:
     """
-    The task is executed in headless mode with JSON output for structured
-    parsing and real cost tracking.
-
-    Process:
-    1. Write `instruction` into a unique temp file named
-       `.agentic_prompt_<random>.txt` under `cwd`.
-    2. Build agentic meta-instruction:
-
-       "Read the file {prompt_file} for instructions. You have full file
-       access to explore and modify files as needed."
-
-    3. Try providers in `AGENT_PROVIDER_PREFERENCE` order, but only those
-       returned by `get_available_agents()`.
-    4. For each provider:
-       - Invoke its CLI in headless JSON mode with file-write permissions.
-       - Parse JSON to extract response text and cost.
-       - On success, stop and return.
-       - On failure, proceed to next provider.
-    5. Clean up the temp prompt file.
-
-    Args:
-        instruction: Natural-language instruction describing the task.
-        cwd: Project root where the agent should operate.
-        verbose: Enable verbose logging (debug output).
-        quiet: Suppress non-error logging.
-        label: Optional label prefix for log messages (e.g. "agentic-fix").
-
-    Returns:
-        Tuple[bool, str, float, str]:
-        - success: Whether the task completed successfully.
-        - output: On success, the agent's main response text.
-          On failure, a human-readable error message.
-        - cost: Total estimated USD cost across all provider attempts.
-        - provider_used: Name of the successful provider
-          ("anthropic", "google", or "openai"),
-          or "" if no provider succeeded.
+    Returns (comment_id, state_dict) if found, else None.
     """
-    if not
-        log_error(message, verbose=verbose, quiet=quiet, label=label)
-        return False, message, 0.0, ""
-
-    if not cwd.exists() or not cwd.is_dir():
-        message = f"Working directory does not exist or is not a directory: {cwd}"
-        log_error(message, verbose=verbose, quiet=quiet, label=label)
-        return False, message, 0.0, ""
-
-    available = get_available_agents()
-    if not available:
-        message = "No agent providers are available. Ensure CLI tools and API keys are configured."
-        log_error(message, verbose=verbose, quiet=quiet, label=label)
-        return False, message, 0.0, ""
-
-    log_info(
-        f"Available providers (in preference order): {', '.join(available)}",
-        verbose=verbose,
-        quiet=quiet,
-        label=label,
-    )
-
-    # 1. Write user instruction into a unique prompt file under cwd
-    prompt_token = secrets.token_hex(8)
-    prompt_file = cwd / f".agentic_prompt_{prompt_token}.txt"
+    if not shutil.which("gh"):
+        return None

     try:
+        # List comments
+        cmd = [
+            "gh", "api",
+            f"repos/{repo_owner}/{repo_name}/issues/{issue_number}/comments",
+            "--method", "GET"
+        ]
+        result = subprocess.run(cmd, cwd=cwd, capture_output=True, text=True)
+        if result.returncode != 0:
+            return None
+
+        comments = json.loads(result.stdout)
+        marker = _build_state_marker(workflow_type, issue_number)
+
+        for comment in comments:
+            body = comment.get("body", "")
+            if marker in body:
+                state = _parse_state_from_comment(body, workflow_type, issue_number)
+                if state:
+                    return comment["id"], state
+
+        return None
+    except Exception:
+        return None

+def github_save_state(
+    repo_owner: str,
+    repo_name: str,
+    issue_number: int,
+    workflow_type: str,
+    state: Dict,
+    cwd: Path,
+    comment_id: Optional[int] = None
+) -> Optional[int]:
+    """
+    Creates or updates a GitHub comment with the state. Returns new/existing comment_id.
+    """
+    if not shutil.which("gh"):
+        return None

+    body = _serialize_state_comment(workflow_type, issue_number, state)
+
     try:
-        label=label,
-    )
-    return True, message, total_cost, provider
-
-    provider_errors.append(f"{provider}: {message}")
-    log_error(
-        f"Provider '{provider}' failed: {message}",
-        verbose=verbose,
-        quiet=quiet,
-        label=label,
-    )
+        if comment_id:
+            # PATCH existing
+            cmd = [
+                "gh", "api",
+                f"repos/{repo_owner}/{repo_name}/issues/comments/{comment_id}",
+                "-X", "PATCH",
+                "-f", f"body={body}"
+            ]
+            res = subprocess.run(cmd, cwd=cwd, capture_output=True, text=True)
+            if res.returncode == 0:
+                return comment_id
+        else:
+            # POST new
+            cmd = [
+                "gh", "api",
+                f"repos/{repo_owner}/{repo_name}/issues/{issue_number}/comments",
+                "-X", "POST",
+                "-f", f"body={body}"
+            ]
+            res = subprocess.run(cmd, cwd=cwd, capture_output=True, text=True)
+            if res.returncode == 0:
+                data = json.loads(res.stdout)
+                return data.get("id")
+
+        return None
+    except Exception:
+        return None

+def github_load_state(
+    repo_owner: str,
+    repo_name: str,
+    issue_number: int,
+    workflow_type: str,
+    cwd: Path
+) -> Tuple[Optional[Dict], Optional[int]]:
+    """
+    Wrapper to find state. Returns (state, comment_id).
+    """
+    result = _find_state_comment(repo_owner, repo_name, issue_number, workflow_type, cwd)
+    if result:
+        return result[1], result[0]
+    return None, None
+
+def github_clear_state(
+    repo_owner: str,
+    repo_name: str,
+    issue_number: int,
+    workflow_type: str,
+    cwd: Path
+) -> bool:
+    """
+    Deletes the state comment if it exists.
+    """
+    result = _find_state_comment(repo_owner, repo_name, issue_number, workflow_type, cwd)
+    if not result:
+        return True  # Already clear
+
+    comment_id = result[0]
+    try:
+        cmd = [
+            "gh", "api",
+            f"repos/{repo_owner}/{repo_name}/issues/comments/{comment_id}",
+            "-X", "DELETE"
+        ]
+        subprocess.run(cmd, cwd=cwd, capture_output=True)
+        return True
+    except Exception:
+        return False
+
+def _should_use_github_state(use_github_state: bool) -> bool:
+    if not use_github_state:
+        return False
+    if os.environ.get("PDD_NO_GITHUB_STATE") == "1":
+        return False
+    return True
+
+# --- High Level State Wrappers ---
+
+def load_workflow_state(
+    cwd: Path,
+    issue_number: int,
+    workflow_type: str,
+    state_dir: Path,
+    repo_owner: str,
+    repo_name: str,
+    use_github_state: bool = True
+) -> Tuple[Optional[Dict], Optional[int]]:
+    """
+    Loads state from GitHub (priority) or local file.
+    Returns (state_dict, github_comment_id).
+    """
+    local_file = state_dir / f"{workflow_type}_state_{issue_number}.json"
+
+    # Try GitHub first
+    if _should_use_github_state(use_github_state):
+        gh_state, gh_id = github_load_state(repo_owner, repo_name, issue_number, workflow_type, cwd)
+        if gh_state:
+            # Cache locally
+            try:
+                state_dir.mkdir(parents=True, exist_ok=True)
+                with open(local_file, "w") as f:
+                    json.dump(gh_state, f, indent=2)
+            except Exception:
+                pass  # Ignore local cache errors
+            return gh_state, gh_id

+    # Fallback to local
+    if local_file.exists():
+        try:
+            with open(local_file, "r") as f:
+                return json.load(f), None
+        except Exception:
+            pass
+
+    return None, None
+
+def save_workflow_state(
+    cwd: Path,
+    issue_number: int,
+    workflow_type: str,
+    state: Dict,
+    state_dir: Path,
+    repo_owner: str,
+    repo_name: str,
+    use_github_state: bool = True,
+    github_comment_id: Optional[int] = None
+) -> Optional[int]:
+    """
+    Saves state to local file and GitHub.
+    Returns updated github_comment_id.
+    """
+    local_file = state_dir / f"{workflow_type}_state_{issue_number}.json"
+
+    # 1. Save Local
+    try:
+        state_dir.mkdir(parents=True, exist_ok=True)
+        with open(local_file, "w") as f:
+            json.dump(state, f, indent=2)
+    except Exception as e:
+        console.print(f"[yellow]Warning: Failed to save local state: {e}[/yellow]")
+
+    # 2. Save GitHub
+    if _should_use_github_state(use_github_state):
+        new_id = github_save_state(
+            repo_owner, repo_name, issue_number, workflow_type, state, cwd, github_comment_id
+        )
+        if new_id:
+            return new_id
+        else:
+            console.print("[dim]Warning: Failed to sync state to GitHub[/dim]")
+
+    return github_comment_id
+
+def clear_workflow_state(
+    cwd: Path,
+    issue_number: int,
+    workflow_type: str,
+    state_dir: Path,
+    repo_owner: str,
+    repo_name: str,
+    use_github_state: bool = True
+) -> None:
+    """
+    Clears local and GitHub state.
+    """
+    local_file = state_dir / f"{workflow_type}_state_{issue_number}.json"
+
+    # Clear Local
+    if local_file.exists():
         try:
+            os.remove(local_file)
+        except Exception:
+            pass
+
+    # Clear GitHub
+    if _should_use_github_state(use_github_state):
+        github_clear_state(repo_owner, repo_name, issue_number, workflow_type, cwd)