pdd-cli 0.0.45__py3-none-any.whl → 0.0.118__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pdd/__init__.py +40 -8
- pdd/agentic_bug.py +323 -0
- pdd/agentic_bug_orchestrator.py +497 -0
- pdd/agentic_change.py +231 -0
- pdd/agentic_change_orchestrator.py +526 -0
- pdd/agentic_common.py +598 -0
- pdd/agentic_crash.py +534 -0
- pdd/agentic_e2e_fix.py +319 -0
- pdd/agentic_e2e_fix_orchestrator.py +426 -0
- pdd/agentic_fix.py +1294 -0
- pdd/agentic_langtest.py +162 -0
- pdd/agentic_update.py +387 -0
- pdd/agentic_verify.py +183 -0
- pdd/architecture_sync.py +565 -0
- pdd/auth_service.py +210 -0
- pdd/auto_deps_main.py +71 -51
- pdd/auto_include.py +245 -5
- pdd/auto_update.py +125 -47
- pdd/bug_main.py +196 -23
- pdd/bug_to_unit_test.py +2 -0
- pdd/change_main.py +11 -4
- pdd/cli.py +22 -1181
- pdd/cmd_test_main.py +350 -150
- pdd/code_generator.py +60 -18
- pdd/code_generator_main.py +790 -57
- pdd/commands/__init__.py +48 -0
- pdd/commands/analysis.py +306 -0
- pdd/commands/auth.py +309 -0
- pdd/commands/connect.py +290 -0
- pdd/commands/fix.py +163 -0
- pdd/commands/generate.py +257 -0
- pdd/commands/maintenance.py +175 -0
- pdd/commands/misc.py +87 -0
- pdd/commands/modify.py +256 -0
- pdd/commands/report.py +144 -0
- pdd/commands/sessions.py +284 -0
- pdd/commands/templates.py +215 -0
- pdd/commands/utility.py +110 -0
- pdd/config_resolution.py +58 -0
- pdd/conflicts_main.py +8 -3
- pdd/construct_paths.py +589 -111
- pdd/context_generator.py +10 -2
- pdd/context_generator_main.py +175 -76
- pdd/continue_generation.py +53 -10
- pdd/core/__init__.py +33 -0
- pdd/core/cli.py +527 -0
- pdd/core/cloud.py +237 -0
- pdd/core/dump.py +554 -0
- pdd/core/errors.py +67 -0
- pdd/core/remote_session.py +61 -0
- pdd/core/utils.py +90 -0
- pdd/crash_main.py +262 -33
- pdd/data/language_format.csv +71 -63
- pdd/data/llm_model.csv +20 -18
- pdd/detect_change_main.py +5 -4
- pdd/docs/prompting_guide.md +864 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/benchmark_analysis.py +495 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/creation_compare.py +528 -0
- pdd/fix_code_loop.py +523 -95
- pdd/fix_code_module_errors.py +6 -2
- pdd/fix_error_loop.py +491 -92
- pdd/fix_errors_from_unit_tests.py +4 -3
- pdd/fix_main.py +278 -21
- pdd/fix_verification_errors.py +12 -100
- pdd/fix_verification_errors_loop.py +529 -286
- pdd/fix_verification_main.py +294 -89
- pdd/frontend/dist/assets/index-B5DZHykP.css +1 -0
- pdd/frontend/dist/assets/index-DQ3wkeQ2.js +449 -0
- pdd/frontend/dist/index.html +376 -0
- pdd/frontend/dist/logo.svg +33 -0
- pdd/generate_output_paths.py +139 -15
- pdd/generate_test.py +218 -146
- pdd/get_comment.py +19 -44
- pdd/get_extension.py +8 -9
- pdd/get_jwt_token.py +318 -22
- pdd/get_language.py +8 -7
- pdd/get_run_command.py +75 -0
- pdd/get_test_command.py +68 -0
- pdd/git_update.py +70 -19
- pdd/incremental_code_generator.py +2 -2
- pdd/insert_includes.py +13 -4
- pdd/llm_invoke.py +1711 -181
- pdd/load_prompt_template.py +19 -12
- pdd/path_resolution.py +140 -0
- pdd/pdd_completion.fish +25 -2
- pdd/pdd_completion.sh +30 -4
- pdd/pdd_completion.zsh +79 -4
- pdd/postprocess.py +14 -4
- pdd/preprocess.py +293 -24
- pdd/preprocess_main.py +41 -6
- pdd/prompts/agentic_bug_step10_pr_LLM.prompt +182 -0
- pdd/prompts/agentic_bug_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_bug_step2_docs_LLM.prompt +129 -0
- pdd/prompts/agentic_bug_step3_triage_LLM.prompt +95 -0
- pdd/prompts/agentic_bug_step4_reproduce_LLM.prompt +97 -0
- pdd/prompts/agentic_bug_step5_root_cause_LLM.prompt +123 -0
- pdd/prompts/agentic_bug_step6_test_plan_LLM.prompt +107 -0
- pdd/prompts/agentic_bug_step7_generate_LLM.prompt +172 -0
- pdd/prompts/agentic_bug_step8_verify_LLM.prompt +119 -0
- pdd/prompts/agentic_bug_step9_e2e_test_LLM.prompt +289 -0
- pdd/prompts/agentic_change_step10_identify_issues_LLM.prompt +1006 -0
- pdd/prompts/agentic_change_step11_fix_issues_LLM.prompt +984 -0
- pdd/prompts/agentic_change_step12_create_pr_LLM.prompt +131 -0
- pdd/prompts/agentic_change_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_change_step2_docs_LLM.prompt +101 -0
- pdd/prompts/agentic_change_step3_research_LLM.prompt +126 -0
- pdd/prompts/agentic_change_step4_clarify_LLM.prompt +164 -0
- pdd/prompts/agentic_change_step5_docs_change_LLM.prompt +981 -0
- pdd/prompts/agentic_change_step6_devunits_LLM.prompt +1005 -0
- pdd/prompts/agentic_change_step7_architecture_LLM.prompt +1044 -0
- pdd/prompts/agentic_change_step8_analyze_LLM.prompt +1027 -0
- pdd/prompts/agentic_change_step9_implement_LLM.prompt +1077 -0
- pdd/prompts/agentic_crash_explore_LLM.prompt +49 -0
- pdd/prompts/agentic_e2e_fix_step1_unit_tests_LLM.prompt +90 -0
- pdd/prompts/agentic_e2e_fix_step2_e2e_tests_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step3_root_cause_LLM.prompt +89 -0
- pdd/prompts/agentic_e2e_fix_step4_fix_e2e_tests_LLM.prompt +96 -0
- pdd/prompts/agentic_e2e_fix_step5_identify_devunits_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step6_create_unit_tests_LLM.prompt +106 -0
- pdd/prompts/agentic_e2e_fix_step7_verify_tests_LLM.prompt +116 -0
- pdd/prompts/agentic_e2e_fix_step8_run_pdd_fix_LLM.prompt +120 -0
- pdd/prompts/agentic_e2e_fix_step9_verify_all_LLM.prompt +146 -0
- pdd/prompts/agentic_fix_explore_LLM.prompt +45 -0
- pdd/prompts/agentic_fix_harvest_only_LLM.prompt +48 -0
- pdd/prompts/agentic_fix_primary_LLM.prompt +85 -0
- pdd/prompts/agentic_update_LLM.prompt +925 -0
- pdd/prompts/agentic_verify_explore_LLM.prompt +45 -0
- pdd/prompts/auto_include_LLM.prompt +122 -905
- pdd/prompts/change_LLM.prompt +3093 -1
- pdd/prompts/detect_change_LLM.prompt +686 -27
- pdd/prompts/example_generator_LLM.prompt +22 -1
- pdd/prompts/extract_code_LLM.prompt +5 -1
- pdd/prompts/extract_program_code_fix_LLM.prompt +7 -1
- pdd/prompts/extract_prompt_update_LLM.prompt +7 -8
- pdd/prompts/extract_promptline_LLM.prompt +17 -11
- pdd/prompts/find_verification_errors_LLM.prompt +6 -0
- pdd/prompts/fix_code_module_errors_LLM.prompt +12 -2
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +9 -0
- pdd/prompts/fix_verification_errors_LLM.prompt +22 -0
- pdd/prompts/generate_test_LLM.prompt +41 -7
- pdd/prompts/generate_test_from_example_LLM.prompt +115 -0
- pdd/prompts/increase_tests_LLM.prompt +1 -5
- pdd/prompts/insert_includes_LLM.prompt +316 -186
- pdd/prompts/prompt_code_diff_LLM.prompt +119 -0
- pdd/prompts/prompt_diff_LLM.prompt +82 -0
- pdd/prompts/trace_LLM.prompt +25 -22
- pdd/prompts/unfinished_prompt_LLM.prompt +85 -1
- pdd/prompts/update_prompt_LLM.prompt +22 -1
- pdd/pytest_output.py +127 -12
- pdd/remote_session.py +876 -0
- pdd/render_mermaid.py +236 -0
- pdd/server/__init__.py +52 -0
- pdd/server/app.py +335 -0
- pdd/server/click_executor.py +587 -0
- pdd/server/executor.py +338 -0
- pdd/server/jobs.py +661 -0
- pdd/server/models.py +241 -0
- pdd/server/routes/__init__.py +31 -0
- pdd/server/routes/architecture.py +451 -0
- pdd/server/routes/auth.py +364 -0
- pdd/server/routes/commands.py +929 -0
- pdd/server/routes/config.py +42 -0
- pdd/server/routes/files.py +603 -0
- pdd/server/routes/prompts.py +1322 -0
- pdd/server/routes/websocket.py +473 -0
- pdd/server/security.py +243 -0
- pdd/server/terminal_spawner.py +209 -0
- pdd/server/token_counter.py +222 -0
- pdd/setup_tool.py +648 -0
- pdd/simple_math.py +2 -0
- pdd/split_main.py +3 -2
- pdd/summarize_directory.py +237 -195
- pdd/sync_animation.py +8 -4
- pdd/sync_determine_operation.py +839 -112
- pdd/sync_main.py +351 -57
- pdd/sync_orchestration.py +1400 -756
- pdd/sync_tui.py +848 -0
- pdd/template_expander.py +161 -0
- pdd/template_registry.py +264 -0
- pdd/templates/architecture/architecture_json.prompt +237 -0
- pdd/templates/generic/generate_prompt.prompt +174 -0
- pdd/trace.py +168 -12
- pdd/trace_main.py +4 -3
- pdd/track_cost.py +140 -63
- pdd/unfinished_prompt.py +51 -4
- pdd/update_main.py +567 -67
- pdd/update_model_costs.py +2 -2
- pdd/update_prompt.py +19 -4
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/METADATA +29 -11
- pdd_cli-0.0.118.dist-info/RECORD +227 -0
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/licenses/LICENSE +1 -1
- pdd_cli-0.0.45.dist-info/RECORD +0 -116
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/WHEEL +0 -0
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/top_level.txt +0 -0
pdd/code_generator_main.py
CHANGED
@@ -1,10 +1,12 @@
 import os
-import
+import re
 import json
 import pathlib
 import shlex
 import subprocess
 import requests
+import tempfile
+import sys
 from typing import Optional, Tuple, Dict, Any, List

 import click
@@ -18,19 +20,37 @@ from .construct_paths import construct_paths
 from .preprocess import preprocess as pdd_preprocess
 from .code_generator import code_generator as local_code_generator_func
 from .incremental_code_generator import incremental_code_generator as incremental_code_generator_func
-from .
+from .core.cloud import CloudConfig
+from .python_env_detector import detect_host_python_executable
+from .architecture_sync import (
+    get_architecture_entry_for_prompt,
+    has_pdd_tags,
+    generate_tags_from_architecture,
+)

-#
-FIREBASE_API_KEY_ENV_VAR = "NEXT_PUBLIC_FIREBASE_API_KEY"
-GITHUB_CLIENT_ID_ENV_VAR = "GITHUB_CLIENT_ID"
-PDD_APP_NAME = "PDD Code Generator"
-
-# Cloud function URL
-CLOUD_GENERATE_URL = "https://us-central1-prompt-driven-development.cloudfunctions.net/generateCode"
+# Cloud request timeout
 CLOUD_REQUEST_TIMEOUT = 400 # seconds

 console = Console()

+# --- Helper Functions ---
+def _parse_llm_bool(value: str) -> bool:
+    """Parse LLM boolean value from string."""
+    if not value:
+        return True
+    llm_str = str(value).strip().lower()
+    if llm_str in {"0", "false", "no", "off"}:
+        return False
+    else:
+        return llm_str in {"1", "true", "yes", "on"}
+
+def _env_flag_enabled(name: str) -> bool:
+    """Return True when an env var is set to a truthy value."""
+    value = os.environ.get(name)
+    if value is None:
+        return False
+    return str(value).strip().lower() in {"1", "true", "yes", "on"}
+
 # --- Git Helper Functions ---
 def _run_git_command(command: List[str], cwd: Optional[str] = None) -> Tuple[int, str, str]:
     """Runs a git command and returns (return_code, stdout, stderr)."""
@@ -59,6 +79,102 @@ def is_git_repository(path: Optional[str] = None) -> bool:
     return False


+def _expand_vars(text: str, vars_map: Optional[Dict[str, str]]) -> str:
+    """Replace $KEY and ${KEY} in text when KEY exists in vars_map. Leave others unchanged."""
+    if not text or not vars_map:
+        return text
+
+    def repl_braced(m: re.Match) -> str:
+        key = m.group(1)
+        return vars_map.get(key, m.group(0))
+
+    def repl_simple(m: re.Match) -> str:
+        key = m.group(1)
+        return vars_map.get(key, m.group(0))
+
+    # Replace ${KEY} first, then $KEY
+    text = re.sub(r"\$\{([A-Za-z_][A-Za-z0-9_]*)\}", repl_braced, text)
+    text = re.sub(r"\$([A-Za-z_][A-Za-z0-9_]*)", repl_simple, text)
+    return text
+
+
+def _parse_front_matter(text: str) -> Tuple[Optional[Dict[str, Any]], str]:
+    """Parse YAML front matter at the start of a prompt and return (meta, body)."""
+    try:
+        if not text.startswith("---\n"):
+            return None, text
+        end_idx = text.find("\n---", 4)
+        if end_idx == -1:
+            return None, text
+        fm_body = text[4:end_idx]
+        rest = text[end_idx + len("\n---"):]
+        if rest.startswith("\n"):
+            rest = rest[1:]
+        import yaml as _yaml
+        meta = _yaml.safe_load(fm_body) or {}
+        if not isinstance(meta, dict):
+            meta = {}
+        return meta, rest
+    except Exception:
+        return None, text
+
+
+def _is_architecture_template(meta: Optional[Dict[str, Any]]) -> bool:
+    """Detect the packaged architecture JSON template via its front matter name."""
+    return isinstance(meta, dict) and meta.get("name") == "architecture/architecture_json"
+
+
+def _repair_architecture_interface_types(payload: Any) -> Tuple[Any, bool]:
+    """
+    Patch common LLM slip-ups for the architecture template where interface.type
+    occasionally returns an unsupported value like "object". Only normalizes the
+    interface.type field and leaves other schema issues untouched so validation
+    still fails for genuinely malformed outputs.
+    """
+    allowed_types = {
+        "component",
+        "page",
+        "module",
+        "api",
+        "graphql",
+        "cli",
+        "job",
+        "message",
+        "config",
+    }
+    changed = False
+    if not isinstance(payload, list):
+        return payload, changed
+
+    for entry in payload:
+        if not isinstance(entry, dict):
+            continue
+        interface = entry.get("interface")
+        if not isinstance(interface, dict):
+            continue
+        raw_type = interface.get("type")
+        normalized = raw_type.lower() if isinstance(raw_type, str) else None
+        if normalized in allowed_types:
+            if normalized != raw_type:
+                interface["type"] = normalized
+                changed = True
+            continue
+
+        inferred_type = None
+        for key in ("page", "component", "module", "api", "graphql", "cli", "job", "message", "config"):
+            if isinstance(interface.get(key), dict):
+                inferred_type = key
+                break
+        if inferred_type is None:
+            inferred_type = "module"
+
+        if raw_type != inferred_type:
+            interface["type"] = inferred_type
+            changed = True
+
+    return payload, changed
+
+
 def get_git_content_at_ref(file_path: str, git_ref: str = "HEAD") -> Optional[str]:
     """Gets the content of the file as it was at the specified git_ref."""
     abs_file_path = pathlib.Path(file_path).resolve()
@@ -124,6 +240,28 @@ def git_add_files(file_paths: List[str], verbose: bool = False) -> bool:
     return False
 # --- End Git Helper Functions ---

+def _find_default_test_files(tests_dir: Optional[str], code_file_path: Optional[str]) -> List[str]:
+    """Find default test files for a given code file in the tests directory."""
+    if not tests_dir or not code_file_path:
+        return []
+
+    tests_path = pathlib.Path(tests_dir)
+    code_path = pathlib.Path(code_file_path)
+
+    if not tests_path.exists() or not tests_path.is_dir():
+        return []
+
+    code_stem = code_path.stem
+    code_suffix = code_path.suffix
+
+    # Look for files starting with test_{code_stem}
+    # We look for test_{code_stem}*.{code_suffix}
+    # e.g., hello.py -> test_hello.py, test_hello_1.py
+    pattern = f"test_{code_stem}*{code_suffix}"
+    found_files = list(tests_path.glob(pattern))
+
+    return [str(p) for p in sorted(found_files)]
+

 def code_generator_main(
     ctx: click.Context,
@@ -131,6 +269,9 @@
     output: Optional[str],
     original_prompt_file_path: Optional[str],
     force_incremental_flag: bool,
+    env_vars: Optional[Dict[str, str]] = None,
+    unit_test_file: Optional[str] = None,
+    exclude_tests: bool = False,
 ) -> Tuple[str, bool, float, str]:
     """
     CLI wrapper for generating code from prompts. Handles full and incremental generation,
@@ -157,20 +298,97 @@
     command_options: Dict[str, Any] = {"output": output}

     try:
+        # Read prompt content once to determine LLM state and for construct_paths
+        with open(prompt_file, 'r', encoding='utf-8') as f:
+            raw_prompt_content = f.read()
+
+        # Phase-2 templates: parse front matter metadata
+        fm_meta, body = _parse_front_matter(raw_prompt_content)
+        if fm_meta:
+            prompt_content = body
+        else:
+            prompt_content = raw_prompt_content
+
+        # Determine LLM state early to avoid unnecessary overwrite prompts
+        llm_enabled: bool = True
+        env_llm_raw = None
+        try:
+            if env_vars and 'llm' in env_vars:
+                env_llm_raw = str(env_vars.get('llm'))
+            elif os.environ.get('llm') is not None:
+                env_llm_raw = os.environ.get('llm')
+            elif os.environ.get('LLM') is not None:
+                env_llm_raw = os.environ.get('LLM')
+        except Exception:
+            env_llm_raw = None
+
+        # Environment variables should override front matter
+        if env_llm_raw is not None:
+            llm_enabled = _parse_llm_bool(env_llm_raw)
+        elif fm_meta and isinstance(fm_meta, dict) and 'llm' in fm_meta:
+            llm_enabled = bool(fm_meta.get('llm', True))
+        # else: keep default True
+
+        # If LLM is disabled, we're only doing post-processing, so skip overwrite confirmation
+        effective_force = force_overwrite or not llm_enabled
+
         resolved_config, input_strings, output_file_paths, language = construct_paths(
             input_file_paths=input_file_paths_dict,
-            force=
+            force=effective_force,
             quiet=quiet,
             command="generate",
             command_options=command_options,
+            context_override=ctx.obj.get('context'),
+            confirm_callback=cli_params.get('confirm_callback')
         )
-
-
-
+        # Determine final output path: if user passed a directory, use resolved file path
+        resolved_output = output_file_paths.get("output")
+        if output is None:
+            output_path = resolved_output
+        else:
+            try:
+                is_dir_hint = output.endswith(os.path.sep) or output.endswith("/")
+            except Exception:
+                is_dir_hint = False
+            if is_dir_hint or os.path.isdir(output):
+                output_path = resolved_output
+            else:
+                output_path = output
+
+        # --- Unit Test Inclusion Logic ---
+        test_files_to_include: List[str] = []
+        if unit_test_file:
+            test_files_to_include.append(unit_test_file)
+        elif not exclude_tests:
+            # Try to find default test files
+            tests_dir = resolved_config.get("tests_dir")
+            found_tests = _find_default_test_files(tests_dir, output_path)
+            if found_tests:
+                if verbose:
+                    console.print(f"[info]Found default test files: {', '.join(found_tests)}[/info]")
+                test_files_to_include.extend(found_tests)
+
+        if test_files_to_include:
+            prompt_content += "\n\n<unit_test_content>\n"
+            prompt_content += "The following is the unit test content that the generated code must pass:\n"
+            for tf in test_files_to_include:
+                try:
+                    with open(tf, 'r', encoding='utf-8') as f:
+                        content = f.read()
+                    # If multiple files, label them? Or just concat?
+                    # Using code block with file path comment is safer for context.
+                    prompt_content += f"\nFile: {pathlib.Path(tf).name}\n```python\n{content}\n```\n"
+                except Exception as e:
+                    console.print(f"[yellow]Warning: Could not read unit test file {tf}: {e}[/yellow]")
+            prompt_content += "</unit_test_content>\n"
+        # ---------------------------------

     except FileNotFoundError as e:
         console.print(f"[red]Error: Input file not found: {e.filename}[/red]")
         return "", False, 0.0, "error"
+    except click.Abort:
+        # User cancelled - re-raise to stop the sync loop
+        raise
     except Exception as e:
         console.print(f"[red]Error during path construction: {e}[/red]")
         return "", False, 0.0, "error"
@@ -179,6 +397,108 @@
     existing_code_content: Optional[str] = None
     original_prompt_content_for_incremental: Optional[str] = None

+    # Merge -e vars with front-matter defaults; validate required
+    if env_vars is None:
+        env_vars = {}
+    if fm_meta and isinstance(fm_meta.get("variables"), dict):
+        for k, spec in (fm_meta["variables"].items()):
+            if isinstance(spec, dict):
+                if k not in env_vars and "default" in spec:
+                    env_vars[k] = str(spec["default"])
+                # if scalar default allowed, ignore for now
+        missing = [k for k, spec in fm_meta["variables"].items() if isinstance(spec, dict) and spec.get("required") and k not in env_vars]
+        if missing:
+            console.print(f"[error]Missing required variables: {', '.join(missing)}")
+            return "", False, 0.0, "error"
+
+    # Execute optional discovery from front matter to populate env_vars without overriding explicit -e values
+    def _run_discovery(discover_cfg: Dict[str, Any]) -> Dict[str, str]:
+        results: Dict[str, str] = {}
+        try:
+            if not discover_cfg:
+                return results
+            enabled = discover_cfg.get("enabled", False)
+            if not enabled:
+                return results
+            root = discover_cfg.get("root", ".")
+            patterns = discover_cfg.get("patterns", []) or []
+            exclude = discover_cfg.get("exclude", []) or []
+            max_per = int(discover_cfg.get("max_per_pattern", 0) or 0)
+            max_total = int(discover_cfg.get("max_total", 0) or 0)
+            root_path = pathlib.Path(root).resolve()
+            seen: List[str] = []
+            def _match_one(patterns_list: List[str]) -> List[str]:
+                matches: List[str] = []
+                for pat in patterns_list:
+                    globbed = list(root_path.rglob(pat))
+                    for p in globbed:
+                        if any(p.match(ex) for ex in exclude):
+                            continue
+                        sp = str(p.resolve())
+                        if sp not in matches:
+                            matches.append(sp)
+                        if max_per and len(matches) >= max_per:
+                            matches = matches[:max_per]
+                            break
+                return matches
+            # If a mapping 'set' is provided, compute per-variable results
+            set_map = discover_cfg.get("set") or {}
+            if isinstance(set_map, dict) and set_map:
+                for var_name, spec in set_map.items():
+                    if var_name in env_vars:
+                        continue  # don't override explicit -e
+                    v_patterns = spec.get("patterns", []) if isinstance(spec, dict) else []
+                    v_exclude = spec.get("exclude", []) if isinstance(spec, dict) else []
+                    save_exclude = exclude
+                    try:
+                        if v_exclude:
+                            exclude = v_exclude
+                        matches = _match_one(v_patterns or patterns)
+                    finally:
+                        exclude = save_exclude
+                    if matches:
+                        results[var_name] = ",".join(matches)
+                        seen.extend(matches)
+            # Fallback: populate SCAN_FILES and SCAN metadata
+            if not results:
+                files = _match_one(patterns)
+                if max_total and len(files) > max_total:
+                    files = files[:max_total]
+                if files:
+                    results["SCAN_FILES"] = ",".join(files)
+            # Always set root/patterns helpers
+            if root:
+                results.setdefault("SCAN_ROOT", str(root_path))
+            if patterns:
+                results.setdefault("SCAN_PATTERNS", ",".join(patterns))
+        except Exception as e:
+            if verbose and not quiet:
+                console.print(f"[yellow]Discovery skipped due to error: {e}[/yellow]")
+        return results
+
+    if fm_meta and isinstance(fm_meta.get("discover"), dict):
+        discovered = _run_discovery(fm_meta.get("discover") or {})
+        for k, v in discovered.items():
+            if k not in env_vars:
+                env_vars[k] = v
+
+    # Expand variables in output path if provided
+    if output_path:
+        output_path = _expand_vars(output_path, env_vars)
+
+    # Honor front-matter output when CLI did not pass --output
+    if output is None and fm_meta and isinstance(fm_meta.get("output"), str):
+        try:
+            meta_out = _expand_vars(fm_meta["output"], env_vars)
+            if meta_out:
+                output_path = str(pathlib.Path(meta_out).resolve())
+        except Exception:
+            pass
+
+    # Honor front-matter language if provided (overrides detection for both local and cloud)
+    if fm_meta and isinstance(fm_meta.get("language"), str) and fm_meta.get("language"):
+        language = fm_meta.get("language")
+
     if output_path and pathlib.Path(output_path).exists():
         try:
             existing_code_content = pathlib.Path(output_path).read_text(encoding="utf-8")
@@ -303,7 +623,96 @@
        can_attempt_incremental = False

    try:
-
+        # Resolve post-process script from env/CLI override, then front matter, then sensible default per template
+        post_process_script: Optional[str] = None
+        prompt_body_for_script: str = prompt_content
+
+        if verbose:
+            console.print(f"[blue]LLM enabled:[/blue] {llm_enabled}")
+        try:
+            post_process_script = None
+            script_override = None
+            if env_vars:
+                script_override = env_vars.get('POST_PROCESS_PYTHON') or env_vars.get('post_process_python')
+            if not script_override:
+                script_override = os.environ.get('POST_PROCESS_PYTHON') or os.environ.get('post_process_python')
+            if script_override and str(script_override).strip():
+                expanded = _expand_vars(str(script_override), env_vars)
+                pkg_dir = pathlib.Path(__file__).parent.resolve()
+                repo_root = pathlib.Path.cwd().resolve()
+                repo_pdd_dir = (repo_root / 'pdd').resolve()
+                candidate = pathlib.Path(expanded)
+                if not candidate.is_absolute():
+                    # 1) As provided, relative to CWD
+                    as_is = (repo_root / candidate)
+                    # 2) Under repo pdd/
+                    under_repo_pdd = (repo_pdd_dir / candidate.name) if not as_is.exists() else as_is
+                    # 3) Under installed package dir
+                    under_pkg = (pkg_dir / candidate.name) if not as_is.exists() and not under_repo_pdd.exists() else as_is
+                    if as_is.exists():
+                        candidate = as_is
+                    elif under_repo_pdd.exists():
+                        candidate = under_repo_pdd
+                    elif under_pkg.exists():
+                        candidate = under_pkg
+                    else:
+                        candidate = as_is  # will fail later with not found
+                post_process_script = str(candidate.resolve())
+            elif fm_meta and isinstance(fm_meta, dict):
+                raw_script = fm_meta.get('post_process_python')
+                if isinstance(raw_script, str) and raw_script.strip():
+                    # Expand variables like $VAR and ${VAR}
+                    expanded = _expand_vars(raw_script, env_vars)
+                    pkg_dir = pathlib.Path(__file__).parent.resolve()
+                    repo_root = pathlib.Path.cwd().resolve()
+                    repo_pdd_dir = (repo_root / 'pdd').resolve()
+                    candidate = pathlib.Path(expanded)
+                    if not candidate.is_absolute():
+                        as_is = (repo_root / candidate)
+                        under_repo_pdd = (repo_pdd_dir / candidate.name) if not as_is.exists() else as_is
+                        under_pkg = (pkg_dir / candidate.name) if not as_is.exists() and not under_repo_pdd.exists() else as_is
+                        if as_is.exists():
+                            candidate = as_is
+                        elif under_repo_pdd.exists():
+                            candidate = under_repo_pdd
+                        elif under_pkg.exists():
+                            candidate = under_pkg
+                        else:
+                            candidate = as_is
+                    post_process_script = str(candidate.resolve())
+            # Fallback default: for architecture template, use built-in render_mermaid.py
+            if not post_process_script:
+                try:
+                    prompt_str = str(prompt_file)
+                    looks_like_arch_template = (
+                        (isinstance(prompt_file, str) and (
+                            prompt_str.endswith("architecture/architecture_json.prompt") or
+                            prompt_str.endswith("architecture/architecture_json") or
+                            "architecture_json.prompt" in prompt_str or
+                            "architecture/architecture_json" in prompt_str
+                        ))
+                    )
+                    looks_like_arch_output = (
+                        bool(output_path) and pathlib.Path(str(output_path)).name == 'architecture.json'
+                    )
+                    if looks_like_arch_template or looks_like_arch_output:
+                        pkg_dir = pathlib.Path(__file__).parent
+                        repo_pdd_dir = pathlib.Path.cwd() / 'pdd'
+                        if (pkg_dir / 'render_mermaid.py').exists():
+                            post_process_script = str((pkg_dir / 'render_mermaid.py').resolve())
+                        elif (repo_pdd_dir / 'render_mermaid.py').exists():
+                            post_process_script = str((repo_pdd_dir / 'render_mermaid.py').resolve())
+                except Exception:
+                    post_process_script = None
+            if verbose:
+                console.print(f"[blue]Post-process script resolved to:[/blue] {post_process_script if post_process_script else 'None'}")
+        except Exception:
+            post_process_script = None
+        # If LLM is disabled but no post-process script is provided, surface a helpful error
+        if not llm_enabled and not post_process_script:
+            console.print("[red]Error: llm: false requires 'post_process_python' to be specified in front matter.[/red]")
+            return "", was_incremental_operation, total_cost, "error"
+        if llm_enabled and can_attempt_incremental and existing_code_content is not None and original_prompt_content_for_incremental is not None:
            if verbose:
                console.print(Panel("Attempting incremental code generation...", title="[blue]Mode[/blue]", expand=False))

@@ -326,9 +735,18 @@
            if files_to_stage_for_rollback:
                git_add_files(files_to_stage_for_rollback, verbose=verbose)

+            # Preprocess both prompts: expand includes, substitute vars, then double
+            orig_proc = pdd_preprocess(original_prompt_content_for_incremental, recursive=True, double_curly_brackets=False)
+            orig_proc = _expand_vars(orig_proc, env_vars)
+            orig_proc = pdd_preprocess(orig_proc, recursive=False, double_curly_brackets=True)
+
+            new_proc = pdd_preprocess(prompt_content, recursive=True, double_curly_brackets=False)
+            new_proc = _expand_vars(new_proc, env_vars)
+            new_proc = pdd_preprocess(new_proc, recursive=False, double_curly_brackets=True)
+
            generated_code_content, was_incremental_operation, total_cost, model_name = incremental_code_generator_func(
-                original_prompt=
-                new_prompt=
+                original_prompt=orig_proc,
+                new_prompt=new_proc,
                existing_code=existing_code_content,
                language=language,
                strength=strength,
@@ -336,7 +754,7 @@
                time=time_budget,
                force_incremental=force_incremental_flag,
                verbose=verbose,
-                preprocess_prompt=
+                preprocess_prompt=False
            )

            if not was_incremental_operation:
@@ -345,43 +763,37 @@
            elif verbose:
                console.print(Panel(f"Incremental update successful. Model: {model_name}, Cost: ${total_cost:.6f}", title="[green]Incremental Success[/green]", expand=False))

-        if not was_incremental_operation: # Full generation path
+        if llm_enabled and not was_incremental_operation: # Full generation path
            if verbose:
                console.print(Panel("Performing full code generation...", title="[blue]Mode[/blue]", expand=False))

-
+            cloud_only = _env_flag_enabled("PDD_CLOUD_ONLY") or _env_flag_enabled("PDD_NO_LOCAL_FALLBACK")
+            current_execution_is_local = is_local_execution_preferred and not cloud_only

            if not current_execution_is_local:
                if verbose: console.print("Attempting cloud code generation...")
-
-                processed_prompt_for_cloud = pdd_preprocess(prompt_content, recursive=True, double_curly_brackets=
+                # Expand includes, substitute vars, then double
+                processed_prompt_for_cloud = pdd_preprocess(prompt_content, recursive=True, double_curly_brackets=False, exclude_keys=[])
+                processed_prompt_for_cloud = _expand_vars(processed_prompt_for_cloud, env_vars)
+                processed_prompt_for_cloud = pdd_preprocess(processed_prompt_for_cloud, recursive=False, double_curly_brackets=True, exclude_keys=[])
                if verbose: console.print(Panel(Text(processed_prompt_for_cloud, overflow="fold"), title="[cyan]Preprocessed Prompt for Cloud[/cyan]", expand=False))

-
-
-
-
-
-
-
-
-                    jwt_token = asyncio.run(get_jwt_token(
-                        firebase_api_key=firebase_api_key_val,
-                        github_client_id=github_client_id_val,
-                        app_name=PDD_APP_NAME
-                    ))
-                except (AuthError, NetworkError, TokenError, UserCancelledError, RateLimitError) as e:
-                    console.print(f"[yellow]Cloud authentication/token error: {e}. Falling back to local execution.[/yellow]")
-                    current_execution_is_local = True
-                except Exception as e:
-                    console.print(f"[yellow]Unexpected error during cloud authentication: {e}. Falling back to local execution.[/yellow]")
+                # Get JWT token via CloudConfig (handles both injected tokens and device flow)
+                jwt_token = CloudConfig.get_jwt_token(verbose=verbose)
+
+                if not jwt_token:
+                    if cloud_only:
+                        console.print("[red]Cloud authentication failed.[/red]")
+                        raise click.UsageError("Cloud authentication failed")
+                    console.print("[yellow]Cloud authentication failed. Falling back to local execution.[/yellow]")
                    current_execution_is_local = True

            if jwt_token and not current_execution_is_local:
                payload = {"promptContent": processed_prompt_for_cloud, "language": language, "strength": strength, "temperature": temperature, "verbose": verbose}
                headers = {"Authorization": f"Bearer {jwt_token}", "Content-Type": "application/json"}
+                cloud_url = CloudConfig.get_endpoint_url("generateCode")
                try:
-                    response = requests.post(
+                    response = requests.post(cloud_url, json=payload, headers=headers, timeout=CLOUD_REQUEST_TIMEOUT)
                    response.raise_for_status()

                    response_data = response.json()
@@ -389,57 +801,378 @@
                    total_cost = float(response_data.get("totalCost", 0.0))
                    model_name = response_data.get("modelName", "cloud_model")

-                    if
+                    # Strip markdown code fences if present (cloud API returns fenced JSON)
+                    if generated_code_content and isinstance(language, str) and language.strip().lower() == "json":
+                        cleaned = generated_code_content.strip()
+                        if cleaned.startswith("```json"):
+                            cleaned = cleaned[7:]
+                        elif cleaned.startswith("```"):
+                            cleaned = cleaned[3:]
+                        if cleaned.endswith("```"):
+                            cleaned = cleaned[:-3]
+                        generated_code_content = cleaned.strip()
+
+                    if not generated_code_content:
+                        if cloud_only:
+                            console.print("[red]Cloud execution returned no code.[/red]")
+                            raise click.UsageError("Cloud execution returned no code")
                        console.print("[yellow]Cloud execution returned no code. Falling back to local.[/yellow]")
                        current_execution_is_local = True
                    elif verbose:
                        console.print(Panel(f"Cloud generation successful. Model: {model_name}, Cost: ${total_cost:.6f}", title="[green]Cloud Success[/green]", expand=False))
                except requests.exceptions.Timeout:
+                    if cloud_only:
+                        console.print(f"[red]Cloud execution timed out ({CLOUD_REQUEST_TIMEOUT}s).[/red]")
+                        raise click.UsageError("Cloud execution timed out")
                    console.print(f"[yellow]Cloud execution timed out ({CLOUD_REQUEST_TIMEOUT}s). Falling back to local.[/yellow]")
                    current_execution_is_local = True
                except requests.exceptions.HTTPError as e:
+                    status_code = e.response.status_code if e.response else 0
                    err_content = e.response.text[:200] if e.response else "No response content"
-
-
+
+                    # Non-recoverable errors: do NOT fall back to local
+                    if status_code == 402:  # Insufficient credits
+                        try:
+                            error_data = e.response.json()
+                            current_balance = error_data.get("currentBalance", "unknown")
+                            estimated_cost = error_data.get("estimatedCost", "unknown")
+                            console.print(f"[red]Insufficient credits. Current balance: {current_balance}, estimated cost: {estimated_cost}[/red]")
+                        except Exception:
+                            console.print(f"[red]Insufficient credits: {err_content}[/red]")
+                        raise click.UsageError("Insufficient credits for cloud code generation")
+                    elif status_code == 401:  # Authentication error
+                        console.print(f"[red]Authentication failed: {err_content}[/red]")
+                        raise click.UsageError("Cloud authentication failed")
+                    elif status_code == 403:  # Authorization error (not approved)
+                        console.print(f"[red]Access denied: {err_content}[/red]")
+                        raise click.UsageError("Access denied - user not approved")
+                    elif status_code == 400:  # Validation error (e.g., empty prompt)
+                        console.print(f"[red]Invalid request: {err_content}[/red]")
+                        raise click.UsageError(f"Invalid request: {err_content}")
+                    else:
+                        # Recoverable errors (5xx, unexpected errors): fall back to local
+                        if cloud_only:
+                            console.print(f"[red]Cloud HTTP error ({status_code}): {err_content}[/red]")
+                            raise click.UsageError(f"Cloud HTTP error ({status_code}): {err_content}")
+                        console.print(f"[yellow]Cloud HTTP error ({status_code}): {err_content}. Falling back to local.[/yellow]")
+                        current_execution_is_local = True
                except requests.exceptions.RequestException as e:
+                    if cloud_only:
+                        console.print(f"[red]Cloud network error: {e}[/red]")
+                        raise click.UsageError(f"Cloud network error: {e}")
                    console.print(f"[yellow]Cloud network error: {e}. Falling back to local.[/yellow]")
                    current_execution_is_local = True
                except json.JSONDecodeError:
+                    if cloud_only:
+                        console.print("[red]Cloud returned invalid JSON.[/red]")
+                        raise click.UsageError("Cloud returned invalid JSON")
                    console.print("[yellow]Cloud returned invalid JSON. Falling back to local.[/yellow]")
                    current_execution_is_local = True

            if current_execution_is_local:
                if verbose: console.print("Executing code generator locally...")
+                # Expand includes, substitute vars, then double; pass to local generator with preprocess_prompt=False
+                local_prompt = pdd_preprocess(prompt_content, recursive=True, double_curly_brackets=False, exclude_keys=[])
+                local_prompt = _expand_vars(local_prompt, env_vars)
+                local_prompt = pdd_preprocess(local_prompt, recursive=False, double_curly_brackets=True, exclude_keys=[])
+                # Language already resolved (front matter overrides detection if present)
+                gen_language = language
+
+                # Extract output schema from front matter if available
+                output_schema = fm_meta.get("output_schema") if fm_meta else None
+
                generated_code_content, total_cost, model_name = local_code_generator_func(
-                    prompt=
-                    language=
+                    prompt=local_prompt,
+                    language=gen_language,
                    strength=strength,
                    temperature=temperature,
                    time=time_budget,
                    verbose=verbose,
-                    preprocess_prompt=
+                    preprocess_prompt=False,
+                    output_schema=output_schema,
                )
                was_incremental_operation = False
                if verbose:
                    console.print(Panel(f"Full generation successful. Model: {model_name}, Cost: ${total_cost:.6f}", title="[green]Local Success[/green]", expand=False))
-
+
+        # Optional post-process Python hook (runs after LLM when enabled, or standalone when LLM is disabled)
+        if post_process_script:
+            try:
+                python_executable = detect_host_python_executable()
+                # Choose stdin for the script: LLM output if available and enabled, else prompt body
+                stdin_payload = generated_code_content if (llm_enabled and generated_code_content is not None) else prompt_body_for_script
+                env = os.environ.copy()
+                env['PDD_LANGUAGE'] = str(language or '')
+                env['PDD_OUTPUT_PATH'] = str(output_path or '')
+                env['PDD_PROMPT_FILE'] = str(pathlib.Path(prompt_file).resolve())
+                env['PDD_LLM'] = '1' if llm_enabled else '0'
+                try:
+                    env['PDD_ENV_VARS'] = json.dumps(env_vars or {})
+                except Exception:
+                    env['PDD_ENV_VARS'] = '{}'
+                # If front matter provides args, run in argv mode with a temp input file
+                fm_args = None
+                try:
+                    # Env/CLI override for args (comma-separated or JSON list)
+                    raw_args_env = None
+                    if env_vars:
+                        raw_args_env = env_vars.get('POST_PROCESS_ARGS') or env_vars.get('post_process_args')
+                    if not raw_args_env:
+                        raw_args_env = os.environ.get('POST_PROCESS_ARGS') or os.environ.get('post_process_args')
+                    if raw_args_env:
+                        s = str(raw_args_env).strip()
+                        parsed_list = None
+                        if s.startswith('[') and s.endswith(']'):
+                            try:
+                                parsed = json.loads(s)
+                                if isinstance(parsed, list):
+                                    parsed_list = [str(a) for a in parsed]
+                            except Exception:
+                                parsed_list = None
+                        if parsed_list is None:
+                            if ',' in s:
+                                parsed_list = [part.strip() for part in s.split(',') if part.strip()]
+                            else:
+                                parsed_list = [part for part in s.split() if part]
+                        fm_args = parsed_list or None
+                    if fm_args is None:
+                        raw_args = fm_meta.get('post_process_args') if isinstance(fm_meta, dict) else None
+                        if isinstance(raw_args, list):
+                            fm_args = [str(a) for a in raw_args]
+                except Exception:
+                    fm_args = None
+                proc = None
+                temp_input_path = None
+                try:
+                    if fm_args is None:
+                        # Provide sensible default args for architecture template with render_mermaid.py
+                        try:
+                            if post_process_script and pathlib.Path(post_process_script).name == 'render_mermaid.py':
+                                if isinstance(prompt_file, str) and prompt_file.endswith('architecture/architecture_json.prompt'):
+                                    fm_args = ["{INPUT_FILE}", "{APP_NAME}", "{OUTPUT_HTML}"]
+                        except Exception:
+                            pass
+                    if fm_args:
+                        # When LLM is disabled, use the existing output file instead of creating a temp file
+                        if not llm_enabled and output_path and pathlib.Path(output_path).exists():
+                            temp_input_path = str(pathlib.Path(output_path).resolve())
+                            env['PDD_POSTPROCESS_INPUT_FILE'] = temp_input_path
+                        else:
+                            # Write payload to a temp file for scripts expecting a file path input
+                            suffix = '.json' if (isinstance(language, str) and str(language).lower().strip() == 'json') or (output_path and str(output_path).lower().endswith('.json')) else '.txt'
+                            if output_path and llm_enabled:
+                                temp_input_path = str(pathlib.Path(output_path).resolve())
+                                pathlib.Path(temp_input_path).parent.mkdir(parents=True, exist_ok=True)
+                                with open(temp_input_path, 'w', encoding='utf-8') as f:
+                                    f.write(stdin_payload or '')
+                            else:
+                                with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix=suffix, encoding='utf-8') as tf:
+                                    tf.write(stdin_payload or '')
+                                    temp_input_path = tf.name
+                            env['PDD_POSTPROCESS_INPUT_FILE'] = temp_input_path
+                        # Compute placeholder values
+                        app_name_val = (env_vars or {}).get('APP_NAME') if env_vars else None
+                        if not app_name_val:
+                            app_name_val = 'System Architecture'
+                        output_html_default = None
+                        if output_path and str(output_path).lower().endswith('.json'):
+                            output_html_default = str(pathlib.Path(output_path).with_name(f"{pathlib.Path(output_path).stem}_diagram.html").resolve())
+                        placeholder_map = {
+                            'INPUT_FILE': temp_input_path or '',
+                            'OUTPUT': str(output_path or ''),
+                            'PROMPT_FILE': str(pathlib.Path(prompt_file).resolve()),
+                            'APP_NAME': str(app_name_val),
+                            'OUTPUT_HTML': str(output_html_default or ''),
+                        }
+                        def _subst_arg(arg: str) -> str:
+                            # First expand $VARS using existing helper, then {TOKENS}
+                            expanded = _expand_vars(arg, env_vars)
+                            for key, val in placeholder_map.items():
+                                expanded = expanded.replace('{' + key + '}', val)
+                            return expanded
+                        args_list = [_subst_arg(a) for a in fm_args]
+                        if verbose:
+                            console.print(Panel(f"Post-process hook (argv)\nScript: {post_process_script}\nArgs: {args_list}", title="[blue]Post-process[/blue]", expand=False))
+                        proc = subprocess.run(
+                            [python_executable, post_process_script] + args_list,
+                            text=True,
+                            capture_output=True,
+                            timeout=300,
+                            cwd=str(pathlib.Path(post_process_script).parent),
+                            env=env
+                        )
+                    else:
+                        # Run the script with stdin payload, capture stdout as final content
+                        if verbose:
+                            console.print(Panel(f"Post-process hook (stdin)\nScript: {post_process_script}", title="[blue]Post-process[/blue]", expand=False))
+                        proc = subprocess.run(
+                            [python_executable, post_process_script],
+                            input=stdin_payload or '',
+                            text=True,
+                            capture_output=True,
+                            timeout=300,
+                            cwd=str(pathlib.Path(post_process_script).parent),
+                            env=env
+                        )
+                finally:
+                    if temp_input_path:
+                        try:
+                            # Only delete temp files, not the actual output file when llm=false
+                            if llm_enabled or not (output_path and pathlib.Path(output_path).exists() and temp_input_path == str(pathlib.Path(output_path).resolve())):
+                                os.unlink(temp_input_path)
+                        except Exception:
+                            pass
+                if proc and proc.returncode == 0:
+                    if verbose:
+                        console.print(Panel(f"Post-process success (rc=0)\nstdout: {proc.stdout[:150]}\nstderr: {proc.stderr[:150]}", title="[green]Post-process[/green]", expand=False))
+                    # Do not modify generated_code_content to preserve architecture.json
+                else:
+                    rc = getattr(proc, 'returncode', 'N/A')
+                    err = getattr(proc, 'stderr', '')
+                    console.print(f"[yellow]Post-process failed (rc={rc}). Stderr:\n{err[:500]}[/yellow]")
+            except FileNotFoundError:
+                console.print(f"[yellow]Post-process script not found: {post_process_script}. Skipping.[/yellow]")
+            except subprocess.TimeoutExpired:
+                console.print("[yellow]Post-process script timed out. Skipping.[/yellow]")
+            except Exception as e:
+                console.print(f"[yellow]Post-process script error: {e}. Skipping.[/yellow]")
        if generated_code_content is not None:
+            # Optional output_schema JSON validation before writing (only when LLM ran)
+            if llm_enabled:
+                try:
+                    if fm_meta and isinstance(fm_meta.get("output_schema"), dict):
+                        is_json_output = False
+                        if isinstance(language, str) and str(language).lower().strip() == "json":
+                            is_json_output = True
+                        elif output_path and str(output_path).lower().endswith(".json"):
+                            is_json_output = True
+                        if is_json_output:
+                            # Check if the generated content is an error message from llm_invoke
+                            if generated_code_content.strip().startswith("ERROR:"):
+                                raise click.UsageError(f"LLM generation failed: {generated_code_content}")
+
+                            parsed = json.loads(generated_code_content)
+
+                            # Fix common LLM mistake: unwrap arrays wrapped in objects
+                            # LLMs often return {"items": [...]} or {"type": "array", "items": [...]}
+                            # when the schema expects a plain array [...]
+                            output_schema = fm_meta.get("output_schema", {})
+                            if output_schema.get("type") == "array" and isinstance(parsed, dict):
+                                # Check for common wrapper patterns
+                                if "items" in parsed and isinstance(parsed["items"], list):
+                                    parsed = parsed["items"]
+                                    generated_code_content = json.dumps(parsed, indent=2)
+                                elif "data" in parsed and isinstance(parsed["data"], list):
+                                    parsed = parsed["data"]
+                                    generated_code_content = json.dumps(parsed, indent=2)
+                                elif "results" in parsed and isinstance(parsed["results"], list):
+                                    parsed = parsed["results"]
+                                    generated_code_content = json.dumps(parsed, indent=2)
+
+                            if _is_architecture_template(fm_meta):
+                                parsed, repaired = _repair_architecture_interface_types(parsed)
+                                if repaired:
+                                    generated_code_content = json.dumps(parsed, indent=2)
+                            try:
+                                import jsonschema
+                                jsonschema.validate(instance=parsed, schema=fm_meta.get("output_schema"))
+                            except ModuleNotFoundError:
+                                if verbose and not quiet:
+                                    console.print("[yellow]jsonschema not installed; skipping schema validation.[/yellow]")
+                            except Exception as ve:
+                                raise click.UsageError(f"Generated JSON does not match output_schema: {ve}")
+                except json.JSONDecodeError as jde:
+                    raise click.UsageError(f"Generated output is not valid JSON: {jde}")
+
            if output_path:
                p_output = pathlib.Path(output_path)
                p_output.parent.mkdir(parents=True, exist_ok=True)
-
+
+                # Inject architecture metadata tags for .prompt files (reverse sync)
+                final_content = generated_code_content
+                if p_output.suffix == '.prompt':
+                    try:
+                        # Check if this prompt has an architecture entry
+                        arch_entry = get_architecture_entry_for_prompt(p_output.name)
+
+                        # Only inject tags if:
+                        # 1. Architecture entry exists
+                        # 2. Content doesn't already have PDD tags (preserve manual edits)
+                        if arch_entry and not has_pdd_tags(generated_code_content):
+                            tags = generate_tags_from_architecture(arch_entry)
+                            if tags:
+                                # Prepend tags to the generated content
+                                final_content = tags + '\n\n' + generated_code_content
+                                if verbose:
+                                    console.print("[info]Injected architecture metadata tags from architecture.json[/info]")
+                    except Exception as e:
+                        # Don't fail generation if tag injection fails
+                        if verbose:
+                            console.print(f"[yellow]Warning: Could not inject architecture tags: {e}[/yellow]")
+
+                p_output.write_text(final_content, encoding="utf-8")
                if verbose or not quiet:
                    console.print(f"Generated code saved to: [green]{p_output.resolve()}[/green]")
-
-
+                # Safety net: ensure architecture HTML is generated post-write if applicable
+                try:
+                    # Prefer resolved script if available; else default for architecture outputs
+                    script_path2 = post_process_script
+                    if not script_path2:
+                        looks_like_arch_output2 = pathlib.Path(str(p_output)).name == 'architecture.json'
+                        if looks_like_arch_output2:
+                            pkg_dir2 = pathlib.Path(__file__).parent
+                            repo_pdd_dir2 = pathlib.Path.cwd() / 'pdd'
+                            if (pkg_dir2 / 'render_mermaid.py').exists():
+                                script_path2 = str((pkg_dir2 / 'render_mermaid.py').resolve())
+                            elif (repo_pdd_dir2 / 'render_mermaid.py').exists():
+                                script_path2 = str((repo_pdd_dir2 / 'render_mermaid.py').resolve())
+                    if script_path2 and pathlib.Path(script_path2).exists():
+                        app_name2 = os.environ.get('APP_NAME') or (env_vars or {}).get('APP_NAME') or 'System Architecture'
+                        out_html2 = os.environ.get('POST_PROCESS_OUTPUT') or str(p_output.with_name(f"{p_output.stem}_diagram.html").resolve())
+                        html_missing = not pathlib.Path(out_html2).exists()
+                        always_run_for_arch = pathlib.Path(str(p_output)).name == 'architecture.json'
+                        if always_run_for_arch or html_missing:
+                            try:
+                                py_exec2 = detect_host_python_executable()
+                            except Exception:
+                                py_exec2 = sys.executable
+                            if verbose:
+                                console.print(Panel(f"Safety net post-process\nScript: {script_path2}\nArgs: {[str(p_output.resolve()), app_name2, out_html2]}", title="[blue]Post-process[/blue]", expand=False))
+                            sp2 = subprocess.run([py_exec2, script_path2, str(p_output.resolve()), app_name2, out_html2],
+                                                 capture_output=True, text=True, cwd=str(pathlib.Path(script_path2).parent))
+                            if sp2.returncode == 0 and not quiet:
+                                print(f"✅ Generated: {out_html2}")
+                            elif verbose:
+                                console.print(f"[yellow]Safety net failed (rc={sp2.returncode}). stderr:\n{sp2.stderr[:300]}[/yellow]")
+                except Exception:
+                    pass
+                # Post-step now runs regardless of LLM value via the general post-process hook above.
+            elif not quiet:
+                # No destination resolved; surface the generated code directly to the console.
+                console.print(Panel(Text(generated_code_content, overflow="fold"), title="[cyan]Generated Code[/cyan]", expand=False))
+                console.print("[yellow]No output path resolved; skipping file write and stdout print.[/yellow]")
        else:
-
-
+            # If LLM was disabled and post-process ran, that's a success (no error)
+            if not llm_enabled and post_process_script:
+                if verbose or not quiet:
+                    console.print("[green]Post-process completed successfully (LLM was disabled).[/green]")
+            else:
+                console.print("[red]Error: Code generation failed. No code was produced.[/red]")
+                return "", was_incremental_operation, total_cost, model_name or "error"

+    except click.Abort:
+        # User cancelled - re-raise to stop the sync loop
+        raise
    except Exception as e:
-
-
-
-
+        if isinstance(e, click.UsageError):
+            raise
+
+        # For any other unexpected error, we should fail hard so the CLI exits non-zero
+        # Log the detailed traceback first if verbose
+        if verbose:
+            import traceback
+            console.print(traceback.format_exc())
+
+        raise click.UsageError(f"An unexpected error occurred: {e}")

-    return generated_code_content or "", was_incremental_operation, total_cost, model_name
+    return generated_code_content or "", was_incremental_operation, total_cost, model_name
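For orientation, the sketch below exercises the front-matter helpers this diff adds to pdd/code_generator_main.py (`_parse_front_matter`, `_parse_llm_bool`, `_expand_vars`). The sample prompt text, variable values, and the direct import of these private helpers are illustrative assumptions, not a documented pdd-cli API; they are shown only to clarify how the new front-matter flow (llm toggle, variable expansion, output override) fits together.

```python
# Illustrative sketch (not from the package docs): drives the new front-matter
# helpers added in 0.0.118. Importing private helpers is an assumption about
# the installed wheel and may break in future versions.
from pdd.code_generator_main import _parse_front_matter, _parse_llm_bool, _expand_vars

# Hypothetical prompt file content with YAML front matter, as the new code expects.
prompt_text = """---
llm: false
output: ${OUT_DIR}/architecture.json
post_process_python: render_mermaid.py
---
Generate the architecture JSON for $APP_NAME.
"""

meta, body = _parse_front_matter(prompt_text)
print(meta["llm"])             # False -> generation skips the LLM and only runs the post-process hook
print(_parse_llm_bool("off"))  # False: "0", "false", "no", "off" disable the LLM

env_vars = {"OUT_DIR": "build", "APP_NAME": "Demo App"}
print(_expand_vars(meta["output"], env_vars))  # build/architecture.json
print(_expand_vars(body, env_vars))            # Generate the architecture JSON for Demo App.
```

A design note recoverable from the diff itself: explicit `-e` variables and environment variables take precedence over front-matter defaults, and `llm: false` requires `post_process_python` to be set, otherwise `code_generator_main` returns an error.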