pdd-cli 0.0.55__py3-none-any.whl → 0.0.57__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

@@ -79,6 +79,27 @@ def _expand_vars(text: str, vars_map: Optional[Dict[str, str]]) -> str:
     return text
 
 
+def _parse_front_matter(text: str) -> Tuple[Optional[Dict[str, Any]], str]:
+    """Parse YAML front matter at the start of a prompt and return (meta, body)."""
+    try:
+        if not text.startswith("---\n"):
+            return None, text
+        end_idx = text.find("\n---", 4)
+        if end_idx == -1:
+            return None, text
+        fm_body = text[4:end_idx]
+        rest = text[end_idx + len("\n---"):]
+        if rest.startswith("\n"):
+            rest = rest[1:]
+        import yaml as _yaml
+        meta = _yaml.safe_load(fm_body) or {}
+        if not isinstance(meta, dict):
+            meta = {}
+        return meta, rest
+    except Exception:
+        return None, text
+
+
 def get_git_content_at_ref(file_path: str, git_ref: str = "HEAD") -> Optional[str]:
     """Gets the content of the file as it was at the specified git_ref."""
     abs_file_path = pathlib.Path(file_path).resolve()
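
The new parser only activates when a prompt begins with a `---` line. A minimal sketch of how it behaves; the sample prompt text and keys below are illustrative, not taken from the package:

    # Illustrative only: exercising _parse_front_matter on a hypothetical prompt.
    sample_prompt = (
        "---\n"
        "language: python\n"
        "output: src/hello.py\n"
        "---\n"
        "% Write a hello-world module.\n"
    )
    meta, body = _parse_front_matter(sample_prompt)
    # meta -> {"language": "python", "output": "src/hello.py"}
    # body -> "% Write a hello-world module.\n"
    # Prompts without a leading "---" block are returned unchanged: (None, text).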
@@ -184,8 +205,13 @@ def code_generator_main(
         quiet=quiet,
         command="generate",
         command_options=command_options,
+        context_override=ctx.obj.get('context')
     )
     prompt_content = input_strings["prompt_file"]
+    # Phase-2 templates: parse front matter metadata
+    fm_meta, body = _parse_front_matter(prompt_content)
+    if fm_meta:
+        prompt_content = body
     # Determine final output path: if user passed a directory, use resolved file path
     resolved_output = output_file_paths.get("output")
     if output is None:
@@ -211,10 +237,108 @@ def code_generator_main(
     existing_code_content: Optional[str] = None
     original_prompt_content_for_incremental: Optional[str] = None
 
+    # Merge -e vars with front-matter defaults; validate required
+    if env_vars is None:
+        env_vars = {}
+    if fm_meta and isinstance(fm_meta.get("variables"), dict):
+        for k, spec in (fm_meta["variables"].items()):
+            if isinstance(spec, dict):
+                if k not in env_vars and "default" in spec:
+                    env_vars[k] = str(spec["default"])
+                # if scalar default allowed, ignore for now
+        missing = [k for k, spec in fm_meta["variables"].items() if isinstance(spec, dict) and spec.get("required") and k not in env_vars]
+        if missing:
+            console.print(f"[error]Missing required variables: {', '.join(missing)}")
+            return "", False, 0.0, "error"
+
+    # Execute optional discovery from front matter to populate env_vars without overriding explicit -e values
+    def _run_discovery(discover_cfg: Dict[str, Any]) -> Dict[str, str]:
+        results: Dict[str, str] = {}
+        try:
+            if not discover_cfg:
+                return results
+            enabled = discover_cfg.get("enabled", False)
+            if not enabled:
+                return results
+            root = discover_cfg.get("root", ".")
+            patterns = discover_cfg.get("patterns", []) or []
+            exclude = discover_cfg.get("exclude", []) or []
+            max_per = int(discover_cfg.get("max_per_pattern", 0) or 0)
+            max_total = int(discover_cfg.get("max_total", 0) or 0)
+            root_path = pathlib.Path(root).resolve()
+            seen: List[str] = []
+            def _match_one(patterns_list: List[str]) -> List[str]:
+                matches: List[str] = []
+                for pat in patterns_list:
+                    globbed = list(root_path.rglob(pat))
+                    for p in globbed:
+                        if any(p.match(ex) for ex in exclude):
+                            continue
+                        sp = str(p.resolve())
+                        if sp not in matches:
+                            matches.append(sp)
+                        if max_per and len(matches) >= max_per:
+                            matches = matches[:max_per]
+                            break
+                return matches
+            # If a mapping 'set' is provided, compute per-variable results
+            set_map = discover_cfg.get("set") or {}
+            if isinstance(set_map, dict) and set_map:
+                for var_name, spec in set_map.items():
+                    if var_name in env_vars:
+                        continue  # don't override explicit -e
+                    v_patterns = spec.get("patterns", []) if isinstance(spec, dict) else []
+                    v_exclude = spec.get("exclude", []) if isinstance(spec, dict) else []
+                    save_exclude = exclude
+                    try:
+                        if v_exclude:
+                            exclude = v_exclude
+                        matches = _match_one(v_patterns or patterns)
+                    finally:
+                        exclude = save_exclude
+                    if matches:
+                        results[var_name] = ",".join(matches)
+                        seen.extend(matches)
+            # Fallback: populate SCAN_FILES and SCAN metadata
+            if not results:
+                files = _match_one(patterns)
+                if max_total and len(files) > max_total:
+                    files = files[:max_total]
+                if files:
+                    results["SCAN_FILES"] = ",".join(files)
+            # Always set root/patterns helpers
+            if root:
+                results.setdefault("SCAN_ROOT", str(root_path))
+            if patterns:
+                results.setdefault("SCAN_PATTERNS", ",".join(patterns))
+        except Exception as e:
+            if verbose and not quiet:
+                console.print(f"[yellow]Discovery skipped due to error: {e}[/yellow]")
+        return results
+
+    if fm_meta and isinstance(fm_meta.get("discover"), dict):
+        discovered = _run_discovery(fm_meta.get("discover") or {})
+        for k, v in discovered.items():
+            if k not in env_vars:
+                env_vars[k] = v
+
     # Expand variables in output path if provided
     if output_path:
         output_path = _expand_vars(output_path, env_vars)
 
+    # Honor front-matter output when CLI did not pass --output
+    if output is None and fm_meta and isinstance(fm_meta.get("output"), str):
+        try:
+            meta_out = _expand_vars(fm_meta["output"], env_vars)
+            if meta_out:
+                output_path = str(pathlib.Path(meta_out).resolve())
+        except Exception:
+            pass
+
+    # Honor front-matter language if provided (overrides detection for both local and cloud)
+    if fm_meta and isinstance(fm_meta.get("language"), str) and fm_meta.get("language"):
+        language = fm_meta.get("language")
+
     if output_path and pathlib.Path(output_path).exists():
         try:
             existing_code_content = pathlib.Path(output_path).read_text(encoding="utf-8")
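
Taken together, these blocks let a template declare its inputs and discover supporting files. A hypothetical front matter that would drive this code, shown as the dict _parse_front_matter would return; the key names `variables` and `discover` come from the diff, the concrete values are invented for illustration:

    fm_meta = {
        "variables": {
            "MODULE_NAME": {"required": True},      # must be supplied via -e MODULE_NAME=...
            "STYLE_GUIDE": {"default": "pep8"},     # filled in when -e does not set it
        },
        "discover": {
            "enabled": True,
            "root": "src",
            "patterns": ["**/*.py"],
            "exclude": ["**/test_*.py"],
            "max_total": 50,
        },
    }
    # With `-e MODULE_NAME=billing` on the command line, the merge yields
    # env_vars == {"MODULE_NAME": "billing", "STYLE_GUIDE": "pep8"}; discovery then
    # adds SCAN_FILES/SCAN_ROOT/SCAN_PATTERNS without overriding either value.
    # Omitting MODULE_NAME aborts with "Missing required variables: MODULE_NAME".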
@@ -461,9 +585,11 @@ def code_generator_main(
         local_prompt = pdd_preprocess(prompt_content, recursive=True, double_curly_brackets=False, exclude_keys=[])
         local_prompt = _expand_vars(local_prompt, env_vars)
         local_prompt = pdd_preprocess(local_prompt, recursive=False, double_curly_brackets=True, exclude_keys=[])
+        # Language already resolved (front matter overrides detection if present)
+        gen_language = language
         generated_code_content, total_cost, model_name = local_code_generator_func(
             prompt=local_prompt,
-            language=language,
+            language=gen_language,
             strength=strength,
             temperature=temperature,
             time=time_budget,
@@ -475,14 +601,37 @@ def code_generator_main(
             console.print(Panel(f"Full generation successful. Model: {model_name}, Cost: ${total_cost:.6f}", title="[green]Local Success[/green]", expand=False))
 
     if generated_code_content is not None:
+        # Optional output_schema JSON validation before writing
+        try:
+            if fm_meta and isinstance(fm_meta.get("output_schema"), dict):
+                is_json_output = False
+                if isinstance(language, str) and str(language).lower().strip() == "json":
+                    is_json_output = True
+                elif output_path and str(output_path).lower().endswith(".json"):
+                    is_json_output = True
+                if is_json_output:
+                    parsed = json.loads(generated_code_content)
+                    try:
+                        import jsonschema  # type: ignore
+                        jsonschema.validate(instance=parsed, schema=fm_meta.get("output_schema"))
+                    except ModuleNotFoundError:
+                        if verbose and not quiet:
+                            console.print("[yellow]jsonschema not installed; skipping schema validation.[/yellow]")
+                    except Exception as ve:
+                        raise click.UsageError(f"Generated JSON does not match output_schema: {ve}")
+        except json.JSONDecodeError as jde:
+            raise click.UsageError(f"Generated output is not valid JSON: {jde}")
+
         if output_path:
             p_output = pathlib.Path(output_path)
             p_output.parent.mkdir(parents=True, exist_ok=True)
             p_output.write_text(generated_code_content, encoding="utf-8")
            if verbose or not quiet:
                 console.print(f"Generated code saved to: [green]{p_output.resolve()}[/green]")
-        elif not quiet: # No output path, print to console if not quiet
-            console.print(Panel(Text(generated_code_content, overflow="fold"), title="[cyan]Generated Code[/cyan]", expand=True))
+        elif not quiet:
+            # No destination resolved; surface the generated code directly to the console.
+            console.print(Panel(Text(generated_code_content, overflow="fold"), title="[cyan]Generated Code[/cyan]", expand=False))
+            console.print("[yellow]No output path resolved; skipping file write and stdout print.[/yellow]")
     else:
         console.print("[red]Error: Code generation failed. No code was produced.[/red]")
         return "", was_incremental_operation, total_cost, model_name or "error"
pdd/conflicts_main.py CHANGED
@@ -33,7 +33,8 @@ def conflicts_main(ctx: click.Context, prompt1: str, prompt2: str, output: Optio
         force=ctx.obj.get('force', False),
         quiet=ctx.obj.get('quiet', False),
         command="conflicts",
-        command_options=command_options
+        command_options=command_options,
+        context_override=ctx.obj.get('context')
     )
 
     # Load input files
@@ -91,4 +92,4 @@ def conflicts_main(ctx: click.Context, prompt1: str, prompt2: str, output: Optio
     except Exception as e:
         if not ctx.obj.get('quiet', False):
             rprint(f"[bold red]Error:[/bold red] {str(e)}")
-        sys.exit(1)
+        sys.exit(1)
pdd/construct_paths.py CHANGED
@@ -56,6 +56,23 @@ def _load_pddrc_config(pddrc_path: Path) -> Dict[str, Any]:
     except Exception as e:
         raise ValueError(f"Error loading .pddrc: {e}")
 
+def list_available_contexts(start_path: Optional[Path] = None) -> list[str]:
+    """Return sorted context names from the nearest .pddrc.
+
+    - Searches upward from `start_path` (or CWD) for a `.pddrc` file.
+    - If found, loads and validates it, then returns sorted context names.
+    - If no `.pddrc` exists, returns ["default"].
+    - Propagates ValueError for malformed `.pddrc` to allow callers to render
+      helpful errors.
+    """
+    pddrc = _find_pddrc_file(start_path)
+    if not pddrc:
+        return ["default"]
+    config = _load_pddrc_config(pddrc)
+    contexts = config.get("contexts", {})
+    names = sorted(contexts.keys()) if isinstance(contexts, dict) else []
+    return names or ["default"]
+
 
 def _detect_context(current_dir: Path, config: Dict[str, Any], context_override: Optional[str] = None) -> Optional[str]:
     """Detect the appropriate context based on current directory path."""
@@ -193,7 +210,9 @@ def _is_known_language(language_name: str) -> bool:
     builtin_languages = {
         'python', 'javascript', 'typescript', 'java', 'cpp', 'c', 'go', 'ruby', 'rust',
         'kotlin', 'swift', 'csharp', 'php', 'scala', 'r', 'lua', 'perl', 'bash', 'shell',
-        'powershell', 'sql', 'prompt', 'html', 'css', 'makefile'
+        'powershell', 'sql', 'prompt', 'html', 'css', 'makefile',
+        # Common data and config formats for architecture prompts and configs
+        'json', 'jsonl', 'yaml', 'yml', 'toml', 'ini'
     }
 
     pdd_path_str = os.getenv('PDD_PATH')
@@ -619,7 +638,9 @@ def construct_paths(
         'kotlin': '.kt', 'swift': '.swift', 'csharp': '.cs', 'php': '.php',
         'scala': '.scala', 'r': '.r', 'lua': '.lua', 'perl': '.pl', 'bash': '.sh',
         'shell': '.sh', 'powershell': '.ps1', 'sql': '.sql', 'html': '.html', 'css': '.css',
-        'prompt': '.prompt', 'makefile': ''
+        'prompt': '.prompt', 'makefile': '',
+        # Common data/config formats
+        'json': '.json', 'jsonl': '.jsonl', 'yaml': '.yaml', 'yml': '.yml', 'toml': '.toml', 'ini': '.ini'
     }
     file_extension = builtin_ext_map.get(language.lower(), f".{language.lower()}" if language else '')
 
@@ -31,7 +31,8 @@ def context_generator_main(ctx: click.Context, prompt_file: str, code_file: str,
         force=ctx.obj.get('force', False),
         quiet=ctx.obj.get('quiet', False),
         command="example",
-        command_options=command_options
+        command_options=command_options,
+        context_override=ctx.obj.get('context')
     )
 
     # Load input files
pdd/crash_main.py CHANGED
@@ -77,7 +77,8 @@ def crash_main(
         force=force,
         quiet=quiet,
         command="crash",
-        command_options=command_options
+        command_options=command_options,
+        context_override=ctx.obj.get('context')
     )
 
     prompt_content = input_strings["prompt_file"]
@@ -177,4 +178,4 @@ def crash_main(
     except Exception as e:
         if not quiet:
             rprint(f"[bold red]An unexpected error occurred:[/bold red] {str(e)}")
-        sys.exit(1)
+        sys.exit(1)
@@ -30,6 +30,7 @@ Groovy,//,.groovy
 Dart,//,.dart
 F#,//,.fs
 YAML,#,.yml
+YAML,#,.yaml
 JSON,del,.json
 JSONL,del,.jsonl
 XML,"<!-- -->",.xml
@@ -60,4 +61,6 @@ LLM,del,.prompt
 prompt,del,.prompt
 TOML,#,.toml
 Log,del,.log
-reStructuredText,del,.rst
+reStructuredText,del,.rst
+Text,del,.txt
+INI,;,.ini
pdd/detect_change_main.py CHANGED
@@ -44,7 +44,8 @@ def detect_change_main(
         force=ctx.obj.get('force', False),
         quiet=ctx.obj.get('quiet', False),
         command="detect",
-        command_options=command_options
+        command_options=command_options,
+        context_override=ctx.obj.get('context')
     )
 
     # Get change description content
@@ -100,4 +101,4 @@ def detect_change_main(
     except Exception as e:
         if not ctx.obj.get('quiet', False):
             rprint(f"[bold red]Error:[/bold red] {str(e)}")
-        sys.exit(1)
+        sys.exit(1)
pdd/fix_main.py CHANGED
@@ -97,7 +97,8 @@ def fix_main(
         quiet=ctx.obj.get('quiet', False),
         command="fix",
         command_options=command_options,
-        create_error_file=loop # Only create error file if in loop mode
+        create_error_file=loop, # Only create error file if in loop mode
+        context_override=ctx.obj.get('context')
     )
 
     # Get parameters from context
@@ -296,4 +297,4 @@ def fix_main(
         # Print other errors normally, escaping the error string
         from rich.markup import escape # Ensure escape is imported
         rprint(f"[bold red]Error:[/bold red] {escape(str(e))}")
-        sys.exit(1)
+        sys.exit(1)
@@ -204,6 +204,7 @@ def fix_verification_main(
         quiet=quiet,
         command="verify",
         command_options=command_options,
+        context_override=ctx.obj.get('context')
     )
     output_code_path = output_file_paths.get("output_code")
     output_results_path = output_file_paths.get("output_results")
pdd/preprocess.py CHANGED
@@ -48,7 +48,9 @@ def process_backtick_includes(text: str, recursive: bool) -> str:
                 return f"```{content}```"
             except FileNotFoundError:
                 console.print(f"[bold red]Warning:[/bold red] File not found: {file_path}")
-                return match.group(0)
+                # First pass (recursive=True): leave the tag so a later env expansion can resolve it
+                # Second pass (recursive=False): replace with a visible placeholder
+                return match.group(0) if recursive else f"```[File not found: {file_path}]```"
             except Exception as e:
                 console.print(f"[bold red]Error processing include:[/bold red] {str(e)}")
                 return f"```[Error processing include: {file_path}]```"
@@ -62,9 +64,9 @@ def process_backtick_includes(text: str, recursive: bool) -> str:
     def process_xml_tags(text: str, recursive: bool) -> str:
         text = process_pdd_tags(text)
         text = process_include_tags(text, recursive)
-
-        text = process_shell_tags(text)
-        text = process_web_tags(text)
+        text = process_include_many_tags(text, recursive)
+        text = process_shell_tags(text, recursive)
+        text = process_web_tags(text, recursive)
         return text
 
     def process_include_tags(text: str, recursive: bool) -> str:
@@ -81,7 +83,9 @@ def process_include_tags(text: str, recursive: bool) -> str:
                 return content
             except FileNotFoundError:
                 console.print(f"[bold red]Warning:[/bold red] File not found: {file_path}")
-                return f"[File not found: {file_path}]"
+                # First pass (recursive=True): leave the tag so a later env expansion can resolve it
+                # Second pass (recursive=False): replace with a visible placeholder
+                return match.group(0) if recursive else f"[File not found: {file_path}]"
             except Exception as e:
                 console.print(f"[bold red]Error processing include:[/bold red] {str(e)}")
                 return f"[Error processing include: {file_path}]"
@@ -101,10 +105,13 @@ def process_pdd_tags(text: str) -> str:
             return "This is a test "
         return processed
 
-    def process_shell_tags(text: str) -> str:
+    def process_shell_tags(text: str, recursive: bool) -> str:
         pattern = r'<shell>(.*?)</shell>'
         def replace_shell(match):
             command = match.group(1).strip()
+            if recursive:
+                # Defer execution until after env var expansion
+                return match.group(0)
             console.print(f"Executing shell command: [cyan]{escape(command)}[/cyan]")
             try:
                 result = subprocess.run(command, shell=True, check=True, capture_output=True, text=True)
@@ -118,10 +125,13 @@ def process_shell_tags(text: str) -> str:
                 return f"[Shell execution error: {str(e)}]"
         return re.sub(pattern, replace_shell, text, flags=re.DOTALL)
 
-    def process_web_tags(text: str) -> str:
+    def process_web_tags(text: str, recursive: bool) -> str:
         pattern = r'<web>(.*?)</web>'
         def replace_web(match):
             url = match.group(1).strip()
+            if recursive:
+                # Defer network operations until after env var expansion
+                return match.group(0)
            console.print(f"Scraping web content from: [cyan]{url}[/cyan]")
             try:
                 try:
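
Deferring shell, web, and include expansion on the recursive pass matters because code_generator_main (see the earlier hunk in this diff) runs preprocessing in two passes with variable expansion in between. A compressed sketch of that ordering, using the calls shown in this diff:

    # Pass 1: recursive include resolution; <shell>, <web>, and unresolved paths are left in place.
    local_prompt = pdd_preprocess(prompt_content, recursive=True, double_curly_brackets=False, exclude_keys=[])
    # Variables from -e flags, front-matter defaults, and discovery are substituted here.
    local_prompt = _expand_vars(local_prompt, env_vars)
    # Pass 2: the tags now reference concrete paths/commands, so they are executed or replaced.
    local_prompt = pdd_preprocess(local_prompt, recursive=False, double_curly_brackets=True, exclude_keys=[])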
@@ -144,6 +154,34 @@ def process_web_tags(text: str) -> str:
                 return f"[Web scraping error: {str(e)}]"
         return re.sub(pattern, replace_web, text, flags=re.DOTALL)
 
+    def process_include_many_tags(text: str, recursive: bool) -> str:
+        """Process <include-many> blocks whose inner content is a comma- or newline-separated
+        list of file paths (typically provided via variables after env expansion)."""
+        pattern = r'<include-many>(.*?)</include-many>'
+        def replace_many(match):
+            inner = match.group(1)
+            if recursive:
+                # Wait for env expansion to materialize the list
+                return match.group(0)
+            # Split by newlines or commas
+            raw_items = [s.strip() for part in inner.split('\n') for s in part.split(',')]
+            paths = [p for p in raw_items if p]
+            contents: list[str] = []
+            for p in paths:
+                try:
+                    full_path = get_file_path(p)
+                    console.print(f"Including (many): [cyan]{full_path}[/cyan]")
+                    with open(full_path, 'r', encoding='utf-8') as fh:
+                        contents.append(fh.read())
+                except FileNotFoundError:
+                    console.print(f"[bold red]Warning:[/bold red] File not found: {p}")
+                    contents.append(f"[File not found: {p}]")
+                except Exception as e:
+                    console.print(f"[bold red]Error processing include-many:[/bold red] {str(e)}")
+                    contents.append(f"[Error processing include: {p}]")
+            return "\n".join(contents)
+        return re.sub(pattern, replace_many, text, flags=re.DOTALL)
+
     def double_curly(text: str, exclude_keys: Optional[List[str]] = None) -> str:
         if exclude_keys is None:
             exclude_keys = []
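
The <include-many> tag pairs naturally with the discovery variables added in code_generator_main: the recursive pass leaves the tag alone, variable expansion fills in the file list, and the second pass inlines every file. A minimal sketch of the splitting step; the file names are placeholders:

    inner = "src/a.py, src/b.py\nsrc/c.py"         # e.g. the expanded value of a SCAN_FILES variable
    raw_items = [s.strip() for part in inner.split('\n') for s in part.split(',')]
    paths = [p for p in raw_items if p]             # -> ['src/a.py', 'src/b.py', 'src/c.py']
    # On the non-recursive pass each path is read and the block is replaced by the joined contents.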
pdd/preprocess_main.py CHANGED
@@ -33,6 +33,7 @@ def preprocess_main(
         quiet=ctx.obj.get("quiet", False),
         command="preprocess",
         command_options=command_options,
+        context_override=ctx.obj.get('context')
     )
 
     # Load prompt file
@@ -76,4 +77,4 @@ def preprocess_main(
     except Exception as e:
         if not ctx.obj.get("quiet", False):
             rprint(f"[bold red]Error during preprocessing:[/bold red] {e}")
-        sys.exit(1)
+        sys.exit(1)
@@ -1,11 +1,17 @@
-% You are an expert Software Engineer. Your goal is to extract the closest matching substring described by the prompt's output to be outputed in JSON format.
-
-% Here is the llm_output to parse: <llm_output>{llm_output}</llm_output>
-
-% When extracting the closest matching substring from llm_output, consider and correct the following for the extracted code:
-- Should be a substring of prompt_file.
-- Should be a a substring that closely matches code_str in content.
-
-% Output a JSON object with the following keys:
-- 'explanation': String explanation of why this prompt_line matches the code_str and explain any errors detected in the code.
-- 'prompt_line': String containing the closest matching verbatim substring of the prompt_file that matches code_str in content. This is not the line number.
+% You are an extremely literal parser. The LLM output below follows this structure:
+% <analysis> ... </analysis>
+% <verbatim_prompt_line>
+% <<EXACT SUBSTRING FROM PROMPT_FILE>>
+% </verbatim_prompt_line>
+%
+% Task
+% • Extract the text between <verbatim_prompt_line> and </verbatim_prompt_line> (if present).
+% Output ONLY JSON with the keys:
+% - "prompt_line": the exact substring between the tags. Do not alter whitespace or characters except for JSON escaping.
+% - "explanation": short confirmation (<=120 characters) that the substring was copied verbatim, or describe why extraction failed.
+% • If the tags are missing or empty, set "prompt_line" to "" and explain the issue.
+% • Do not wrap the JSON in Markdown. No commentary, no additional keys.
+%
+<llm_output>
+{llm_output}
+</llm_output>
@@ -1,30 +1,33 @@
-% Imagine you're a an expert Python Software Engineer. Your goal is to find the part of the .prompt file. It will take in three arguments, the text of the .prompt file, the text of the code file, and the line that the debugger is on in the code file. Your task is to find the equivalent line in the .prompt file that matches with the line in the code file.
-
-% Here are the inputs and outputs of the prompt:
-Input:
-`code_file` (str) - A string that contains the text of the code file.
-`code_str` (str) - A substring of code_file that represents the line that the debugger is on in the code_file.
-`prompt_file` (str) - A string that contains the text of the .prompt file.
-Output:
-`prompt_line` (str) - An string that represents the equivalent line in the .prompt file that matches with the code_str line in the code file.
-
-% Here is the code_file to reference:
-
+% You are a highly accurate Python Software Engineer. Your job is to locate the exact line (or smallest excerpt) in the prompt file that produced the current line in the generated code.
+
+% Inputs
+code_file (str) : full contents of the generated code file
+code_str (str) : the single line from the code file currently under inspection
+prompt_file (str) : full contents of the originating prompt file
+
+% Rules
+1. Identify the minimal substring in prompt_file whose wording most directly corresponds to code_str. Copy it VERBATIM.
+2. Do not paraphrase, summarize, or reformat; the substring must appear exactly in prompt_file.
+3. If multiple lines apply, choose the most specific line or snippet (prefer the shortest exact match).
+4. Provide a short explanation of why the substring matches code_str.
+
+% Output format (MUST follow exactly; no additional text)
+<analysis>
+Explain your reasoning here in plain text (no JSON). Reference the file sections you compared.
+</analysis>
+<verbatim_prompt_line>
+<<PASTE THE EXACT SUBSTRING FROM prompt_file HERE>>
+</verbatim_prompt_line>
+
+% Reference materials
 <code_file>
-{CODE_FILE}
+{CODE_FILE}
 </code_file>
 
-% Here is the code_str to reference:
-
 <code_str>
-{CODE_STR}
+{CODE_STR}
 </code_str>
 
-% Here is the prompt_file to reference:
-
 <prompt_file>
-{PROMPT_FILE}
+{PROMPT_FILE}
 </prompt_file>
-
-% To generate the prompt_line, find a substring of prompt_file that matches code_str, which is a substring of code_file.
-
pdd/split_main.py CHANGED
@@ -53,7 +53,8 @@ def split_main(
         force=ctx.obj.get('force', False),
         quiet=ctx.obj.get('quiet', False),
         command="split",
-        command_options=command_options
+        command_options=command_options,
+        context_override=ctx.obj.get('context')
     )
 
     # Get parameters from context
@@ -113,4 +114,4 @@ def split_main(
         elif isinstance(e, ValueError):
             rprint("[yellow]Hint: Check if input files have valid content.[/yellow]")
 
-        sys.exit(1)
+        sys.exit(1)