pdd-cli 0.0.23-py3-none-any.whl → 0.0.25-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.

Potentially problematic release: this version of pdd-cli might be problematic.

Files changed (49)
  1. pdd/__init__.py +7 -1
  2. pdd/bug_main.py +21 -3
  3. pdd/bug_to_unit_test.py +16 -5
  4. pdd/change.py +2 -1
  5. pdd/change_main.py +407 -189
  6. pdd/cli.py +853 -301
  7. pdd/code_generator.py +2 -1
  8. pdd/conflicts_in_prompts.py +2 -1
  9. pdd/construct_paths.py +377 -222
  10. pdd/context_generator.py +2 -1
  11. pdd/continue_generation.py +3 -2
  12. pdd/crash_main.py +55 -20
  13. pdd/data/llm_model.csv +8 -8
  14. pdd/detect_change.py +2 -1
  15. pdd/fix_code_loop.py +465 -160
  16. pdd/fix_code_module_errors.py +7 -4
  17. pdd/fix_error_loop.py +9 -9
  18. pdd/fix_errors_from_unit_tests.py +207 -365
  19. pdd/fix_main.py +31 -4
  20. pdd/fix_verification_errors.py +285 -0
  21. pdd/fix_verification_errors_loop.py +975 -0
  22. pdd/fix_verification_main.py +412 -0
  23. pdd/generate_output_paths.py +427 -183
  24. pdd/generate_test.py +3 -2
  25. pdd/increase_tests.py +2 -2
  26. pdd/llm_invoke.py +18 -8
  27. pdd/pdd_completion.zsh +38 -1
  28. pdd/preprocess.py +3 -3
  29. pdd/process_csv_change.py +466 -154
  30. pdd/prompts/extract_prompt_split_LLM.prompt +7 -4
  31. pdd/prompts/extract_prompt_update_LLM.prompt +11 -5
  32. pdd/prompts/extract_unit_code_fix_LLM.prompt +2 -2
  33. pdd/prompts/find_verification_errors_LLM.prompt +25 -0
  34. pdd/prompts/fix_code_module_errors_LLM.prompt +29 -0
  35. pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +5 -5
  36. pdd/prompts/fix_verification_errors_LLM.prompt +20 -0
  37. pdd/prompts/generate_test_LLM.prompt +9 -3
  38. pdd/prompts/split_LLM.prompt +3 -3
  39. pdd/prompts/update_prompt_LLM.prompt +3 -3
  40. pdd/split.py +13 -12
  41. pdd/split_main.py +22 -13
  42. pdd/trace_main.py +7 -0
  43. pdd/xml_tagger.py +2 -1
  44. {pdd_cli-0.0.23.dist-info → pdd_cli-0.0.25.dist-info}/METADATA +4 -4
  45. {pdd_cli-0.0.23.dist-info → pdd_cli-0.0.25.dist-info}/RECORD +49 -44
  46. {pdd_cli-0.0.23.dist-info → pdd_cli-0.0.25.dist-info}/WHEEL +1 -1
  47. {pdd_cli-0.0.23.dist-info → pdd_cli-0.0.25.dist-info}/entry_points.txt +0 -0
  48. {pdd_cli-0.0.23.dist-info → pdd_cli-0.0.25.dist-info}/licenses/LICENSE +0 -0
  49. {pdd_cli-0.0.23.dist-info → pdd_cli-0.0.25.dist-info}/top_level.txt +0 -0
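
To double-check which build is actually present after upgrading, Python's standard-library importlib.metadata can query the installed distribution. This is a quick sanity check, not part of pdd-cli itself; "pdd-cli" is the distribution name from the wheels above, and the expected output is inferred from this diff's versions:

    from importlib.metadata import version, PackageNotFoundError

    # Look up the installed pdd-cli distribution by its PyPI name.
    try:
        print(version("pdd-cli"))  # expect "0.0.25" once the new wheel is installed
    except PackageNotFoundError:
        print("pdd-cli is not installed in this environment")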
pdd/llm_invoke.py CHANGED
@@ -14,7 +14,7 @@ Usage:
      # result is a dict with keys: 'result', 'cost', 'model_name'
  
  Environment:
- - PDD_MODEL_DEFAULT: if set, used as the base model name. Otherwise defaults to "gpt-4o-mini".
+ - PDD_MODEL_DEFAULT: if set, used as the base model name. Otherwise defaults to "gpt-4.1-nano".
  - PDD_PATH: if set, models are loaded from $PDD_PATH/data/llm_model.csv; otherwise from ./data/llm_model.csv.
  - Models that require an API key will check the corresponding environment variable (name provided in the CSV).
  """
@@ -25,6 +25,7 @@ import json
  
  from pydantic import BaseModel, Field
  from rich import print as rprint
+ from rich.errors import MarkupError
  
  # Langchain core and community imports
  from langchain_core.prompts import PromptTemplate, ChatPromptTemplate
@@ -218,10 +219,10 @@ def create_llm_instance(selected_model, temperature, handler):
                           openai_api_key=api_key, callbacks=[handler],
                           openai_api_base=base_url)
      else:
-         if model_name.startswith('o') and 'mini' not in model_name:
+         if model_name.startswith('o'):
              llm = ChatOpenAI(model=model_name, temperature=temperature,
                               openai_api_key=api_key, callbacks=[handler],
-                              reasoning_effort='high')
+                              reasoning={"effort": "high", "summary": "auto"})
          else:
              llm = ChatOpenAI(model=model_name, temperature=temperature,
                               openai_api_key=api_key, callbacks=[handler])
@@ -301,7 +302,7 @@ def llm_invoke(prompt, input_json, strength, temperature, verbose=False, output_
          raise ValueError("Input JSON must be a dictionary.")
  
      set_llm_cache(SQLiteCache(database_path=".langchain.db"))
-     base_model_name = os.environ.get('PDD_MODEL_DEFAULT', 'gpt-4o-mini')
+     base_model_name = os.environ.get('PDD_MODEL_DEFAULT', 'gpt-4.1-nano')
      models = load_models()
  
      try:
@@ -326,7 +327,6 @@ def llm_invoke(prompt, input_json, strength, temperature, verbose=False, output_
      llm = create_llm_instance(model, temperature, handler)
      if output_pydantic:
          if model.structured_output:
-             llm.cache = False  # TODO: remove this fix once langchain cache is fixed https://github.com/langchain-ai/langchain/issues/29003
              llm = llm.with_structured_output(output_pydantic)
              chain = prompt_template | llm
          else:
@@ -348,12 +348,22 @@ def llm_invoke(prompt, input_json, strength, temperature, verbose=False, output_
      rprint(f"Strength used: {strength}")
      rprint(f"Temperature used: {temperature}")
      try:
-         rprint(f"Input JSON: {str(input_json)}")  # Use str() instead of json.dumps()
+         # Try printing with rich formatting first
+         rprint(f"Input JSON: {str(input_json)}")
+     except MarkupError:
+         # Fallback to standard print if rich markup fails
+         print(f"Input JSON: {str(input_json)}")
      except Exception:
-         rprint(f"Input JSON: {input_json}")
+         print(f"Input JSON: {input_json}")
      if output_pydantic:
          rprint(f"Output Pydantic format: {output_pydantic}")
-     rprint(f"Result: {result_output}")
+     try:
+         # Try printing with rich formatting first
+         rprint(f"Result: {result_output}")
+     except MarkupError as me:
+         # Fallback to standard print if rich markup fails
+         print(f"[bold yellow]Warning:[/bold yellow] Failed to render result with rich markup: {me}")
+         print(f"Raw Result: {str(result_output)}")  # Use standard print
  
      return {'result': result_output, 'cost': cost, 'model_name': model.model}
pdd/pdd_completion.zsh CHANGED
@@ -1,5 +1,35 @@
  #compdef pdd
  
+ ##
+ # ZSH Completion for PDD CLI (Prompt-Driven Development)
+ #
+ # Save this file as "pdd_completion.zsh" and source it from your ~/.zshrc:
+ #   source /path/to/pdd_completion.zsh
+ #
+ # The script will handle completion initialization automatically.
+ #
+ # After installation, typing:
+ #   pdd <Tab>
+ # will offer completions for subcommands and options as described in the PDD CLI README.
+ ##
+ 
+ # First, make sure we're using zsh
+ if [ -z "$ZSH_VERSION" ]; then
+   echo >&2 "pdd completion requires zsh."
+   return 1
+ fi
+ 
+ # Add this directory to fpath so ZSH can find our completion function
+ script_dir=${0:A:h}
+ fpath=($script_dir $fpath)
+ 
+ # Check if we need to initialize completion system
+ # Use command -v to check if compdef function exists
+ if ! command -v compdef >/dev/null 2>&1; then
+   autoload -U compinit
+   compinit
+ fi
+ 
  ##
  # ZSH Completion for PDD CLI (Prompt-Driven Development)
  #
@@ -413,6 +443,13 @@ _pdd() {
      esac
  }
  
- compdef _pdd pdd
+ # Register the _pdd function as a completion for pdd command
+ # Use command -v to safely check if compdef is available again
+ # (in case something went wrong with the initialization)
+ if command -v compdef >/dev/null 2>&1; then
+   compdef _pdd pdd
+ else
+   echo >&2 "Warning: Could not register pdd completion. Make sure ZSH completion system is working."
+ fi
  
  # End of pdd_completion.zsh
pdd/preprocess.py CHANGED
@@ -133,9 +133,9 @@ def process_web_tags(text: str) -> str:
          console.print("[bold yellow]Warning:[/bold yellow] FIRECRAWL_API_KEY not found in environment")
          return f"[Error: FIRECRAWL_API_KEY not set. Cannot scrape {url}]"
      app = FirecrawlApp(api_key=api_key)
-     response = app.scrape_url(url=url, params={'formats': ['markdown']})
-     if 'markdown' in response:
-         return response['markdown']
+     response = app.scrape_url(url, formats=['markdown'])
+     if hasattr(response, 'markdown'):
+         return response.markdown
      else:
          console.print(f"[bold yellow]Warning:[/bold yellow] No markdown content returned for {url}")
          return f"[No content available for {url}]"