pdd-cli 0.0.5__py3-none-any.whl → 0.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of pdd-cli might be problematic.

pdd/cli.py CHANGED
@@ -10,32 +10,10 @@ from rich import print as rprint
 from rich.console import Console
 from rich.panel import Panel
 
-# ----------------------------------------------------------------------
-# Dynamically determine PDD_PATH at runtime.
-# ----------------------------------------------------------------------
-def get_local_pdd_path() -> str:
-    """
-    Return the PDD_PATH directory.
-    First check the environment variable. If not set, attempt to
-    deduce it via importlib.resources. If that fails, abort.
-    """
-    if "PDD_PATH" in os.environ:
-        return os.environ["PDD_PATH"]
-    else:
-        try:
-            with importlib.resources.path("pdd", "cli.py") as p:
-                fallback_path = str(p.parent)
-                # Also set it back into the environment for consistency
-                os.environ["PDD_PATH"] = fallback_path
-                return fallback_path
-        except ImportError:
-            rprint(
-                "[red]Error: Could not determine the path to the 'pdd' package. "
-                "Please set the PDD_PATH environment variable manually.[/red]"
-            )
-            sys.exit(1)
-
-get_local_pdd_path()
+from .install_completion import install_completion as install_completion_main
+import pdd.install_completion
+
+pdd.install_completion.get_local_pdd_path()
 # ----------------------------------------------------------------------
 # Import sub-command modules
 # ----------------------------------------------------------------------
@@ -67,7 +45,8 @@ console = Console()
 @click.option("--output-cost", type=click.Path(), help="Enable cost tracking and output a CSV file with usage details.")
 @click.option("--review-examples", is_flag=True,
               help="Review and optionally exclude few-shot examples before command execution.")
-@click.version_option(version="0.0.5")
+@click.option('--local', is_flag=True, help='Run commands locally instead of in the cloud.')
+@click.version_option(version="0.0.6")
 @click.pass_context
 def cli(
     ctx,
@@ -78,6 +57,7 @@ def cli(
     quiet: bool,
     output_cost: Optional[str],
     review_examples: bool,
+    local: bool
 ):
     """
     PDD (Prompt-Driven Development) Command Line Interface
@@ -90,6 +70,7 @@ def cli(
     ctx.obj["quiet"] = quiet
     ctx.obj["output_cost"] = output_cost or os.environ.get("PDD_OUTPUT_COST_PATH")
     ctx.obj["review_examples"] = review_examples
+    ctx.obj['local'] = local
 
     # Auto-update check, but handle EOF errors so tests do not crash.
     auto_update_enabled = os.environ.get("PDD_AUTO_UPDATE", "true").lower() == "true"
@@ -390,63 +371,6 @@ def crash(
         budget,
     )
 
-# ----------------------------------------------------------------------
-# Simplified shell RC path logic
-# ----------------------------------------------------------------------
-def get_shell_rc_path(shell: str) -> Optional[str]:
-    """Return the default RC file path for a given shell name."""
-    home = os.path.expanduser("~")
-    if shell == "bash":
-        return os.path.join(home, ".bashrc")
-    elif shell == "zsh":
-        return os.path.join(home, ".zshrc")
-    elif shell == "fish":
-        return os.path.join(home, ".config", "fish", "config.fish")
-    return None
-
-
-def get_current_shell() -> Optional[str]:
-
-
-    """Determine the currently running shell more reliably."""
-    if not os.environ.get('PYTEST_CURRENT_TEST'):
-        # Method 1: Check process name using 'ps'
-        try:
-            import subprocess
-            result = subprocess.run(['ps', '-p', str(os.getppid()), '-o', 'comm='],
-                                    capture_output=True, text=True)
-            if result.returncode == 0:
-                # Strip whitespace and get basename without path
-                shell = os.path.basename(result.stdout.strip())
-                # Remove leading dash if present (login shell)
-                return shell.lstrip('-')
-        except (subprocess.SubprocessError, FileNotFoundError):
-            pass
-
-        # Method 2: Check $0 special parameter
-        try:
-            result = subprocess.run(['sh', '-c', 'echo "$0"'],
-                                    capture_output=True, text=True)
-            if result.returncode == 0:
-                shell = os.path.basename(result.stdout.strip())
-                return shell.lstrip('-')
-        except (subprocess.SubprocessError, FileNotFoundError):
-            pass
-
-    # Fallback to SHELL env var if all else fails
-    return os.path.basename(os.environ.get("SHELL", ""))
-
-
-def get_completion_script_extension(shell: str) -> str:
-    """Get the appropriate file extension for shell completion scripts."""
-    mapping = {
-        "bash": "sh",
-        "zsh": "zsh",
-        "fish": "fish"
-    }
-    return mapping.get(shell, shell)
-
-
 @cli.command(name="install_completion")
 def install_completion():
     """
@@ -454,46 +378,7 @@ def install_completion():
     copying the relevant completion script, and appending a source command
     to the user’s shell RC file if not already present.
     """
-    shell = get_current_shell()
-    rc_file = get_shell_rc_path(shell)
-    if not rc_file:
-        rprint(f"[red]Unsupported shell: {shell}[/red]")
-        raise click.Abort()
-
-    ext = get_completion_script_extension(shell)
-
-    # Dynamically look up the local path at runtime:
-    local_pdd_path = get_local_pdd_path()
-    completion_script_path = os.path.join(local_pdd_path, f"pdd_completion.{ext}")
-
-    if not os.path.exists(completion_script_path):
-        rprint(f"[red]Completion script not found: {completion_script_path}[/red]")
-        raise click.Abort()
-
-    source_command = f"source {completion_script_path}"
-
-    try:
-        # Ensure the RC file exists (create if missing).
-        if not os.path.exists(rc_file):
-            os.makedirs(os.path.dirname(rc_file), exist_ok=True)
-            with open(rc_file, "w", encoding="utf-8") as cf:
-                cf.write("")
-
-        # Read existing content
-        with open(rc_file, "r", encoding="utf-8") as cf:
-            content = cf.read()
-
-        if source_command not in content:
-            with open(rc_file, "a", encoding="utf-8") as rf:
-                rf.write(f"\n# PDD CLI completion\n{source_command}\n")
-
-            rprint(f"[green]Shell completion installed for {shell}.[/green]")
-            rprint(f"Please restart your shell or run 'source {rc_file}' to enable completion.")
-        else:
-            rprint(f"[yellow]Shell completion already installed for {shell}.[/yellow]")
-    except OSError as exc:
-        rprint(f"[red]Failed to install shell completion: {exc}[/red]")
-        raise click.Abort()
+    return install_completion_main()
 
 
 @cli.command()
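
The new --local flag is stored on the Click context (ctx.obj['local']) so that sub-commands can choose between local and cloud execution. A minimal sketch of that pattern follows; only the --local option and the ctx.obj["local"] key come from this diff, while the group and command names here are illustrative assumptions.

    # Hypothetical sketch of a sub-command reading the --local flag from ctx.obj.
    import click

    @click.group()
    @click.option("--local", is_flag=True, help="Run commands locally instead of in the cloud.")
    @click.pass_context
    def cli(ctx, local):
        ctx.ensure_object(dict)
        ctx.obj["local"] = local  # mirrors the change shown above

    @cli.command()
    @click.pass_context
    def generate(ctx):  # command name is an assumption, not taken from the diff
        if ctx.obj.get("local", False):
            click.echo("Running in local mode")
        else:
            click.echo("Running in the cloud")

    if __name__ == "__main__":
        cli()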
pdd/code_generator.py CHANGED
@@ -13,7 +13,8 @@ def code_generator(
     language: str,
     strength: float,
     temperature: float = 0.0,
-    verbose: bool = False
+    verbose: bool = False,
+    preprocess_prompt: bool = True,
 ) -> Tuple[str, float, str]:
     """
     Generate code from a prompt using a language model.
@@ -47,9 +48,13 @@
     model_name = ""
 
     # Step 1: Preprocess the prompt
-    if verbose:
-        console.print("[bold blue]Step 1: Preprocessing prompt[/bold blue]")
-    processed_prompt = preprocess(prompt, recursive=False, double_curly_brackets=True)
+    if preprocess_prompt:
+        if verbose:
+            console.print("[bold blue]Step 1: Preprocessing prompt[/bold blue]")
+
+        processed_prompt = preprocess(prompt, recursive=False, double_curly_brackets=True)
+    else:
+        processed_prompt = prompt
 
     # Step 2: Generate initial response
     if verbose:
@@ -99,7 +104,7 @@
     runnable_code, postprocess_cost, model_name_post = postprocess(
         llm_output=final_output,
         language=language,
-        strength=0.895,
+        strength=0.89,
         temperature=0.0,
         verbose=verbose
     )
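
The new preprocess_prompt parameter lets callers skip the preprocessing step; when it is False the prompt is passed through unchanged. A hedged call sketch follows, with a made-up prompt string; only the keyword names are taken from the signature above.

    # Illustrative call using the new preprocess_prompt parameter.
    from pdd.code_generator import code_generator

    code, cost, model = code_generator(
        "Write a function that reverses a string.",  # example prompt, not from the package
        language="python",
        strength=0.5,
        temperature=0.0,
        verbose=False,
        preprocess_prompt=False,  # skip Step 1 and use the prompt as-is
    )
    print(model, cost)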
pdd/code_generator_main.py CHANGED
@@ -3,8 +3,12 @@ from typing import Tuple, Optional
 import click
 from rich import print as rprint
 
+import requests  # <── Added at top level so the tests can patch pdd.code_generator_main.requests
+
 from .construct_paths import construct_paths
 from .code_generator import code_generator
+from .get_jwt_token import get_jwt_token
+from .preprocess import preprocess
 
 def code_generator_main(ctx: click.Context, prompt_file: str, output: Optional[str]) -> Tuple[str, float, str]:
     """
@@ -37,13 +41,67 @@ def code_generator_main(ctx: click.Context, prompt_file: str, output: Optional[s
     # Generate code
     strength = ctx.obj.get('strength', 0.5)
     temperature = ctx.obj.get('temperature', 0.0)
-    generated_code, total_cost, model_name = code_generator(
-        prompt_content,
-        language,
-        strength,
-        temperature,
-        verbose=not ctx.obj.get('quiet', False)
-    )
+    verbose = not ctx.obj.get('quiet', False)
+    local = ctx.obj.get('local', False)
+
+    if local:
+        print("Running in local mode")
+        # Local execution
+        generated_code, total_cost, model_name = code_generator(
+            prompt_content,
+            language,
+            strength,
+            temperature,
+            verbose=verbose
+        )
+    else:
+        # Cloud execution
+        try:
+            import asyncio
+            import os
+            # Get JWT token for cloud authentication
+            jwt_token = asyncio.run(get_jwt_token(
+                firebase_api_key=os.environ.get("REACT_APP_FIREBASE_API_KEY"),
+                github_client_id=os.environ.get("GITHUB_CLIENT_ID"),
+                app_name="PDD Code Generator"
+            ))
+            # Call cloud code generator
+            headers = {
+                "Authorization": f"Bearer {jwt_token}",
+                "Content-Type": "application/json"
+            }
+            # Preprocess the prompt
+            processed_prompt = preprocess(prompt_content, recursive=False, double_curly_brackets=True)
+            if verbose:
+                print(f"Processed prompt: {processed_prompt}")
+            data = {
+                "promptContent": processed_prompt,
+                "language": language,
+                "strength": strength,
+                "temperature": temperature,
+                "verbose": verbose
+            }
+            response = requests.post(
+                "https://us-central1-prompt-driven-development.cloudfunctions.net/generateCode",
+                headers=headers,
+                json=data
+            )
+            response.raise_for_status()
+            result = response.json()
+            generated_code = result["generatedCode"]
+            total_cost = result["totalCost"]
+            model_name = result["modelName"]
+
+        except Exception as e:
+            if not ctx.obj.get('quiet', False):
+                rprint("[bold red]Cloud execution failed, falling back to local mode[/bold red]")
+            generated_code, total_cost, model_name = code_generator(
+                prompt_content,
+                language,
+                strength,
+                temperature,
+                verbose=verbose
+            )
 
     # Save results
     if output_file_paths["output"]:
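
In cloud mode the command POSTs the preprocessed prompt to the generateCode Cloud Function and reads generatedCode, totalCost, and modelName from the JSON response, falling back to local generation on any error. A standalone sketch of that request is below; the endpoint URL, payload keys, and response keys come from the diff, while the JWT value and the timeout are placeholders or assumptions not present in it.

    # Standalone sketch of the cloud request made by code_generator_main.
    import requests

    jwt_token = "<jwt-from-get_jwt_token>"  # placeholder, not a real token
    resp = requests.post(
        "https://us-central1-prompt-driven-development.cloudfunctions.net/generateCode",
        headers={"Authorization": f"Bearer {jwt_token}", "Content-Type": "application/json"},
        json={
            "promptContent": "...",   # preprocessed prompt text
            "language": "python",
            "strength": 0.5,
            "temperature": 0.0,
            "verbose": False,
        },
        timeout=60,  # assumption: the diff itself sets no timeout
    )
    resp.raise_for_status()
    result = resp.json()
    generated_code = result["generatedCode"]  # fields read back by the client
    total_cost = result["totalCost"]
    model_name = result["modelName"]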
pdd/context_generator.py CHANGED
@@ -109,7 +109,7 @@ def context_generator(code_module: str, prompt: str, language: str = "python", s
     example_code, postprocess_cost, postprocess_model = postprocess(
         llm_output=final_llm_output,
         language=language,
-        strength=0.9,
+        strength=0.89,
         temperature=temperature,
         verbose=verbose
     )
pdd/crash_main.py CHANGED
@@ -85,7 +85,7 @@ def crash_main(
     else:
         # Use single fix attempt
         from .fix_code_module_errors import fix_code_module_errors
-        _, _, final_program, final_code, cost, model = fix_code_module_errors(
+        update_program, update_code, final_program, final_code, cost, model = fix_code_module_errors(
             program=program_content,
             prompt=prompt_content,
             code=code_content,
@@ -97,11 +97,15 @@
         success = True
         attempts = 1
 
-    # Save results
-    if output_file_paths.get("output"):
+    # Determine if contents were actually updated
+    update_code = final_code != code_content
+    update_program = final_program != program_content
+
+    # Save results if contents changed
+    if update_code and output_file_paths.get("output"):
         with open(output_file_paths["output"], 'w') as f:
             f.write(final_code)
-    if output_file_paths.get("output_program"):
+    if update_program and output_file_paths.get("output_program"):
         with open(output_file_paths["output_program"], 'w') as f:
             f.write(final_program)
 
@@ -114,9 +118,9 @@
         rprint(f"[bold]Model used:[/bold] {model}")
         rprint(f"[bold]Total attempts:[/bold] {attempts}")
         rprint(f"[bold]Total cost:[/bold] ${cost:.6f}")
-        if output:
+        if update_code and output:
             rprint(f"[bold]Fixed code saved to:[/bold] {output_file_paths['output']}")
-        if output_program:
+        if update_program and output_program:
             rprint(f"[bold]Fixed program saved to:[/bold] {output_file_paths['output_program']}")
 
     return success, final_code, final_program, attempts, cost, model
pdd/data/llm_model.csv CHANGED
@@ -1,15 +1,17 @@
 provider,model,input,output,coding_arena_elo,base_url,api_key,counter,encoder,max_tokens,max_completion_tokens,structured_output
-OpenAI,"o1-mini-2024-09-12",3,12,1301,,,tiktoken,o200k_base,,65536,False
-OpenAI,"deepseek-coder",0.14,0.28,1256,"https://api.deepseek.com/beta","DEEPSEEK_API_KEY",autotokenizer,deepseek-coder-7b-instruct-v1.5,8192,,False
-Ollama,"qwen2.5-coder:32b-instruct-fp16",0.0,0.0,1227,,,,,,,False
-Ollama,"athene-v2:72b-q8_0",0.0,0.0,1253,,,,,,,False
-Anthropic,"claude-3-5-sonnet-20241022",3,15,1309,,,anthropic,claude-3-sonnet-20240229,8192,,False
-Google,"gemini-2.0-flash-exp",0.15,0.60,1281,,,,,8192,,False
-Fireworks,"accounts/fireworks/models/llama-v3p3-70b-instruct",3,3,1280,,,,,16384,,False
-Fireworks,"accounts/fireworks/models/qwen2p5-coder-32b-instruct",.9,.9,1226,,,,,2048,,False
-OpenAI,"gpt-4o-mini",0.15,0.60,1246,,,tiktoken,o200k_base,16384,,True
-OpenAI,"gpt-4o-2024-11-20",2.5,10,1306,,,tiktoken,o200k_base,16384,,True
-OpenAI,"o1-2024-12-17",15,60,1311,,,tiktoken,o200k_base,,32768,False
-OpenAI,"grok-beta",5,15,1255,"https://api.x.ai/v1","XAI_API_KEY",tiktoken,o200k_base,4096,,False
-Google,"gemini-exp-1206",0.15,0.60,1321,,,,,8192,,False
-Anthropic,"claude-3-5-haiku-20241022",1,5,1265,,,anthropic,claude-3-sonnet-20240229,8192,,False
+OpenAI,"o3-mini",1.1,4.4,1301,,OPENAI_API_KEY,tiktoken,o200k_base,,100000,True
+OpenAI,"deepseek-coder",0.14,0.28,1256,"https://api.deepseek.com/beta",DEEPSEEK_API_KEY,autotokenizer,deepseek-coder-7b-instruct-v1.5,8192,,False
+OpenAI,"deepseek-reasoner",0.55,2.19,1309,"https://api.deepseek.com/beta",DEEPSEEK_API_KEY,autotokenizer,deepseek-coder-7b-instruct-v1.5,8192,,False
+Ollama,"qwen2.5-coder:32b-instruct-fp16",0.0,0.0,1227,,PWD,,,,,False
+Ollama,"athene-v2:72b-q8_0",0.0,0.0,1253,,PWD,,,,,False
+Anthropic,"claude-3-5-sonnet-20241022",3,15,1308,,ANTHROPIC_API_KEY,anthropic,claude-3-sonnet-20240229,8192,,False
+Google,"gemini-2.0-flash-exp",0.15,0.60,1281,,GOOGLE_API_KEY,,,8192,,False
+Fireworks,"accounts/fireworks/models/llama-v3p3-70b-instruct",3,3,1280,,FIREWORKS_API_KEY,,,16384,,False
+Fireworks,"accounts/fireworks/models/qwen2p5-coder-32b-instruct",.9,.9,1226,,FIREWORKS_API_KEY,,,2048,,False
+OpenAI,"gpt-4o-mini",0.15,0.60,1246,,OPENAI_API_KEY,tiktoken,o200k_base,16384,,True
+OpenAI,"gpt-4o-2024-11-20",2.5,10,1306,,OPENAI_API_KEY,tiktoken,o200k_base,16384,,True
+OpenAI,"o1-2024-12-17",15,60,1311,,OPENAI_API_KEY,tiktoken,o200k_base,,32768,True
+OpenAI,"grok-beta",5,15,1255,"https://api.x.ai/v1",XAI_API_KEY,tiktoken,o200k_base,4096,,False
+Anthropic,"claude-3-5-haiku-20241022",1,5,1265,,ANTHROPIC_API_KEY,anthropic,claude-3-sonnet-20240229,8192,,False
+GoogleVertexAI,gemini-2.0-pro-exp-02-05,0.14,.59,1321,,VERTEX_AI_API_KEY,,,8192,,False
+Google,gemini-exp-1206,0.15,0.60,1320,,GOOGLE_API_KEY,,,8192,,False
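
In the new CSV the api_key column names the environment variable holding each provider's key (e.g. OPENAI_API_KEY) instead of being left blank or quoted. A hedged sketch of how a consumer might resolve that column follows; how pdd itself reads the file is not shown in this diff.

    # Illustration only: resolve the api_key column (an env-var name) to a key.
    import csv, os

    with open("pdd/data/llm_model.csv", newline="") as f:
        for row in csv.DictReader(f):
            env_name = row["api_key"]
            key = os.environ.get(env_name, "") if env_name else ""
            print(row["provider"], row["model"], "key set" if key else "no key")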
pdd/generate_test.py CHANGED
@@ -76,7 +76,7 @@ def generate_test(
     last_600_chars = result[-600:] if len(result) > 600 else result
     reasoning, is_finished, check_cost, check_model = unfinished_prompt(
         prompt_text=last_600_chars,
-        strength=0.895,
+        strength=0.89,
         temperature=temperature,
         verbose=verbose
     )
@@ -101,7 +101,7 @@
     processed_result, post_cost, post_model = postprocess(
         result,
         language=language,
-        strength=0.895,
+        strength=0.89,
         temperature=temperature,
         verbose=verbose
     )