pdd-cli 0.0.19__py3-none-any.whl → 0.0.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pdd-cli might be problematic.

pdd/auto_deps_main.py CHANGED
@@ -94,5 +94,4 @@ def auto_deps_main(
     except Exception as e:
         if not ctx.obj.get('quiet', False):
             rprint(f"[bold red]Error:[/bold red] {str(e)}")
-        sys.exit(1)
-        # Removed the "raise" line so that we only exit, satisfying the test.
+        sys.exit(1)
pdd/cli.py CHANGED
@@ -46,7 +46,7 @@ console = Console()
 @click.option("--review-examples", is_flag=True,
               help="Review and optionally exclude few-shot examples before command execution.")
 @click.option('--local', is_flag=True, help='Run commands locally instead of in the cloud.')
-@click.version_option(version="0.0.19")
+@click.version_option(version="0.0.21")
 @click.pass_context
 def cli(
     ctx,
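
The only functional change in pdd/cli.py is the version string baked into the --version flag. As a minimal, standalone sketch of what click.version_option does with that string (the prog_name and the trivial group body are assumptions for illustration, not pdd's real entry point):

import click

@click.group()
@click.version_option(version="0.0.21", prog_name="pdd")  # prog_name assumed for this sketch
@click.pass_context
def cli(ctx):
    """Stand-in for the pdd command group."""
    ctx.ensure_object(dict)

if __name__ == "__main__":
    cli()
    # Invoking this script with --version prints: pdd, version 0.0.21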
pdd/crash_main.py CHANGED
@@ -51,10 +51,14 @@ def crash_main(
             "output": output,
             "output_program": output_program
         }
+
+        force = ctx.params.get("force", ctx.obj.get("force", False))
+        quiet = ctx.params.get("quiet", ctx.obj.get("quiet", False))
+
         input_strings, output_file_paths, _ = construct_paths(
             input_file_paths=input_file_paths,
-            force=ctx.obj.get('force', False),
-            quiet=ctx.obj.get('quiet', False),
+            force=force,
+            quiet=quiet,
             command="crash",
             command_options=command_options
         )
@@ -66,72 +70,62 @@ def crash_main(
         error_content = input_strings["error_file"]

         # Get model parameters from context
-        strength = ctx.obj.get('strength', 0.97)
-        temperature = ctx.obj.get('temperature', 0)
+        strength = ctx.obj.get("strength", 0.97)
+        temperature = ctx.obj.get("temperature", 0)
+
+        verbose = ctx.params.get("verbose", ctx.obj.get("verbose", False))

         if loop:
             # Use iterative fixing process
             success, final_code, final_program, attempts, cost, model = fix_code_loop(
-                code_file=code_file,
-                prompt=prompt_content,
-                verification_program=program_file,
-                strength=strength,
-                temperature=temperature,
-                max_attempts=max_attempts or 3,
-                budget=budget or 5.0,
-                error_log_file=error_file,
-                verbose=not ctx.obj.get('verbose', False)
+                code_file, prompt_content, program_file, strength, temperature, max_attempts or 3, budget or 5.0, error_file, verbose
             )
         else:
             # Use single fix attempt
             from .fix_code_module_errors import fix_code_module_errors
             update_program, update_code, final_program, final_code, cost, model = fix_code_module_errors(
-                program=program_content,
-                prompt=prompt_content,
-                code=code_content,
-                errors=error_content,
-                strength=strength,
-                temperature=temperature,
-                verbose=not ctx.obj.get('verbose', False)
+                program_content, prompt_content, code_content, error_content, strength, temperature, verbose
             )
             success = True
             attempts = 1

-        # Determine if contents were actually updated
-        if final_code != "":
-            update_code = final_code != code_content
-        else:
-            update_code = False
-        if final_program != "":
-            update_program = final_program != program_content
-        else:
-            update_program = False
-
-        # Save results if contents changed
-        if update_code and output_file_paths.get("output"):
-            with open(output_file_paths["output"], 'w') as f:
+        # Ensure we have content to write, falling back to original content if needed
+        if final_code == "":
+            final_code = code_content
+
+        if final_program == "":
+            final_program = program_content
+
+        # Determine whether to write the files based on whether paths are provided
+        should_write_code = output_file_paths.get("output") is not None
+        should_write_program = output_file_paths.get("output_program") is not None
+
+        # Write output files
+        if should_write_code:
+            with open(output_file_paths["output"], "w") as f:
                 f.write(final_code)
-        if update_program and output_file_paths.get("output_program"):
-            with open(output_file_paths["output_program"], 'w') as f:
+
+        if should_write_program:
+            with open(output_file_paths["output_program"], "w") as f:
                 f.write(final_program)

         # Provide user feedback
-        if not ctx.obj.get('quiet', False):
+        if not quiet:
             if success:
                 rprint("[bold green]Crash fix completed successfully.[/bold green]")
             else:
-                rprint("[bold yellow]Crash fix completed with some issues.[/bold yellow]")
+                rprint("[bold yellow]Crash fix completed with issues.[/bold yellow]")
             rprint(f"[bold]Model used:[/bold] {model}")
             rprint(f"[bold]Total attempts:[/bold] {attempts}")
-            rprint(f"[bold]Total cost:[/bold] ${cost:.6f}")
-            if update_code and output:
+            rprint(f"[bold]Total cost:[/bold] ${cost:.2f}")
+            if should_write_code:
                 rprint(f"[bold]Fixed code saved to:[/bold] {output_file_paths['output']}")
-            if update_program and output_program:
+            if should_write_program:
                 rprint(f"[bold]Fixed program saved to:[/bold] {output_file_paths['output_program']}")

         return success, final_code, final_program, attempts, cost, model
-
+
     except Exception as e:
-        if not ctx.obj.get('quiet', False):
+        if not quiet:
             rprint(f"[bold red]Error:[/bold red] {str(e)}")
         sys.exit(1)
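
The substantive change in crash_main.py is how --force, --quiet, and --verbose are resolved: options parsed for the current command (ctx.params) now take precedence over the group-level values stored in ctx.obj. A minimal, self-contained sketch of that lookup order, using hypothetical group/command names rather than pdd's real option layout:

import click

def resolve_flag(ctx: click.Context, name: str, default: bool = False) -> bool:
    # Same lookup order as the diff: per-command option first, then the
    # group-level value stashed in ctx.obj, then a hard default.
    return ctx.params.get(name, (ctx.obj or {}).get(name, default))

@click.group()
@click.option("--quiet", is_flag=True)
@click.pass_context
def cli(ctx, quiet):
    ctx.obj = {"quiet": quiet}

@cli.command()
@click.pass_context
def crash(ctx):
    # This command defines no --quiet of its own, so ctx.params has no "quiet"
    # key and a group-level --quiet (inherited via ctx.obj) is used instead.
    if not resolve_flag(ctx, "quiet"):
        click.echo("crash fix would run here")

if __name__ == "__main__":
    cli()

Note that when a command does define the option itself, the value in ctx.params (including its default) always wins over ctx.obj, which appears to be the intent of the change.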
pdd/data/llm_model.csv CHANGED
@@ -1,17 +1,17 @@
 provider,model,input,output,coding_arena_elo,base_url,api_key,counter,encoder,max_tokens,max_completion_tokens,structured_output
-OpenAI,"gpt-4o-mini",0.15,0.60,1246,,OPENAI_API_KEY,tiktoken,o200k_base,16384,,True
+OpenAI,"gpt-4o-mini",0.15,0.60,1246,,OPENAI_API_KEY,tiktoken,o200k_base,,16384,True
 OpenAI,"grok-2-1212",2,10,1255,"https://api.x.ai/v1",XAI_API_KEY,tiktoken,o200k_base,4096,,False
 Anthropic,"claude-3-5-haiku-20241022",1,5,1259,,ANTHROPIC_API_KEY,anthropic,claude-3-sonnet-20240229,8192,,False
 OpenAI,"deepseek-coder",0.14,0.28,1279,https://api.deepseek.com/beta,DEEPSEEK_API_KEY,autotokenizer,deepseek-coder-7b-instruct-v1.5,8192,,False
 Google,"gemini-2.0-flash-thinking-exp-01-21",.1,.4,1291,,GOOGLE_API_KEY,,,8192,,False
 GoogleVertexAI,"gemini-2.0-pro-exp-02-05",1.25,5,1299,,VERTEX_AI_API_KEY,,,8192,,False
 Anthropic,claude-3-7-sonnet-20250219,3,15,1312,,ANTHROPIC_API_KEY,anthropic,claude-3-sonnet-20240229,64000,,False
-Google,gemini-exp-1206,1.25,5,1313,,GOOGLE_API_KEY,,,8192,,False
+Google,gemini-2.5-pro-exp-03-25,1.25,5,1313,,GOOGLE_API_KEY,,,8192,,False
 OpenAI,"deepseek-r1-distill-llama-70b-specdec",5,5,1314,https://api.groq.com/openai/v1,GROQ_API_KEY,autotokenizer,deepseek-coder-7b-instruct-v1.5,16384,,False
 Ollama,"deepseek-r1:70b-llama-distill-q8_0",0.0,0.0,1315,,PWD,,,,,False
 Ollama,deepseek-r1:32b-qwen-distill-fp16,0.0,0.0,1316,,PWD,,,,,False
 OpenAI,"o3-mini",1.1,4.4,1319,,OPENAI_API_KEY,tiktoken,o200k_base,,100000,True
 OpenAI,"o1-2024-12-17",15,60,1331,,OPENAI_API_KEY,tiktoken,o200k_base,,32768,True
-OpenAI,"gpt-4o-2024-11-20",2.5,10,1332,,OPENAI_API_KEY,tiktoken,o200k_base,16384,,True
+OpenAI,"gpt-4o-2024-11-20",2.5,10,1332,,OPENAI_API_KEY,tiktoken,o200k_base,,16384,True
 OpenAI,"deepseek-reasoner",0.55,2.19,1336,https://api.deepseek.com/beta,DEEPSEEK_API_KEY,autotokenizer,deepseek-coder-7b-instruct-v1.5,8192,,False
 Fireworks,accounts/fireworks/models/deepseek-r1,3,8,1338,,FIREWORKS_API_KEY,,,8192,,False
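
For gpt-4o-mini and gpt-4o-2024-11-20 the 16384 token limit moves from the max_tokens column to max_completion_tokens, and gemini-exp-1206 is replaced by gemini-2.5-pro-exp-03-25. How pdd itself consumes this table is not part of the diff; the throwaway sketch below (the relative file path is an assumption) just reads the CSV with the standard library to show which limit column each model now populates:

import csv
from pathlib import Path

csv_path = Path("pdd/data/llm_model.csv")  # assumed location relative to the package source

with csv_path.open(newline="") as f:
    for row in csv.DictReader(f):
        # Empty strings mean the column is unused for that model.
        limit = row["max_completion_tokens"] or row["max_tokens"] or "n/a"
        print(f"{row['model']}: limit={limit}, structured_output={row['structured_output']}")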