pdd-cli 0.0.48__py3-none-any.whl → 0.0.50__py3-none-any.whl
This diff shows the changes between package versions as they were published to their public registries; it is provided for informational purposes only.
Potentially problematic release: this version of pdd-cli might be problematic.
- pdd/__init__.py +4 -4
- pdd/bug_to_unit_test.py +2 -0
- pdd/cli.py +8 -1
- pdd/code_generator.py +3 -1
- pdd/context_generator.py +3 -1
- pdd/continue_generation.py +47 -7
- pdd/data/llm_model.csv +15 -16
- pdd/detect_change_main.py +2 -2
- pdd/generate_test.py +3 -1
- pdd/llm_invoke.py +461 -74
- pdd/load_prompt_template.py +30 -9
- pdd/pdd_completion.fish +2 -2
- pdd/pdd_completion.zsh +4 -4
- pdd/postprocess.py +2 -2
- pdd/prompts/extract_prompt_update_LLM.prompt +7 -8
- pdd/prompts/insert_includes_LLM.prompt +4 -4
- pdd/prompts/unfinished_prompt_LLM.prompt +85 -1
- pdd/summarize_directory.py +15 -2
- pdd/sync_orchestration.py +32 -4
- pdd/trace.py +131 -11
- pdd/trace_main.py +2 -2
- pdd/unfinished_prompt.py +41 -2
- {pdd_cli-0.0.48.dist-info → pdd_cli-0.0.50.dist-info}/METADATA +7 -4
- {pdd_cli-0.0.48.dist-info → pdd_cli-0.0.50.dist-info}/RECORD +27 -27
- {pdd_cli-0.0.48.dist-info → pdd_cli-0.0.50.dist-info}/WHEEL +0 -0
- {pdd_cli-0.0.48.dist-info → pdd_cli-0.0.50.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.48.dist-info → pdd_cli-0.0.50.dist-info}/licenses/LICENSE +0 -0
- {pdd_cli-0.0.48.dist-info → pdd_cli-0.0.50.dist-info}/top_level.txt +0 -0
pdd/__init__.py
CHANGED
@@ -1,20 +1,20 @@
 """PDD - Prompt Driven Development"""
 
-__version__ = "0.0.48"
+__version__ = "0.0.50"
 
 # Strength parameter used for LLM extraction across the codebase
 # Used in postprocessing, XML tagging, code generation, and other extraction
 # operations. The module should have a large context window and be affordable.
-EXTRACTION_STRENGTH = 0.
+EXTRACTION_STRENGTH = 0.3
 
-DEFAULT_STRENGTH = 0.
+DEFAULT_STRENGTH = 0.75
 
 DEFAULT_TEMPERATURE = 0.0
 
 DEFAULT_TIME = 0.25
 
 # Define constants used across the package
-DEFAULT_LLM_MODEL = "gpt-
+DEFAULT_LLM_MODEL = "gpt-5-mini"
 # When going to production, set the following constants:
 # REACT_APP_FIREBASE_API_KEY
 # GITHUB_CLIENT_ID
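These package-level defaults are read by other modules at import time; later in this diff, detect_change_main.py starts falling back to DEFAULT_STRENGTH instead of a hard-coded value. A minimal sketch of that consumption pattern, using a plain dict as a hypothetical stand-in for Click's ctx.obj:

from pdd import DEFAULT_STRENGTH, DEFAULT_TIME, DEFAULT_LLM_MODEL

ctx_obj = {"temperature": 0.0}  # hypothetical stand-in for click.Context.obj
strength = ctx_obj.get("strength", DEFAULT_STRENGTH)  # 0.75 unless the CLI overrides it
time_budget = ctx_obj.get("time", DEFAULT_TIME)       # 0.25 unless the CLI overrides it
print(strength, time_budget, DEFAULT_LLM_MODEL)       # gpt-5-mini is now the default model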
pdd/bug_to_unit_test.py
CHANGED
@@ -108,6 +108,7 @@ def bug_to_unit_test( # pylint: disable=too-many-arguments, too-many-locals
         strength=0.89,
         temperature=temperature,
         time=time,
+        language=language,
         verbose=False,
     )
 
@@ -121,6 +122,7 @@ def bug_to_unit_test( # pylint: disable=too-many-arguments, too-many-locals
         strength=strength,
         temperature=temperature,
         time=time,
+        language=language,
         verbose=True,
     )
     total_cost += continued_cost
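Both call sites above gain the same one-line change, and it recurs in code_generator.py, context_generator.py, generate_test.py, and continue_generation.py below: the caller's language is forwarded to the continuation helpers instead of being dropped. A hedged sketch of the pattern; the helper here is a stub, not pdd's real continue_generation signature:

from typing import Optional, Tuple

def continuation_helper(prompt_text: str, strength: float, temperature: float,
                        time: float = 0.25, language: Optional[str] = None,
                        verbose: bool = False) -> Tuple[str, float, str]:
    # Stub: the real helpers accept the new optional `language` keyword
    # the same way and default it to None for backward compatibility.
    return prompt_text, 0.0, "stub-model"

result, cost, model = continuation_helper(
    prompt_text="...",
    strength=0.89,
    temperature=0.0,
    time=0.25,
    language="python",  # newly threaded through from the caller
    verbose=False,
)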
pdd/cli.py
CHANGED
@@ -1001,7 +1001,14 @@ def auto_deps(
     auto_deps_csv_path: Optional[str],
     force_scan: bool,
 ) -> Optional[Tuple[str, float, str]]:  # Modified return type
-    """Analyze prompt and insert dependencies from a directory."""
+    """Analyze a prompt and insert dependencies from a directory or glob.
+
+    DIRECTORY_PATH accepts either a directory path or a glob pattern and is
+    expanded recursively when you use patterns like `**/*.py`. Examples:
+    - examples/**/*.py
+    - context/*_example.py
+    - examples/*
+    """
     quiet = ctx.obj.get("quiet", False)
     command_name = "auto-deps"
     try:
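A minimal sketch of the recursive expansion the new docstring promises; the helper name is hypothetical, not pdd's actual implementation:

import glob
import os

def expand_directory_path(directory_path: str) -> list:
    # A bare directory is treated as directory/*; anything else is a glob.
    if os.path.isdir(directory_path):
        directory_path = os.path.join(directory_path, "*")
    # recursive=True lets "**" cross directory boundaries, as in examples/**/*.py
    return [p for p in glob.glob(directory_path, recursive=True) if os.path.isfile(p)]

print(expand_directory_path("examples/**/*.py"))
print(expand_directory_path("context/*_example.py"))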
pdd/code_generator.py
CHANGED
@@ -83,6 +83,7 @@ def code_generator(
             strength=0.5,
             temperature=0.0,
             time=time,
+            language=language,
             verbose=verbose
         )
         total_cost += check_cost
@@ -97,6 +98,7 @@ def code_generator(
             strength=strength,
             temperature=temperature,
             time=time,
+            language=language,
             verbose=verbose
         )
         total_cost += continue_cost
@@ -126,4 +128,4 @@ def code_generator(
     except Exception as e:
         if verbose:
             console.print(f"[bold red]Unexpected Error: {str(e)}[/bold red]")
-        raise
+        raise
pdd/context_generator.py
CHANGED
@@ -95,6 +95,7 @@ def context_generator(
             strength=0.5,
             temperature=temperature,
             time=time,
+            language=language,
             verbose=verbose
         )
     except Exception as e:
@@ -112,6 +113,7 @@
             strength=strength,
             temperature=temperature,
             time=time,
+            language=language,
             verbose=verbose
         )
         total_cost = llm_response['cost'] + unfinished_cost + continue_cost
@@ -149,4 +151,4 @@ if __name__ == "__main__":
     print("[bold green]Generated Example Code:[/bold green]")
     print(example_code)
     print(f"[bold blue]Total Cost: ${total_cost:.6f}[/bold blue]")
-    print(f"[bold blue]Model Name: {model_name}[/bold blue]")
+    print(f"[bold blue]Model Name: {model_name}[/bold blue]")
pdd/continue_generation.py
CHANGED
@@ -1,4 +1,5 @@
-from typing import Tuple
+from typing import Tuple, Optional
+import logging
 from rich.console import Console
 from rich.syntax import Syntax
 from pydantic import BaseModel, Field
@@ -9,6 +10,10 @@ from .unfinished_prompt import unfinished_prompt
 from . import EXTRACTION_STRENGTH, DEFAULT_TIME
 
 console = Console()
+logger = logging.getLogger(__name__)
+
+# Maximum number of generation loops to prevent infinite loops
+MAX_GENERATION_LOOPS = 20
 
 class TrimResultsStartOutput(BaseModel):
     explanation: str = Field(description="The explanation of how you determined what to cut out")
@@ -24,6 +29,7 @@ def continue_generation(
     strength: float,
     temperature: float,
     time: float = DEFAULT_TIME,
+    language: Optional[str] = None,
     verbose: bool = False
 ) -> Tuple[str, float, str]:
     """
@@ -84,10 +90,16 @@ def continue_generation(
         code_block = trim_start_response['result'].code_block
 
         # Step 4: Continue generation loop
-        while True:
+        while loop_count < MAX_GENERATION_LOOPS:
             loop_count += 1
             if verbose:
                 console.print(f"[cyan]Generation loop {loop_count}[/cyan]")
+
+            # Check for maximum loops reached
+            if loop_count >= MAX_GENERATION_LOOPS:
+                logger.warning(f"Reached maximum generation loops ({MAX_GENERATION_LOOPS}), terminating")
+                console.print(f"[yellow]Warning: Reached maximum generation loops ({MAX_GENERATION_LOOPS}), terminating[/yellow]")
+                break
 
             # Generate continuation
             continue_response = llm_invoke(
@@ -106,19 +118,47 @@
             model_name = continue_response['model_name']
             continue_result = continue_response['result']
 
-
-
-
+            if verbose:
+                try:
+                    preview = (continue_result[:160] + '...') if isinstance(continue_result, str) and len(continue_result) > 160 else continue_result
+                except Exception:
+                    preview = "<non-str>"
+                console.print(f"[blue]Continue model:[/blue] {model_name}")
+                console.print(f"[blue]Continue preview:[/blue] {preview!r}")
+
+            # If the model produced no continuation, avoid an endless loop
+            if not isinstance(continue_result, str) or not continue_result.strip():
+                logger.warning("Empty continuation received; stopping to avoid loop.")
+                break
+
+            # Build prospective new block and check completeness on the updated tail
+            new_code_block = code_block + continue_result
+            last_chunk = new_code_block[-600:] if len(new_code_block) > 600 else new_code_block
+            reasoning, is_finished, check_cost, check_model = unfinished_prompt(
                 prompt_text=last_chunk,
                 strength=0.5,
                 temperature=0,
                 time=time,
+                language=language,
                 verbose=verbose
             )
             total_cost += check_cost
 
+            if verbose:
+                console.print(f"[magenta]Tail length:[/magenta] {len(last_chunk)}")
+                # Show a safe, shortened representation of the tail
+                try:
+                    tail_preview = (last_chunk[-200:] if len(last_chunk) > 200 else last_chunk)
+                except Exception:
+                    tail_preview = "<unprintable tail>"
+                console.print(f"[magenta]Tail preview (last 200 chars):[/magenta]\n{tail_preview}")
+                console.print(f"[magenta]Unfinished check model:[/magenta] {check_model}")
+                console.print(f"[magenta]is_finished:[/magenta] {is_finished}")
+                console.print(f"[magenta]Reasoning:[/magenta] {reasoning}")
+
             if not is_finished:
-                code_block
+                code_block = new_code_block
+                # Continue to next iteration
             else:
                 # Trim and append final continuation
                 trim_response = llm_invoke(
@@ -146,4 +186,4 @@
 
     except Exception as e:
         console.print(f"[bold red]Error in continue_generation: {str(e)}[/bold red]")
-        raise
+        raise
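Taken together, the new guards give the generation loop three exits: the MAX_GENERATION_LOOPS bound, an empty continuation, and a passing completeness check on the tail. A condensed sketch of that control flow, with stub callables standing in for the llm_invoke and unfinished_prompt calls:

MAX_GENERATION_LOOPS = 20  # mirrors the new module-level constant

def continue_until_finished(generate_chunk, looks_finished) -> str:
    code_block, loop_count = "", 0
    while loop_count < MAX_GENERATION_LOOPS:
        loop_count += 1
        chunk = generate_chunk(code_block)  # stands in for the llm_invoke call
        if not chunk.strip():
            break  # empty continuation: stop instead of spinning forever
        code_block += chunk
        tail = code_block[-600:]  # only the tail is re-checked, keeping the prompt small
        if looks_finished(tail):  # stands in for unfinished_prompt
            break
    return code_block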
pdd/data/llm_model.csv
CHANGED
@@ -1,18 +1,17 @@
 provider,model,input,output,coding_arena_elo,base_url,api_key,max_reasoning_tokens,structured_output,reasoning_type
-OpenAI,gpt-
-
-Anthropic,claude-3-5-haiku-20241022,.8,4,1261,,ANTHROPIC_API_KEY,0,True,none
-OpenAI,deepseek/deepseek-chat,.27,1.1,1353,https://api.deepseek.com/beta,DEEPSEEK_API_KEY,0,False,none
-Google,vertex_ai/gemini-2.5-flash,0.15,0.6,1330,,VERTEX_CREDENTIALS,0,True,effort
+OpenAI,gpt-5-nano,0.05,0.4,1249,,OPENAI_API_KEY,0,True,none
+Google,vertex_ai/gemini-2.5-flash,0.15,0.6,1290,,VERTEX_CREDENTIALS,0,True,effort
 Google,gemini-2.5-pro,1.25,10.0,1360,,GOOGLE_API_KEY,0,True,none
-
-Google,vertex_ai/gemini-2.5-pro,1.25,10.0,
-OpenAI,
-OpenAI,
-OpenAI,gpt-4.1,2.0,8.0,
-
-Fireworks,fireworks_ai/accounts/fireworks/models/
-
-
-OpenAI,
-OpenAI,openai
+Google,vertex_ai/claude-sonnet-4,3.0,15.0,1359,,VERTEX_CREDENTIALS,64000,True,budget
+Google,vertex_ai/gemini-2.5-pro,1.25,10.0,1405,,VERTEX_CREDENTIALS,0,True,none
+OpenAI,gpt-5-mini,0.25,2.0,1325,,OPENAI_API_KEY,0,True,effort
+OpenAI,gpt-5,1.25,10.0,1482,,OPENAI_API_KEY,0,True,effort
+OpenAI,gpt-4.1,2.0,8.0,1253,,OPENAI_API_KEY,0,True,none
+Google,vertex_ai/deepseek-ai/deepseek-r1-0528-maas,0.55,2.19,1391,,VERTEX_CREDENTIALS,0,False,none
+Fireworks,fireworks_ai/accounts/fireworks/models/qwen3-coder-480b-a35b-instruct,3.0,8.0,1363,,FIREWORKS_API_KEY,0,False,none
+Google,vertex_ai/claude-opus-4-1,3.0,15.0,1426,,VERTEX_CREDENTIALS,64000,True,budget
+OpenAI,azure/o4-mini,1.1,4.4,1335,,OPENAI_API_KEY,0,True,effort
+OpenAI,openai/mlx-community/Qwen3-30B-A3B-4bit,0,0,1040,http://localhost:8080,,0,False,none
+OpenAI,lm_studio/openai-gpt-oss-120b-mlx-6,0.0001,0,1082,http://localhost:1234/v1,,0,True,none
+Fireworks,fireworks_ai/accounts/fireworks/models/glm-4p5,3.0,8.0,1364,,FIREWORKS_API_KEY,0,False,none
+OpenAI,groq/moonshotai/kimi-k2-instruct,1.0,3.0,1330,,GROQ_API_KEY,0,True,none
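Each row pairs a LiteLLM-style model identifier with pricing (the input and output columns appear to be USD per million tokens), a coding-arena Elo used for ranking, and routing metadata such as base_url and the API-key environment variable. A hedged sketch of loading the table; pdd's real loader in llm_invoke.py may differ:

import csv

def load_model_table(path: str) -> list:
    with open(path, newline="") as f:
        rows = list(csv.DictReader(f))
    for row in rows:
        row["input"] = float(row["input"] or 0)    # assumed: price per 1M input tokens
        row["output"] = float(row["output"] or 0)  # assumed: price per 1M output tokens
        row["coding_arena_elo"] = int(row["coding_arena_elo"] or 0)
        row["structured_output"] = row["structured_output"] == "True"
    # Rank strongest coding models first.
    return sorted(rows, key=lambda r: r["coding_arena_elo"], reverse=True)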
pdd/detect_change_main.py
CHANGED
@@ -6,7 +6,7 @@ from rich import print as rprint
 
 from .construct_paths import construct_paths
 from .detect_change import detect_change
-from . import DEFAULT_TIME
+from . import DEFAULT_TIME, DEFAULT_STRENGTH
 
 def detect_change_main(
     ctx: click.Context,
@@ -54,7 +54,7 @@ def detect_change_main(
     prompt_contents = [input_strings[f"prompt_file_{i}"] for i in range(len(prompt_files))]
 
     # Get model parameters from context
-    strength = ctx.obj.get('strength',
+    strength = ctx.obj.get('strength', DEFAULT_STRENGTH)
     temperature = ctx.obj.get('temperature', 0)
     time_budget = ctx.obj.get('time', DEFAULT_TIME)
 
pdd/generate_test.py
CHANGED
@@ -98,6 +98,7 @@ def generate_test(
         strength=strength,
         temperature=temperature,
         time=time,
+        language=language,
         verbose=verbose
     )
     total_cost += check_cost
@@ -112,6 +113,7 @@ def generate_test(
         strength=strength,
         temperature=temperature,
         time=time,
+        language=language,
         verbose=verbose
     )
     total_cost += continue_cost
@@ -181,4 +183,4 @@ def _validate_inputs(
     if not isinstance(temperature, float):
         raise ValueError("Temperature must be a float")
     if not language or not isinstance(language, str):
-        raise ValueError("Language must be a non-empty string")
+        raise ValueError("Language must be a non-empty string")