pdd-cli 0.0.45__py3-none-any.whl → 0.0.118__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pdd/__init__.py +40 -8
- pdd/agentic_bug.py +323 -0
- pdd/agentic_bug_orchestrator.py +497 -0
- pdd/agentic_change.py +231 -0
- pdd/agentic_change_orchestrator.py +526 -0
- pdd/agentic_common.py +598 -0
- pdd/agentic_crash.py +534 -0
- pdd/agentic_e2e_fix.py +319 -0
- pdd/agentic_e2e_fix_orchestrator.py +426 -0
- pdd/agentic_fix.py +1294 -0
- pdd/agentic_langtest.py +162 -0
- pdd/agentic_update.py +387 -0
- pdd/agentic_verify.py +183 -0
- pdd/architecture_sync.py +565 -0
- pdd/auth_service.py +210 -0
- pdd/auto_deps_main.py +71 -51
- pdd/auto_include.py +245 -5
- pdd/auto_update.py +125 -47
- pdd/bug_main.py +196 -23
- pdd/bug_to_unit_test.py +2 -0
- pdd/change_main.py +11 -4
- pdd/cli.py +22 -1181
- pdd/cmd_test_main.py +350 -150
- pdd/code_generator.py +60 -18
- pdd/code_generator_main.py +790 -57
- pdd/commands/__init__.py +48 -0
- pdd/commands/analysis.py +306 -0
- pdd/commands/auth.py +309 -0
- pdd/commands/connect.py +290 -0
- pdd/commands/fix.py +163 -0
- pdd/commands/generate.py +257 -0
- pdd/commands/maintenance.py +175 -0
- pdd/commands/misc.py +87 -0
- pdd/commands/modify.py +256 -0
- pdd/commands/report.py +144 -0
- pdd/commands/sessions.py +284 -0
- pdd/commands/templates.py +215 -0
- pdd/commands/utility.py +110 -0
- pdd/config_resolution.py +58 -0
- pdd/conflicts_main.py +8 -3
- pdd/construct_paths.py +589 -111
- pdd/context_generator.py +10 -2
- pdd/context_generator_main.py +175 -76
- pdd/continue_generation.py +53 -10
- pdd/core/__init__.py +33 -0
- pdd/core/cli.py +527 -0
- pdd/core/cloud.py +237 -0
- pdd/core/dump.py +554 -0
- pdd/core/errors.py +67 -0
- pdd/core/remote_session.py +61 -0
- pdd/core/utils.py +90 -0
- pdd/crash_main.py +262 -33
- pdd/data/language_format.csv +71 -63
- pdd/data/llm_model.csv +20 -18
- pdd/detect_change_main.py +5 -4
- pdd/docs/prompting_guide.md +864 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/benchmark_analysis.py +495 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/creation_compare.py +528 -0
- pdd/fix_code_loop.py +523 -95
- pdd/fix_code_module_errors.py +6 -2
- pdd/fix_error_loop.py +491 -92
- pdd/fix_errors_from_unit_tests.py +4 -3
- pdd/fix_main.py +278 -21
- pdd/fix_verification_errors.py +12 -100
- pdd/fix_verification_errors_loop.py +529 -286
- pdd/fix_verification_main.py +294 -89
- pdd/frontend/dist/assets/index-B5DZHykP.css +1 -0
- pdd/frontend/dist/assets/index-DQ3wkeQ2.js +449 -0
- pdd/frontend/dist/index.html +376 -0
- pdd/frontend/dist/logo.svg +33 -0
- pdd/generate_output_paths.py +139 -15
- pdd/generate_test.py +218 -146
- pdd/get_comment.py +19 -44
- pdd/get_extension.py +8 -9
- pdd/get_jwt_token.py +318 -22
- pdd/get_language.py +8 -7
- pdd/get_run_command.py +75 -0
- pdd/get_test_command.py +68 -0
- pdd/git_update.py +70 -19
- pdd/incremental_code_generator.py +2 -2
- pdd/insert_includes.py +13 -4
- pdd/llm_invoke.py +1711 -181
- pdd/load_prompt_template.py +19 -12
- pdd/path_resolution.py +140 -0
- pdd/pdd_completion.fish +25 -2
- pdd/pdd_completion.sh +30 -4
- pdd/pdd_completion.zsh +79 -4
- pdd/postprocess.py +14 -4
- pdd/preprocess.py +293 -24
- pdd/preprocess_main.py +41 -6
- pdd/prompts/agentic_bug_step10_pr_LLM.prompt +182 -0
- pdd/prompts/agentic_bug_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_bug_step2_docs_LLM.prompt +129 -0
- pdd/prompts/agentic_bug_step3_triage_LLM.prompt +95 -0
- pdd/prompts/agentic_bug_step4_reproduce_LLM.prompt +97 -0
- pdd/prompts/agentic_bug_step5_root_cause_LLM.prompt +123 -0
- pdd/prompts/agentic_bug_step6_test_plan_LLM.prompt +107 -0
- pdd/prompts/agentic_bug_step7_generate_LLM.prompt +172 -0
- pdd/prompts/agentic_bug_step8_verify_LLM.prompt +119 -0
- pdd/prompts/agentic_bug_step9_e2e_test_LLM.prompt +289 -0
- pdd/prompts/agentic_change_step10_identify_issues_LLM.prompt +1006 -0
- pdd/prompts/agentic_change_step11_fix_issues_LLM.prompt +984 -0
- pdd/prompts/agentic_change_step12_create_pr_LLM.prompt +131 -0
- pdd/prompts/agentic_change_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_change_step2_docs_LLM.prompt +101 -0
- pdd/prompts/agentic_change_step3_research_LLM.prompt +126 -0
- pdd/prompts/agentic_change_step4_clarify_LLM.prompt +164 -0
- pdd/prompts/agentic_change_step5_docs_change_LLM.prompt +981 -0
- pdd/prompts/agentic_change_step6_devunits_LLM.prompt +1005 -0
- pdd/prompts/agentic_change_step7_architecture_LLM.prompt +1044 -0
- pdd/prompts/agentic_change_step8_analyze_LLM.prompt +1027 -0
- pdd/prompts/agentic_change_step9_implement_LLM.prompt +1077 -0
- pdd/prompts/agentic_crash_explore_LLM.prompt +49 -0
- pdd/prompts/agentic_e2e_fix_step1_unit_tests_LLM.prompt +90 -0
- pdd/prompts/agentic_e2e_fix_step2_e2e_tests_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step3_root_cause_LLM.prompt +89 -0
- pdd/prompts/agentic_e2e_fix_step4_fix_e2e_tests_LLM.prompt +96 -0
- pdd/prompts/agentic_e2e_fix_step5_identify_devunits_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step6_create_unit_tests_LLM.prompt +106 -0
- pdd/prompts/agentic_e2e_fix_step7_verify_tests_LLM.prompt +116 -0
- pdd/prompts/agentic_e2e_fix_step8_run_pdd_fix_LLM.prompt +120 -0
- pdd/prompts/agentic_e2e_fix_step9_verify_all_LLM.prompt +146 -0
- pdd/prompts/agentic_fix_explore_LLM.prompt +45 -0
- pdd/prompts/agentic_fix_harvest_only_LLM.prompt +48 -0
- pdd/prompts/agentic_fix_primary_LLM.prompt +85 -0
- pdd/prompts/agentic_update_LLM.prompt +925 -0
- pdd/prompts/agentic_verify_explore_LLM.prompt +45 -0
- pdd/prompts/auto_include_LLM.prompt +122 -905
- pdd/prompts/change_LLM.prompt +3093 -1
- pdd/prompts/detect_change_LLM.prompt +686 -27
- pdd/prompts/example_generator_LLM.prompt +22 -1
- pdd/prompts/extract_code_LLM.prompt +5 -1
- pdd/prompts/extract_program_code_fix_LLM.prompt +7 -1
- pdd/prompts/extract_prompt_update_LLM.prompt +7 -8
- pdd/prompts/extract_promptline_LLM.prompt +17 -11
- pdd/prompts/find_verification_errors_LLM.prompt +6 -0
- pdd/prompts/fix_code_module_errors_LLM.prompt +12 -2
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +9 -0
- pdd/prompts/fix_verification_errors_LLM.prompt +22 -0
- pdd/prompts/generate_test_LLM.prompt +41 -7
- pdd/prompts/generate_test_from_example_LLM.prompt +115 -0
- pdd/prompts/increase_tests_LLM.prompt +1 -5
- pdd/prompts/insert_includes_LLM.prompt +316 -186
- pdd/prompts/prompt_code_diff_LLM.prompt +119 -0
- pdd/prompts/prompt_diff_LLM.prompt +82 -0
- pdd/prompts/trace_LLM.prompt +25 -22
- pdd/prompts/unfinished_prompt_LLM.prompt +85 -1
- pdd/prompts/update_prompt_LLM.prompt +22 -1
- pdd/pytest_output.py +127 -12
- pdd/remote_session.py +876 -0
- pdd/render_mermaid.py +236 -0
- pdd/server/__init__.py +52 -0
- pdd/server/app.py +335 -0
- pdd/server/click_executor.py +587 -0
- pdd/server/executor.py +338 -0
- pdd/server/jobs.py +661 -0
- pdd/server/models.py +241 -0
- pdd/server/routes/__init__.py +31 -0
- pdd/server/routes/architecture.py +451 -0
- pdd/server/routes/auth.py +364 -0
- pdd/server/routes/commands.py +929 -0
- pdd/server/routes/config.py +42 -0
- pdd/server/routes/files.py +603 -0
- pdd/server/routes/prompts.py +1322 -0
- pdd/server/routes/websocket.py +473 -0
- pdd/server/security.py +243 -0
- pdd/server/terminal_spawner.py +209 -0
- pdd/server/token_counter.py +222 -0
- pdd/setup_tool.py +648 -0
- pdd/simple_math.py +2 -0
- pdd/split_main.py +3 -2
- pdd/summarize_directory.py +237 -195
- pdd/sync_animation.py +8 -4
- pdd/sync_determine_operation.py +839 -112
- pdd/sync_main.py +351 -57
- pdd/sync_orchestration.py +1400 -756
- pdd/sync_tui.py +848 -0
- pdd/template_expander.py +161 -0
- pdd/template_registry.py +264 -0
- pdd/templates/architecture/architecture_json.prompt +237 -0
- pdd/templates/generic/generate_prompt.prompt +174 -0
- pdd/trace.py +168 -12
- pdd/trace_main.py +4 -3
- pdd/track_cost.py +140 -63
- pdd/unfinished_prompt.py +51 -4
- pdd/update_main.py +567 -67
- pdd/update_model_costs.py +2 -2
- pdd/update_prompt.py +19 -4
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/METADATA +29 -11
- pdd_cli-0.0.118.dist-info/RECORD +227 -0
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/licenses/LICENSE +1 -1
- pdd_cli-0.0.45.dist-info/RECORD +0 -116
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/WHEEL +0 -0
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/top_level.txt +0 -0
@@ -6,8 +6,19 @@
 INPUT:
 <prompt_to_update>% You are an expert Python Software Engineer. Your goal is to write a python function, "postprocess", that will extract code from a string output of an LLM. All output to the console will be pretty printed using the Python rich library.

-%
-
+% You are an expert Python engineer.
+
+% Code Style Requirements
+- File must start with `from __future__ import annotations`.
+- All functions must be fully type-hinted.
+- Use `rich.console.Console` for all printing.
+
+% Package Structure
+- The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name').
+- The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+
+% Error Handling
+- Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.

 % Here are the inputs and outputs of the function:
 Inputs:
@@ -52,104 +63,147 @@ if __name__ == "__main__":

 For running prompts with llm_invoke:
 <llm_invoke_example>
-
+import os
+import sys
+from typing import List, Optional
+from pydantic import BaseModel, Field
+from rich.console import Console
+
+# Ensure the package is in the python path for this example
+# In a real installation, this would just be 'from pdd.llm_invoke import llm_invoke'
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
+
 from pdd.llm_invoke import llm_invoke
-from collections import defaultdict

-
-class Joke(BaseModel):
-    setup: str = Field(description="The setup of the joke")
-    punchline: str = Field(description="The punchline of the joke")
+console = Console()

-
-
-
-
-#
-
-
-
+# --- Example 1: Simple Text Generation ---
+def example_simple_text():
+    console.print("[bold blue]--- Example 1: Simple Text Generation ---[/bold blue]")
+
+    # Define a prompt template
+    prompt_template = "Explain the concept of {concept} to a {audience} in one sentence."
+
+    # Define input variables
+    input_data = {
+        "concept": "quantum entanglement",
+        "audience": "5-year-old"
+    }
+
+    # Invoke the LLM
+    # strength=0.5 targets the 'base' model (usually a balance of cost/performance)
+    result = llm_invoke(
+        prompt=prompt_template,
+        input_json=input_data,
+        strength=0.5,
+        temperature=0.7,
+        verbose=True # Set to True to see detailed logs about model selection and cost
+    )
+
+    console.print(f"[green]Result:[/green] {result['result']}")
+    console.print(f"[dim]Model used: {result['model_name']} | Cost: ${result['cost']:.6f}[/dim]\n")
+
+
+# --- Example 2: Structured Output with Pydantic ---
+class MovieReview(BaseModel):
+    title: str = Field(..., description="The title of the movie")
+    rating: int = Field(..., description="Rating out of 10")
+    summary: str = Field(..., description="A brief summary of the plot")
+    tags: List[str] = Field(..., description="List of genre tags")
+
+def example_structured_output():
+    console.print("[bold blue]--- Example 2: Structured Output (Pydantic) ---[/bold blue]")
+
+    prompt = "Generate a review for a fictional sci-fi movie about {topic}."
+    input_data = {"topic": "time traveling cats"}
+
+    # Invoke with output_pydantic to enforce a schema
+    # strength=0.8 targets a higher-performance model (better at following schemas)
+    result = llm_invoke(
+        prompt=prompt,
+        input_json=input_data,
+        strength=0.8,
+        output_pydantic=MovieReview,
+        temperature=0.5
+    )
+
+    # The 'result' key will contain an instance of the Pydantic model
+    review: MovieReview = result['result']

-
-
-
-
+    console.print(f"[green]Title:[/green] {review.title}")
+    console.print(f"[green]Rating:[/green] {review.rating}/10")
+    console.print(f"[green]Tags:[/green] {', '.join(review.tags)}")
+    console.print(f"[dim]Model used: {result['model_name']}[/dim]\n")
+
+
+# --- Example 3: Batch Processing ---
+def example_batch_processing():
+    console.print("[bold blue]--- Example 3: Batch Processing ---[/bold blue]")
+
+    prompt = "What is the capital of {country}?"

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    print(f"Result: {response['result']}")
-    print(f"Cost: ${response['cost']:.6f}")
-    print(f"Model Used: {response['model_name']}")
-
-    # Example 2: Structured Output with Pydantic Model
-    prompt_structured = (
-        "Generate a joke about {topic}. \n"
-        "Return it in this exact JSON format:\n"
-        "{{ \n"
-        ' "setup": "your setup here",\n'
-        ' "punchline": "your punchline here"\n'
-        "}}\n"
-        "Return ONLY the JSON with no additional text or explanation."
-    )
-    input_json_structured = {"topic": "data scientists"}
-    output_pydantic = Joke
-
-    print("\n--- Structured Output ---")
-    try:
-        response_structured = llm_invoke(
-            prompt=prompt_structured,
-            input_json=input_json_structured,
-            strength=strength,
-            temperature=temperature,
-            verbose=True,
-            output_pydantic=output_pydantic
-        )
-        print(f"Result: {response_structured['result']}")
-        print(f"Cost: ${response_structured['cost']:.6f}")
-        print(f"Model Used: {response_structured['model_name']}")
-
-        # Access structured data
-        joke: Joke = response_structured['result']
-        print(f"\nJoke Setup: {joke.setup}")
-        print(f"Joke Punchline: {joke.punchline}")
-    except Exception as e:
-        print(f"Error encountered during structured output: {e}")
-
-    strength += 0.005
-    # round to 3 decimal places
-    strength = round(strength, 3)
+    # List of inputs triggers batch mode
+    batch_inputs = [
+        {"country": "France"},
+        {"country": "Japan"},
+        {"country": "Brazil"}
+    ]
+
+    # use_batch_mode=True uses the provider's batch API if available/supported by LiteLLM
+    # strength=0.2 targets a cheaper/faster model
+    results = llm_invoke(
+        prompt=prompt,
+        input_json=batch_inputs,
+        use_batch_mode=True,
+        strength=0.2,
+        temperature=0.1
+    )
+
+    # In batch mode, 'result' is a list of strings (or objects)
+    for i, res in enumerate(results['result']):
+        console.print(f"[green]Input:[/green] {batch_inputs[i]['country']} -> [green]Output:[/green] {res}")

-
-
+    console.print(f"[dim]Model used: {results['model_name']} | Total Cost: ${results['cost']:.6f}[/dim]\n")
+
+
+# --- Example 4: Reasoning / Thinking Time ---
+def example_reasoning():
+    console.print("[bold blue]--- Example 4: Reasoning / Thinking Time ---[/bold blue]")
+
+    # Some models (like Claude 3.7 or OpenAI o1/o3) support explicit thinking steps.
+    # Setting time > 0 enables this behavior based on the model's configuration in llm_model.csv.

-
-
-
-
-
-
+    prompt = "Solve this riddle: {riddle}"
+    input_data = {"riddle": "I speak without a mouth and hear without ears. I have no body, but I come alive with wind. What am I?"}
+
+    result = llm_invoke(
+        prompt=prompt,
+        input_json=input_data,
+        strength=1.0, # Target highest capability model
+        time=0.5, # Request moderate thinking time/budget
+        verbose=True
+    )
+
+    console.print(f"[green]Answer:[/green] {result['result']}")
+
+    # If the model supports it, thinking output is captured separately
+    if result.get('thinking_output'):
+        console.print(f"[yellow]Thinking Process:[/yellow] {result['thinking_output']}")
+    else:
+        console.print("[dim]No separate thinking output returned for this model.[/dim]")
+

 if __name__ == "__main__":
-
+    # Ensure you have a valid .env file or environment variables set for API keys
+    # (e.g., OPENAI_API_KEY, ANTHROPIC_API_KEY)
+
+    try:
+        example_simple_text()
+        example_structured_output()
+        example_batch_processing()
+        example_reasoning()
+    except Exception as e:
+        console.print(f"[bold red]Error running examples:[/bold red] {e}")
 </llm_invoke_example>
 </internal_modules>
 </dependencies_to_insert>
@@ -157,8 +211,19 @@ if __name__ == "__main__":
 OUTPUT:
 <updated_prompt>% You are an expert Python Software Engineer. Your goal is to write a python function, "postprocess", that will extract code from a string output of an LLM. All output to the console will be pretty printed using the Python rich library.

-%
-
+% You are an expert Python engineer.
+
+% Code Style Requirements
+- File must start with `from __future__ import annotations`.
+- All functions must be fully type-hinted.
+- Use `rich.console.Console` for all printing.
+
+% Package Structure
+- The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name').
+- The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+
+% Error Handling
+- Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.

 % Here are the inputs and outputs of the function:
 Inputs:
@@ -192,104 +257,147 @@ if __name__ == "__main__":

 For running prompts with llm_invoke:
 <llm_invoke_example>
-
+import os
+import sys
+from typing import List, Optional
+from pydantic import BaseModel, Field
+from rich.console import Console
+
+# Ensure the package is in the python path for this example
+# In a real installation, this would just be 'from pdd.llm_invoke import llm_invoke'
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
+
 from pdd.llm_invoke import llm_invoke
-from collections import defaultdict

-
-class Joke(BaseModel):
-    setup: str = Field(description="The setup of the joke")
-    punchline: str = Field(description="The punchline of the joke")
+console = Console()

-
-
-
-
-#
-
-
-
+# --- Example 1: Simple Text Generation ---
+def example_simple_text():
+    console.print("[bold blue]--- Example 1: Simple Text Generation ---[/bold blue]")
+
+    # Define a prompt template
+    prompt_template = "Explain the concept of {concept} to a {audience} in one sentence."
+
+    # Define input variables
+    input_data = {
+        "concept": "quantum entanglement",
+        "audience": "5-year-old"
+    }
+
+    # Invoke the LLM
+    # strength=0.5 targets the 'base' model (usually a balance of cost/performance)
+    result = llm_invoke(
+        prompt=prompt_template,
+        input_json=input_data,
+        strength=0.5,
+        temperature=0.7,
+        verbose=True # Set to True to see detailed logs about model selection and cost
+    )
+
+    console.print(f"[green]Result:[/green] {result['result']}")
+    console.print(f"[dim]Model used: {result['model_name']} | Cost: ${result['cost']:.6f}[/dim]\n")
+
+
+# --- Example 2: Structured Output with Pydantic ---
+class MovieReview(BaseModel):
+    title: str = Field(..., description="The title of the movie")
+    rating: int = Field(..., description="Rating out of 10")
+    summary: str = Field(..., description="A brief summary of the plot")
+    tags: List[str] = Field(..., description="List of genre tags")
+
+def example_structured_output():
+    console.print("[bold blue]--- Example 2: Structured Output (Pydantic) ---[/bold blue]")
+
+    prompt = "Generate a review for a fictional sci-fi movie about {topic}."
+    input_data = {"topic": "time traveling cats"}
+
+    # Invoke with output_pydantic to enforce a schema
+    # strength=0.8 targets a higher-performance model (better at following schemas)
+    result = llm_invoke(
+        prompt=prompt,
+        input_json=input_data,
+        strength=0.8,
+        output_pydantic=MovieReview,
+        temperature=0.5
+    )
+
+    # The 'result' key will contain an instance of the Pydantic model
+    review: MovieReview = result['result']
+
+    console.print(f"[green]Title:[/green] {review.title}")
+    console.print(f"[green]Rating:[/green] {review.rating}/10")
+    console.print(f"[green]Tags:[/green] {', '.join(review.tags)}")
+    console.print(f"[dim]Model used: {result['model_name']}[/dim]\n")
+
+
+# --- Example 3: Batch Processing ---
+def example_batch_processing():
+    console.print("[bold blue]--- Example 3: Batch Processing ---[/bold blue]")
+
+    prompt = "What is the capital of {country}?"

-
-
-
-
+    # List of inputs triggers batch mode
+    batch_inputs = [
+        {"country": "France"},
+        {"country": "Japan"},
+        {"country": "Brazil"}
+    ]
+
+    # use_batch_mode=True uses the provider's batch API if available/supported by LiteLLM
+    # strength=0.2 targets a cheaper/faster model
+    results = llm_invoke(
+        prompt=prompt,
+        input_json=batch_inputs,
+        use_batch_mode=True,
+        strength=0.2,
+        temperature=0.1
+    )
+
+    # In batch mode, 'result' is a list of strings (or objects)
+    for i, res in enumerate(results['result']):
+        console.print(f"[green]Input:[/green] {batch_inputs[i]['country']} -> [green]Output:[/green] {res}")

-
-
-
-
-
-
-
-
-
-        strength=strength,
-        temperature=temperature,
-        verbose=verbose
-    )
-
-    # Track model changes for strength ranges
-    if current_model != response['model_name']:
-        if current_model is not None:
-            model_ranges[current_model].append((range_start, strength - 0.005))
-        current_model = response['model_name']
-        range_start = strength
-
-    print(f"Result: {response['result']}")
-    print(f"Cost: ${response['cost']:.6f}")
-    print(f"Model Used: {response['model_name']}")
-
-    # Example 2: Structured Output with Pydantic Model
-    prompt_structured = (
-        "Generate a joke about {topic}. \n"
-        "Return it in this exact JSON format:\n"
-        "{{ \n"
-        ' "setup": "your setup here",\n'
-        ' "punchline": "your punchline here"\n'
-        "}}\n"
-        "Return ONLY the JSON with no additional text or explanation."
-    )
-    input_json_structured = {"topic": "data scientists"}
-    output_pydantic = Joke
-
-    print("\n--- Structured Output ---")
-    try:
-        response_structured = llm_invoke(
-            prompt=prompt_structured,
-            input_json=input_json_structured,
-            strength=strength,
-            temperature=temperature,
-            verbose=True,
-            output_pydantic=output_pydantic
-        )
-        print(f"Result: {response_structured['result']}")
-        print(f"Cost: ${response_structured['cost']:.6f}")
-        print(f"Model Used: {response_structured['model_name']}")
-
-        # Access structured data
-        joke: Joke = response_structured['result']
-        print(f"\nJoke Setup: {joke.setup}")
-        print(f"Joke Punchline: {joke.punchline}")
-    except Exception as e:
-        print(f"Error encountered during structured output: {e}")
-
-    strength += 0.005
-    # round to 3 decimal places
-    strength = round(strength, 3)
+    console.print(f"[dim]Model used: {results['model_name']} | Total Cost: ${results['cost']:.6f}[/dim]\n")
+
+
+# --- Example 4: Reasoning / Thinking Time ---
+def example_reasoning():
+    console.print("[bold blue]--- Example 4: Reasoning / Thinking Time ---[/bold blue]")
+
+    # Some models (like Claude 3.7 or OpenAI o1/o3) support explicit thinking steps.
+    # Setting time > 0 enables this behavior based on the model's configuration in llm_model.csv.

-
-
+    prompt = "Solve this riddle: {riddle}"
+    input_data = {"riddle": "I speak without a mouth and hear without ears. I have no body, but I come alive with wind. What am I?"}
+
+    result = llm_invoke(
+        prompt=prompt,
+        input_json=input_data,
+        strength=1.0, # Target highest capability model
+        time=0.5, # Request moderate thinking time/budget
+        verbose=True
+    )
+
+    console.print(f"[green]Answer:[/green] {result['result']}")

-#
-
-
-
-
-
+    # If the model supports it, thinking output is captured separately
+    if result.get('thinking_output'):
+        console.print(f"[yellow]Thinking Process:[/yellow] {result['thinking_output']}")
+    else:
+        console.print("[dim]No separate thinking output returned for this model.[/dim]")
+

 if __name__ == "__main__":
-
+    # Ensure you have a valid .env file or environment variables set for API keys
+    # (e.g., OPENAI_API_KEY, ANTHROPIC_API_KEY)
+
+    try:
+        example_simple_text()
+        example_structured_output()
+        example_batch_processing()
+        example_reasoning()
+    except Exception as e:
+        console.print(f"[bold red]Error running examples:[/bold red] {e}")
 </llm_invoke_example>
 </internal_modules>

@@ -310,8 +418,19 @@ if __name__ == "__main__":
 INPUT:
 <prompt_to_update>% You are an expert Python engineer. Your goal is to write a Python function, "conflicts_in_prompts", that takes two prompts as input and finds conflicts between them and suggests how to resolve those conflicts.

-%
-
+% You are an expert Python engineer.
+
+% Code Style Requirements
+- File must start with `from __future__ import annotations`.
+- All functions must be fully type-hinted.
+- Use `rich.console.Console` for all printing.
+
+% Package Structure
+- The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name').
+- The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+
+% Error Handling
+- Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.

 % Here are the inputs and outputs of the function:
 Inputs:
@@ -693,8 +812,19 @@ if __name__ == "__main__":
 OUTPUT:
 <updated_prompt>% You are an expert Python engineer. Your goal is to write a Python function, "conflicts_in_prompts", that takes two prompts as input and finds conflicts between them and suggests how to resolve those conflicts.

-%
-
+% You are an expert Python engineer.
+
+% Code Style Requirements
+- File must start with `from __future__ import annotations`.
+- All functions must be fully type-hinted.
+- Use `rich.console.Console` for all printing.
+
+% Package Structure
+- The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name').
+- The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+
+% Error Handling
+- Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.

 % Here are the inputs and outputs of the function:
 Inputs: