pdd-cli 0.0.45-py3-none-any.whl → 0.0.118-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, exactly as they appear in their public registries. It is provided for informational purposes only.
Files changed (195)
  1. pdd/__init__.py +40 -8
  2. pdd/agentic_bug.py +323 -0
  3. pdd/agentic_bug_orchestrator.py +497 -0
  4. pdd/agentic_change.py +231 -0
  5. pdd/agentic_change_orchestrator.py +526 -0
  6. pdd/agentic_common.py +598 -0
  7. pdd/agentic_crash.py +534 -0
  8. pdd/agentic_e2e_fix.py +319 -0
  9. pdd/agentic_e2e_fix_orchestrator.py +426 -0
  10. pdd/agentic_fix.py +1294 -0
  11. pdd/agentic_langtest.py +162 -0
  12. pdd/agentic_update.py +387 -0
  13. pdd/agentic_verify.py +183 -0
  14. pdd/architecture_sync.py +565 -0
  15. pdd/auth_service.py +210 -0
  16. pdd/auto_deps_main.py +71 -51
  17. pdd/auto_include.py +245 -5
  18. pdd/auto_update.py +125 -47
  19. pdd/bug_main.py +196 -23
  20. pdd/bug_to_unit_test.py +2 -0
  21. pdd/change_main.py +11 -4
  22. pdd/cli.py +22 -1181
  23. pdd/cmd_test_main.py +350 -150
  24. pdd/code_generator.py +60 -18
  25. pdd/code_generator_main.py +790 -57
  26. pdd/commands/__init__.py +48 -0
  27. pdd/commands/analysis.py +306 -0
  28. pdd/commands/auth.py +309 -0
  29. pdd/commands/connect.py +290 -0
  30. pdd/commands/fix.py +163 -0
  31. pdd/commands/generate.py +257 -0
  32. pdd/commands/maintenance.py +175 -0
  33. pdd/commands/misc.py +87 -0
  34. pdd/commands/modify.py +256 -0
  35. pdd/commands/report.py +144 -0
  36. pdd/commands/sessions.py +284 -0
  37. pdd/commands/templates.py +215 -0
  38. pdd/commands/utility.py +110 -0
  39. pdd/config_resolution.py +58 -0
  40. pdd/conflicts_main.py +8 -3
  41. pdd/construct_paths.py +589 -111
  42. pdd/context_generator.py +10 -2
  43. pdd/context_generator_main.py +175 -76
  44. pdd/continue_generation.py +53 -10
  45. pdd/core/__init__.py +33 -0
  46. pdd/core/cli.py +527 -0
  47. pdd/core/cloud.py +237 -0
  48. pdd/core/dump.py +554 -0
  49. pdd/core/errors.py +67 -0
  50. pdd/core/remote_session.py +61 -0
  51. pdd/core/utils.py +90 -0
  52. pdd/crash_main.py +262 -33
  53. pdd/data/language_format.csv +71 -63
  54. pdd/data/llm_model.csv +20 -18
  55. pdd/detect_change_main.py +5 -4
  56. pdd/docs/prompting_guide.md +864 -0
  57. pdd/docs/whitepaper_with_benchmarks/data_and_functions/benchmark_analysis.py +495 -0
  58. pdd/docs/whitepaper_with_benchmarks/data_and_functions/creation_compare.py +528 -0
  59. pdd/fix_code_loop.py +523 -95
  60. pdd/fix_code_module_errors.py +6 -2
  61. pdd/fix_error_loop.py +491 -92
  62. pdd/fix_errors_from_unit_tests.py +4 -3
  63. pdd/fix_main.py +278 -21
  64. pdd/fix_verification_errors.py +12 -100
  65. pdd/fix_verification_errors_loop.py +529 -286
  66. pdd/fix_verification_main.py +294 -89
  67. pdd/frontend/dist/assets/index-B5DZHykP.css +1 -0
  68. pdd/frontend/dist/assets/index-DQ3wkeQ2.js +449 -0
  69. pdd/frontend/dist/index.html +376 -0
  70. pdd/frontend/dist/logo.svg +33 -0
  71. pdd/generate_output_paths.py +139 -15
  72. pdd/generate_test.py +218 -146
  73. pdd/get_comment.py +19 -44
  74. pdd/get_extension.py +8 -9
  75. pdd/get_jwt_token.py +318 -22
  76. pdd/get_language.py +8 -7
  77. pdd/get_run_command.py +75 -0
  78. pdd/get_test_command.py +68 -0
  79. pdd/git_update.py +70 -19
  80. pdd/incremental_code_generator.py +2 -2
  81. pdd/insert_includes.py +13 -4
  82. pdd/llm_invoke.py +1711 -181
  83. pdd/load_prompt_template.py +19 -12
  84. pdd/path_resolution.py +140 -0
  85. pdd/pdd_completion.fish +25 -2
  86. pdd/pdd_completion.sh +30 -4
  87. pdd/pdd_completion.zsh +79 -4
  88. pdd/postprocess.py +14 -4
  89. pdd/preprocess.py +293 -24
  90. pdd/preprocess_main.py +41 -6
  91. pdd/prompts/agentic_bug_step10_pr_LLM.prompt +182 -0
  92. pdd/prompts/agentic_bug_step1_duplicate_LLM.prompt +73 -0
  93. pdd/prompts/agentic_bug_step2_docs_LLM.prompt +129 -0
  94. pdd/prompts/agentic_bug_step3_triage_LLM.prompt +95 -0
  95. pdd/prompts/agentic_bug_step4_reproduce_LLM.prompt +97 -0
  96. pdd/prompts/agentic_bug_step5_root_cause_LLM.prompt +123 -0
  97. pdd/prompts/agentic_bug_step6_test_plan_LLM.prompt +107 -0
  98. pdd/prompts/agentic_bug_step7_generate_LLM.prompt +172 -0
  99. pdd/prompts/agentic_bug_step8_verify_LLM.prompt +119 -0
  100. pdd/prompts/agentic_bug_step9_e2e_test_LLM.prompt +289 -0
  101. pdd/prompts/agentic_change_step10_identify_issues_LLM.prompt +1006 -0
  102. pdd/prompts/agentic_change_step11_fix_issues_LLM.prompt +984 -0
  103. pdd/prompts/agentic_change_step12_create_pr_LLM.prompt +131 -0
  104. pdd/prompts/agentic_change_step1_duplicate_LLM.prompt +73 -0
  105. pdd/prompts/agentic_change_step2_docs_LLM.prompt +101 -0
  106. pdd/prompts/agentic_change_step3_research_LLM.prompt +126 -0
  107. pdd/prompts/agentic_change_step4_clarify_LLM.prompt +164 -0
  108. pdd/prompts/agentic_change_step5_docs_change_LLM.prompt +981 -0
  109. pdd/prompts/agentic_change_step6_devunits_LLM.prompt +1005 -0
  110. pdd/prompts/agentic_change_step7_architecture_LLM.prompt +1044 -0
  111. pdd/prompts/agentic_change_step8_analyze_LLM.prompt +1027 -0
  112. pdd/prompts/agentic_change_step9_implement_LLM.prompt +1077 -0
  113. pdd/prompts/agentic_crash_explore_LLM.prompt +49 -0
  114. pdd/prompts/agentic_e2e_fix_step1_unit_tests_LLM.prompt +90 -0
  115. pdd/prompts/agentic_e2e_fix_step2_e2e_tests_LLM.prompt +91 -0
  116. pdd/prompts/agentic_e2e_fix_step3_root_cause_LLM.prompt +89 -0
  117. pdd/prompts/agentic_e2e_fix_step4_fix_e2e_tests_LLM.prompt +96 -0
  118. pdd/prompts/agentic_e2e_fix_step5_identify_devunits_LLM.prompt +91 -0
  119. pdd/prompts/agentic_e2e_fix_step6_create_unit_tests_LLM.prompt +106 -0
  120. pdd/prompts/agentic_e2e_fix_step7_verify_tests_LLM.prompt +116 -0
  121. pdd/prompts/agentic_e2e_fix_step8_run_pdd_fix_LLM.prompt +120 -0
  122. pdd/prompts/agentic_e2e_fix_step9_verify_all_LLM.prompt +146 -0
  123. pdd/prompts/agentic_fix_explore_LLM.prompt +45 -0
  124. pdd/prompts/agentic_fix_harvest_only_LLM.prompt +48 -0
  125. pdd/prompts/agentic_fix_primary_LLM.prompt +85 -0
  126. pdd/prompts/agentic_update_LLM.prompt +925 -0
  127. pdd/prompts/agentic_verify_explore_LLM.prompt +45 -0
  128. pdd/prompts/auto_include_LLM.prompt +122 -905
  129. pdd/prompts/change_LLM.prompt +3093 -1
  130. pdd/prompts/detect_change_LLM.prompt +686 -27
  131. pdd/prompts/example_generator_LLM.prompt +22 -1
  132. pdd/prompts/extract_code_LLM.prompt +5 -1
  133. pdd/prompts/extract_program_code_fix_LLM.prompt +7 -1
  134. pdd/prompts/extract_prompt_update_LLM.prompt +7 -8
  135. pdd/prompts/extract_promptline_LLM.prompt +17 -11
  136. pdd/prompts/find_verification_errors_LLM.prompt +6 -0
  137. pdd/prompts/fix_code_module_errors_LLM.prompt +12 -2
  138. pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +9 -0
  139. pdd/prompts/fix_verification_errors_LLM.prompt +22 -0
  140. pdd/prompts/generate_test_LLM.prompt +41 -7
  141. pdd/prompts/generate_test_from_example_LLM.prompt +115 -0
  142. pdd/prompts/increase_tests_LLM.prompt +1 -5
  143. pdd/prompts/insert_includes_LLM.prompt +316 -186
  144. pdd/prompts/prompt_code_diff_LLM.prompt +119 -0
  145. pdd/prompts/prompt_diff_LLM.prompt +82 -0
  146. pdd/prompts/trace_LLM.prompt +25 -22
  147. pdd/prompts/unfinished_prompt_LLM.prompt +85 -1
  148. pdd/prompts/update_prompt_LLM.prompt +22 -1
  149. pdd/pytest_output.py +127 -12
  150. pdd/remote_session.py +876 -0
  151. pdd/render_mermaid.py +236 -0
  152. pdd/server/__init__.py +52 -0
  153. pdd/server/app.py +335 -0
  154. pdd/server/click_executor.py +587 -0
  155. pdd/server/executor.py +338 -0
  156. pdd/server/jobs.py +661 -0
  157. pdd/server/models.py +241 -0
  158. pdd/server/routes/__init__.py +31 -0
  159. pdd/server/routes/architecture.py +451 -0
  160. pdd/server/routes/auth.py +364 -0
  161. pdd/server/routes/commands.py +929 -0
  162. pdd/server/routes/config.py +42 -0
  163. pdd/server/routes/files.py +603 -0
  164. pdd/server/routes/prompts.py +1322 -0
  165. pdd/server/routes/websocket.py +473 -0
  166. pdd/server/security.py +243 -0
  167. pdd/server/terminal_spawner.py +209 -0
  168. pdd/server/token_counter.py +222 -0
  169. pdd/setup_tool.py +648 -0
  170. pdd/simple_math.py +2 -0
  171. pdd/split_main.py +3 -2
  172. pdd/summarize_directory.py +237 -195
  173. pdd/sync_animation.py +8 -4
  174. pdd/sync_determine_operation.py +839 -112
  175. pdd/sync_main.py +351 -57
  176. pdd/sync_orchestration.py +1400 -756
  177. pdd/sync_tui.py +848 -0
  178. pdd/template_expander.py +161 -0
  179. pdd/template_registry.py +264 -0
  180. pdd/templates/architecture/architecture_json.prompt +237 -0
  181. pdd/templates/generic/generate_prompt.prompt +174 -0
  182. pdd/trace.py +168 -12
  183. pdd/trace_main.py +4 -3
  184. pdd/track_cost.py +140 -63
  185. pdd/unfinished_prompt.py +51 -4
  186. pdd/update_main.py +567 -67
  187. pdd/update_model_costs.py +2 -2
  188. pdd/update_prompt.py +19 -4
  189. {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/METADATA +29 -11
  190. pdd_cli-0.0.118.dist-info/RECORD +227 -0
  191. {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/licenses/LICENSE +1 -1
  192. pdd_cli-0.0.45.dist-info/RECORD +0 -116
  193. {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/WHEEL +0 -0
  194. {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/entry_points.txt +0 -0
  195. {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/top_level.txt +0 -0
@@ -6,8 +6,19 @@
  INPUT:
  <prompt_to_update>% You are an expert Python Software Engineer. Your goal is to write a python function, "postprocess", that will extract code from a string output of an LLM. All output to the console will be pretty printed using the Python rich library.

- % The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name'). All output to the console will be pretty printed using the Python Rich library. Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.
- % The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+ % You are an expert Python engineer.
+
+ % Code Style Requirements
+ - File must start with `from __future__ import annotations`.
+ - All functions must be fully type-hinted.
+ - Use `rich.console.Console` for all printing.
+
+ % Package Structure
+ - The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name').
+ - The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+
+ % Error Handling
+ - Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.

  % Here are the inputs and outputs of the function:
  Inputs:
@@ -52,104 +63,147 @@ if __name__ == "__main__":

  For running prompts with llm_invoke:
  <llm_invoke_example>
- from pydantic import BaseModel, Field
+ import os
+ import sys
+ from typing import List, Optional
+ from pydantic import BaseModel, Field
+ from rich.console import Console
+
+ # Ensure the package is in the python path for this example
+ # In a real installation, this would just be 'from pdd.llm_invoke import llm_invoke'
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
+
  from pdd.llm_invoke import llm_invoke
- from collections import defaultdict

- # Define a Pydantic model for structured output
- class Joke(BaseModel):
-     setup: str = Field(description="The setup of the joke")
-     punchline: str = Field(description="The punchline of the joke")
+ console = Console()

- def main():
-     """
-     Main function to demonstrate the usage of `llm_invoke`.
-     """
-     # Dictionary to track strength ranges for each model
-     model_ranges = defaultdict(list)
-     current_model = None
-     range_start = 0.0
+ # --- Example 1: Simple Text Generation ---
+ def example_simple_text():
+     console.print("[bold blue]--- Example 1: Simple Text Generation ---[/bold blue]")
+
+     # Define a prompt template
+     prompt_template = "Explain the concept of {concept} to a {audience} in one sentence."
+
+     # Define input variables
+     input_data = {
+         "concept": "quantum entanglement",
+         "audience": "5-year-old"
+     }
+
+     # Invoke the LLM
+     # strength=0.5 targets the 'base' model (usually a balance of cost/performance)
+     result = llm_invoke(
+         prompt=prompt_template,
+         input_json=input_data,
+         strength=0.5,
+         temperature=0.7,
+         verbose=True # Set to True to see detailed logs about model selection and cost
+     )
+
+     console.print(f"[green]Result:[/green] {result['result']}")
+     console.print(f"[dim]Model used: {result['model_name']} | Cost: ${result['cost']:.6f}[/dim]\n")
+
+
+ # --- Example 2: Structured Output with Pydantic ---
+ class MovieReview(BaseModel):
+     title: str = Field(..., description="The title of the movie")
+     rating: int = Field(..., description="Rating out of 10")
+     summary: str = Field(..., description="A brief summary of the plot")
+     tags: List[str] = Field(..., description="List of genre tags")
+
+ def example_structured_output():
+     console.print("[bold blue]--- Example 2: Structured Output (Pydantic) ---[/bold blue]")
+
+     prompt = "Generate a review for a fictional sci-fi movie about {topic}."
+     input_data = {"topic": "time traveling cats"}
+
+     # Invoke with output_pydantic to enforce a schema
+     # strength=0.8 targets a higher-performance model (better at following schemas)
+     result = llm_invoke(
+         prompt=prompt,
+         input_json=input_data,
+         strength=0.8,
+         output_pydantic=MovieReview,
+         temperature=0.5
+     )
+
+     # The 'result' key will contain an instance of the Pydantic model
+     review: MovieReview = result['result']

-     prompt = "Tell me a joke about {topic}"
-     input_json = {"topic": "programmers"}
-     temperature = 1
-     verbose = False
+     console.print(f"[green]Title:[/green] {review.title}")
+     console.print(f"[green]Rating:[/green] {review.rating}/10")
+     console.print(f"[green]Tags:[/green] {', '.join(review.tags)}")
+     console.print(f"[dim]Model used: {result['model_name']}[/dim]\n")
+
+
+ # --- Example 3: Batch Processing ---
+ def example_batch_processing():
+     console.print("[bold blue]--- Example 3: Batch Processing ---[/bold blue]")
+
+     prompt = "What is the capital of {country}?"

-     strength = 0.5
-     while strength <= 0.5:
-         print(f"\nStrength: {strength}")
-
-         # Example 1: Unstructured Output
-         print("\n--- Unstructured Output ---")
-         response = llm_invoke(
-             prompt=prompt,
-             input_json=input_json,
-             strength=strength,
-             temperature=temperature,
-             verbose=verbose
-         )
-
-         # Track model changes for strength ranges
-         if current_model != response['model_name']:
-             if current_model is not None:
-                 model_ranges[current_model].append((range_start, strength - 0.005))
-             current_model = response['model_name']
-             range_start = strength
-
-         print(f"Result: {response['result']}")
-         print(f"Cost: ${response['cost']:.6f}")
-         print(f"Model Used: {response['model_name']}")
-
-         # Example 2: Structured Output with Pydantic Model
-         prompt_structured = (
-             "Generate a joke about {topic}. \n"
-             "Return it in this exact JSON format:\n"
-             "{{ \n"
-             ' "setup": "your setup here",\n'
-             ' "punchline": "your punchline here"\n'
-             "}}\n"
-             "Return ONLY the JSON with no additional text or explanation."
-         )
-         input_json_structured = {"topic": "data scientists"}
-         output_pydantic = Joke
-
-         print("\n--- Structured Output ---")
-         try:
-             response_structured = llm_invoke(
-                 prompt=prompt_structured,
-                 input_json=input_json_structured,
-                 strength=strength,
-                 temperature=temperature,
-                 verbose=True,
-                 output_pydantic=output_pydantic
-             )
-             print(f"Result: {response_structured['result']}")
-             print(f"Cost: ${response_structured['cost']:.6f}")
-             print(f"Model Used: {response_structured['model_name']}")
-
-             # Access structured data
-             joke: Joke = response_structured['result']
-             print(f"\nJoke Setup: {joke.setup}")
-             print(f"Joke Punchline: {joke.punchline}")
-         except Exception as e:
-             print(f"Error encountered during structured output: {e}")
-
-         strength += 0.005
-         # round to 3 decimal places
-         strength = round(strength, 3)
+     # List of inputs triggers batch mode
+     batch_inputs = [
+         {"country": "France"},
+         {"country": "Japan"},
+         {"country": "Brazil"}
+     ]
+
+     # use_batch_mode=True uses the provider's batch API if available/supported by LiteLLM
+     # strength=0.2 targets a cheaper/faster model
+     results = llm_invoke(
+         prompt=prompt,
+         input_json=batch_inputs,
+         use_batch_mode=True,
+         strength=0.2,
+         temperature=0.1
+     )
+
+     # In batch mode, 'result' is a list of strings (or objects)
+     for i, res in enumerate(results['result']):
+         console.print(f"[green]Input:[/green] {batch_inputs[i]['country']} -> [green]Output:[/green] {res}")

-     # Add the final range for the last model
-     model_ranges[current_model].append((range_start, 1.0))
+     console.print(f"[dim]Model used: {results['model_name']} | Total Cost: ${results['cost']:.6f}[/dim]\n")
+
+
+ # --- Example 4: Reasoning / Thinking Time ---
+ def example_reasoning():
+     console.print("[bold blue]--- Example 4: Reasoning / Thinking Time ---[/bold blue]")
+
+     # Some models (like Claude 3.7 or OpenAI o1/o3) support explicit thinking steps.
+     # Setting time > 0 enables this behavior based on the model's configuration in llm_model.csv.

-     # Print out the strength ranges for each model
-     print("\n=== Model Strength Ranges ===")
-     for model, ranges in model_ranges.items():
-         print(f"\n{model}:")
-         for start, end in ranges:
-             print(f" Strength {start:.3f} to {end:.3f}")
+     prompt = "Solve this riddle: {riddle}"
+     input_data = {"riddle": "I speak without a mouth and hear without ears. I have no body, but I come alive with wind. What am I?"}
+
+     result = llm_invoke(
+         prompt=prompt,
+         input_json=input_data,
+         strength=1.0, # Target highest capability model
+         time=0.5, # Request moderate thinking time/budget
+         verbose=True
+     )
+
+     console.print(f"[green]Answer:[/green] {result['result']}")
+
+     # If the model supports it, thinking output is captured separately
+     if result.get('thinking_output'):
+         console.print(f"[yellow]Thinking Process:[/yellow] {result['thinking_output']}")
+     else:
+         console.print("[dim]No separate thinking output returned for this model.[/dim]")
+

  if __name__ == "__main__":
-     main()
+     # Ensure you have a valid .env file or environment variables set for API keys
+     # (e.g., OPENAI_API_KEY, ANTHROPIC_API_KEY)
+
+     try:
+         example_simple_text()
+         example_structured_output()
+         example_batch_processing()
+         example_reasoning()
+     except Exception as e:
+         console.print(f"[bold red]Error running examples:[/bold red] {e}")
  </llm_invoke_example>
  </internal_modules>
  </dependencies_to_insert>
@@ -157,8 +211,19 @@ if __name__ == "__main__":
  OUTPUT:
  <updated_prompt>% You are an expert Python Software Engineer. Your goal is to write a python function, "postprocess", that will extract code from a string output of an LLM. All output to the console will be pretty printed using the Python rich library.

- % The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name'). All output to the console will be pretty printed using the Python Rich library. Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.
- % The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+ % You are an expert Python engineer.
+
+ % Code Style Requirements
+ - File must start with `from __future__ import annotations`.
+ - All functions must be fully type-hinted.
+ - Use `rich.console.Console` for all printing.
+
+ % Package Structure
+ - The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name').
+ - The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+
+ % Error Handling
+ - Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.

  % Here are the inputs and outputs of the function:
  Inputs:
@@ -192,104 +257,147 @@ if __name__ == "__main__":

  For running prompts with llm_invoke:
  <llm_invoke_example>
- from pydantic import BaseModel, Field
+ import os
+ import sys
+ from typing import List, Optional
+ from pydantic import BaseModel, Field
+ from rich.console import Console
+
+ # Ensure the package is in the python path for this example
+ # In a real installation, this would just be 'from pdd.llm_invoke import llm_invoke'
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
+
  from pdd.llm_invoke import llm_invoke
- from collections import defaultdict

- # Define a Pydantic model for structured output
- class Joke(BaseModel):
-     setup: str = Field(description="The setup of the joke")
-     punchline: str = Field(description="The punchline of the joke")
+ console = Console()

- def main():
-     """
-     Main function to demonstrate the usage of `llm_invoke`.
-     """
-     # Dictionary to track strength ranges for each model
-     model_ranges = defaultdict(list)
-     current_model = None
-     range_start = 0.0
+ # --- Example 1: Simple Text Generation ---
+ def example_simple_text():
+     console.print("[bold blue]--- Example 1: Simple Text Generation ---[/bold blue]")
+
+     # Define a prompt template
+     prompt_template = "Explain the concept of {concept} to a {audience} in one sentence."
+
+     # Define input variables
+     input_data = {
+         "concept": "quantum entanglement",
+         "audience": "5-year-old"
+     }
+
+     # Invoke the LLM
+     # strength=0.5 targets the 'base' model (usually a balance of cost/performance)
+     result = llm_invoke(
+         prompt=prompt_template,
+         input_json=input_data,
+         strength=0.5,
+         temperature=0.7,
+         verbose=True # Set to True to see detailed logs about model selection and cost
+     )
+
+     console.print(f"[green]Result:[/green] {result['result']}")
+     console.print(f"[dim]Model used: {result['model_name']} | Cost: ${result['cost']:.6f}[/dim]\n")
+
+
+ # --- Example 2: Structured Output with Pydantic ---
+ class MovieReview(BaseModel):
+     title: str = Field(..., description="The title of the movie")
+     rating: int = Field(..., description="Rating out of 10")
+     summary: str = Field(..., description="A brief summary of the plot")
+     tags: List[str] = Field(..., description="List of genre tags")
+
+ def example_structured_output():
+     console.print("[bold blue]--- Example 2: Structured Output (Pydantic) ---[/bold blue]")
+
+     prompt = "Generate a review for a fictional sci-fi movie about {topic}."
+     input_data = {"topic": "time traveling cats"}
+
+     # Invoke with output_pydantic to enforce a schema
+     # strength=0.8 targets a higher-performance model (better at following schemas)
+     result = llm_invoke(
+         prompt=prompt,
+         input_json=input_data,
+         strength=0.8,
+         output_pydantic=MovieReview,
+         temperature=0.5
+     )
+
+     # The 'result' key will contain an instance of the Pydantic model
+     review: MovieReview = result['result']
+
+     console.print(f"[green]Title:[/green] {review.title}")
+     console.print(f"[green]Rating:[/green] {review.rating}/10")
+     console.print(f"[green]Tags:[/green] {', '.join(review.tags)}")
+     console.print(f"[dim]Model used: {result['model_name']}[/dim]\n")
+
+
+ # --- Example 3: Batch Processing ---
+ def example_batch_processing():
+     console.print("[bold blue]--- Example 3: Batch Processing ---[/bold blue]")
+
+     prompt = "What is the capital of {country}?"

-     prompt = "Tell me a joke about {topic}"
-     input_json = {"topic": "programmers"}
-     temperature = 1
-     verbose = False
+     # List of inputs triggers batch mode
+     batch_inputs = [
+         {"country": "France"},
+         {"country": "Japan"},
+         {"country": "Brazil"}
+     ]
+
+     # use_batch_mode=True uses the provider's batch API if available/supported by LiteLLM
+     # strength=0.2 targets a cheaper/faster model
+     results = llm_invoke(
+         prompt=prompt,
+         input_json=batch_inputs,
+         use_batch_mode=True,
+         strength=0.2,
+         temperature=0.1
+     )
+
+     # In batch mode, 'result' is a list of strings (or objects)
+     for i, res in enumerate(results['result']):
+         console.print(f"[green]Input:[/green] {batch_inputs[i]['country']} -> [green]Output:[/green] {res}")

-     strength = 0.5
-     while strength <= 0.5:
-         print(f"\nStrength: {strength}")
-
-         # Example 1: Unstructured Output
-         print("\n--- Unstructured Output ---")
-         response = llm_invoke(
-             prompt=prompt,
-             input_json=input_json,
-             strength=strength,
-             temperature=temperature,
-             verbose=verbose
-         )
-
-         # Track model changes for strength ranges
-         if current_model != response['model_name']:
-             if current_model is not None:
-                 model_ranges[current_model].append((range_start, strength - 0.005))
-             current_model = response['model_name']
-             range_start = strength
-
-         print(f"Result: {response['result']}")
-         print(f"Cost: ${response['cost']:.6f}")
-         print(f"Model Used: {response['model_name']}")
-
-         # Example 2: Structured Output with Pydantic Model
-         prompt_structured = (
-             "Generate a joke about {topic}. \n"
-             "Return it in this exact JSON format:\n"
-             "{{ \n"
-             ' "setup": "your setup here",\n'
-             ' "punchline": "your punchline here"\n'
-             "}}\n"
-             "Return ONLY the JSON with no additional text or explanation."
-         )
-         input_json_structured = {"topic": "data scientists"}
-         output_pydantic = Joke
-
-         print("\n--- Structured Output ---")
-         try:
-             response_structured = llm_invoke(
-                 prompt=prompt_structured,
-                 input_json=input_json_structured,
-                 strength=strength,
-                 temperature=temperature,
-                 verbose=True,
-                 output_pydantic=output_pydantic
-             )
-             print(f"Result: {response_structured['result']}")
-             print(f"Cost: ${response_structured['cost']:.6f}")
-             print(f"Model Used: {response_structured['model_name']}")
-
-             # Access structured data
-             joke: Joke = response_structured['result']
-             print(f"\nJoke Setup: {joke.setup}")
-             print(f"Joke Punchline: {joke.punchline}")
-         except Exception as e:
-             print(f"Error encountered during structured output: {e}")
-
-         strength += 0.005
-         # round to 3 decimal places
-         strength = round(strength, 3)
+     console.print(f"[dim]Model used: {results['model_name']} | Total Cost: ${results['cost']:.6f}[/dim]\n")
+
+
+ # --- Example 4: Reasoning / Thinking Time ---
+ def example_reasoning():
+     console.print("[bold blue]--- Example 4: Reasoning / Thinking Time ---[/bold blue]")
+
+     # Some models (like Claude 3.7 or OpenAI o1/o3) support explicit thinking steps.
+     # Setting time > 0 enables this behavior based on the model's configuration in llm_model.csv.

-     # Add the final range for the last model
-     model_ranges[current_model].append((range_start, 1.0))
+     prompt = "Solve this riddle: {riddle}"
+     input_data = {"riddle": "I speak without a mouth and hear without ears. I have no body, but I come alive with wind. What am I?"}
+
+     result = llm_invoke(
+         prompt=prompt,
+         input_json=input_data,
+         strength=1.0, # Target highest capability model
+         time=0.5, # Request moderate thinking time/budget
+         verbose=True
+     )
+
+     console.print(f"[green]Answer:[/green] {result['result']}")

-     # Print out the strength ranges for each model
-     print("\n=== Model Strength Ranges ===")
-     for model, ranges in model_ranges.items():
-         print(f"\n{model}:")
-         for start, end in ranges:
-             print(f" Strength {start:.3f} to {end:.3f}")
+     # If the model supports it, thinking output is captured separately
+     if result.get('thinking_output'):
+         console.print(f"[yellow]Thinking Process:[/yellow] {result['thinking_output']}")
+     else:
+         console.print("[dim]No separate thinking output returned for this model.[/dim]")
+

  if __name__ == "__main__":
-     main()
+     # Ensure you have a valid .env file or environment variables set for API keys
+     # (e.g., OPENAI_API_KEY, ANTHROPIC_API_KEY)
+
+     try:
+         example_simple_text()
+         example_structured_output()
+         example_batch_processing()
+         example_reasoning()
+     except Exception as e:
+         console.print(f"[bold red]Error running examples:[/bold red] {e}")
  </llm_invoke_example>
  </internal_modules>

@@ -310,8 +418,19 @@ if __name__ == "__main__":
  INPUT:
  <prompt_to_update>% You are an expert Python engineer. Your goal is to write a Python function, "conflicts_in_prompts", that takes two prompts as input and finds conflicts between them and suggests how to resolve those conflicts.

- % The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name'). All output to the console will be pretty printed using the Python Rich library. Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.
- % The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+ % You are an expert Python engineer.
+
+ % Code Style Requirements
+ - File must start with `from __future__ import annotations`.
+ - All functions must be fully type-hinted.
+ - Use `rich.console.Console` for all printing.
+
+ % Package Structure
+ - The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name').
+ - The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+
+ % Error Handling
+ - Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.

  % Here are the inputs and outputs of the function:
  Inputs:
@@ -693,8 +812,19 @@ if __name__ == "__main__":
  OUTPUT:
  <updated_prompt>% You are an expert Python engineer. Your goal is to write a Python function, "conflicts_in_prompts", that takes two prompts as input and finds conflicts between them and suggests how to resolve those conflicts.

- % The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name'). All output to the console will be pretty printed using the Python Rich library. Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.
- % The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+ % You are an expert Python engineer.
+
+ % Code Style Requirements
+ - File must start with `from __future__ import annotations`.
+ - All functions must be fully type-hinted.
+ - Use `rich.console.Console` for all printing.
+
+ % Package Structure
+ - The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name').
+ - The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+
+ % Error Handling
+ - Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.

  % Here are the inputs and outputs of the function:
  Inputs: