pdd-cli 0.0.42__py3-none-any.whl → 0.0.90__py3-none-any.whl

This diff shows the changes between publicly available package versions that have been released to one of the supported registries, as they appear in those registries. It is provided for informational purposes only.
Files changed (119)
  1. pdd/__init__.py +4 -4
  2. pdd/agentic_common.py +863 -0
  3. pdd/agentic_crash.py +534 -0
  4. pdd/agentic_fix.py +1179 -0
  5. pdd/agentic_langtest.py +162 -0
  6. pdd/agentic_update.py +370 -0
  7. pdd/agentic_verify.py +183 -0
  8. pdd/auto_deps_main.py +15 -5
  9. pdd/auto_include.py +63 -5
  10. pdd/bug_main.py +3 -2
  11. pdd/bug_to_unit_test.py +2 -0
  12. pdd/change_main.py +11 -4
  13. pdd/cli.py +22 -1181
  14. pdd/cmd_test_main.py +80 -19
  15. pdd/code_generator.py +58 -18
  16. pdd/code_generator_main.py +672 -25
  17. pdd/commands/__init__.py +42 -0
  18. pdd/commands/analysis.py +248 -0
  19. pdd/commands/fix.py +140 -0
  20. pdd/commands/generate.py +257 -0
  21. pdd/commands/maintenance.py +174 -0
  22. pdd/commands/misc.py +79 -0
  23. pdd/commands/modify.py +230 -0
  24. pdd/commands/report.py +144 -0
  25. pdd/commands/templates.py +215 -0
  26. pdd/commands/utility.py +110 -0
  27. pdd/config_resolution.py +58 -0
  28. pdd/conflicts_main.py +8 -3
  29. pdd/construct_paths.py +281 -81
  30. pdd/context_generator.py +10 -2
  31. pdd/context_generator_main.py +113 -11
  32. pdd/continue_generation.py +47 -7
  33. pdd/core/__init__.py +0 -0
  34. pdd/core/cli.py +503 -0
  35. pdd/core/dump.py +554 -0
  36. pdd/core/errors.py +63 -0
  37. pdd/core/utils.py +90 -0
  38. pdd/crash_main.py +44 -11
  39. pdd/data/language_format.csv +71 -62
  40. pdd/data/llm_model.csv +20 -18
  41. pdd/detect_change_main.py +5 -4
  42. pdd/fix_code_loop.py +331 -77
  43. pdd/fix_error_loop.py +209 -60
  44. pdd/fix_errors_from_unit_tests.py +4 -3
  45. pdd/fix_main.py +75 -18
  46. pdd/fix_verification_errors.py +12 -100
  47. pdd/fix_verification_errors_loop.py +319 -272
  48. pdd/fix_verification_main.py +57 -17
  49. pdd/generate_output_paths.py +93 -10
  50. pdd/generate_test.py +16 -5
  51. pdd/get_jwt_token.py +48 -9
  52. pdd/get_run_command.py +73 -0
  53. pdd/get_test_command.py +68 -0
  54. pdd/git_update.py +70 -19
  55. pdd/increase_tests.py +7 -0
  56. pdd/incremental_code_generator.py +2 -2
  57. pdd/insert_includes.py +11 -3
  58. pdd/llm_invoke.py +1278 -110
  59. pdd/load_prompt_template.py +36 -10
  60. pdd/pdd_completion.fish +25 -2
  61. pdd/pdd_completion.sh +30 -4
  62. pdd/pdd_completion.zsh +79 -4
  63. pdd/postprocess.py +10 -3
  64. pdd/preprocess.py +228 -15
  65. pdd/preprocess_main.py +8 -5
  66. pdd/prompts/agentic_crash_explore_LLM.prompt +49 -0
  67. pdd/prompts/agentic_fix_explore_LLM.prompt +45 -0
  68. pdd/prompts/agentic_fix_harvest_only_LLM.prompt +48 -0
  69. pdd/prompts/agentic_fix_primary_LLM.prompt +85 -0
  70. pdd/prompts/agentic_update_LLM.prompt +1071 -0
  71. pdd/prompts/agentic_verify_explore_LLM.prompt +45 -0
  72. pdd/prompts/auto_include_LLM.prompt +98 -101
  73. pdd/prompts/change_LLM.prompt +1 -3
  74. pdd/prompts/detect_change_LLM.prompt +562 -3
  75. pdd/prompts/example_generator_LLM.prompt +22 -1
  76. pdd/prompts/extract_code_LLM.prompt +5 -1
  77. pdd/prompts/extract_program_code_fix_LLM.prompt +14 -2
  78. pdd/prompts/extract_prompt_update_LLM.prompt +7 -8
  79. pdd/prompts/extract_promptline_LLM.prompt +17 -11
  80. pdd/prompts/find_verification_errors_LLM.prompt +6 -0
  81. pdd/prompts/fix_code_module_errors_LLM.prompt +16 -4
  82. pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +6 -41
  83. pdd/prompts/fix_verification_errors_LLM.prompt +22 -0
  84. pdd/prompts/generate_test_LLM.prompt +21 -6
  85. pdd/prompts/increase_tests_LLM.prompt +1 -2
  86. pdd/prompts/insert_includes_LLM.prompt +1181 -6
  87. pdd/prompts/split_LLM.prompt +1 -62
  88. pdd/prompts/trace_LLM.prompt +25 -22
  89. pdd/prompts/unfinished_prompt_LLM.prompt +85 -1
  90. pdd/prompts/update_prompt_LLM.prompt +22 -1
  91. pdd/prompts/xml_convertor_LLM.prompt +3246 -7
  92. pdd/pytest_output.py +188 -21
  93. pdd/python_env_detector.py +151 -0
  94. pdd/render_mermaid.py +236 -0
  95. pdd/setup_tool.py +648 -0
  96. pdd/simple_math.py +2 -0
  97. pdd/split_main.py +3 -2
  98. pdd/summarize_directory.py +56 -7
  99. pdd/sync_determine_operation.py +918 -186
  100. pdd/sync_main.py +82 -32
  101. pdd/sync_orchestration.py +1456 -453
  102. pdd/sync_tui.py +848 -0
  103. pdd/template_registry.py +264 -0
  104. pdd/templates/architecture/architecture_json.prompt +242 -0
  105. pdd/templates/generic/generate_prompt.prompt +174 -0
  106. pdd/trace.py +168 -12
  107. pdd/trace_main.py +4 -3
  108. pdd/track_cost.py +151 -61
  109. pdd/unfinished_prompt.py +49 -3
  110. pdd/update_main.py +549 -67
  111. pdd/update_model_costs.py +2 -2
  112. pdd/update_prompt.py +19 -4
  113. {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/METADATA +20 -7
  114. pdd_cli-0.0.90.dist-info/RECORD +153 -0
  115. {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/licenses/LICENSE +1 -1
  116. pdd_cli-0.0.42.dist-info/RECORD +0 -115
  117. {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/WHEEL +0 -0
  118. {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/entry_points.txt +0 -0
  119. {pdd_cli-0.0.42.dist-info → pdd_cli-0.0.90.dist-info}/top_level.txt +0 -0
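pdd/prompts/insert_includes_LLM.prompt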
@@ -4,20 +4,1195 @@
4
4
  <examples>
5
5
  <example id="1">
6
6
  INPUT:
7
- <prompt_to_update><include>context/insert/1/prompt_to_update.prompt</include></prompt_to_update>
8
- <dependencies_to_insert><include>context/insert/1/dependencies.prompt</include></dependencies_to_insert>
7
+ <prompt_to_update>% You are an expert Python Software Engineer. Your goal is to write a python function, "postprocess", that will extract code from a string output of an LLM. All output to the console will be pretty printed using the Python rich library.
8
+
9
+ % You are an expert Python engineer.
10
+
11
+ % Code Style Requirements
12
+ - File must start with `from __future__ import annotations`.
13
+ - All functions must be fully type-hinted.
14
+ - Use `rich.console.Console` for all printing.
15
+
16
+ % Package Structure
17
+ - The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name').
18
+ - The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
19
+
20
+ % Error Handling
21
+ - Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.
22
+
23
+ % Here are the inputs and outputs of the function:
24
+ Inputs:
25
+ 'llm_output' - A string containing a mix of text and code sections.
26
+ 'language' - A string specifying the programming language of the code to be extracted.
27
+ 'strength' - A float between 0 and 1 that represents the strength of the LLM model to use. Default is 0.9.
28
+ 'temperature' - A float between 0 and 1 that represents the temperature parameter for the LLM model. Default is 0.
29
+ 'verbose' - A boolean that indicates whether to print detailed processing information. Default is False.
30
+ Outputs as a tuple:
31
+ 'extracted_code' - A string containing the extracted and processed code.
32
+ 'total_cost' - A float representing the total cost of running the function.
33
+ 'model_name' - A string representing the model name used for extraction.
34
+
35
+ % This function will do the following:
36
+ Step 1. If strength is 0, use postprocess_0 function to extract code and return (extracted_code, 0.0).
37
+ Step 2. Load the 'extract_code_LLM.prompt' template file.
38
+ Step 3. Process the text using llm_invoke:
39
+ 3a. Pass the following parameters to the prompt:
40
+ - 'llm_output'
41
+ - 'language'
42
+ 3b. The Pydantic output will contain the 'extracted_code' key.
43
+ 3c. For the extracted_code, if the first and last line have triple backticks delete the entire first and last line. There will be the name of the language after the first triple backticks and that should be removed as well.
44
+ Step 4. Return the extracted code string, total cost float and model name string.
45
+ </prompt_to_update>
46
+ <dependencies_to_insert>% Here is how to use the internal modules:
47
+ <internal_modules>
48
+ For loading prompt templates:
49
+ <load_prompt_template_example>
50
+ from pdd.load_prompt_template import load_prompt_template
51
+ from rich import print
52
+
53
+ def main():
54
+ prompt_name = "generate_test_LLM" # Name of the prompt file without extension
55
+ prompt = load_prompt_template(prompt_name)
56
+ if prompt:
57
+ print("[blue]Loaded Prompt Template:[/blue]")
58
+ print(prompt)
59
+
60
+ if __name__ == "__main__":
61
+ main()
62
+ </load_prompt_template_example>
63
+
64
+ For running prompts with llm_invoke:
65
+ <llm_invoke_example>
66
+ from pydantic import BaseModel, Field
67
+ from pdd.llm_invoke import llm_invoke, _load_model_data, _select_model_candidates, LLM_MODEL_CSV_PATH, DEFAULT_BASE_MODEL
68
+ from typing import List, Dict, Any
69
+
70
+ # Define a Pydantic model for structured output
71
+ class Joke(BaseModel):
72
+ setup: str = Field(description="The setup of the joke")
73
+ punchline: str = Field(description="The punchline of the joke")
74
+
75
+
76
+ def calculate_model_ranges(step: float = 0.001) -> List[Dict[str, Any]]:
77
+ """
78
+ Calculate the strength ranges for each model by sampling strength values.
79
+
80
+ Args:
81
+ step: The step size for sampling strength values (default 0.001)
82
+
83
+ Returns:
84
+ List of dicts with 'model', 'start', 'end', and 'midpoint' keys
85
+ """
86
+ model_df = _load_model_data(LLM_MODEL_CSV_PATH)
87
+
88
+ ranges = []
89
+ current_model = None
90
+ range_start = 0.0
91
+
92
+ # Sample strength values to find model boundaries
93
+ strength = 0.0
94
+ while strength <= 1.0:
95
+ candidates = _select_model_candidates(strength, DEFAULT_BASE_MODEL, model_df)
96
+ selected_model = candidates[0]['model'] if candidates else None
97
+
98
+ if current_model != selected_model:
99
+ if current_model is not None:
100
+ ranges.append({
101
+ 'model': current_model,
102
+ 'start': range_start,
103
+ 'end': round(strength - step, 3),
104
+ 'midpoint': round((range_start + strength - step) / 2, 3)
105
+ })
106
+ current_model = selected_model
107
+ range_start = strength
108
+
109
+ strength = round(strength + step, 3)
110
+
111
+ # Add the final range
112
+ if current_model is not None:
113
+ ranges.append({
114
+ 'model': current_model,
115
+ 'start': range_start,
116
+ 'end': 1.0,
117
+ 'midpoint': round((range_start + 1.0) / 2, 3)
118
+ })
119
+
120
+ return ranges
121
+
122
+
123
+ def main():
124
+ """
125
+ Main function to demonstrate the usage of `llm_invoke`.
126
+
127
+ Automatically calculates model ranges and runs each model once
128
+ at its midpoint strength value.
129
+ """
130
+ # Calculate model ranges automatically
131
+ print("Calculating model strength ranges...")
132
+ model_ranges = calculate_model_ranges()
133
+
134
+ # Print the calculated ranges
135
+ print("\n=== Model Strength Ranges ===")
136
+ for range_info in model_ranges:
137
+ print(f"{range_info['model']}: {range_info['start']:.3f} to {range_info['end']:.3f} (midpoint: {range_info['midpoint']:.3f})")
138
+
139
+ prompt = "Tell me a joke about {topic}"
140
+ input_json = {"topic": "programmers"}
141
+ temperature = 1
142
+ verbose = False
143
+
144
+ # Run each model once at its midpoint strength
145
+ print("\n=== Running Each Model Once ===")
146
+ for range_info in model_ranges:
147
+ model_name = range_info['model']
148
+ midpoint = range_info['midpoint']
149
+
150
+ print(f"\n--- Model: {model_name} (strength: {midpoint}) ---")
151
+
152
+ # Example 1: Unstructured Output
153
+ print("\n Unstructured Output:")
154
+ response = llm_invoke(
155
+ prompt=prompt,
156
+ input_json=input_json,
157
+ strength=midpoint,
158
+ temperature=temperature,
159
+ verbose=verbose
160
+ )
161
+
162
+ print(f" Result: {response['result']}")
163
+ print(f" Cost: ${response['cost']:.6f}")
164
+ print(f" Model Used: {response['model_name']}")
165
+
166
+ # Example 2: Structured Output with Pydantic Model
167
+ prompt_structured = (
168
+ "Generate a joke about {topic}. \n"
169
+ "Return it in this exact JSON format:\n"
170
+ "{{ \n"
171
+ ' "setup": "your setup here",\n'
172
+ ' "punchline": "your punchline here"\n'
173
+ "}}\n"
174
+ "Return ONLY the JSON with no additional text or explanation."
175
+ )
176
+ input_json_structured = {"topic": "data scientists"}
177
+ output_pydantic = Joke
178
+
179
+ print("\n Structured Output:")
180
+ try:
181
+ response_structured = llm_invoke(
182
+ prompt=prompt_structured,
183
+ input_json=input_json_structured,
184
+ strength=midpoint,
185
+ temperature=temperature,
186
+ verbose=verbose,
187
+ output_pydantic=output_pydantic
188
+ )
189
+ print(f" Result: {response_structured['result']}")
190
+ print(f" Cost: ${response_structured['cost']:.6f}")
191
+ print(f" Model Used: {response_structured['model_name']}")
192
+
193
+ # Access structured data
194
+ joke: Joke = response_structured['result']
195
+ print(f"\n Joke Setup: {joke.setup}")
196
+ print(f" Joke Punchline: {joke.punchline}")
197
+ except Exception as e:
198
+ print(f" Error encountered during structured output: {e}")
199
+
200
+ if __name__ == "__main__":
201
+ main()
202
+ </llm_invoke_example>
203
+ </internal_modules>
204
+ </dependencies_to_insert>
9
205
 
10
206
  OUTPUT:
11
- <updated_prompt><include>context/insert/1/updated_prompt.prompt</include></updated_prompt>
207
+ <updated_prompt>% You are an expert Python Software Engineer. Your goal is to write a python function, "postprocess", that will extract code from a string output of an LLM. All output to the console will be pretty printed using the Python rich library.
208
+
209
+ % You are an expert Python engineer.
210
+
211
+ % Code Style Requirements
212
+ - File must start with `from __future__ import annotations`.
213
+ - All functions must be fully type-hinted.
214
+ - Use `rich.console.Console` for all printing.
215
+
216
+ % Package Structure
217
+ - The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name').
218
+ - The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
219
+
220
+ % Error Handling
221
+ - Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.
222
+
223
+ % Here are the inputs and outputs of the function:
224
+ Inputs:
225
+ 'llm_output' - A string containing a mix of text and code sections.
226
+ 'language' - A string specifying the programming language of the code to be extracted.
227
+ 'strength' - A float between 0 and 1 that represents the strength of the LLM model to use. Default is 0.9.
228
+ 'temperature' - A float between 0 and 1 that represents the temperature parameter for the LLM model. Default is 0.
229
+ 'verbose' - A boolean that indicates whether to print detailed processing information. Default is False.
230
+ Outputs as a tuple:
231
+ 'extracted_code' - A string containing the extracted and processed code.
232
+ 'total_cost' - A float representing the total cost of running the function.
233
+ 'model_name' - A string representing the model name used for extraction.
234
+
235
+ % Here is how to use the internal modules:
236
+ <internal_modules>
237
+ For loading prompt templates:
238
+ <load_prompt_template_example>
239
+ from pdd.load_prompt_template import load_prompt_template
240
+ from rich import print
241
+
242
+ def main():
243
+ prompt_name = "generate_test_LLM" # Name of the prompt file without extension
244
+ prompt = load_prompt_template(prompt_name)
245
+ if prompt:
246
+ print("[blue]Loaded Prompt Template:[/blue]")
247
+ print(prompt)
248
+
249
+ if __name__ == "__main__":
250
+ main()
251
+ </load_prompt_template_example>
252
+
253
+ For running prompts with llm_invoke:
254
+ <llm_invoke_example>
255
+ from pydantic import BaseModel, Field
256
+ from pdd.llm_invoke import llm_invoke, _load_model_data, _select_model_candidates, LLM_MODEL_CSV_PATH, DEFAULT_BASE_MODEL
257
+ from typing import List, Dict, Any
258
+
259
+ # Define a Pydantic model for structured output
260
+ class Joke(BaseModel):
261
+ setup: str = Field(description="The setup of the joke")
262
+ punchline: str = Field(description="The punchline of the joke")
263
+
264
+
265
+ def calculate_model_ranges(step: float = 0.001) -> List[Dict[str, Any]]:
266
+ """
267
+ Calculate the strength ranges for each model by sampling strength values.
268
+
269
+ Args:
270
+ step: The step size for sampling strength values (default 0.001)
271
+
272
+ Returns:
273
+ List of dicts with 'model', 'start', 'end', and 'midpoint' keys
274
+ """
275
+ model_df = _load_model_data(LLM_MODEL_CSV_PATH)
276
+
277
+ ranges = []
278
+ current_model = None
279
+ range_start = 0.0
280
+
281
+ # Sample strength values to find model boundaries
282
+ strength = 0.0
283
+ while strength <= 1.0:
284
+ candidates = _select_model_candidates(strength, DEFAULT_BASE_MODEL, model_df)
285
+ selected_model = candidates[0]['model'] if candidates else None
286
+
287
+ if current_model != selected_model:
288
+ if current_model is not None:
289
+ ranges.append({
290
+ 'model': current_model,
291
+ 'start': range_start,
292
+ 'end': round(strength - step, 3),
293
+ 'midpoint': round((range_start + strength - step) / 2, 3)
294
+ })
295
+ current_model = selected_model
296
+ range_start = strength
297
+
298
+ strength = round(strength + step, 3)
299
+
300
+ # Add the final range
301
+ if current_model is not None:
302
+ ranges.append({
303
+ 'model': current_model,
304
+ 'start': range_start,
305
+ 'end': 1.0,
306
+ 'midpoint': round((range_start + 1.0) / 2, 3)
307
+ })
308
+
309
+ return ranges
310
+
311
+
312
+ def main():
313
+ """
314
+ Main function to demonstrate the usage of `llm_invoke`.
315
+
316
+ Automatically calculates model ranges and runs each model once
317
+ at its midpoint strength value.
318
+ """
319
+ # Calculate model ranges automatically
320
+ print("Calculating model strength ranges...")
321
+ model_ranges = calculate_model_ranges()
322
+
323
+ # Print the calculated ranges
324
+ print("\n=== Model Strength Ranges ===")
325
+ for range_info in model_ranges:
326
+ print(f"{range_info['model']}: {range_info['start']:.3f} to {range_info['end']:.3f} (midpoint: {range_info['midpoint']:.3f})")
327
+
328
+ prompt = "Tell me a joke about {topic}"
329
+ input_json = {"topic": "programmers"}
330
+ temperature = 1
331
+ verbose = False
332
+
333
+ # Run each model once at its midpoint strength
334
+ print("\n=== Running Each Model Once ===")
335
+ for range_info in model_ranges:
336
+ model_name = range_info['model']
337
+ midpoint = range_info['midpoint']
338
+
339
+ print(f"\n--- Model: {model_name} (strength: {midpoint}) ---")
340
+
341
+ # Example 1: Unstructured Output
342
+ print("\n Unstructured Output:")
343
+ response = llm_invoke(
344
+ prompt=prompt,
345
+ input_json=input_json,
346
+ strength=midpoint,
347
+ temperature=temperature,
348
+ verbose=verbose
349
+ )
350
+
351
+ print(f" Result: {response['result']}")
352
+ print(f" Cost: ${response['cost']:.6f}")
353
+ print(f" Model Used: {response['model_name']}")
354
+
355
+ # Example 2: Structured Output with Pydantic Model
356
+ prompt_structured = (
357
+ "Generate a joke about {topic}. \n"
358
+ "Return it in this exact JSON format:\n"
359
+ "{{ \n"
360
+ ' "setup": "your setup here",\n'
361
+ ' "punchline": "your punchline here"\n'
362
+ "}}\n"
363
+ "Return ONLY the JSON with no additional text or explanation."
364
+ )
365
+ input_json_structured = {"topic": "data scientists"}
366
+ output_pydantic = Joke
367
+
368
+ print("\n Structured Output:")
369
+ try:
370
+ response_structured = llm_invoke(
371
+ prompt=prompt_structured,
372
+ input_json=input_json_structured,
373
+ strength=midpoint,
374
+ temperature=temperature,
375
+ verbose=verbose,
376
+ output_pydantic=output_pydantic
377
+ )
378
+ print(f" Result: {response_structured['result']}")
379
+ print(f" Cost: ${response_structured['cost']:.6f}")
380
+ print(f" Model Used: {response_structured['model_name']}")
381
+
382
+ # Access structured data
383
+ joke: Joke = response_structured['result']
384
+ print(f"\n Joke Setup: {joke.setup}")
385
+ print(f" Joke Punchline: {joke.punchline}")
386
+ except Exception as e:
387
+ print(f" Error encountered during structured output: {e}")
388
+
389
+ if __name__ == "__main__":
390
+ main()
391
+ </llm_invoke_example>
392
+ </internal_modules>
393
+
394
+ % This function will do the following:
395
+ Step 1. If strength is 0, use postprocess_0 function to extract code and return (extracted_code, 0.0).
396
+ Step 2. Load the 'extract_code_LLM.prompt' template file.
397
+ Step 3. Process the text using llm_invoke:
398
+ 3a. Pass the following parameters to the prompt:
399
+ - 'llm_output'
400
+ - 'language'
401
+ 3b. The Pydantic output will contain the 'extracted_code' key.
402
+ 3c. For the extracted_code, if the first and last line have triple backticks delete the entire first and last line. There will be the name of the language after the first triple backticks and that should be removed as well.
403
+ Step 4. Return the extracted code string, total cost float and model name string.
404
+ </updated_prompt>
12
405
  <example>
13
406
 
14
407
  <example id="2">
15
408
  INPUT:
16
- <prompt_to_update><include>context/insert/2/prompt_to_update.prompt</include></prompt_to_update>
17
- <dependencies_to_insert><include>context/insert/2/dependencies.prompt</include></dependencies_to_insert>
409
+ <prompt_to_update>% You are an expert Python engineer. Your goal is to write a Python function, "conflicts_in_prompts", that takes two prompts as input and finds conflicts between them and suggests how to resolve those conflicts.
410
+
411
+ % You are an expert Python engineer.
412
+
413
+ % Code Style Requirements
414
+ - File must start with `from __future__ import annotations`.
415
+ - All functions must be fully type-hinted.
416
+ - Use `rich.console.Console` for all printing.
417
+
418
+ % Package Structure
419
+ - The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name').
420
+ - The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
421
+
422
+ % Error Handling
423
+ - Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.
424
+
425
+ % Here are the inputs and outputs of the function:
426
+ Inputs:
427
+ 'prompt1' - First prompt in the pair of prompts we are comparing.
428
+ 'prompt2' - Second prompt in the pair of prompts we are comparing.
429
+ 'strength' - A float that is the strength of the LLM model to use. Default is 0.5.
430
+ 'temperature' - A float that is the temperature of the LLM model to use. Default is 0.
431
+ Outputs:
432
+ 'changes_list' - A list of JSON objects, each containing the name of a prompt that needs to be changed and detailed instructions on how to change it.
433
+ 'total_cost' - A float that is the total cost of the model run
434
+ 'model_name' - A string that is the name of the selected LLM model
435
+
436
+ % Here is an example of a LangChain Expression Language (LCEL) program: <lcel_example>import os
437
+ from langchain_core.prompts import PromptTemplate
438
+ from langchain_community.cache import SQLiteCache
439
+ from langchain_community.llms.mlx_pipeline import MLXPipeline
440
+ from langchain.globals import set_llm_cache
441
+ from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser # Parsers are only avaiable in langchain_core.output_parsers not langchain.output_parsers
442
+ from langchain_core.output_parsers import StrOutputParser
443
+ from langchain_core.prompts import ChatPromptTemplate
444
+ from langchain_core.runnables import RunnablePassthrough, ConfigurableField
445
+
446
+ from langchain_openai import AzureChatOpenAI
447
+ from langchain_fireworks import Fireworks
448
+ from langchain_anthropic import ChatAnthropic
449
+ from langchain_openai import ChatOpenAI # Chatbot and conversational tasks
450
+ from langchain_openai import OpenAI # General language tasks
451
+ from langchain_google_genai import ChatGoogleGenerativeAI
452
+ from langchain_google_vertexai import ChatVertexAI
453
+ from langchain_groq import ChatGroq
454
+ from langchain_together import Together
455
+
456
+ from langchain.callbacks.base import BaseCallbackHandler
457
+ from langchain.schema import LLMResult
458
+
459
+ import json
460
+
461
+ from langchain_community.chat_models.mlx import ChatMLX
462
+ from langchain_core.messages import HumanMessage
463
+
464
+ from langchain_ollama.llms import OllamaLLM
465
+ from langchain_aws import ChatBedrockConverse
466
+
467
+ # Define a base output parser (e.g., PydanticOutputParser)
468
+ from pydantic import BaseModel, Field
469
+
470
+
471
+
472
+ class CompletionStatusHandler(BaseCallbackHandler):
473
+ def __init__(self):
474
+ self.is_complete = False
475
+ self.finish_reason = None
476
+ self.input_tokens = None
477
+ self.output_tokens = None
478
+
479
+ def on_llm_end(self, response: LLMResult, **kwargs) -> None:
480
+ self.is_complete = True
481
+ if response.generations and response.generations[0]:
482
+ generation = response.generations[0][0]
483
+ self.finish_reason = generation.generation_info.get('finish_reason').lower()
484
+
485
+ # Extract token usage
486
+ if hasattr(generation.message, 'usage_metadata'):
487
+ usage_metadata = generation.message.usage_metadata
488
+ self.input_tokens = usage_metadata.get('input_tokens')
489
+ self.output_tokens = usage_metadata.get('output_tokens')
490
+ # print("response:",response)
491
+ print("Extracted information:")
492
+ print(f"Finish reason: {self.finish_reason}")
493
+ print(f"Input tokens: {self.input_tokens}")
494
+ print(f"Output tokens: {self.output_tokens}")
495
+
496
+ # Set up the LLM with the custom handler
497
+ handler = CompletionStatusHandler()
498
+ # Always setup cache to save money and increase speeds
499
+ set_llm_cache(SQLiteCache(database_path=".langchain.db"))
500
+
501
+
502
+ # Create the LCEL template. Make note of the variable {topic} which will be filled in later.
503
+ prompt_template = PromptTemplate.from_template("Tell me a joke about {topic}")
504
+
505
+ llm = ChatGoogleGenerativeAI(model="gemini-2.5-pro-exp-03-25", temperature=0, callbacks=[handler])
506
+ # Combine with a model and parser to output a string
507
+ chain = prompt_template |llm| StrOutputParser()
508
+
509
+ # Run the template. Notice that the input is a dictionary with a single key "topic" which feeds it into the above prompt template. This is needed because the prompt template has a variable {topic} which needs to be filled in when invoked.
510
+ result = chain.invoke({"topic": "cats"})
511
+ print("********Google:", result)
512
+
513
+
514
+ llm = ChatVertexAI(model="gemini-2.5-pro-exp-03-25", temperature=0, callbacks=[handler])
515
+ # Combine with a model and parser to output a string
516
+ chain = prompt_template |llm| StrOutputParser()
517
+
518
+ # Run the template. Notice that the input is a dictionary with a single key "topic" which feeds it into the above prompt template. This is needed because the prompt template has a variable {topic} which needs to be filled in when invoked.
519
+ result = chain.invoke({"topic": "cats"})
520
+ print("********GoogleVertex:", result)
521
+
522
+
523
+ # Define your desired data structure.
524
+ class Joke(BaseModel):
525
+ setup: str = Field(description="question to set up a joke")
526
+ punchline: str = Field(description="answer to resolve the joke")
527
+
528
+
529
+ # Set up a parser
530
+ parser = JsonOutputParser(pydantic_object=Joke)
531
+
532
+ # Create a prompt template
533
+ prompt = PromptTemplate(
534
+ template="Answer the user query.\n{format_instructions}\n{query}\n",
535
+ input_variables=["query"],
536
+ partial_variables={"format_instructions": parser.get_format_instructions()},
537
+ )
538
+
539
+ llm_no_struct = ChatOpenAI(model="gpt-4o-mini", temperature=0,
540
+ callbacks=[handler])
541
+ llm = llm_no_struct.with_structured_output(Joke) # with structured output forces the output to be a specific object, in this case Joke. Only OpenAI models have structured output
542
+ # Chain the components.
543
+ # The class `LLMChain` was deprecated in LangChain 0.1.17 and will be removed in 1.0. Use RunnableSequence, e.g., `prompt | llm` instead.
544
+ chain = prompt | llm
545
+
546
+ # Invoke the chain with a query.
547
+ # IMPORTANT: chain.run is now obsolete. Use chain.invoke instead.
548
+ result = chain.invoke({"query": "Tell me a joke about openai."})
549
+ print("4o mini JSON: ",result)
550
+ print(result.setup) # How to access the structured output
551
+
552
+ llm = ChatOpenAI(model="o1", temperature=1,
553
+ callbacks=[handler],model_kwargs = {"max_completion_tokens" : 1000})
554
+ # Chain the components.
555
+ # The class `LLMChain` was deprecated in LangChain 0.1.17 and will be removed in 1.0. Use RunnableSequence, e.g., `prompt | llm` instead.
556
+ chain = prompt | llm | parser
557
+
558
+ # Invoke the chain with a query.
559
+ # IMPORTANT: chain.run is now obsolete. Use chain.invoke instead.
560
+ result = chain.invoke({"query": "Tell me a joke about openai."})
561
+ print("o1 JSON: ",result)
562
+
563
+ # Get DEEPSEEK_API_KEY environmental variable
564
+
565
+ deepseek_api_key = os.getenv('DEEPSEEK_API_KEY')
566
+
567
+ # Ensure the API key is retrieved successfully
568
+ if deepseek_api_key is None:
569
+ raise ValueError("DEEPSEEK_API_KEY environment variable is not set")
570
+
571
+ llm = ChatOpenAI(
572
+ model='deepseek-chat',
573
+ openai_api_key=deepseek_api_key,
574
+ openai_api_base='https://api.deepseek.com',
575
+ temperature=0, callbacks=[handler]
576
+ )
577
+
578
+ # Chain the components
579
+ chain = prompt | llm | parser
580
+
581
+ # Invoke the chain with a query
582
+ result = chain.invoke({"query": "Write joke about deepseek."})
583
+ print("deepseek",result)
584
+
585
+
586
+ # Set up a parser
587
+ parser = PydanticOutputParser(pydantic_object=Joke)
588
+ # Chain the components
589
+ chain = prompt | llm | parser
590
+
591
+ # Invoke the chain with a query
592
+ result = chain.invoke({"query": "Write joke about deepseek and pydantic."})
593
+ print("deepseek pydantic",result)
594
+
595
+ # Set up the Azure ChatOpenAI LLM instance
596
+ llm_no_struct = AzureChatOpenAI(
597
+ model="o4-mini",
598
+ temperature=1,
599
+ callbacks=[handler]
600
+ )
601
+ llm = llm_no_struct.with_structured_output(Joke) # with structured output forces the output to be a specific JSON format
602
+ # Chain the components: prompt | llm | parser
603
+ chain = prompt | llm # returns a Joke object
604
+
605
+ # Invoke the chain with a query
606
+ result = chain.invoke({"query": "What is Azure?"}) # Pass a dictionary if `invoke` expects it
607
+ print("Azure Result:", result)
608
+
609
+ # Set up a parser
610
+ parser = JsonOutputParser(pydantic_object=Joke)
611
+
612
+ llm = Fireworks(
613
+ model="accounts/fireworks/models/llama4-maverick-instruct-basic",
614
+ temperature=0, callbacks=[handler])
615
+ # Chain the components
616
+ chain = prompt | llm | parser
617
+
618
+ # Invoke the chain with a query
619
+ # no money in account
620
+ # result = chain.invoke({"query": "Tell me a joke about the president"})
621
+ # print("fireworks",result)
622
+
623
+
624
+
625
+
626
+
627
+ prompt = ChatPromptTemplate.from_template(
628
+ "Tell me a short joke about {topic}"
629
+ )
630
+ chat_openai = ChatOpenAI(model="gpt-3.5-turbo", callbacks=[handler])
631
+ openai = OpenAI(model="gpt-3.5-turbo-instruct", callbacks=[handler])
632
+ anthropic = ChatAnthropic(model="claude-2", callbacks=[handler])
633
+ model = (
634
+ chat_openai
635
+ .with_fallbacks([anthropic])
636
+ .configurable_alternatives(
637
+ ConfigurableField(id="model"),
638
+ default_key="chat_openai",
639
+ openai=openai,
640
+ anthropic=anthropic,
641
+ )
642
+ )
643
+
644
+ chain = (
645
+ {"topic": RunnablePassthrough()}
646
+ | prompt
647
+ | model
648
+ | StrOutputParser()
649
+ )
650
+ result = chain.invoke({"topic": "Tell me a joke about the president"})
651
+ print("config alt:",result)
652
+
653
+
654
+
655
+ llm = ChatAnthropic(
656
+ model="claude-3-7-sonnet-latest",
657
+ max_tokens=5000, # Total tokens for the response
658
+ thinking={"type": "enabled", "budget_tokens": 2000}, # Tokens for internal reasoning
659
+ )
660
+
661
+ response = llm.invoke("What is the cube root of 50.653?")
662
+ print(json.dumps(response.content, indent=2))
663
+
664
+
665
+ llm = ChatGroq(temperature=0, model_name="qwen-qwq-32b", callbacks=[handler])
666
+ system = "You are a helpful assistant."
667
+ human = "{text}"
668
+ prompt = ChatPromptTemplate.from_messages([("system", system), ("human", human)])
669
+
670
+ chain = prompt | llm | StrOutputParser()
671
+ print(chain.invoke({"text": "Explain the importance of low latency LLMs."}))
672
+
673
+
674
+ llm = Together(
675
+ model="meta-llama/Llama-3-70b-chat-hf",
676
+ max_tokens=500, callbacks=[handler]
677
+ )
678
+ chain = prompt | llm | StrOutputParser()
679
+ print(chain.invoke({"text": "Explain the importance of together.ai."}))
680
+
681
+
682
+ # Define a prompt template with placeholders for variables
683
+ prompt_template = PromptTemplate.from_template("Tell me a {adjective} joke about {content}.")
684
+
685
+ # Format the prompt with the variables
686
+ formatted_prompt = prompt_template.format(adjective="funny", content="data scientists")
687
+
688
+ # Print the formatted prompt
689
+ print(formatted_prompt)
690
+
691
+
692
+ # Set up the LLM with the custom handler
693
+ handler = CompletionStatusHandler()
694
+
695
+
696
+ llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.9, callbacks=[handler])
697
+
698
+ prompt = PromptTemplate.from_template("What is a good name for a company that makes {product}?")
699
+
700
+ chain = prompt | llm
701
+
702
+ # Invoke the chain
703
+ response = chain.invoke({"product":"colorful socks"})
704
+
705
+ # Check completion status
706
+ print(f"Is complete: {handler.is_complete}")
707
+ print(f"Finish reason: {handler.finish_reason}")
708
+ print(f"Response: {response}")
709
+ print(f"Input tokens: {handler.input_tokens}")
710
+ print(f"Output tokens: {handler.output_tokens}")
711
+
712
+
713
+
714
+ template = """Question: {question}"""
715
+
716
+ prompt = ChatPromptTemplate.from_template(template)
717
+
718
+ model = OllamaLLM(model="qwen2.5-coder:32b")
719
+
720
+ chain = prompt | model
721
+
722
+ output = chain.invoke({"question": "Write a python function that calculates Pi"})
723
+ print(output)
724
+
725
+
726
+
727
+ llm = MLXPipeline.from_model_id(
728
+ "mlx-community/quantized-gemma-2b-it",
729
+ pipeline_kwargs={"max_tokens": 10, "temp": 0.1},
730
+ )
731
+
732
+
733
+ chat_model = ChatMLX(llm=llm)
734
+ messages = [HumanMessage(content="What happens when an unstoppable force meets an immovable object?")]
735
+ response = chat_model.invoke(messages)
736
+ print(response.content)
737
+
738
+
739
+
740
+ llm = ChatBedrockConverse(
741
+ model_id="anthropic.claude-3-5-sonnet-20240620-v1:0",
742
+ # Additional parameters like temperature, max_tokens can be set here
743
+ )
744
+
745
+ messages = [HumanMessage(content="What happens when an unstoppable force meets an immovable sonnet?")]
746
+ response = llm.invoke(messages)
747
+ print(response.content)</lcel_example>
748
+
749
+ % This function will use Langchain to do the following:
750
+ Step 1. Use $PDD_PATH environment variable to get the path to the project. Load the '$PDD_PATH/prompts/conflict_LLM.prompt' and '$PDD_PATH/prompts/extract_conflicts_LLM.prompt' files.
751
+ Step 2. Then this will create a Langchain LCEL template from the conflict_LLM prompt.
752
+ Step 3. This will use llm_selector for the model, imported from a relative path.
753
+ Step 4. Pretty print a message letting the user know it is running and how many tokens (using token_counter from llm_selector) are in the prompt and the cost. The cost from llm_selector is in dollars per million tokens.
754
+ Step 5. Run the prompts through the model using Langchain LCEL with string output.
755
+ 5a. Pass the following string parameters to the prompt during invoke:
756
+ - 'PROMPT1'
757
+ - 'PROMPT2'
758
+ 5b. Pretty print the output of 5a which will be in Markdown format.
759
+ Step 6. Create a Langchain LCEL template using a .8 strength llm_selector and token counter from the extract_conflicts_LLM prompt that outputs JSON:
760
+ 6a. Pass the following string parameters to the prompt during invocation: 'llm_output' (this string is from Step 5a).
761
+ 6b. Calculate input and output token count using token_counter from llm_selector and pretty print the running message with the token count and cost.
762
+ 6c. Use 'get' function to extract 'changes_list' list values using from the dictionary output.
763
+ Step 7. Return the changes_list, total_cost and model_name.</prompt_to_update>
764
+ <dependencies_to_insert>% Here are examples of how to use internal modules:
765
+ <internal_example_modules>
766
+ % Example of selecting a Langchain LLM and counting tokens using llm_selector: <llm_selector_example>from pdd.llm_selector import llm_selector
767
+
768
+ def main() -> None:
769
+ """
770
+ Main function to demonstrate the usage of the llm_selector function.
771
+ """
772
+ # Define the strength and temperature parameters
773
+ strength: float = 0.5 # Example strength value for the LLM model
774
+ temperature: float = 1.0 # Example temperature value for the LLM model
775
+
776
+ try:
777
+ while strength <= 1.1:
778
+ # Call the llm_selector function with the specified strength and temperature
779
+ llm, token_counter, input_cost, output_cost, model_name = llm_selector(strength, temperature)
780
+ print(f"Strength: {strength}")
781
+
782
+ # Print the details of the selected LLM model
783
+ print(f"Selected LLM Model: {model_name}")
784
+ print(f"Input Cost per Million Tokens: {input_cost}")
785
+ print(f"Output Cost per Million Tokens: {output_cost}")
786
+
787
+ # Example usage of the token counter function
788
+ sample_text: str = "This is a sample text to count tokens."
789
+ token_count: int = token_counter(sample_text)
790
+ print(f"Token Count for Sample Text: {token_count}")
791
+ print(f"model_name: {model_name}")
792
+ strength += 0.05
793
+ except FileNotFoundError as e:
794
+ print(f"Error: {e}")
795
+ except ValueError as e:
796
+ print(f"Error: {e}")
797
+
798
+ if __name__ == "__main__":
799
+ main()</llm_selector_example>
800
+ </internal_example_modules></dependencies_to_insert>
18
801
 
19
802
  OUTPUT:
20
- <updated_prompt><include>context/insert/2/updated_prompt.prompt</include></updated_prompt>
803
+ <updated_prompt>% You are an expert Python engineer. Your goal is to write a Python function, "conflicts_in_prompts", that takes two prompts as input and finds conflicts between them and suggests how to resolve those conflicts.
804
+
805
+ % You are an expert Python engineer.
806
+
807
+ % Code Style Requirements
808
+ - File must start with `from __future__ import annotations`.
809
+ - All functions must be fully type-hinted.
810
+ - Use `rich.console.Console` for all printing.
811
+
812
+ % Package Structure
813
+ - The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name').
814
+ - The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
815
+
816
+ % Error Handling
817
+ - Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.
818
+
819
+ % Here are the inputs and outputs of the function:
820
+ Inputs:
821
+ 'prompt1' - First prompt in the pair of prompts we are comparing.
822
+ 'prompt2' - Second prompt in the pair of prompts we are comparing.
823
+ 'strength' - A float that is the strength of the LLM model to use. Default is 0.5.
824
+ 'temperature' - A float that is the temperature of the LLM model to use. Default is 0.
825
+ Outputs:
826
+ 'changes_list' - A list of JSON objects, each containing the name of a prompt that needs to be changed and detailed instructions on how to change it.
827
+ 'total_cost' - A float that is the total cost of the model run
828
+ 'model_name' - A string that is the name of the selected LLM model
829
+
830
+ % Here is an example of a LangChain Expression Language (LCEL) program: <lcel_example>import os
831
+ from langchain_core.prompts import PromptTemplate
832
+ from langchain_community.cache import SQLiteCache
833
+ from langchain_community.llms.mlx_pipeline import MLXPipeline
834
+ from langchain.globals import set_llm_cache
835
+ from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser # Parsers are only avaiable in langchain_core.output_parsers not langchain.output_parsers
836
+ from langchain_core.output_parsers import StrOutputParser
837
+ from langchain_core.prompts import ChatPromptTemplate
838
+ from langchain_core.runnables import RunnablePassthrough, ConfigurableField
839
+
840
+ from langchain_openai import AzureChatOpenAI
841
+ from langchain_fireworks import Fireworks
842
+ from langchain_anthropic import ChatAnthropic
843
+ from langchain_openai import ChatOpenAI # Chatbot and conversational tasks
844
+ from langchain_openai import OpenAI # General language tasks
845
+ from langchain_google_genai import ChatGoogleGenerativeAI
846
+ from langchain_google_vertexai import ChatVertexAI
847
+ from langchain_groq import ChatGroq
848
+ from langchain_together import Together
849
+
850
+ from langchain.callbacks.base import BaseCallbackHandler
851
+ from langchain.schema import LLMResult
852
+
853
+ import json
854
+
855
+ from langchain_community.chat_models.mlx import ChatMLX
856
+ from langchain_core.messages import HumanMessage
857
+
858
+ from langchain_ollama.llms import OllamaLLM
859
+ from langchain_aws import ChatBedrockConverse
860
+
861
+ # Define a base output parser (e.g., PydanticOutputParser)
862
+ from pydantic import BaseModel, Field
863
+
864
+
865
+
866
+ class CompletionStatusHandler(BaseCallbackHandler):
867
+ def __init__(self):
868
+ self.is_complete = False
869
+ self.finish_reason = None
870
+ self.input_tokens = None
871
+ self.output_tokens = None
872
+
873
+ def on_llm_end(self, response: LLMResult, **kwargs) -> None:
874
+ self.is_complete = True
875
+ if response.generations and response.generations[0]:
876
+ generation = response.generations[0][0]
877
+ self.finish_reason = generation.generation_info.get('finish_reason').lower()
878
+
879
+ # Extract token usage
880
+ if hasattr(generation.message, 'usage_metadata'):
881
+ usage_metadata = generation.message.usage_metadata
882
+ self.input_tokens = usage_metadata.get('input_tokens')
883
+ self.output_tokens = usage_metadata.get('output_tokens')
884
+ # print("response:",response)
885
+ print("Extracted information:")
886
+ print(f"Finish reason: {self.finish_reason}")
887
+ print(f"Input tokens: {self.input_tokens}")
888
+ print(f"Output tokens: {self.output_tokens}")
889
+
890
+ # Set up the LLM with the custom handler
891
+ handler = CompletionStatusHandler()
892
+ # Always setup cache to save money and increase speeds
893
+ set_llm_cache(SQLiteCache(database_path=".langchain.db"))
894
+
895
+
896
+ # Create the LCEL template. Make note of the variable {topic} which will be filled in later.
897
+ prompt_template = PromptTemplate.from_template("Tell me a joke about {topic}")
898
+
899
+ llm = ChatGoogleGenerativeAI(model="gemini-2.5-pro-exp-03-25", temperature=0, callbacks=[handler])
900
+ # Combine with a model and parser to output a string
901
+ chain = prompt_template |llm| StrOutputParser()
902
+
903
+ # Run the template. Notice that the input is a dictionary with a single key "topic" which feeds it into the above prompt template. This is needed because the prompt template has a variable {topic} which needs to be filled in when invoked.
904
+ result = chain.invoke({"topic": "cats"})
905
+ print("********Google:", result)
906
+
907
+
908
+ llm = ChatVertexAI(model="gemini-2.5-pro-exp-03-25", temperature=0, callbacks=[handler])
909
+ # Combine with a model and parser to output a string
910
+ chain = prompt_template |llm| StrOutputParser()
911
+
912
+ # Run the template. Notice that the input is a dictionary with a single key "topic" which feeds it into the above prompt template. This is needed because the prompt template has a variable {topic} which needs to be filled in when invoked.
913
+ result = chain.invoke({"topic": "cats"})
914
+ print("********GoogleVertex:", result)
915
+
916
+
917
+ # Define your desired data structure.
918
+ class Joke(BaseModel):
919
+ setup: str = Field(description="question to set up a joke")
920
+ punchline: str = Field(description="answer to resolve the joke")
921
+
922
+
923
+ # Set up a parser
924
+ parser = JsonOutputParser(pydantic_object=Joke)
925
+
926
+ # Create a prompt template
927
+ prompt = PromptTemplate(
928
+ template="Answer the user query.\n{format_instructions}\n{query}\n",
929
+ input_variables=["query"],
930
+ partial_variables={"format_instructions": parser.get_format_instructions()},
931
+ )
932
+
933
+ llm_no_struct = ChatOpenAI(model="gpt-4o-mini", temperature=0,
934
+ callbacks=[handler])
935
+ llm = llm_no_struct.with_structured_output(Joke) # with structured output forces the output to be a specific object, in this case Joke. Only OpenAI models have structured output
936
+ # Chain the components.
937
+ # The class `LLMChain` was deprecated in LangChain 0.1.17 and will be removed in 1.0. Use RunnableSequence, e.g., `prompt | llm` instead.
938
+ chain = prompt | llm
939
+
940
+ # Invoke the chain with a query.
941
+ # IMPORTANT: chain.run is now obsolete. Use chain.invoke instead.
942
+ result = chain.invoke({"query": "Tell me a joke about openai."})
943
+ print("4o mini JSON: ",result)
944
+ print(result.setup) # How to access the structured output
945
+
946
+ llm = ChatOpenAI(model="o1", temperature=1,
947
+ callbacks=[handler],model_kwargs = {"max_completion_tokens" : 1000})
948
+ # Chain the components.
949
+ # The class `LLMChain` was deprecated in LangChain 0.1.17 and will be removed in 1.0. Use RunnableSequence, e.g., `prompt | llm` instead.
950
+ chain = prompt | llm | parser
951
+
952
+ # Invoke the chain with a query.
953
+ # IMPORTANT: chain.run is now obsolete. Use chain.invoke instead.
954
+ result = chain.invoke({"query": "Tell me a joke about openai."})
955
+ print("o1 JSON: ",result)
956
+
957
+ # Get DEEPSEEK_API_KEY environmental variable
958
+
959
+ deepseek_api_key = os.getenv('DEEPSEEK_API_KEY')
960
+
961
+ # Ensure the API key is retrieved successfully
962
+ if deepseek_api_key is None:
963
+ raise ValueError("DEEPSEEK_API_KEY environment variable is not set")
964
+
965
+ llm = ChatOpenAI(
966
+ model='deepseek-chat',
967
+ openai_api_key=deepseek_api_key,
968
+ openai_api_base='https://api.deepseek.com',
969
+ temperature=0, callbacks=[handler]
970
+ )
971
+
972
+ # Chain the components
973
+ chain = prompt | llm | parser
974
+
975
+ # Invoke the chain with a query
976
+ result = chain.invoke({"query": "Write joke about deepseek."})
977
+ print("deepseek",result)
978
+
979
+
980
+ # Set up a parser
981
+ parser = PydanticOutputParser(pydantic_object=Joke)
982
+ # Chain the components
983
+ chain = prompt | llm | parser
984
+
985
+ # Invoke the chain with a query
986
+ result = chain.invoke({"query": "Write joke about deepseek and pydantic."})
987
+ print("deepseek pydantic",result)
988
+
989
+ # Set up the Azure ChatOpenAI LLM instance
990
+ llm_no_struct = AzureChatOpenAI(
991
+ model="o4-mini",
992
+ temperature=1,
993
+ callbacks=[handler]
994
+ )
995
+ llm = llm_no_struct.with_structured_output(Joke) # with structured output forces the output to be a specific JSON format
996
+ # Chain the components: prompt | llm | parser
997
+ chain = prompt | llm # returns a Joke object
998
+
999
+ # Invoke the chain with a query
1000
+ result = chain.invoke({"query": "What is Azure?"}) # Pass a dictionary if `invoke` expects it
1001
+ print("Azure Result:", result)
1002
+
1003
+ # Set up a parser
1004
+ parser = JsonOutputParser(pydantic_object=Joke)
1005
+
1006
+ llm = Fireworks(
1007
+ model="accounts/fireworks/models/llama4-maverick-instruct-basic",
1008
+ temperature=0, callbacks=[handler])
1009
+ # Chain the components
1010
+ chain = prompt | llm | parser
1011
+
1012
+ # Invoke the chain with a query
1013
+ # no money in account
1014
+ # result = chain.invoke({"query": "Tell me a joke about the president"})
1015
+ # print("fireworks",result)
1016
+
1017
+
1018
+
1019
+
1020
+
1021
+ prompt = ChatPromptTemplate.from_template(
1022
+ "Tell me a short joke about {topic}"
1023
+ )
1024
+ chat_openai = ChatOpenAI(model="gpt-3.5-turbo", callbacks=[handler])
1025
+ openai = OpenAI(model="gpt-3.5-turbo-instruct", callbacks=[handler])
1026
+ anthropic = ChatAnthropic(model="claude-2", callbacks=[handler])
1027
+ model = (
1028
+ chat_openai
1029
+ .with_fallbacks([anthropic])
1030
+ .configurable_alternatives(
1031
+ ConfigurableField(id="model"),
1032
+ default_key="chat_openai",
1033
+ openai=openai,
1034
+ anthropic=anthropic,
1035
+ )
1036
+ )
1037
+
1038
+ chain = (
1039
+ {"topic": RunnablePassthrough()}
1040
+ | prompt
1041
+ | model
1042
+ | StrOutputParser()
1043
+ )
1044
+ result = chain.invoke({"topic": "Tell me a joke about the president"})
1045
+ print("config alt:",result)
1046
+
1047
+
1048
+
1049
+ llm = ChatAnthropic(
1050
+ model="claude-3-7-sonnet-latest",
1051
+ max_tokens=5000, # Total tokens for the response
1052
+ thinking={"type": "enabled", "budget_tokens": 2000}, # Tokens for internal reasoning
1053
+ )
1054
+
1055
+ response = llm.invoke("What is the cube root of 50.653?")
1056
+ print(json.dumps(response.content, indent=2))
1057
+
1058
+
1059
+ llm = ChatGroq(temperature=0, model_name="qwen-qwq-32b", callbacks=[handler])
1060
+ system = "You are a helpful assistant."
1061
+ human = "{text}"
1062
+ prompt = ChatPromptTemplate.from_messages([("system", system), ("human", human)])
1063
+
1064
+ chain = prompt | llm | StrOutputParser()
1065
+ print(chain.invoke({"text": "Explain the importance of low latency LLMs."}))
1066
+
1067
+
1068
+ llm = Together(
1069
+ model="meta-llama/Llama-3-70b-chat-hf",
1070
+ max_tokens=500, callbacks=[handler]
1071
+ )
1072
+ chain = prompt | llm | StrOutputParser()
1073
+ print(chain.invoke({"text": "Explain the importance of together.ai."}))
1074
+
1075
+
1076
+ # Define a prompt template with placeholders for variables
1077
+ prompt_template = PromptTemplate.from_template("Tell me a {adjective} joke about {content}.")
1078
+
1079
+ # Format the prompt with the variables
1080
+ formatted_prompt = prompt_template.format(adjective="funny", content="data scientists")
1081
+
1082
+ # Print the formatted prompt
1083
+ print(formatted_prompt)
1084
+
1085
+
1086
+ # Set up the LLM with the custom handler
1087
+ handler = CompletionStatusHandler()
1088
+
1089
+
1090
+ llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.9, callbacks=[handler])
1091
+
1092
+ prompt = PromptTemplate.from_template("What is a good name for a company that makes {product}?")
1093
+
1094
+ chain = prompt | llm
1095
+
1096
+ # Invoke the chain
1097
+ response = chain.invoke({"product":"colorful socks"})
1098
+
1099
+ # Check completion status
1100
+ print(f"Is complete: {handler.is_complete}")
1101
+ print(f"Finish reason: {handler.finish_reason}")
1102
+ print(f"Response: {response}")
1103
+ print(f"Input tokens: {handler.input_tokens}")
1104
+ print(f"Output tokens: {handler.output_tokens}")
1105
+
1106
+
1107
+
1108
+ template = """Question: {question}"""
1109
+
1110
+ prompt = ChatPromptTemplate.from_template(template)
1111
+
1112
+ model = OllamaLLM(model="qwen2.5-coder:32b")
1113
+
1114
+ chain = prompt | model
1115
+
1116
+ output = chain.invoke({"question": "Write a python function that calculates Pi"})
1117
+ print(output)
1118
+
1119
+
1120
+
1121
+ llm = MLXPipeline.from_model_id(
1122
+ "mlx-community/quantized-gemma-2b-it",
1123
+ pipeline_kwargs={"max_tokens": 10, "temp": 0.1},
1124
+ )
1125
+
1126
+
1127
+ chat_model = ChatMLX(llm=llm)
1128
+ messages = [HumanMessage(content="What happens when an unstoppable force meets an immovable object?")]
1129
+ response = chat_model.invoke(messages)
1130
+ print(response.content)
1131
+
1132
+
1133
+
1134
+ llm = ChatBedrockConverse(
1135
+ model_id="anthropic.claude-3-5-sonnet-20240620-v1:0",
1136
+ # Additional parameters like temperature, max_tokens can be set here
1137
+ )
1138
+
1139
+ messages = [HumanMessage(content="What happens when an unstoppable force meets an immovable sonnet?")]
1140
+ response = llm.invoke(messages)
1141
+ print(response.content)</lcel_example>
1142
+
1143
+ % Here are examples of how to use internal modules:
1144
+ <internal_example_modules>
1145
+ % Example of selecting a Langchain LLM and counting tokens using llm_selector: <llm_selector_example>from pdd.llm_selector import llm_selector
1146
+
1147
+ def main() -> None:
1148
+ """
1149
+ Main function to demonstrate the usage of the llm_selector function.
1150
+ """
1151
+ # Define the strength and temperature parameters
1152
+ strength: float = 0.5 # Example strength value for the LLM model
1153
+ temperature: float = 1.0 # Example temperature value for the LLM model
1154
+
1155
+ try:
1156
+ while strength <= 1.1:
1157
+ # Call the llm_selector function with the specified strength and temperature
1158
+ llm, token_counter, input_cost, output_cost, model_name = llm_selector(strength, temperature)
1159
+ print(f"Strength: {strength}")
1160
+
1161
+ # Print the details of the selected LLM model
1162
+ print(f"Selected LLM Model: {model_name}")
1163
+ print(f"Input Cost per Million Tokens: {input_cost}")
1164
+ print(f"Output Cost per Million Tokens: {output_cost}")
1165
+
1166
+ # Example usage of the token counter function
1167
+ sample_text: str = "This is a sample text to count tokens."
1168
+ token_count: int = token_counter(sample_text)
1169
+ print(f"Token Count for Sample Text: {token_count}")
1170
+ print(f"model_name: {model_name}")
1171
+ strength += 0.05
1172
+ except FileNotFoundError as e:
1173
+ print(f"Error: {e}")
1174
+ except ValueError as e:
1175
+ print(f"Error: {e}")
1176
+
1177
+ if __name__ == "__main__":
1178
+ main()</llm_selector_example>
1179
+ </internal_example_modules>
1180
+
1181
+ % This function will use Langchain to do the following:
1182
+ Step 1. Use $PDD_PATH environment variable to get the path to the project. Load the '$PDD_PATH/prompts/conflict_LLM.prompt' and '$PDD_PATH/prompts/extract_conflicts_LLM.prompt' files.
1183
+ Step 2. Then this will create a Langchain LCEL template from the conflict_LLM prompt.
1184
+ Step 3. This will use llm_selector for the model, imported from a relative path.
1185
+ Step 4. Pretty print a message letting the user know it is running and how many tokens (using token_counter from llm_selector) are in the prompt and the cost. The cost from llm_selector is in dollars per million tokens.
1186
+ Step 5. Run the prompts through the model using Langchain LCEL with string output.
1187
+ 5a. Pass the following string parameters to the prompt during invoke:
1188
+ - 'PROMPT1'
1189
+ - 'PROMPT2'
1190
+ 5b. Pretty print the output of 5a which will be in Markdown format.
1191
+ Step 6. Create a Langchain LCEL template using a .8 strength llm_selector and token counter from the extract_conflicts_LLM prompt that outputs JSON:
1192
+ 6a. Pass the following string parameters to the prompt during invocation: 'llm_output' (this string is from Step 5a).
1193
+ 6b. Calculate input and output token count using token_counter from llm_selector and pretty print the running message with the token count and cost.
1194
+ 6c. Use 'get' function to extract 'changes_list' list values using from the dictionary output.
1195
+ Step 7. Return the changes_list, total_cost and model_name.</updated_prompt>
21
1196
  <example>
22
1197
  <examples>
23
1198