pdd-cli 0.0.44__py3-none-any.whl → 0.0.45__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -4,20 +4,1075 @@
4
4
  <examples>
5
5
  <example id="1">
6
6
  INPUT:
7
- <prompt_to_update><include>context/insert/1/prompt_to_update.prompt</include></prompt_to_update>
8
- <dependencies_to_insert><include>context/insert/1/dependencies.prompt</include></dependencies_to_insert>
7
+ <prompt_to_update>% You are an expert Python Software Engineer. Your goal is to write a Python function, "postprocess", that will extract code from a string output of an LLM. All output to the console will be pretty printed using the Python Rich library.
8
+
9
+ % The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name'). All output to the console will be pretty printed using the Python Rich library. Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.
10
+ % The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
11
+
12
+ % Here are the inputs and outputs of the function:
13
+ Inputs:
14
+ 'llm_output' - A string containing a mix of text and code sections.
15
+ 'language' - A string specifying the programming language of the code to be extracted.
16
+ 'strength' - A float between 0 and 1 that represents the strength of the LLM model to use. Default is 0.9.
17
+ 'temperature' - A float between 0 and 1 that represents the temperature parameter for the LLM model. Default is 0.
18
+ 'verbose' - A boolean that indicates whether to print detailed processing information. Default is False.
19
+ Outputs as a tuple:
20
+ 'extracted_code' - A string containing the extracted and processed code.
21
+ 'total_cost' - A float representing the total cost of running the function.
22
+ 'model_name' - A string representing the model name used for extraction.
23
+
24
+ % This function will do the following:
25
+ Step 1. If strength is 0, use postprocess_0 function to extract code and return (extracted_code, 0.0).
26
+ Step 2. Load the 'extract_code_LLM.prompt' template file.
27
+ Step 3. Process the text using llm_invoke:
28
+ 3a. Pass the following parameters to the prompt:
29
+ - 'llm_output'
30
+ - 'language'
31
+ 3b. The Pydantic output will contain the 'extracted_code' key.
32
+ 3c. For the extracted_code, if the first and last lines contain triple backticks, delete those lines entirely. The language name that follows the opening triple backticks should be removed as well.
33
+ Step 4. Return the extracted code string, total cost float and model name string.
34
+ </prompt_to_update>
35
+ <dependencies_to_insert>% Here is how to use the internal modules:
36
+ <internal_modules>
37
+ For loading prompt templates:
38
+ <load_prompt_template_example>
39
+ from pdd.load_prompt_template import load_prompt_template
40
+ from rich import print
41
+
42
+ def main():
43
+ prompt_name = "generate_test_LLM" # Name of the prompt file without extension
44
+ prompt = load_prompt_template(prompt_name)
45
+ if prompt:
46
+ print("[blue]Loaded Prompt Template:[/blue]")
47
+ print(prompt)
48
+
49
+ if __name__ == "__main__":
50
+ main()
51
+ </load_prompt_template_example>
52
+
53
+ For running prompts with llm_invoke:
54
+ <llm_invoke_example>
55
+ from pydantic import BaseModel, Field
56
+ from pdd.llm_invoke import llm_invoke
57
+ from collections import defaultdict
58
+
59
+ # Define a Pydantic model for structured output
60
+ class Joke(BaseModel):
61
+ setup: str = Field(description="The setup of the joke")
62
+ punchline: str = Field(description="The punchline of the joke")
63
+
64
+ def main():
65
+ """
66
+ Main function to demonstrate the usage of `llm_invoke`.
67
+ """
68
+ # Dictionary to track strength ranges for each model
69
+ model_ranges = defaultdict(list)
70
+ current_model = None
71
+ range_start = 0.0
72
+
73
+ prompt = "Tell me a joke about {topic}"
74
+ input_json = {"topic": "programmers"}
75
+ temperature = 1
76
+ verbose = False
77
+
78
+ strength = 0.5
79
+ while strength <= 0.5:
80
+ print(f"\nStrength: {strength}")
81
+
82
+ # Example 1: Unstructured Output
83
+ print("\n--- Unstructured Output ---")
84
+ response = llm_invoke(
85
+ prompt=prompt,
86
+ input_json=input_json,
87
+ strength=strength,
88
+ temperature=temperature,
89
+ verbose=verbose
90
+ )
91
+
92
+ # Track model changes for strength ranges
93
+ if current_model != response['model_name']:
94
+ if current_model is not None:
95
+ model_ranges[current_model].append((range_start, strength - 0.005))
96
+ current_model = response['model_name']
97
+ range_start = strength
98
+
99
+ print(f"Result: {response['result']}")
100
+ print(f"Cost: ${response['cost']:.6f}")
101
+ print(f"Model Used: {response['model_name']}")
102
+
103
+ # Example 2: Structured Output with Pydantic Model
104
+ prompt_structured = (
105
+ "Generate a joke about {topic}. \n"
106
+ "Return it in this exact JSON format:\n"
107
+ "{{ \n"
108
+ ' "setup": "your setup here",\n'
109
+ ' "punchline": "your punchline here"\n'
110
+ "}}\n"
111
+ "Return ONLY the JSON with no additional text or explanation."
112
+ )
113
+ input_json_structured = {"topic": "data scientists"}
114
+ output_pydantic = Joke
115
+
116
+ print("\n--- Structured Output ---")
117
+ try:
118
+ response_structured = llm_invoke(
119
+ prompt=prompt_structured,
120
+ input_json=input_json_structured,
121
+ strength=strength,
122
+ temperature=temperature,
123
+ verbose=True,
124
+ output_pydantic=output_pydantic
125
+ )
126
+ print(f"Result: {response_structured['result']}")
127
+ print(f"Cost: ${response_structured['cost']:.6f}")
128
+ print(f"Model Used: {response_structured['model_name']}")
129
+
130
+ # Access structured data
131
+ joke: Joke = response_structured['result']
132
+ print(f"\nJoke Setup: {joke.setup}")
133
+ print(f"Joke Punchline: {joke.punchline}")
134
+ except Exception as e:
135
+ print(f"Error encountered during structured output: {e}")
136
+
137
+ strength += 0.005
138
+ # round to 3 decimal places
139
+ strength = round(strength, 3)
140
+
141
+ # Add the final range for the last model
142
+ model_ranges[current_model].append((range_start, 1.0))
143
+
144
+ # Print out the strength ranges for each model
145
+ print("\n=== Model Strength Ranges ===")
146
+ for model, ranges in model_ranges.items():
147
+ print(f"\n{model}:")
148
+ for start, end in ranges:
149
+ print(f" Strength {start:.3f} to {end:.3f}")
150
+
151
+ if __name__ == "__main__":
152
+ main()
153
+ </llm_invoke_example>
154
+ </internal_modules>
155
+ </dependencies_to_insert>
9
156
 
10
157
  OUTPUT:
11
- <updated_prompt><include>context/insert/1/updated_prompt.prompt</include></updated_prompt>
158
+ <updated_prompt>% You are an expert Python Software Engineer. Your goal is to write a Python function, "postprocess", that will extract code from a string output of an LLM. All output to the console will be pretty printed using the Python Rich library.
159
+
160
+ % The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name'). All output to the console will be pretty printed using the Python Rich library. Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.
161
+ % The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
162
+
163
+ % Here are the inputs and outputs of the function:
164
+ Inputs:
165
+ 'llm_output' - A string containing a mix of text and code sections.
166
+ 'language' - A string specifying the programming language of the code to be extracted.
167
+ 'strength' - A float between 0 and 1 that represents the strength of the LLM model to use. Default is 0.9.
168
+ 'temperature' - A float between 0 and 1 that represents the temperature parameter for the LLM model. Default is 0.
169
+ 'verbose' - A boolean that indicates whether to print detailed processing information. Default is False.
170
+ Outputs as a tuple:
171
+ 'extracted_code' - A string containing the extracted and processed code.
172
+ 'total_cost' - A float representing the total cost of running the function.
173
+ 'model_name' - A string representing the model name used for extraction.
174
+
175
+ % Here is how to use the internal modules:
176
+ <internal_modules>
177
+ For loading prompt templates:
178
+ <load_prompt_template_example>
179
+ from pdd.load_prompt_template import load_prompt_template
180
+ from rich import print
181
+
182
+ def main():
183
+ prompt_name = "generate_test_LLM" # Name of the prompt file without extension
184
+ prompt = load_prompt_template(prompt_name)
185
+ if prompt:
186
+ print("[blue]Loaded Prompt Template:[/blue]")
187
+ print(prompt)
188
+
189
+ if __name__ == "__main__":
190
+ main()
191
+ </load_prompt_template_example>
192
+
193
+ For running prompts with llm_invoke:
194
+ <llm_invoke_example>
195
+ from pydantic import BaseModel, Field
196
+ from pdd.llm_invoke import llm_invoke
197
+ from collections import defaultdict
198
+
199
+ # Define a Pydantic model for structured output
200
+ class Joke(BaseModel):
201
+ setup: str = Field(description="The setup of the joke")
202
+ punchline: str = Field(description="The punchline of the joke")
203
+
204
+ def main():
205
+ """
206
+ Main function to demonstrate the usage of `llm_invoke`.
207
+ """
208
+ # Dictionary to track strength ranges for each model
209
+ model_ranges = defaultdict(list)
210
+ current_model = None
211
+ range_start = 0.0
212
+
213
+ prompt = "Tell me a joke about {topic}"
214
+ input_json = {"topic": "programmers"}
215
+ temperature = 1
216
+ verbose = False
217
+
218
+ strength = 0.5
219
+ while strength <= 0.5:
220
+ print(f"\nStrength: {strength}")
221
+
222
+ # Example 1: Unstructured Output
223
+ print("\n--- Unstructured Output ---")
224
+ response = llm_invoke(
225
+ prompt=prompt,
226
+ input_json=input_json,
227
+ strength=strength,
228
+ temperature=temperature,
229
+ verbose=verbose
230
+ )
231
+
232
+ # Track model changes for strength ranges
233
+ if current_model != response['model_name']:
234
+ if current_model is not None:
235
+ model_ranges[current_model].append((range_start, strength - 0.005))
236
+ current_model = response['model_name']
237
+ range_start = strength
238
+
239
+ print(f"Result: {response['result']}")
240
+ print(f"Cost: ${response['cost']:.6f}")
241
+ print(f"Model Used: {response['model_name']}")
242
+
243
+ # Example 2: Structured Output with Pydantic Model
244
+ prompt_structured = (
245
+ "Generate a joke about {topic}. \n"
246
+ "Return it in this exact JSON format:\n"
247
+ "{{ \n"
248
+ ' "setup": "your setup here",\n'
249
+ ' "punchline": "your punchline here"\n'
250
+ "}}\n"
251
+ "Return ONLY the JSON with no additional text or explanation."
252
+ )
253
+ input_json_structured = {"topic": "data scientists"}
254
+ output_pydantic = Joke
255
+
256
+ print("\n--- Structured Output ---")
257
+ try:
258
+ response_structured = llm_invoke(
259
+ prompt=prompt_structured,
260
+ input_json=input_json_structured,
261
+ strength=strength,
262
+ temperature=temperature,
263
+ verbose=True,
264
+ output_pydantic=output_pydantic
265
+ )
266
+ print(f"Result: {response_structured['result']}")
267
+ print(f"Cost: ${response_structured['cost']:.6f}")
268
+ print(f"Model Used: {response_structured['model_name']}")
269
+
270
+ # Access structured data
271
+ joke: Joke = response_structured['result']
272
+ print(f"\nJoke Setup: {joke.setup}")
273
+ print(f"Joke Punchline: {joke.punchline}")
274
+ except Exception as e:
275
+ print(f"Error encountered during structured output: {e}")
276
+
277
+ strength += 0.005
278
+ # round to 3 decimal places
279
+ strength = round(strength, 3)
280
+
281
+ # Add the final range for the last model
282
+ model_ranges[current_model].append((range_start, 1.0))
283
+
284
+ # Print out the strength ranges for each model
285
+ print("\n=== Model Strength Ranges ===")
286
+ for model, ranges in model_ranges.items():
287
+ print(f"\n{model}:")
288
+ for start, end in ranges:
289
+ print(f" Strength {start:.3f} to {end:.3f}")
290
+
291
+ if __name__ == "__main__":
292
+ main()
293
+ </llm_invoke_example>
294
+ </internal_modules>
295
+
296
+ % This function will do the following:
297
+ Step 1. If strength is 0, use postprocess_0 function to extract code and return (extracted_code, 0.0).
298
+ Step 2. Load the 'extract_code_LLM.prompt' template file.
299
+ Step 3. Process the text using llm_invoke:
300
+ 3a. Pass the following parameters to the prompt:
301
+ - 'llm_output'
302
+ - 'language'
303
+ 3b. The Pydantic output will contain the 'extracted_code' key.
304
+ 3c. For the extracted_code, if the first and last lines contain triple backticks, delete those lines entirely. The language name that follows the opening triple backticks should be removed as well.
305
+ Step 4. Return the extracted code string, total cost float and model name string.
306
+ </updated_prompt>
12
307
  </example>
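Example 1 above describes the postprocess flow only in prose (Steps 1-4), so here is a minimal sketch of that flow, reusing the load_prompt_template and llm_invoke interfaces shown in the example. The ExtractedCode model name and the fence-stripping details are illustrative assumptions, and the strength == 0 shortcut via postprocess_0 (Step 1) is left out; this is a sketch, not the packaged implementation.

```python
# Illustrative sketch only; assumes the load_prompt_template / llm_invoke
# interfaces shown in example 1 above. The strength == 0 shortcut via
# postprocess_0 (Step 1) is omitted here.
from pydantic import BaseModel, Field
from pdd.load_prompt_template import load_prompt_template
from pdd.llm_invoke import llm_invoke

FENCE = "`" * 3  # triple backtick


class ExtractedCode(BaseModel):  # hypothetical output model
    extracted_code: str = Field(description="Code extracted from the LLM output")


def postprocess(llm_output: str, language: str, strength: float = 0.9,
                temperature: float = 0.0, verbose: bool = False):
    prompt = load_prompt_template("extract_code_LLM")             # Step 2
    response = llm_invoke(                                         # Step 3
        prompt=prompt,
        input_json={"llm_output": llm_output, "language": language},
        strength=strength,
        temperature=temperature,
        verbose=verbose,
        output_pydantic=ExtractedCode,
    )
    code = response["result"].extracted_code
    lines = code.splitlines()                                      # Step 3c: drop fence lines
    if lines and lines[0].lstrip().startswith(FENCE):
        lines = lines[1:]                                          # also drops the language tag
    if lines and lines[-1].strip().startswith(FENCE):
        lines = lines[:-1]
    return "\n".join(lines), response["cost"], response["model_name"]  # Step 4
```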
13
308
 
14
309
  <example id="2">
15
310
  INPUT:
16
- <prompt_to_update><include>context/insert/2/prompt_to_update.prompt</include></prompt_to_update>
17
- <dependencies_to_insert><include>context/insert/2/dependencies.prompt</include></dependencies_to_insert>
311
+ <prompt_to_update>% You are an expert Python engineer. Your goal is to write a Python function, "conflicts_in_prompts", that takes two prompts as input, finds conflicts between them, and suggests how to resolve those conflicts.
312
+
313
+ % The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name'). All output to the console will be pretty printed using the Python Rich library. Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.
314
+ % The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
315
+
316
+ % Here are the inputs and outputs of the function:
317
+ Inputs:
318
+ 'prompt1' - First prompt in the pair of prompts we are comparing.
319
+ 'prompt2' - Second prompt in the pair of prompts we are comparing.
320
+ 'strength' - A float that is the strength of the LLM model to use. Default is 0.5.
321
+ 'temperature' - A float that is the temperature of the LLM model to use. Default is 0.
322
+ Outputs:
323
+ 'changes_list' - A list of JSON objects, each containing the name of a prompt that needs to be changed and detailed instructions on how to change it.
324
+ 'total_cost' - A float that is the total cost of the model run
325
+ 'model_name' - A string that is the name of the selected LLM model
326
+
327
+ % Here is an example of a LangChain Expression Language (LCEL) program: <lcel_example>import os
328
+ from langchain_core.prompts import PromptTemplate
329
+ from langchain_community.cache import SQLiteCache
330
+ from langchain_community.llms.mlx_pipeline import MLXPipeline
331
+ from langchain.globals import set_llm_cache
332
+ from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser # Parsers are only available in langchain_core.output_parsers, not langchain.output_parsers
333
+ from langchain_core.output_parsers import StrOutputParser
334
+ from langchain_core.prompts import ChatPromptTemplate
335
+ from langchain_core.runnables import RunnablePassthrough, ConfigurableField
336
+
337
+ from langchain_openai import AzureChatOpenAI
338
+ from langchain_fireworks import Fireworks
339
+ from langchain_anthropic import ChatAnthropic
340
+ from langchain_openai import ChatOpenAI # Chatbot and conversational tasks
341
+ from langchain_openai import OpenAI # General language tasks
342
+ from langchain_google_genai import ChatGoogleGenerativeAI
343
+ from langchain_google_vertexai import ChatVertexAI
344
+ from langchain_groq import ChatGroq
345
+ from langchain_together import Together
346
+
347
+ from langchain.callbacks.base import BaseCallbackHandler
348
+ from langchain.schema import LLMResult
349
+
350
+ import json
351
+
352
+ from langchain_community.chat_models.mlx import ChatMLX
353
+ from langchain_core.messages import HumanMessage
354
+
355
+ from langchain_ollama.llms import OllamaLLM
356
+ from langchain_aws import ChatBedrockConverse
357
+
358
+ # Define a base output parser (e.g., PydanticOutputParser)
359
+ from pydantic import BaseModel, Field
360
+
361
+
362
+
363
+ class CompletionStatusHandler(BaseCallbackHandler):
364
+ def __init__(self):
365
+ self.is_complete = False
366
+ self.finish_reason = None
367
+ self.input_tokens = None
368
+ self.output_tokens = None
369
+
370
+ def on_llm_end(self, response: LLMResult, **kwargs) -> None:
371
+ self.is_complete = True
372
+ if response.generations and response.generations[0]:
373
+ generation = response.generations[0][0]
374
+ self.finish_reason = generation.generation_info.get('finish_reason').lower()
375
+
376
+ # Extract token usage
377
+ if hasattr(generation.message, 'usage_metadata'):
378
+ usage_metadata = generation.message.usage_metadata
379
+ self.input_tokens = usage_metadata.get('input_tokens')
380
+ self.output_tokens = usage_metadata.get('output_tokens')
381
+ # print("response:",response)
382
+ print("Extracted information:")
383
+ print(f"Finish reason: {self.finish_reason}")
384
+ print(f"Input tokens: {self.input_tokens}")
385
+ print(f"Output tokens: {self.output_tokens}")
386
+
387
+ # Set up the LLM with the custom handler
388
+ handler = CompletionStatusHandler()
389
+ # Always setup cache to save money and increase speeds
390
+ set_llm_cache(SQLiteCache(database_path=".langchain.db"))
391
+
392
+
393
+ # Create the LCEL template. Make note of the variable {topic} which will be filled in later.
394
+ prompt_template = PromptTemplate.from_template("Tell me a joke about {topic}")
395
+
396
+ llm = ChatGoogleGenerativeAI(model="gemini-2.5-pro-exp-03-25", temperature=0, callbacks=[handler])
397
+ # Combine with a model and parser to output a string
398
+ chain = prompt_template |llm| StrOutputParser()
399
+
400
+ # Run the template. Notice that the input is a dictionary with a single key "topic" which feeds it into the above prompt template. This is needed because the prompt template has a variable {topic} which needs to be filled in when invoked.
401
+ result = chain.invoke({"topic": "cats"})
402
+ print("********Google:", result)
403
+
404
+
405
+ llm = ChatVertexAI(model="gemini-2.5-pro-exp-03-25", temperature=0, callbacks=[handler])
406
+ # Combine with a model and parser to output a string
407
+ chain = prompt_template |llm| StrOutputParser()
408
+
409
+ # Run the template. Notice that the input is a dictionary with a single key "topic" which feeds it into the above prompt template. This is needed because the prompt template has a variable {topic} which needs to be filled in when invoked.
410
+ result = chain.invoke({"topic": "cats"})
411
+ print("********GoogleVertex:", result)
412
+
413
+
414
+ # Define your desired data structure.
415
+ class Joke(BaseModel):
416
+ setup: str = Field(description="question to set up a joke")
417
+ punchline: str = Field(description="answer to resolve the joke")
418
+
419
+
420
+ # Set up a parser
421
+ parser = JsonOutputParser(pydantic_object=Joke)
422
+
423
+ # Create a prompt template
424
+ prompt = PromptTemplate(
425
+ template="Answer the user query.\n{format_instructions}\n{query}\n",
426
+ input_variables=["query"],
427
+ partial_variables={"format_instructions": parser.get_format_instructions()},
428
+ )
429
+
430
+ llm_no_struct = ChatOpenAI(model="gpt-4o-mini", temperature=0,
431
+ callbacks=[handler])
432
+ llm = llm_no_struct.with_structured_output(Joke) # with structured output forces the output to be a specific object, in this case Joke. Only OpenAI models have structured output
433
+ # Chain the components.
434
+ # The class `LLMChain` was deprecated in LangChain 0.1.17 and will be removed in 1.0. Use RunnableSequence, e.g., `prompt | llm` instead.
435
+ chain = prompt | llm
436
+
437
+ # Invoke the chain with a query.
438
+ # IMPORTANT: chain.run is now obsolete. Use chain.invoke instead.
439
+ result = chain.invoke({"query": "Tell me a joke about openai."})
440
+ print("4o mini JSON: ",result)
441
+ print(result.setup) # How to access the structured output
442
+
443
+ llm = ChatOpenAI(model="o1", temperature=1,
444
+ callbacks=[handler],model_kwargs = {"max_completion_tokens" : 1000})
445
+ # Chain the components.
446
+ # The class `LLMChain` was deprecated in LangChain 0.1.17 and will be removed in 1.0. Use RunnableSequence, e.g., `prompt | llm` instead.
447
+ chain = prompt | llm | parser
448
+
449
+ # Invoke the chain with a query.
450
+ # IMPORTANT: chain.run is now obsolete. Use chain.invoke instead.
451
+ result = chain.invoke({"query": "Tell me a joke about openai."})
452
+ print("o1 JSON: ",result)
453
+
454
+ # Get DEEPSEEK_API_KEY environmental variable
455
+
456
+ deepseek_api_key = os.getenv('DEEPSEEK_API_KEY')
457
+
458
+ # Ensure the API key is retrieved successfully
459
+ if deepseek_api_key is None:
460
+ raise ValueError("DEEPSEEK_API_KEY environment variable is not set")
461
+
462
+ llm = ChatOpenAI(
463
+ model='deepseek-chat',
464
+ openai_api_key=deepseek_api_key,
465
+ openai_api_base='https://api.deepseek.com',
466
+ temperature=0, callbacks=[handler]
467
+ )
468
+
469
+ # Chain the components
470
+ chain = prompt | llm | parser
471
+
472
+ # Invoke the chain with a query
473
+ result = chain.invoke({"query": "Write joke about deepseek."})
474
+ print("deepseek",result)
475
+
476
+
477
+ # Set up a parser
478
+ parser = PydanticOutputParser(pydantic_object=Joke)
479
+ # Chain the components
480
+ chain = prompt | llm | parser
481
+
482
+ # Invoke the chain with a query
483
+ result = chain.invoke({"query": "Write joke about deepseek and pydantic."})
484
+ print("deepseek pydantic",result)
485
+
486
+ # Set up the Azure ChatOpenAI LLM instance
487
+ llm_no_struct = AzureChatOpenAI(
488
+ model="o4-mini",
489
+ temperature=1,
490
+ callbacks=[handler]
491
+ )
492
+ llm = llm_no_struct.with_structured_output(Joke) # with structured output forces the output to be a specific JSON format
493
+ # Chain the components: prompt | llm | parser
494
+ chain = prompt | llm # returns a Joke object
495
+
496
+ # Invoke the chain with a query
497
+ result = chain.invoke({"query": "What is Azure?"}) # Pass a dictionary if `invoke` expects it
498
+ print("Azure Result:", result)
499
+
500
+ # Set up a parser
501
+ parser = JsonOutputParser(pydantic_object=Joke)
502
+
503
+ llm = Fireworks(
504
+ model="accounts/fireworks/models/llama4-maverick-instruct-basic",
505
+ temperature=0, callbacks=[handler])
506
+ # Chain the components
507
+ chain = prompt | llm | parser
508
+
509
+ # Invoke the chain with a query
510
+ # no money in account
511
+ # result = chain.invoke({"query": "Tell me a joke about the president"})
512
+ # print("fireworks",result)
513
+
514
+
515
+
516
+
517
+
518
+ prompt = ChatPromptTemplate.from_template(
519
+ "Tell me a short joke about {topic}"
520
+ )
521
+ chat_openai = ChatOpenAI(model="gpt-3.5-turbo", callbacks=[handler])
522
+ openai = OpenAI(model="gpt-3.5-turbo-instruct", callbacks=[handler])
523
+ anthropic = ChatAnthropic(model="claude-2", callbacks=[handler])
524
+ model = (
525
+ chat_openai
526
+ .with_fallbacks([anthropic])
527
+ .configurable_alternatives(
528
+ ConfigurableField(id="model"),
529
+ default_key="chat_openai",
530
+ openai=openai,
531
+ anthropic=anthropic,
532
+ )
533
+ )
534
+
535
+ chain = (
536
+ {"topic": RunnablePassthrough()}
537
+ | prompt
538
+ | model
539
+ | StrOutputParser()
540
+ )
541
+ result = chain.invoke({"topic": "Tell me a joke about the president"})
542
+ print("config alt:",result)
543
+
544
+
545
+
546
+ llm = ChatAnthropic(
547
+ model="claude-3-7-sonnet-latest",
548
+ max_tokens=5000, # Total tokens for the response
549
+ thinking={"type": "enabled", "budget_tokens": 2000}, # Tokens for internal reasoning
550
+ )
551
+
552
+ response = llm.invoke("What is the cube root of 50.653?")
553
+ print(json.dumps(response.content, indent=2))
554
+
555
+
556
+ llm = ChatGroq(temperature=0, model_name="qwen-qwq-32b", callbacks=[handler])
557
+ system = "You are a helpful assistant."
558
+ human = "{text}"
559
+ prompt = ChatPromptTemplate.from_messages([("system", system), ("human", human)])
560
+
561
+ chain = prompt | llm | StrOutputParser()
562
+ print(chain.invoke({"text": "Explain the importance of low latency LLMs."}))
563
+
564
+
565
+ llm = Together(
566
+ model="meta-llama/Llama-3-70b-chat-hf",
567
+ max_tokens=500, callbacks=[handler]
568
+ )
569
+ chain = prompt | llm | StrOutputParser()
570
+ print(chain.invoke({"text": "Explain the importance of together.ai."}))
571
+
572
+
573
+ # Define a prompt template with placeholders for variables
574
+ prompt_template = PromptTemplate.from_template("Tell me a {adjective} joke about {content}.")
575
+
576
+ # Format the prompt with the variables
577
+ formatted_prompt = prompt_template.format(adjective="funny", content="data scientists")
578
+
579
+ # Print the formatted prompt
580
+ print(formatted_prompt)
581
+
582
+
583
+ # Set up the LLM with the custom handler
584
+ handler = CompletionStatusHandler()
585
+
586
+
587
+ llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.9, callbacks=[handler])
588
+
589
+ prompt = PromptTemplate.from_template("What is a good name for a company that makes {product}?")
590
+
591
+ chain = prompt | llm
592
+
593
+ # Invoke the chain
594
+ response = chain.invoke({"product":"colorful socks"})
595
+
596
+ # Check completion status
597
+ print(f"Is complete: {handler.is_complete}")
598
+ print(f"Finish reason: {handler.finish_reason}")
599
+ print(f"Response: {response}")
600
+ print(f"Input tokens: {handler.input_tokens}")
601
+ print(f"Output tokens: {handler.output_tokens}")
602
+
603
+
604
+
605
+ template = """Question: {question}"""
606
+
607
+ prompt = ChatPromptTemplate.from_template(template)
608
+
609
+ model = OllamaLLM(model="qwen2.5-coder:32b")
610
+
611
+ chain = prompt | model
612
+
613
+ output = chain.invoke({"question": "Write a python function that calculates Pi"})
614
+ print(output)
615
+
616
+
617
+
618
+ llm = MLXPipeline.from_model_id(
619
+ "mlx-community/quantized-gemma-2b-it",
620
+ pipeline_kwargs={"max_tokens": 10, "temp": 0.1},
621
+ )
622
+
623
+
624
+ chat_model = ChatMLX(llm=llm)
625
+ messages = [HumanMessage(content="What happens when an unstoppable force meets an immovable object?")]
626
+ response = chat_model.invoke(messages)
627
+ print(response.content)
628
+
629
+
630
+
631
+ llm = ChatBedrockConverse(
632
+ model_id="anthropic.claude-3-5-sonnet-20240620-v1:0",
633
+ # Additional parameters like temperature, max_tokens can be set here
634
+ )
635
+
636
+ messages = [HumanMessage(content="What happens when an unstoppable force meets an immovable sonnet?")]
637
+ response = llm.invoke(messages)
638
+ print(response.content)</lcel_example>
639
+
640
+ % This function will use Langchain to do the following:
641
+ Step 1. Use $PDD_PATH environment variable to get the path to the project. Load the '$PDD_PATH/prompts/conflict_LLM.prompt' and '$PDD_PATH/prompts/extract_conflicts_LLM.prompt' files.
642
+ Step 2. Then this will create a Langchain LCEL template from the conflict_LLM prompt.
643
+ Step 3. This will use llm_selector for the model, imported from a relative path.
644
+ Step 4. Pretty print a message letting the user know it is running, how many tokens are in the prompt (using token_counter from llm_selector), and the estimated cost. The cost from llm_selector is in dollars per million tokens.
645
+ Step 5. Run the prompts through the model using Langchain LCEL with string output.
646
+ 5a. Pass the following string parameters to the prompt during invoke:
647
+ - 'PROMPT1'
648
+ - 'PROMPT2'
649
+ 5b. Pretty print the output of 5a which will be in Markdown format.
650
+ Step 6. Create a Langchain LCEL template from the extract_conflicts_LLM prompt that outputs JSON, using llm_selector at 0.8 strength together with its token counter:
651
+ 6a. Pass the following string parameters to the prompt during invocation: 'llm_output' (this string is from Step 5a).
652
+ 6b. Calculate input and output token count using token_counter from llm_selector and pretty print the running message with the token count and cost.
653
+ 6c. Use the 'get' function to extract the 'changes_list' values from the dictionary output.
654
+ Step 7. Return the changes_list, total_cost and model_name.</prompt_to_update>
655
+ <dependencies_to_insert>% Here are examples of how to use internal modules:
656
+ <internal_example_modules>
657
+ % Example of selecting a Langchain LLM and counting tokens using llm_selector: <llm_selector_example>from pdd.llm_selector import llm_selector
658
+
659
+ def main() -> None:
660
+ """
661
+ Main function to demonstrate the usage of the llm_selector function.
662
+ """
663
+ # Define the strength and temperature parameters
664
+ strength: float = 0.5 # Example strength value for the LLM model
665
+ temperature: float = 1.0 # Example temperature value for the LLM model
666
+
667
+ try:
668
+ while strength <= 1.1:
669
+ # Call the llm_selector function with the specified strength and temperature
670
+ llm, token_counter, input_cost, output_cost, model_name = llm_selector(strength, temperature)
671
+ print(f"Strength: {strength}")
672
+
673
+ # Print the details of the selected LLM model
674
+ print(f"Selected LLM Model: {model_name}")
675
+ print(f"Input Cost per Million Tokens: {input_cost}")
676
+ print(f"Output Cost per Million Tokens: {output_cost}")
677
+
678
+ # Example usage of the token counter function
679
+ sample_text: str = "This is a sample text to count tokens."
680
+ token_count: int = token_counter(sample_text)
681
+ print(f"Token Count for Sample Text: {token_count}")
682
+ print(f"model_name: {model_name}")
683
+ strength += 0.05
684
+ except FileNotFoundError as e:
685
+ print(f"Error: {e}")
686
+ except ValueError as e:
687
+ print(f"Error: {e}")
688
+
689
+ if __name__ == "__main__":
690
+ main()</llm_selector_example>
691
+ </internal_example_modules></dependencies_to_insert>
18
692
 
19
693
  OUTPUT:
20
- <updated_prompt><include>context/insert/2/updated_prompt.prompt</include></updated_prompt>
694
+ <updated_prompt>% You are an expert Python engineer. Your goal is to write a Python function, "conflicts_in_prompts", that takes two prompts as input, finds conflicts between them, and suggests how to resolve those conflicts.
695
+
696
+ % The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name'). All output to the console will be pretty printed using the Python Rich library. Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.
697
+ % The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
698
+
699
+ % Here are the inputs and outputs of the function:
700
+ Inputs:
701
+ 'prompt1' - First prompt in the pair of prompts we are comparing.
702
+ 'prompt2' - Second prompt in the pair of prompts we are comparing.
703
+ 'strength' - A float that is the strength of the LLM model to use. Default is 0.5.
704
+ 'temperature' - A float that is the temperature of the LLM model to use. Default is 0.
705
+ Outputs:
706
+ 'changes_list' - A list of JSON objects, each containing the name of a prompt that needs to be changed and detailed instructions on how to change it.
707
+ 'total_cost' - A float that is the total cost of the model run
708
+ 'model_name' - A string that is the name of the selected LLM model
709
+
710
+ % Here is an example of a LangChain Expression Language (LCEL) program: <lcel_example>import os
711
+ from langchain_core.prompts import PromptTemplate
712
+ from langchain_community.cache import SQLiteCache
713
+ from langchain_community.llms.mlx_pipeline import MLXPipeline
714
+ from langchain.globals import set_llm_cache
715
+ from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser # Parsers are only available in langchain_core.output_parsers, not langchain.output_parsers
716
+ from langchain_core.output_parsers import StrOutputParser
717
+ from langchain_core.prompts import ChatPromptTemplate
718
+ from langchain_core.runnables import RunnablePassthrough, ConfigurableField
719
+
720
+ from langchain_openai import AzureChatOpenAI
721
+ from langchain_fireworks import Fireworks
722
+ from langchain_anthropic import ChatAnthropic
723
+ from langchain_openai import ChatOpenAI # Chatbot and conversational tasks
724
+ from langchain_openai import OpenAI # General language tasks
725
+ from langchain_google_genai import ChatGoogleGenerativeAI
726
+ from langchain_google_vertexai import ChatVertexAI
727
+ from langchain_groq import ChatGroq
728
+ from langchain_together import Together
729
+
730
+ from langchain.callbacks.base import BaseCallbackHandler
731
+ from langchain.schema import LLMResult
732
+
733
+ import json
734
+
735
+ from langchain_community.chat_models.mlx import ChatMLX
736
+ from langchain_core.messages import HumanMessage
737
+
738
+ from langchain_ollama.llms import OllamaLLM
739
+ from langchain_aws import ChatBedrockConverse
740
+
741
+ # Define a base output parser (e.g., PydanticOutputParser)
742
+ from pydantic import BaseModel, Field
743
+
744
+
745
+
746
+ class CompletionStatusHandler(BaseCallbackHandler):
747
+ def __init__(self):
748
+ self.is_complete = False
749
+ self.finish_reason = None
750
+ self.input_tokens = None
751
+ self.output_tokens = None
752
+
753
+ def on_llm_end(self, response: LLMResult, **kwargs) -> None:
754
+ self.is_complete = True
755
+ if response.generations and response.generations[0]:
756
+ generation = response.generations[0][0]
757
+ self.finish_reason = generation.generation_info.get('finish_reason').lower()
758
+
759
+ # Extract token usage
760
+ if hasattr(generation.message, 'usage_metadata'):
761
+ usage_metadata = generation.message.usage_metadata
762
+ self.input_tokens = usage_metadata.get('input_tokens')
763
+ self.output_tokens = usage_metadata.get('output_tokens')
764
+ # print("response:",response)
765
+ print("Extracted information:")
766
+ print(f"Finish reason: {self.finish_reason}")
767
+ print(f"Input tokens: {self.input_tokens}")
768
+ print(f"Output tokens: {self.output_tokens}")
769
+
770
+ # Set up the LLM with the custom handler
771
+ handler = CompletionStatusHandler()
772
+ # Always setup cache to save money and increase speeds
773
+ set_llm_cache(SQLiteCache(database_path=".langchain.db"))
774
+
775
+
776
+ # Create the LCEL template. Make note of the variable {topic} which will be filled in later.
777
+ prompt_template = PromptTemplate.from_template("Tell me a joke about {topic}")
778
+
779
+ llm = ChatGoogleGenerativeAI(model="gemini-2.5-pro-exp-03-25", temperature=0, callbacks=[handler])
780
+ # Combine with a model and parser to output a string
781
+ chain = prompt_template |llm| StrOutputParser()
782
+
783
+ # Run the template. Notice that the input is a dictionary with a single key "topic" which feeds it into the above prompt template. This is needed because the prompt template has a variable {topic} which needs to be filled in when invoked.
784
+ result = chain.invoke({"topic": "cats"})
785
+ print("********Google:", result)
786
+
787
+
788
+ llm = ChatVertexAI(model="gemini-2.5-pro-exp-03-25", temperature=0, callbacks=[handler])
789
+ # Combine with a model and parser to output a string
790
+ chain = prompt_template |llm| StrOutputParser()
791
+
792
+ # Run the template. Notice that the input is a dictionary with a single key "topic" which feeds it into the above prompt template. This is needed because the prompt template has a variable {topic} which needs to be filled in when invoked.
793
+ result = chain.invoke({"topic": "cats"})
794
+ print("********GoogleVertex:", result)
795
+
796
+
797
+ # Define your desired data structure.
798
+ class Joke(BaseModel):
799
+ setup: str = Field(description="question to set up a joke")
800
+ punchline: str = Field(description="answer to resolve the joke")
801
+
802
+
803
+ # Set up a parser
804
+ parser = JsonOutputParser(pydantic_object=Joke)
805
+
806
+ # Create a prompt template
807
+ prompt = PromptTemplate(
808
+ template="Answer the user query.\n{format_instructions}\n{query}\n",
809
+ input_variables=["query"],
810
+ partial_variables={"format_instructions": parser.get_format_instructions()},
811
+ )
812
+
813
+ llm_no_struct = ChatOpenAI(model="gpt-4o-mini", temperature=0,
814
+ callbacks=[handler])
815
+ llm = llm_no_struct.with_structured_output(Joke) # with structured output forces the output to be a specific object, in this case Joke. Only OpenAI models have structured output
816
+ # Chain the components.
817
+ # The class `LLMChain` was deprecated in LangChain 0.1.17 and will be removed in 1.0. Use RunnableSequence, e.g., `prompt | llm` instead.
818
+ chain = prompt | llm
819
+
820
+ # Invoke the chain with a query.
821
+ # IMPORTANT: chain.run is now obsolete. Use chain.invoke instead.
822
+ result = chain.invoke({"query": "Tell me a joke about openai."})
823
+ print("4o mini JSON: ",result)
824
+ print(result.setup) # How to access the structured output
825
+
826
+ llm = ChatOpenAI(model="o1", temperature=1,
827
+ callbacks=[handler],model_kwargs = {"max_completion_tokens" : 1000})
828
+ # Chain the components.
829
+ # The class `LLMChain` was deprecated in LangChain 0.1.17 and will be removed in 1.0. Use RunnableSequence, e.g., `prompt | llm` instead.
830
+ chain = prompt | llm | parser
831
+
832
+ # Invoke the chain with a query.
833
+ # IMPORTANT: chain.run is now obsolete. Use chain.invoke instead.
834
+ result = chain.invoke({"query": "Tell me a joke about openai."})
835
+ print("o1 JSON: ",result)
836
+
837
+ # Get DEEPSEEK_API_KEY environmental variable
838
+
839
+ deepseek_api_key = os.getenv('DEEPSEEK_API_KEY')
840
+
841
+ # Ensure the API key is retrieved successfully
842
+ if deepseek_api_key is None:
843
+ raise ValueError("DEEPSEEK_API_KEY environment variable is not set")
844
+
845
+ llm = ChatOpenAI(
846
+ model='deepseek-chat',
847
+ openai_api_key=deepseek_api_key,
848
+ openai_api_base='https://api.deepseek.com',
849
+ temperature=0, callbacks=[handler]
850
+ )
851
+
852
+ # Chain the components
853
+ chain = prompt | llm | parser
854
+
855
+ # Invoke the chain with a query
856
+ result = chain.invoke({"query": "Write joke about deepseek."})
857
+ print("deepseek",result)
858
+
859
+
860
+ # Set up a parser
861
+ parser = PydanticOutputParser(pydantic_object=Joke)
862
+ # Chain the components
863
+ chain = prompt | llm | parser
864
+
865
+ # Invoke the chain with a query
866
+ result = chain.invoke({"query": "Write joke about deepseek and pydantic."})
867
+ print("deepseek pydantic",result)
868
+
869
+ # Set up the Azure ChatOpenAI LLM instance
870
+ llm_no_struct = AzureChatOpenAI(
871
+ model="o4-mini",
872
+ temperature=1,
873
+ callbacks=[handler]
874
+ )
875
+ llm = llm_no_struct.with_structured_output(Joke) # with structured output forces the output to be a specific JSON format
876
+ # Chain the components: prompt | llm | parser
877
+ chain = prompt | llm # returns a Joke object
878
+
879
+ # Invoke the chain with a query
880
+ result = chain.invoke({"query": "What is Azure?"}) # Pass a dictionary if `invoke` expects it
881
+ print("Azure Result:", result)
882
+
883
+ # Set up a parser
884
+ parser = JsonOutputParser(pydantic_object=Joke)
885
+
886
+ llm = Fireworks(
887
+ model="accounts/fireworks/models/llama4-maverick-instruct-basic",
888
+ temperature=0, callbacks=[handler])
889
+ # Chain the components
890
+ chain = prompt | llm | parser
891
+
892
+ # Invoke the chain with a query
893
+ # no money in account
894
+ # result = chain.invoke({"query": "Tell me a joke about the president"})
895
+ # print("fireworks",result)
896
+
897
+
898
+
899
+
900
+
901
+ prompt = ChatPromptTemplate.from_template(
902
+ "Tell me a short joke about {topic}"
903
+ )
904
+ chat_openai = ChatOpenAI(model="gpt-3.5-turbo", callbacks=[handler])
905
+ openai = OpenAI(model="gpt-3.5-turbo-instruct", callbacks=[handler])
906
+ anthropic = ChatAnthropic(model="claude-2", callbacks=[handler])
907
+ model = (
908
+ chat_openai
909
+ .with_fallbacks([anthropic])
910
+ .configurable_alternatives(
911
+ ConfigurableField(id="model"),
912
+ default_key="chat_openai",
913
+ openai=openai,
914
+ anthropic=anthropic,
915
+ )
916
+ )
917
+
918
+ chain = (
919
+ {"topic": RunnablePassthrough()}
920
+ | prompt
921
+ | model
922
+ | StrOutputParser()
923
+ )
924
+ result = chain.invoke({"topic": "Tell me a joke about the president"})
925
+ print("config alt:",result)
926
+
927
+
928
+
929
+ llm = ChatAnthropic(
930
+ model="claude-3-7-sonnet-latest",
931
+ max_tokens=5000, # Total tokens for the response
932
+ thinking={"type": "enabled", "budget_tokens": 2000}, # Tokens for internal reasoning
933
+ )
934
+
935
+ response = llm.invoke("What is the cube root of 50.653?")
936
+ print(json.dumps(response.content, indent=2))
937
+
938
+
939
+ llm = ChatGroq(temperature=0, model_name="qwen-qwq-32b", callbacks=[handler])
940
+ system = "You are a helpful assistant."
941
+ human = "{text}"
942
+ prompt = ChatPromptTemplate.from_messages([("system", system), ("human", human)])
943
+
944
+ chain = prompt | llm | StrOutputParser()
945
+ print(chain.invoke({"text": "Explain the importance of low latency LLMs."}))
946
+
947
+
948
+ llm = Together(
949
+ model="meta-llama/Llama-3-70b-chat-hf",
950
+ max_tokens=500, callbacks=[handler]
951
+ )
952
+ chain = prompt | llm | StrOutputParser()
953
+ print(chain.invoke({"text": "Explain the importance of together.ai."}))
954
+
955
+
956
+ # Define a prompt template with placeholders for variables
957
+ prompt_template = PromptTemplate.from_template("Tell me a {adjective} joke about {content}.")
958
+
959
+ # Format the prompt with the variables
960
+ formatted_prompt = prompt_template.format(adjective="funny", content="data scientists")
961
+
962
+ # Print the formatted prompt
963
+ print(formatted_prompt)
964
+
965
+
966
+ # Set up the LLM with the custom handler
967
+ handler = CompletionStatusHandler()
968
+
969
+
970
+ llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.9, callbacks=[handler])
971
+
972
+ prompt = PromptTemplate.from_template("What is a good name for a company that makes {product}?")
973
+
974
+ chain = prompt | llm
975
+
976
+ # Invoke the chain
977
+ response = chain.invoke({"product":"colorful socks"})
978
+
979
+ # Check completion status
980
+ print(f"Is complete: {handler.is_complete}")
981
+ print(f"Finish reason: {handler.finish_reason}")
982
+ print(f"Response: {response}")
983
+ print(f"Input tokens: {handler.input_tokens}")
984
+ print(f"Output tokens: {handler.output_tokens}")
985
+
986
+
987
+
988
+ template = """Question: {question}"""
989
+
990
+ prompt = ChatPromptTemplate.from_template(template)
991
+
992
+ model = OllamaLLM(model="qwen2.5-coder:32b")
993
+
994
+ chain = prompt | model
995
+
996
+ output = chain.invoke({"question": "Write a python function that calculates Pi"})
997
+ print(output)
998
+
999
+
1000
+
1001
+ llm = MLXPipeline.from_model_id(
1002
+ "mlx-community/quantized-gemma-2b-it",
1003
+ pipeline_kwargs={"max_tokens": 10, "temp": 0.1},
1004
+ )
1005
+
1006
+
1007
+ chat_model = ChatMLX(llm=llm)
1008
+ messages = [HumanMessage(content="What happens when an unstoppable force meets an immovable object?")]
1009
+ response = chat_model.invoke(messages)
1010
+ print(response.content)
1011
+
1012
+
1013
+
1014
+ llm = ChatBedrockConverse(
1015
+ model_id="anthropic.claude-3-5-sonnet-20240620-v1:0",
1016
+ # Additional parameters like temperature, max_tokens can be set here
1017
+ )
1018
+
1019
+ messages = [HumanMessage(content="What happens when an unstoppable force meets an immovable sonnet?")]
1020
+ response = llm.invoke(messages)
1021
+ print(response.content)</lcel_example>
1022
+
1023
+ % Here are examples of how to use internal modules:
1024
+ <internal_example_modules>
1025
+ % Example of selecting a Langchain LLM and counting tokens using llm_selector: <llm_selector_example>from pdd.llm_selector import llm_selector
1026
+
1027
+ def main() -> None:
1028
+ """
1029
+ Main function to demonstrate the usage of the llm_selector function.
1030
+ """
1031
+ # Define the strength and temperature parameters
1032
+ strength: float = 0.5 # Example strength value for the LLM model
1033
+ temperature: float = 1.0 # Example temperature value for the LLM model
1034
+
1035
+ try:
1036
+ while strength <= 1.1:
1037
+ # Call the llm_selector function with the specified strength and temperature
1038
+ llm, token_counter, input_cost, output_cost, model_name = llm_selector(strength, temperature)
1039
+ print(f"Strength: {strength}")
1040
+
1041
+ # Print the details of the selected LLM model
1042
+ print(f"Selected LLM Model: {model_name}")
1043
+ print(f"Input Cost per Million Tokens: {input_cost}")
1044
+ print(f"Output Cost per Million Tokens: {output_cost}")
1045
+
1046
+ # Example usage of the token counter function
1047
+ sample_text: str = "This is a sample text to count tokens."
1048
+ token_count: int = token_counter(sample_text)
1049
+ print(f"Token Count for Sample Text: {token_count}")
1050
+ print(f"model_name: {model_name}")
1051
+ strength += 0.05
1052
+ except FileNotFoundError as e:
1053
+ print(f"Error: {e}")
1054
+ except ValueError as e:
1055
+ print(f"Error: {e}")
1056
+
1057
+ if __name__ == "__main__":
1058
+ main()</llm_selector_example>
1059
+ </internal_example_modules>
1060
+
1061
+ % This function will use Langchain to do the following:
1062
+ Step 1. Use $PDD_PATH environment variable to get the path to the project. Load the '$PDD_PATH/prompts/conflict_LLM.prompt' and '$PDD_PATH/prompts/extract_conflicts_LLM.prompt' files.
1063
+ Step 2. Then this will create a Langchain LCEL template from the conflict_LLM prompt.
1064
+ Step 3. This will use llm_selector for the model, imported from a relative path.
1065
+ Step 4. Pretty print a message letting the user know it is running, how many tokens are in the prompt (using token_counter from llm_selector), and the estimated cost. The cost from llm_selector is in dollars per million tokens.
1066
+ Step 5. Run the prompts through the model using Langchain LCEL with string output.
1067
+ 5a. Pass the following string parameters to the prompt during invoke:
1068
+ - 'PROMPT1'
1069
+ - 'PROMPT2'
1070
+ 5b. Pretty print the output of 5a which will be in Markdown format.
1071
+ Step 6. Create a Langchain LCEL template from the extract_conflicts_LLM prompt that outputs JSON, using llm_selector at 0.8 strength together with its token counter:
1072
+ 6a. Pass the following string parameters to the prompt during invocation: 'llm_output' (this string is from Step 5a).
1073
+ 6b. Calculate input and output token count using token_counter from llm_selector and pretty print the running message with the token count and cost.
1074
+ 6c. Use the 'get' function to extract the 'changes_list' values from the dictionary output.
1075
+ Step 7. Return the changes_list, total_cost and model_name.</updated_prompt>
21
1076
  </example>
22
1077
  </examples>
23
1078
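Example 2 likewise describes its two-stage flow only in prose (a string-output conflict analysis followed by JSON extraction of changes_list), so here is a minimal sketch of Steps 1-7. It assumes the llm_selector signature shown in the <llm_selector_example> above and that the prompt templates expose PROMPT1/PROMPT2 and llm_output variables; cost bookkeeping is omitted, and this is a sketch, not the packaged implementation.

```python
# Illustrative sketch of example 2's steps; not the packaged code.
# Assumes llm_selector(strength, temperature) returns
# (llm, token_counter, input_cost, output_cost, model_name) as shown above.
import os
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser, JsonOutputParser
from pdd.llm_selector import llm_selector


def conflicts_in_prompts(prompt1: str, prompt2: str,
                         strength: float = 0.5, temperature: float = 0.0):
    pdd_path = os.environ["PDD_PATH"]                                     # Step 1
    with open(f"{pdd_path}/prompts/conflict_LLM.prompt") as f:
        conflict_tpl = f.read()
    with open(f"{pdd_path}/prompts/extract_conflicts_LLM.prompt") as f:
        extract_tpl = f.read()

    llm, token_counter, in_cost, out_cost, model_name = llm_selector(strength, temperature)

    # Steps 2 and 5: run the conflict analysis with plain string output.
    analysis_chain = PromptTemplate.from_template(conflict_tpl) | llm | StrOutputParser()
    analysis = analysis_chain.invoke({"PROMPT1": prompt1, "PROMPT2": prompt2})

    # Step 6: a 0.8-strength model extracts structured JSON from the analysis.
    strong_llm, *_ = llm_selector(0.8, temperature)
    extract_chain = PromptTemplate.from_template(extract_tpl) | strong_llm | JsonOutputParser()
    changes_list = extract_chain.invoke({"llm_output": analysis}).get("changes_list", [])

    total_cost = 0.0  # token/cost bookkeeping via token_counter is omitted in this sketch
    return changes_list, total_cost, model_name                           # Step 7
```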