pdd-cli 0.0.45-py3-none-any.whl → 0.0.46-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pdd-cli might be problematic.

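Nearly every hunk below makes the same mechanical change: single curly braces inside example code embedded in PDD's prompt templates become doubled curly braces. A plausible reading, given the `double_curly_brackets = True` preprocessing visible in one of the hunks, is that these templates now pass through a format-style substitution step, where a literal brace must be escaped by doubling. A minimal Python sketch of that escaping rule (an illustration of str.format semantics, not PDD's actual preprocessing code):

```python
# Illustration only: how brace doubling interacts with str.format-style
# substitution. {topic} is a real placeholder; {{...}} survives as a
# literal brace pair in the output.
template = "Tell me a joke about {topic}"
escaped = 'print(f"Total Cost: ${{total_cost:.6f}}")'

print(template.format(topic="cats"))
# -> Tell me a joke about cats

print(escaped.format())
# -> print(f"Total Cost: ${total_cost:.6f}")
```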
pdd/__init__.py CHANGED
@@ -1,6 +1,6 @@
 """PDD - Prompt Driven Development"""

-__version__ = "0.0.45"
+__version__ = "0.0.46"

 # Strength parameter used for LLM extraction across the codebase
 # Used in postprocessing, XML tagging, code generation, and other extraction
@@ -17,8 +17,8 @@ You are a prompt expert that helps select the necessary subset of "includes" (li
 </definitions>

 <context>
-Here is the input_prompt to find the includes for: <input_prompt>{input_prompt}</input_prompt>
-Here is the available_includes: <available_includes>{available_includes}</available_includes>
+Here is the input_prompt to find the includes for: <input_prompt>{{input_prompt}}</input_prompt>
+Here is the available_includes: <available_includes>{{available_includes}}</available_includes>
 </context>

 Here are some examples of how to do this:
@@ -148,12 +148,12 @@ def factorial(n):
 )

 # Print the results
-console.print(f"[bold]Modified Prompt:[/bold]\n{modified_prompt}")
-console.print(f"[bold]Total Cost:[/bold] ${total_cost:.6f}")
-console.print(f"[bold]Model Used:[/bold] {model_name}")
+console.print(f"[bold]Modified Prompt:[/bold]\n{{modified_prompt}}")
+console.print(f"[bold]Total Cost:[/bold] ${{total_cost:.6f}}")
+console.print(f"[bold]Model Used:[/bold] {{model_name}}")

 except Exception as e:
-console.print(f"[bold red]An error occurred:[/bold red] {str(e)}")
+console.print(f"[bold red]An error occurred:[/bold red] {{str(e)}}")

 if __name__ == "__main__":
 main()
@@ -304,9 +304,9 @@ class CompletionStatusHandler(BaseCallbackHandler):
 self.output_tokens = usage_metadata.get('output_tokens')
 # print("response:",response)
 print("Extracted information:")
-print(f"Finish reason: {self.finish_reason}")
-print(f"Input tokens: {self.input_tokens}")
-print(f"Output tokens: {self.output_tokens}")
+print(f"Finish reason: {{self.finish_reason}}")
+print(f"Input tokens: {{self.input_tokens}}")
+print(f"Output tokens: {{self.output_tokens}}")

 # Set up the LLM with the custom handler
 handler = CompletionStatusHandler()
@@ -314,15 +314,15 @@ handler = CompletionStatusHandler()
 set_llm_cache(SQLiteCache(database_path=".langchain.db"))


-# Create the LCEL template. Make note of the variable {topic} which will be filled in later.
-prompt_template = PromptTemplate.from_template("Tell me a joke about {topic}")
+# Create the LCEL template. Make note of the variable {{topic}} which will be filled in later.
+prompt_template = PromptTemplate.from_template("Tell me a joke about {{topic}}")

 llm = ChatGoogleGenerativeAI(model="gemini-2.5-pro-exp-03-25", temperature=0, callbacks=[handler])
 # Combine with a model and parser to output a string
 chain = prompt_template |llm| StrOutputParser()

-# Run the template. Notice that the input is a dictionary with a single key "topic" which feeds it into the above prompt template. This is needed because the prompt template has a variable {topic} which needs to be filled in when invoked.
-result = chain.invoke({"topic": "cats"})
+# Run the template. Notice that the input is a dictionary with a single key "topic" which feeds it into the above prompt template. This is needed because the prompt template has a variable {{topic}} which needs to be filled in when invoked.
+result = chain.invoke({{"topic": "cats"}})
 print("********Google:", result)


@@ -330,8 +330,8 @@ llm = ChatVertexAI(model="gemini-2.5-pro-exp-03-25", temperature=0, callbacks=[h
 # Combine with a model and parser to output a string
 chain = prompt_template |llm| StrOutputParser()

-# Run the template. Notice that the input is a dictionary with a single key "topic" which feeds it into the above prompt template. This is needed because the prompt template has a variable {topic} which needs to be filled in when invoked.
-result = chain.invoke({"topic": "cats"})
+# Run the template. Notice that the input is a dictionary with a single key "topic" which feeds it into the above prompt template. This is needed because the prompt template has a variable {{topic}} which needs to be filled in when invoked.
+result = chain.invoke({{"topic": "cats"}})
 print("********GoogleVertex:", result)


@@ -346,9 +346,9 @@ parser = JsonOutputParser(pydantic_object=Joke)

 # Create a prompt template
 prompt = PromptTemplate(
-template="Answer the user query.\n{format_instructions}\n{query}\n",
+template="Answer the user query.\n{{format_instructions}}\n{{query}}\n",
 input_variables=["query"],
-partial_variables={"format_instructions": parser.get_format_instructions()},
+partial_variables={{"format_instructions": parser.get_format_instructions()}},
 )

 llm_no_struct = ChatOpenAI(model="gpt-4o-mini", temperature=0,
@@ -360,19 +360,19 @@ chain = prompt | llm

 # Invoke the chain with a query.
 # IMPORTANT: chain.run is now obsolete. Use chain.invoke instead.
-result = chain.invoke({"query": "Tell me a joke about openai."})
+result = chain.invoke({{"query": "Tell me a joke about openai."}})
 print("4o mini JSON: ",result)
 print(result.setup) # How to access the structured output

 llm = ChatOpenAI(model="o1", temperature=1,
-callbacks=[handler],model_kwargs = {"max_completion_tokens" : 1000})
+callbacks=[handler],model_kwargs = {{"max_completion_tokens" : 1000}})
 # Chain the components.
 # The class `LLMChain` was deprecated in LangChain 0.1.17 and will be removed in 1.0. Use RunnableSequence, e.g., `prompt | llm` instead.
 chain = prompt | llm | parser

 # Invoke the chain with a query.
 # IMPORTANT: chain.run is now obsolete. Use chain.invoke instead.
-result = chain.invoke({"query": "Tell me a joke about openai."})
+result = chain.invoke({{"query": "Tell me a joke about openai."}})
 print("o1 JSON: ",result)

 # Get DEEPSEEK_API_KEY environmental variable
@@ -394,7 +394,7 @@ llm = ChatOpenAI(
 chain = prompt | llm | parser

 # Invoke the chain with a query
-result = chain.invoke({"query": "Write joke about deepseek."})
+result = chain.invoke({{"query": "Write joke about deepseek."}})
 print("deepseek",result)


@@ -404,7 +404,7 @@ parser = PydanticOutputParser(pydantic_object=Joke)
 chain = prompt | llm | parser

 # Invoke the chain with a query
-result = chain.invoke({"query": "Write joke about deepseek and pydantic."})
+result = chain.invoke({{"query": "Write joke about deepseek and pydantic."}})
 print("deepseek pydantic",result)

 # Set up the Azure ChatOpenAI LLM instance
@@ -418,7 +418,7 @@ llm = llm_no_struct.with_structured_output(Joke) # with structured output forces
 chain = prompt | llm # returns a Joke object

 # Invoke the chain with a query
-result = chain.invoke({"query": "What is Azure?"}) # Pass a dictionary if `invoke` expects it
+result = chain.invoke({{"query": "What is Azure?"}}) # Pass a dictionary if `invoke` expects it
 print("Azure Result:", result)

 # Set up a parser
@@ -432,7 +432,7 @@ chain = prompt | llm | parser

 # Invoke the chain with a query
 # no money in account
-# result = chain.invoke({"query": "Tell me a joke about the president"})
+# result = chain.invoke({{"query": "Tell me a joke about the president"}})
 # print("fireworks",result)


@@ -440,7 +440,7 @@ chain = prompt | llm | parser


 prompt = ChatPromptTemplate.from_template(
-"Tell me a short joke about {topic}"
+"Tell me a short joke about {{topic}}"
 )
 chat_openai = ChatOpenAI(model="gpt-3.5-turbo", callbacks=[handler])
 openai = OpenAI(model="gpt-3.5-turbo-instruct", callbacks=[handler])
@@ -457,12 +457,12 @@ model = (
 )

 chain = (
-{"topic": RunnablePassthrough()}
+{{"topic": RunnablePassthrough()}}
 | prompt
 | model
 | StrOutputParser()
 )
-result = chain.invoke({"topic": "Tell me a joke about the president"})
+result = chain.invoke({{"topic": "Tell me a joke about the president"}})
 print("config alt:",result)


@@ -470,7 +470,7 @@ print("config alt:",result)
 llm = ChatAnthropic(
 model="claude-3-7-sonnet-latest",
 max_tokens=5000, # Total tokens for the response
-thinking={"type": "enabled", "budget_tokens": 2000}, # Tokens for internal reasoning
+thinking={{"type": "enabled", "budget_tokens": 2000}}, # Tokens for internal reasoning
 )

 response = llm.invoke("What is the cube root of 50.653?")
@@ -479,11 +479,11 @@ print(json.dumps(response.content, indent=2))

 llm = ChatGroq(temperature=0, model_name="qwen-qwq-32b", callbacks=[handler])
 system = "You are a helpful assistant."
-human = "{text}"
+human = "{{text}}"
 prompt = ChatPromptTemplate.from_messages([("system", system), ("human", human)])

 chain = prompt | llm | StrOutputParser()
-print(chain.invoke({"text": "Explain the importance of low latency LLMs."}))
+print(chain.invoke({{"text": "Explain the importance of low latency LLMs."}}))


 llm = Together(
@@ -491,11 +491,11 @@ llm = Together(
 max_tokens=500, callbacks=[handler]
 )
 chain = prompt | llm | StrOutputParser()
-print(chain.invoke({"text": "Explain the importance of together.ai."}))
+print(chain.invoke({{"text": "Explain the importance of together.ai."}}))


 # Define a prompt template with placeholders for variables
-prompt_template = PromptTemplate.from_template("Tell me a {adjective} joke about {content}.")
+prompt_template = PromptTemplate.from_template("Tell me a {{adjective}} joke about {{content}}.")

 # Format the prompt with the variables
 formatted_prompt = prompt_template.format(adjective="funny", content="data scientists")
@@ -510,23 +510,23 @@ handler = CompletionStatusHandler()

 llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.9, callbacks=[handler])

-prompt = PromptTemplate.from_template("What is a good name for a company that makes {product}?")
+prompt = PromptTemplate.from_template("What is a good name for a company that makes {{product}}?")

 chain = prompt | llm

 # Invoke the chain
-response = chain.invoke({"product":"colorful socks"})
+response = chain.invoke({{"product":"colorful socks"}})

 # Check completion status
-print(f"Is complete: {handler.is_complete}")
-print(f"Finish reason: {handler.finish_reason}")
-print(f"Response: {response}")
-print(f"Input tokens: {handler.input_tokens}")
-print(f"Output tokens: {handler.output_tokens}")
+print(f"Is complete: {{handler.is_complete}}")
+print(f"Finish reason: {{handler.finish_reason}}")
+print(f"Response: {{response}}")
+print(f"Input tokens: {{handler.input_tokens}}")
+print(f"Output tokens: {{handler.output_tokens}}")



-template = """Question: {question}"""
+template = """Question: {{question}}"""

 prompt = ChatPromptTemplate.from_template(template)

@@ -534,14 +534,14 @@ model = OllamaLLM(model="qwen2.5-coder:32b")

 chain = prompt | model

-output = chain.invoke({"question": "Write a python function that calculates Pi"})
+output = chain.invoke({{"question": "Write a python function that calculates Pi"}})
 print(output)



 llm = MLXPipeline.from_model_id(
 "mlx-community/quantized-gemma-2b-it",
-pipeline_kwargs={"max_tokens": 10, "temp": 0.1},
+pipeline_kwargs={{"max_tokens": 10, "temp": 0.1}},
 )

@@ -637,8 +637,8 @@ Sign in to GoogleGet the most from your Google account
 Stay signed out

 Sign in
-{test}
-{test2}
+{{test}}
+{{test2}}
 ```<TODO.md>```

 <pdd>
@@ -653,7 +653,7 @@ double_curly_brackets = True
 exclude_keys = ["test2"] # exclude test2 from being doubled

 # Debug info
-console.print(f"[bold yellow]Debug: exclude_keys = {exclude_keys}[/bold yellow]")
+console.print(f"[bold yellow]Debug: exclude_keys = {{exclude_keys}}[/bold yellow]")

 processed = preprocess(prompt, recursive, double_curly_brackets, exclude_keys=exclude_keys)
 console.print("[bold white]Processed Prompt:[/bold white]")
@@ -674,23 +674,23 @@ def main() -> None:
 while strength <= 1.1:
 # Call the llm_selector function with the specified strength and temperature
 llm, token_counter, input_cost, output_cost, model_name = llm_selector(strength, temperature)
-print(f"Strength: {strength}")
+print(f"Strength: {{strength}}")

 # Print the details of the selected LLM model
-print(f"Selected LLM Model: {model_name}")
-print(f"Input Cost per Million Tokens: {input_cost}")
-print(f"Output Cost per Million Tokens: {output_cost}")
+print(f"Selected LLM Model: {{model_name}}")
+print(f"Input Cost per Million Tokens: {{input_cost}}")
+print(f"Output Cost per Million Tokens: {{output_cost}}")

 # Example usage of the token counter function
 sample_text: str = "This is a sample text to count tokens."
 token_count: int = token_counter(sample_text)
-print(f"Token Count for Sample Text: {token_count}")
-print(f"model_name: {model_name}")
+print(f"Token Count for Sample Text: {{token_count}}")
+print(f"model_name: {{model_name}}")
 strength += 0.05
 except FileNotFoundError as e:
-print(f"Error: {e}")
+print(f"Error: {{e}}")
 except ValueError as e:
-print(f"Error: {e}")
+print(f"Error: {{e}}")

 if __name__ == "__main__":
 main()</llm_selector_example>
@@ -736,7 +736,7 @@ from rich import print as rprint
 # the function's ability to detect incompleteness.
 my_prompt_text = "Write a comprehensive guide on how to bake a sourdough bread, starting from creating a starter, then the kneading process, and finally"

-rprint(f"[bold cyan]Analyzing prompt:[/bold cyan] \"{my_prompt_text}\"")
+rprint(f"[bold cyan]Analyzing prompt:[/bold cyan] \"{{my_prompt_text}}\"")

 # 2. Call the `unfinished_prompt` function.
 # Review the function's docstring for detailed parameter information.
@@ -761,28 +761,28 @@ reasoning_str, is_complete_flag, call_cost, llm_model = unfinished_prompt(

 # 3. Print the results returned by the function.
 rprint("\n[bold green]--- Analysis Results ---[/bold green]")
-rprint(f" [bold]Prompt Analyzed:[/bold] \"{my_prompt_text}\"")
-rprint(f" [bold]Is prompt complete?:[/bold] {'Yes, the LLM considers the prompt complete.' if is_complete_flag else 'No, the LLM suggests the prompt needs continuation.'}")
-rprint(f" [bold]LLM's Reasoning:[/bold]\n {reasoning_str}") # Rich print will handle newlines in the reasoning string
-rprint(f" [bold]Cost of Analysis:[/bold] ${call_cost:.6f}") # Display cost, assuming USD. Adjust currency/format as needed.
-rprint(f" [bold]LLM Model Used:[/bold] {llm_model}")
+rprint(f" [bold]Prompt Analyzed:[/bold] \"{{my_prompt_text}}\"")
+rprint(f" [bold]Is prompt complete?:[/bold] {{'Yes, the LLM considers the prompt complete.' if is_complete_flag else 'No, the LLM suggests the prompt needs continuation.'}}")
+rprint(f" [bold]LLM's Reasoning:[/bold]\n {{reasoning_str}}") # Rich print will handle newlines in the reasoning string
+rprint(f" [bold]Cost of Analysis:[/bold] ${{call_cost:.6f}}") # Display cost, assuming USD. Adjust currency/format as needed.
+rprint(f" [bold]LLM Model Used:[/bold] {{llm_model}}")

 # --- Example of calling with default parameters ---
 # If you want to use the default strength (0.5), temperature (0.0), and verbose (False):
 #
 # default_prompt_text = "What is the capital of Canada?"
-# rprint(f"\n[bold cyan]Analyzing prompt with default settings:[/bold cyan] \"{default_prompt_text}\"")
+# rprint(f"\n[bold cyan]Analyzing prompt with default settings:[/bold cyan] \"{{default_prompt_text}}\"")
 #
 # reasoning_def, is_finished_def, cost_def, model_def = unfinished_prompt(
 # prompt_text=default_prompt_text
 # )
 #
 # rprint("\n[bold green]--- Default Call Analysis Results ---[/bold green]")
-# rprint(f" [bold]Prompt Analyzed:[/bold] \"{default_prompt_text}\"")
-# rprint(f" [bold]Is prompt complete?:[/bold] {'Yes' if is_finished_def else 'No'}")
-# rprint(f" [bold]LLM's Reasoning:[/bold]\n {reasoning_def}")
-# rprint(f" [bold]Cost of Analysis:[/bold] ${cost_def:.6f}")
-# rprint(f" [bold]LLM Model Used:[/bold] {model_def}")
+# rprint(f" [bold]Prompt Analyzed:[/bold] \"{{default_prompt_text}}\"")
+# rprint(f" [bold]Is prompt complete?:[/bold] {{'Yes' if is_finished_def else 'No'}}")
+# rprint(f" [bold]LLM's Reasoning:[/bold]\n {{reasoning_def}}")
+# rprint(f" [bold]Cost of Analysis:[/bold] ${{cost_def:.6f}}")
+# rprint(f" [bold]LLM Model Used:[/bold] {{model_def}}")
 </unfinished_prompt_example>

 % Here is an example how to continue the generation of a model output: <continue_generation_example>from pdd.continue_generation import continue_generation
@@ -816,17 +816,17 @@ def main() -> None:
 )

 # Output the results
-# print(f"Final LLM Output: {final_llm_output}")
-print(f"Total Cost: ${total_cost:.6f}")
-print(f"Model Name: {model_name}")
+# print(f"Final LLM Output: {{final_llm_output}}")
+print(f"Total Cost: ${{total_cost:.6f}}")
+print(f"Model Name: {{model_name}}")
 # write final_llm_output to context/final_llm_output.txt
 with open("context/final_llm_output.py", "w") as file:
 file.write(final_llm_output)

 except FileNotFoundError as e:
-print(f"Error: {e}")
+print(f"Error: {{e}}")
 except Exception as e:
-print(f"An error occurred: {e}")
+print(f"An error occurred: {{e}}")

 if __name__ == "__main__":
 main()</continue_generation_example>
@@ -871,7 +871,7 @@ It includes a Python code block:
 ```python
 def greet(name):
 # A simple greeting function
-print(f"Hello, {name}!")
+print(f"Hello, {{name}}!")

 greet("Developer")
 ```
@@ -891,8 +891,8 @@ But we are only interested in Python.
 print("[bold cyan]Scenario 1: Simple Extraction (strength = 0)[/bold cyan]")
 print("Demonstrates extracting code using basic string processing.")
 print(f" Input LLM Output: (see below)")
-# print(f"[dim]{llm_output_text_with_code}[/dim]") # Printing for brevity in console
-print(f" Target Language: '{target_language}' (Note: simple extraction is language-agnostic but extracts first block)")
+# print(f"[dim]{{llm_output_text_with_code}}[/dim]") # Printing for brevity in console
+print(f" Target Language: '{{target_language}}' (Note: simple extraction is language-agnostic but extracts first block)")
 print(f" Strength: 0 (activates simple, non-LLM extraction)")
 print(f" Verbose: True (enables detailed console output from `postprocess`)\n")

@@ -916,9 +916,9 @@ But we are only interested in Python.
 # extracted_code (str): The extracted code.
 # total_cost (float): Cost of the operation (in dollars). Expected to be 0.0 for simple extraction.
 # model_name (str): Identifier for the method/model used. Expected to be 'simple_extraction'.
-print(f" Extracted Code:\n[yellow]{extracted_code_s0}[/yellow]")
-print(f" Total Cost: ${cost_s0:.6f}")
-print(f" Model Name: '{model_s0}'")
+print(f" Extracted Code:\n[yellow]{{extracted_code_s0}}[/yellow]")
+print(f" Total Cost: ${{cost_s0:.6f}}")
+print(f" Model Name: '{{model_s0}}'")
 print("-" * 60)

 # --- Scenario 2: LLM-based Extraction (strength > 0) ---
@@ -928,7 +928,7 @@ But we are only interested in Python.
 print("\n[bold cyan]Scenario 2: LLM-based Extraction (strength = 0.9)[/bold cyan]")
 print("Demonstrates extracting code using an LLM (mocked).")
 print(f" Input LLM Output: (same as above)")
-print(f" Target Language: '{target_language}'")
+print(f" Target Language: '{{target_language}}'")
 print(f" Strength: 0.9 (activates LLM-based extraction)")
 print(f" Temperature: 0.0 (LLM creativity, 0-1 scale)")
 print(f" Time: 0.5 (LLM thinking effort, 0-1 scale, influences model choice/cost)")
@@ -955,11 +955,11 @@ result = sophisticated_extraction("test data from llm")
 print(result)
 ```"""
 mock_extracted_code_pydantic_obj = ExtractedCode(extracted_code=mock_llm_response_code_from_llm)
-mock_llm_invoke_return_value = {
+mock_llm_invoke_return_value = {{
 'result': mock_extracted_code_pydantic_obj,
 'cost': 0.00025, # Example cost in dollars
 'model_name': 'mock-llm-extractor-v1'
-}
+}}
 mock_llm_invoke_function = MagicMock(return_value=mock_llm_invoke_return_value)

 # Patch the internal dependencies within the 'pdd.postprocess' module's namespace.
@@ -977,9 +977,9 @@ print(result)
 )

 print("[bold green]Output for Scenario 2:[/bold green]")
-print(f" Extracted Code:\n[yellow]{extracted_code_llm}[/yellow]")
-print(f" Total Cost: ${cost_llm:.6f} (cost is in dollars)")
-print(f" Model Name: '{model_llm}'")
+print(f" Extracted Code:\n[yellow]{{extracted_code_llm}}[/yellow]")
+print(f" Total Cost: ${{cost_llm:.6f}} (cost is in dollars)")
+print(f" Model Name: '{{model_llm}}'")

 # --- Verification of Mock Calls (for developer understanding) ---
 # Check that `load_prompt_template` was called correctly.
@@ -990,10 +990,10 @@ print(result)
 # Inspect the arguments passed to the mocked llm_invoke
 call_args_to_llm_invoke = mock_llm_invoke_function.call_args[1] # kwargs
 assert call_args_to_llm_invoke['prompt'] == mock_load_template.return_value
-assert call_args_to_llm_invoke['input_json'] == {
+assert call_args_to_llm_invoke['input_json'] == {{
 "llm_output": llm_output_text_with_code,
 "language": target_language
-}
+}}
 assert call_args_to_llm_invoke['strength'] == 0.9
 assert call_args_to_llm_invoke['temperature'] == 0.0
 assert call_args_to_llm_invoke['time'] == 0.5
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pdd-cli
-Version: 0.0.45
+Version: 0.0.46
 Summary: PDD (Prompt-Driven Development) Command Line Interface
 Author: Greg Tanaka
 Author-email: glt@alumni.caltech.edu
@@ -46,7 +46,7 @@ Requires-Dist: pytest-asyncio; extra == "dev"
 Requires-Dist: z3-solver; extra == "dev"
 Dynamic: license-file

-.. image:: https://img.shields.io/badge/pdd--cli-v0.0.45-blue
+.. image:: https://img.shields.io/badge/pdd--cli-v0.0.46-blue
 :alt: PDD-CLI Version

 .. image:: https://img.shields.io/badge/Discord-join%20chat-7289DA.svg?logo=discord&logoColor=white&link=https://discord.gg/Yp4RTh8bG7
@@ -123,7 +123,7 @@ After installation, verify:

 pdd --version

-You'll see the current PDD version (e.g., 0.0.45).
+You'll see the current PDD version (e.g., 0.0.46).

 Getting Started with Examples
 -----------------------------
@@ -1,4 +1,4 @@
-pdd/__init__.py,sha256=DMuWLMd_lMRsYmsoBQJdl7YzLTVJrDrArBSc0xR_4hk,634
+pdd/__init__.py,sha256=PbIPsuLF2Upu0emZLIEvF2I3velEBvmE249EYAK78tI,634
 pdd/auto_deps_main.py,sha256=iV2khcgSejiXjh5hiQqeu_BJQOLfTKXhMx14j6vRlf8,3916
 pdd/auto_include.py,sha256=OJcdcwTwJNqHPHKG9P4m9Ij-PiLex0EbuwJP0uiQi_Y,7484
 pdd/auto_update.py,sha256=w6jzTnMiYRNpwQHQxWNiIAwQ0d6xh1iOB3xgDsabWtc,5236
@@ -108,9 +108,9 @@ pdd/prompts/trim_results_start_LLM.prompt,sha256=OKz8fAf1cYWKWgslFOHEkUpfaUDARh3
 pdd/prompts/unfinished_prompt_LLM.prompt,sha256=-JgBpiPTQZdWOAwOG1XpfpD9waynFTAT3Jo84eQ4bTw,1543
 pdd/prompts/update_prompt_LLM.prompt,sha256=prIc8uLp2jqnLTHt6JvWDZGanPZipivhhYeXe0lVaYw,1328
 pdd/prompts/xml_convertor_LLM.prompt,sha256=YGRGXJeg6EhM9690f-SKqQrKqSJjLFD51UrPOlO0Frg,2786
-pdd_cli-0.0.45.dist-info/licenses/LICENSE,sha256=-1bjYH-CEjGEQ8VixtnRYuu37kN6F9NxmZSDkBuUQ9o,1062
-pdd_cli-0.0.45.dist-info/METADATA,sha256=gjZhY6Q-S92srif53XEtXBT9uw2jtpAke3WSBM_EpT4,12399
-pdd_cli-0.0.45.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-pdd_cli-0.0.45.dist-info/entry_points.txt,sha256=Kr8HtNVb8uHZtQJNH4DnF8j7WNgWQbb7_Pw5hECSR-I,36
-pdd_cli-0.0.45.dist-info/top_level.txt,sha256=xjnhIACeMcMeDfVNREgQZl4EbTni2T11QkL5r7E-sbE,4
-pdd_cli-0.0.45.dist-info/RECORD,,
+pdd_cli-0.0.46.dist-info/licenses/LICENSE,sha256=-1bjYH-CEjGEQ8VixtnRYuu37kN6F9NxmZSDkBuUQ9o,1062
+pdd_cli-0.0.46.dist-info/METADATA,sha256=Sw6JyoPxHZETZVNwob9sLafRtrc9FbbGWJiVTUMmRZ8,12399
+pdd_cli-0.0.46.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+pdd_cli-0.0.46.dist-info/entry_points.txt,sha256=Kr8HtNVb8uHZtQJNH4DnF8j7WNgWQbb7_Pw5hECSR-I,36
+pdd_cli-0.0.46.dist-info/top_level.txt,sha256=xjnhIACeMcMeDfVNREgQZl4EbTni2T11QkL5r7E-sbE,4
+pdd_cli-0.0.46.dist-info/RECORD,,