pdd-cli 0.0.45__py3-none-any.whl → 0.0.118__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pdd/__init__.py +40 -8
- pdd/agentic_bug.py +323 -0
- pdd/agentic_bug_orchestrator.py +497 -0
- pdd/agentic_change.py +231 -0
- pdd/agentic_change_orchestrator.py +526 -0
- pdd/agentic_common.py +598 -0
- pdd/agentic_crash.py +534 -0
- pdd/agentic_e2e_fix.py +319 -0
- pdd/agentic_e2e_fix_orchestrator.py +426 -0
- pdd/agentic_fix.py +1294 -0
- pdd/agentic_langtest.py +162 -0
- pdd/agentic_update.py +387 -0
- pdd/agentic_verify.py +183 -0
- pdd/architecture_sync.py +565 -0
- pdd/auth_service.py +210 -0
- pdd/auto_deps_main.py +71 -51
- pdd/auto_include.py +245 -5
- pdd/auto_update.py +125 -47
- pdd/bug_main.py +196 -23
- pdd/bug_to_unit_test.py +2 -0
- pdd/change_main.py +11 -4
- pdd/cli.py +22 -1181
- pdd/cmd_test_main.py +350 -150
- pdd/code_generator.py +60 -18
- pdd/code_generator_main.py +790 -57
- pdd/commands/__init__.py +48 -0
- pdd/commands/analysis.py +306 -0
- pdd/commands/auth.py +309 -0
- pdd/commands/connect.py +290 -0
- pdd/commands/fix.py +163 -0
- pdd/commands/generate.py +257 -0
- pdd/commands/maintenance.py +175 -0
- pdd/commands/misc.py +87 -0
- pdd/commands/modify.py +256 -0
- pdd/commands/report.py +144 -0
- pdd/commands/sessions.py +284 -0
- pdd/commands/templates.py +215 -0
- pdd/commands/utility.py +110 -0
- pdd/config_resolution.py +58 -0
- pdd/conflicts_main.py +8 -3
- pdd/construct_paths.py +589 -111
- pdd/context_generator.py +10 -2
- pdd/context_generator_main.py +175 -76
- pdd/continue_generation.py +53 -10
- pdd/core/__init__.py +33 -0
- pdd/core/cli.py +527 -0
- pdd/core/cloud.py +237 -0
- pdd/core/dump.py +554 -0
- pdd/core/errors.py +67 -0
- pdd/core/remote_session.py +61 -0
- pdd/core/utils.py +90 -0
- pdd/crash_main.py +262 -33
- pdd/data/language_format.csv +71 -63
- pdd/data/llm_model.csv +20 -18
- pdd/detect_change_main.py +5 -4
- pdd/docs/prompting_guide.md +864 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/benchmark_analysis.py +495 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/creation_compare.py +528 -0
- pdd/fix_code_loop.py +523 -95
- pdd/fix_code_module_errors.py +6 -2
- pdd/fix_error_loop.py +491 -92
- pdd/fix_errors_from_unit_tests.py +4 -3
- pdd/fix_main.py +278 -21
- pdd/fix_verification_errors.py +12 -100
- pdd/fix_verification_errors_loop.py +529 -286
- pdd/fix_verification_main.py +294 -89
- pdd/frontend/dist/assets/index-B5DZHykP.css +1 -0
- pdd/frontend/dist/assets/index-DQ3wkeQ2.js +449 -0
- pdd/frontend/dist/index.html +376 -0
- pdd/frontend/dist/logo.svg +33 -0
- pdd/generate_output_paths.py +139 -15
- pdd/generate_test.py +218 -146
- pdd/get_comment.py +19 -44
- pdd/get_extension.py +8 -9
- pdd/get_jwt_token.py +318 -22
- pdd/get_language.py +8 -7
- pdd/get_run_command.py +75 -0
- pdd/get_test_command.py +68 -0
- pdd/git_update.py +70 -19
- pdd/incremental_code_generator.py +2 -2
- pdd/insert_includes.py +13 -4
- pdd/llm_invoke.py +1711 -181
- pdd/load_prompt_template.py +19 -12
- pdd/path_resolution.py +140 -0
- pdd/pdd_completion.fish +25 -2
- pdd/pdd_completion.sh +30 -4
- pdd/pdd_completion.zsh +79 -4
- pdd/postprocess.py +14 -4
- pdd/preprocess.py +293 -24
- pdd/preprocess_main.py +41 -6
- pdd/prompts/agentic_bug_step10_pr_LLM.prompt +182 -0
- pdd/prompts/agentic_bug_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_bug_step2_docs_LLM.prompt +129 -0
- pdd/prompts/agentic_bug_step3_triage_LLM.prompt +95 -0
- pdd/prompts/agentic_bug_step4_reproduce_LLM.prompt +97 -0
- pdd/prompts/agentic_bug_step5_root_cause_LLM.prompt +123 -0
- pdd/prompts/agentic_bug_step6_test_plan_LLM.prompt +107 -0
- pdd/prompts/agentic_bug_step7_generate_LLM.prompt +172 -0
- pdd/prompts/agentic_bug_step8_verify_LLM.prompt +119 -0
- pdd/prompts/agentic_bug_step9_e2e_test_LLM.prompt +289 -0
- pdd/prompts/agentic_change_step10_identify_issues_LLM.prompt +1006 -0
- pdd/prompts/agentic_change_step11_fix_issues_LLM.prompt +984 -0
- pdd/prompts/agentic_change_step12_create_pr_LLM.prompt +131 -0
- pdd/prompts/agentic_change_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_change_step2_docs_LLM.prompt +101 -0
- pdd/prompts/agentic_change_step3_research_LLM.prompt +126 -0
- pdd/prompts/agentic_change_step4_clarify_LLM.prompt +164 -0
- pdd/prompts/agentic_change_step5_docs_change_LLM.prompt +981 -0
- pdd/prompts/agentic_change_step6_devunits_LLM.prompt +1005 -0
- pdd/prompts/agentic_change_step7_architecture_LLM.prompt +1044 -0
- pdd/prompts/agentic_change_step8_analyze_LLM.prompt +1027 -0
- pdd/prompts/agentic_change_step9_implement_LLM.prompt +1077 -0
- pdd/prompts/agentic_crash_explore_LLM.prompt +49 -0
- pdd/prompts/agentic_e2e_fix_step1_unit_tests_LLM.prompt +90 -0
- pdd/prompts/agentic_e2e_fix_step2_e2e_tests_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step3_root_cause_LLM.prompt +89 -0
- pdd/prompts/agentic_e2e_fix_step4_fix_e2e_tests_LLM.prompt +96 -0
- pdd/prompts/agentic_e2e_fix_step5_identify_devunits_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step6_create_unit_tests_LLM.prompt +106 -0
- pdd/prompts/agentic_e2e_fix_step7_verify_tests_LLM.prompt +116 -0
- pdd/prompts/agentic_e2e_fix_step8_run_pdd_fix_LLM.prompt +120 -0
- pdd/prompts/agentic_e2e_fix_step9_verify_all_LLM.prompt +146 -0
- pdd/prompts/agentic_fix_explore_LLM.prompt +45 -0
- pdd/prompts/agentic_fix_harvest_only_LLM.prompt +48 -0
- pdd/prompts/agentic_fix_primary_LLM.prompt +85 -0
- pdd/prompts/agentic_update_LLM.prompt +925 -0
- pdd/prompts/agentic_verify_explore_LLM.prompt +45 -0
- pdd/prompts/auto_include_LLM.prompt +122 -905
- pdd/prompts/change_LLM.prompt +3093 -1
- pdd/prompts/detect_change_LLM.prompt +686 -27
- pdd/prompts/example_generator_LLM.prompt +22 -1
- pdd/prompts/extract_code_LLM.prompt +5 -1
- pdd/prompts/extract_program_code_fix_LLM.prompt +7 -1
- pdd/prompts/extract_prompt_update_LLM.prompt +7 -8
- pdd/prompts/extract_promptline_LLM.prompt +17 -11
- pdd/prompts/find_verification_errors_LLM.prompt +6 -0
- pdd/prompts/fix_code_module_errors_LLM.prompt +12 -2
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +9 -0
- pdd/prompts/fix_verification_errors_LLM.prompt +22 -0
- pdd/prompts/generate_test_LLM.prompt +41 -7
- pdd/prompts/generate_test_from_example_LLM.prompt +115 -0
- pdd/prompts/increase_tests_LLM.prompt +1 -5
- pdd/prompts/insert_includes_LLM.prompt +316 -186
- pdd/prompts/prompt_code_diff_LLM.prompt +119 -0
- pdd/prompts/prompt_diff_LLM.prompt +82 -0
- pdd/prompts/trace_LLM.prompt +25 -22
- pdd/prompts/unfinished_prompt_LLM.prompt +85 -1
- pdd/prompts/update_prompt_LLM.prompt +22 -1
- pdd/pytest_output.py +127 -12
- pdd/remote_session.py +876 -0
- pdd/render_mermaid.py +236 -0
- pdd/server/__init__.py +52 -0
- pdd/server/app.py +335 -0
- pdd/server/click_executor.py +587 -0
- pdd/server/executor.py +338 -0
- pdd/server/jobs.py +661 -0
- pdd/server/models.py +241 -0
- pdd/server/routes/__init__.py +31 -0
- pdd/server/routes/architecture.py +451 -0
- pdd/server/routes/auth.py +364 -0
- pdd/server/routes/commands.py +929 -0
- pdd/server/routes/config.py +42 -0
- pdd/server/routes/files.py +603 -0
- pdd/server/routes/prompts.py +1322 -0
- pdd/server/routes/websocket.py +473 -0
- pdd/server/security.py +243 -0
- pdd/server/terminal_spawner.py +209 -0
- pdd/server/token_counter.py +222 -0
- pdd/setup_tool.py +648 -0
- pdd/simple_math.py +2 -0
- pdd/split_main.py +3 -2
- pdd/summarize_directory.py +237 -195
- pdd/sync_animation.py +8 -4
- pdd/sync_determine_operation.py +839 -112
- pdd/sync_main.py +351 -57
- pdd/sync_orchestration.py +1400 -756
- pdd/sync_tui.py +848 -0
- pdd/template_expander.py +161 -0
- pdd/template_registry.py +264 -0
- pdd/templates/architecture/architecture_json.prompt +237 -0
- pdd/templates/generic/generate_prompt.prompt +174 -0
- pdd/trace.py +168 -12
- pdd/trace_main.py +4 -3
- pdd/track_cost.py +140 -63
- pdd/unfinished_prompt.py +51 -4
- pdd/update_main.py +567 -67
- pdd/update_model_costs.py +2 -2
- pdd/update_prompt.py +19 -4
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/METADATA +29 -11
- pdd_cli-0.0.118.dist-info/RECORD +227 -0
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/licenses/LICENSE +1 -1
- pdd_cli-0.0.45.dist-info/RECORD +0 -116
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/WHEEL +0 -0
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.118.dist-info}/top_level.txt +0 -0
--- pdd/prompts/auto_include_LLM.prompt (pdd-cli 0.0.45)
+++ pdd/prompts/auto_include_LLM.prompt (pdd-cli 0.0.118)
@@ -12,10 +12,46 @@ You are a prompt expert that helps select the necessary subset of "includes" (li
 'Step 1.' - A string of possible includes based on the input_prompt.
 'Step 2.' - A string explaining why an include might or might not be necessary for the prompt.
 'Step 3.' - A string of the minimum set of includes required to achieve the goal of the input_prompt.
-'Step 4.' - A string of the string_of_includes based on Step 3.
+'Step 4.' - A string of the string_of_includes based on Step 3 (see strict tag naming rules below).
 </output>
 </definitions>

+<tag_naming_rules_for_step_4>
+IMPORTANT: Step 4 must emit XML snippets that wrap each <include> in a canonical dotted Python module tag.
+
+Format:
+<CANONICAL_MODULE_PATH><include>INCLUDE_PATH</include></CANONICAL_MODULE_PATH>
+
+Canonical tag name rules:
+- The wrapper tag MUST be a dotted Python import path (examples: utils.auth_helpers, utils.db_helpers, models.user).
+- NEVER use *_example as the wrapper tag (e.g. do NOT output <auth_helpers_example>...</auth_helpers_example>).
+- If INCLUDE_PATH is a context example (e.g. context/auth_helpers_example.py), the wrapper tag must be the real module
+  being exemplified (e.g. utils.auth_helpers). Prefer extracting the module path from the input_prompt text. If it is not
+  explicitly present, infer from filename + summary:
+  - default to utils.<base_name> for helpers/config/clients/etc
+  - default to models.<base_name> for data models
+- If INCLUDE_PATH is not a context example, derive CANONICAL_MODULE_PATH from the file path by stripping the extension and
+  replacing '/' with '.' (e.g. core/change_handler.py -> core.change_handler).
+</tag_naming_rules_for_step_4>
+
+<common_mistakes>
+CRITICAL: The available_includes list uses "File: path" format for INPUT only.
+You must TRANSFORM this into proper <include> syntax for OUTPUT.
+
+These output formats are WRONG (never output these):
+- [File: path/to/file.py]
+- File: path/to/file.py
+- {{path/to/file.py}}
+- path/to/file.py (without include tags)
+
+This output format is CORRECT:
+- <include>path/to/file.py</include>
+
+Example transformation:
+INPUT (available_includes): File: context/auth_helpers_example.py
+OUTPUT (Step 4): <utils.auth_helpers><include>context/auth_helpers_example.py</include></utils.auth_helpers>
+</common_mistakes>
+
 <context>
 Here is the input_prompt to find the includes for: <input_prompt>{input_prompt}</input_prompt>
 Here is the available_includes: <available_includes>{available_includes}</available_includes>
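For reference, the canonical-tag rules added in this hunk reduce to simple path manipulation: strip the extension, replace '/' with '.', and map context examples back to the module they exemplify. The Python sketch below is illustrative only and is not part of the pdd package; the helper names are invented, and the context-example branch shows just the `utils.<base_name>` fallback (the prompt asks the model to prefer a module path found in the input_prompt text when one is present).

```python
import os

def canonical_module_path(include_path: str) -> str:
    """Derive the dotted wrapper-tag name for an include path (sketch)."""
    stem = os.path.splitext(os.path.basename(include_path))[0]
    if include_path.startswith("context/") and stem.endswith("_example"):
        # Context example: fall back to utils.<base_name>; a real caller would
        # first try to read the exemplified module path out of the input_prompt.
        return "utils." + stem[: -len("_example")]
    # Ordinary source path: strip the extension and turn '/' into '.'.
    return os.path.splitext(include_path)[0].replace("/", ".")

def wrap_include(include_path: str) -> str:
    """Emit one Step 4 snippet: <tag><include>path</include></tag>."""
    tag = canonical_module_path(include_path)
    return f"<{tag}><include>{include_path}</include></{tag}>"

# wrap_include("core/change_handler.py")
#   -> <core.change_handler><include>core/change_handler.py</include></core.change_handler>
# wrap_include("context/auth_helpers_example.py")
#   -> <utils.auth_helpers><include>context/auth_helpers_example.py</include></utils.auth_helpers>
```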
@@ -27,8 +63,7 @@ Here are some examples of how to do this:
 <example_input_prompt>
 % You are an expert Python Software Engineer. Your goal is to write a Python function, "process_csv_change", that will read in a take in a csv file name and call change_example for each of the lines.

-
-% The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+<include>context/python_preamble.prompt</include>

 % Here are the inputs and outputs of the function:
 Inputs:
@@ -64,100 +99,32 @@ Here are some examples of how to do this:
 Step 3. Return the success status, list of modified prompts, total cost, and model name.
 </example_input_prompt>
 <example_available_includes>
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-context/find_section_example.py
-context/fix_code_module_errors_example.py
-context/fix_error_loop_example.py
-context/fix_errors_from_unit_tests_example.py
-context/generate_output_paths_example.py
-context/generate_test_example.py
-context/get_comment_example.py
-context/get_extension_example.py
-context/get_language_example.py
-context/git_update_example.py
-context/langchain_lcel_example.py
-context/llm_selector_example.py
-context/llm_token_counter_example.py
-context/postprocess_0_example.py
-context/postprocess_example.py
-context/postprocessed_runnable_llm_output.py
-context/preprocess_example.py
-context/process_csv_change_example.py
-context/prompt_caching.ipynb
-context/split_example.py
-context/tiktoken_example.py
-context/trace_example.py
-context/unfinished_prompt_example.py
-context/unrunnable_raw_llm_output.py
-context/update_prompt_example.py
-context/xml_tagger_example.py
+File: utils/csv_parser.py
+Summary: Utility functions for parsing and processing CSV files with pandas
+File: utils/file_processor.py
+Summary: Generic file processing utilities for reading and writing various formats
+File: core/change_handler.py
+Summary: Main function for applying changes to code files with validation
+File: models/data_validator.py
+Summary: Pydantic models and validation functions for input data
+File: cli/command_interface.py
+Summary: Click-based command line interface utilities
+File: database/connection.py
+Summary: Database connection and query utilities
+File: api/http_client.py
+Summary: HTTP client for making API requests with retry logic
+File: processing/text_analyzer.py
+Summary: Text analysis and natural language processing functions
+File: config/settings.py
+Summary: Configuration management and environment variable handling
+File: logging/logger.py
+Summary: Centralized logging configuration and utilities
 </example_available_includes>
 <example_string_of_includes>
 % Here are examples of how to use internal modules:
 <internal_example_modules>
-% Here is an example of the change function that will be used:
-
-from rich.console import Console
-
-console = Console()
-
-def main() -> None:
-    """
-    Main function to demonstrate the use of the `change` function from the `pdd.change` module.
-    Sets up environment variables, defines input parameters, and calls the `change` function.
-    """
-    # Set up the environment variable for PDD_PATH
-    # os.environ['PDD_PATH'] = '/path/to/pdd' # Replace with actual path
-
-    # Example inputs
-    input_prompt = "Write a function to calculate the factorial of a number."
-    input_code = """
-def factorial(n):
-    if n == 0 or n == 1:
-        return 1
-    else:
-        return n * factorial(n-1)
-"""
-    change_prompt = "Modify the function to take the square root of the factorial output."
-    strength = 0.5  # Strength parameter for the LLM (0.0 to 1.0)
-    temperature = 0.0  # Temperature parameter for the LLM (0.0 to 1.0)
-
-    try:
-        # Call the change function
-        modified_prompt, total_cost, model_name = change(
-            input_prompt, input_code, change_prompt, strength, temperature
-        )
-
-        # Print the results
-        console.print(f"[bold]Modified Prompt:[/bold]\n{modified_prompt}")
-        console.print(f"[bold]Total Cost:[/bold] ${total_cost:.6f}")
-        console.print(f"[bold]Model Used:[/bold] {model_name}")
-
-    except Exception as e:
-        console.print(f"[bold red]An error occurred:[/bold red] {str(e)}")
-
-if __name__ == "__main__":
-    main()
-</change_example>
+% Here is an example of the change function that will be used:
+<core.change_handler><include>core/change_handler.py</include></core.change_handler>
 </internal_example_modules>
 </example_string_of_includes>
 </example_1>
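The hunk above also switches the example available_includes from bare file paths to `File:` / `Summary:` pairs. Parsing that input format is straightforward; the sketch below is illustrative only (the actual include selection is performed by the LLM that consumes this prompt, not by Python code shipped in pdd):

```python
def parse_available_includes(text: str) -> list[tuple[str, str]]:
    """Collect (path, summary) pairs from 'File: ...' / 'Summary: ...' lines."""
    entries: list[tuple[str, str]] = []
    path = None
    for raw in text.splitlines():
        line = raw.strip()
        if line.startswith("File:"):
            path = line[len("File:"):].strip()
        elif line.startswith("Summary:") and path is not None:
            entries.append((path, line[len("Summary:"):].strip()))
            path = None
    return entries

# parse_available_includes("File: utils/csv_parser.py\nSummary: Utility functions for parsing ...")
#   -> [("utils/csv_parser.py", "Utility functions for parsing ...")]
```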
@@ -166,8 +133,7 @@ if __name__ == "__main__":
 <example_input_prompt>
 % You are an expert Python Software Engineer. Your goal is to write a Python function, "generate_test", that will create a unit test from a code file.

-
-% The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+<include>./context/python_preamble.prompt</include>

 % Here are the inputs and outputs of the function:
 Inputs:
@@ -200,827 +166,74 @@ if __name__ == "__main__":
 Step 7. Return the unit_test, total_cost and model_name
 </example_input_prompt>
 <example_available_includes>
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
context/find_section_example.py
|
|
224
|
-
context/fix_code_module_errors_example.py
|
|
225
|
-
context/fix_error_loop_example.py
|
|
226
|
-
context/fix_errors_from_unit_tests_example.py
|
|
227
|
-
context/generate_output_paths_example.py
|
|
228
|
-
context/generate_test_example.py
|
|
229
|
-
context/get_comment_example.py
|
|
230
|
-
context/get_extension_example.py
|
|
231
|
-
context/get_language_example.py
|
|
232
|
-
context/git_update_example.py
|
|
233
|
-
context/langchain_lcel_example.py
|
|
234
|
-
context/llm_selector_example.py
|
|
235
|
-
context/llm_token_counter_example.py
|
|
236
|
-
context/postprocess_0_example.py
|
|
237
|
-
context/postprocess_example.py
|
|
238
|
-
context/postprocessed_runnable_llm_output.py
|
|
239
|
-
context/preprocess_example.py
|
|
240
|
-
context/process_csv_change_example.py
|
|
241
|
-
context/prompt_caching.ipynb
|
|
242
|
-
context/split_example.py
|
|
243
|
-
context/tiktoken_example.py
|
|
244
|
-
context/trace_example.py
|
|
245
|
-
context/unfinished_prompt_example.py
|
|
246
|
-
context/unrunnable_raw_llm_output.py
|
|
247
|
-
context/update_prompt_example.py
|
|
248
|
-
context/xml_tagger_example.py
|
|
169
|
+
File: frameworks/langchain_utils.py
|
|
170
|
+
Summary: LangChain LCEL utilities and chain composition helpers
|
|
171
|
+
File: llm/model_selector.py
|
|
172
|
+
Summary: Dynamic LLM model selection based on task requirements
|
|
173
|
+
File: llm/token_counter.py
|
|
174
|
+
Summary: Token counting utilities for various LLM providers
|
|
175
|
+
File: processing/prompt_processor.py
|
|
176
|
+
Summary: Prompt preprocessing and template management functions
|
|
177
|
+
File: testing/test_generator.py
|
|
178
|
+
Summary: Automated unit test generation from code and prompts
|
|
179
|
+
File: processing/completion_detector.py
|
|
180
|
+
Summary: Functions to detect incomplete LLM outputs and continue generation
|
|
181
|
+
File: processing/output_postprocessor.py
|
|
182
|
+
Summary: Post-processing utilities for cleaning and formatting LLM outputs
|
|
183
|
+
File: utils/file_reader.py
|
|
184
|
+
Summary: Safe file reading utilities with encoding detection
|
|
185
|
+
File: config/environment.py
|
|
186
|
+
Summary: Environment variable management and project path resolution
|
|
187
|
+
File: markdown/renderer.py
|
|
188
|
+
Summary: Rich markdown rendering and formatting utilities
|
|
249
189
|
</example_available_includes>
|
|
250
190
|
<example_string_of_includes>
|
|
251
|
-
% Here is an example of a LangChain Expression Language (LCEL) program:
|
|
252
|
-
|
|
253
|
-
from langchain_community.cache import SQLiteCache
|
|
254
|
-
from langchain_community.llms.mlx_pipeline import MLXPipeline
|
|
255
|
-
from langchain.globals import set_llm_cache
|
|
256
|
-
from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser # Parsers are only avaiable in langchain_core.output_parsers not langchain.output_parsers
|
|
257
|
-
from langchain_core.output_parsers import StrOutputParser
|
|
258
|
-
from langchain_core.prompts import ChatPromptTemplate
|
|
259
|
-
from langchain_core.runnables import RunnablePassthrough, ConfigurableField
|
|
260
|
-
|
|
261
|
-
from langchain_openai import AzureChatOpenAI
|
|
262
|
-
from langchain_fireworks import Fireworks
|
|
263
|
-
from langchain_anthropic import ChatAnthropic
|
|
264
|
-
from langchain_openai import ChatOpenAI # Chatbot and conversational tasks
|
|
265
|
-
from langchain_openai import OpenAI # General language tasks
|
|
266
|
-
from langchain_google_genai import ChatGoogleGenerativeAI
|
|
267
|
-
from langchain_google_vertexai import ChatVertexAI
|
|
268
|
-
from langchain_groq import ChatGroq
|
|
269
|
-
from langchain_together import Together
|
|
270
|
-
|
|
271
|
-
from langchain.callbacks.base import BaseCallbackHandler
|
|
272
|
-
from langchain.schema import LLMResult
|
|
273
|
-
|
|
274
|
-
import json
|
|
275
|
-
|
|
276
|
-
from langchain_community.chat_models.mlx import ChatMLX
|
|
277
|
-
from langchain_core.messages import HumanMessage
|
|
278
|
-
|
|
279
|
-
from langchain_ollama.llms import OllamaLLM
|
|
280
|
-
from langchain_aws import ChatBedrockConverse
|
|
281
|
-
|
|
282
|
-
# Define a base output parser (e.g., PydanticOutputParser)
|
|
283
|
-
from pydantic import BaseModel, Field
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
class CompletionStatusHandler(BaseCallbackHandler):
|
|
288
|
-
def __init__(self):
|
|
289
|
-
self.is_complete = False
|
|
290
|
-
self.finish_reason = None
|
|
291
|
-
self.input_tokens = None
|
|
292
|
-
self.output_tokens = None
|
|
293
|
-
|
|
294
|
-
def on_llm_end(self, response: LLMResult, **kwargs) -> None:
|
|
295
|
-
self.is_complete = True
|
|
296
|
-
if response.generations and response.generations[0]:
|
|
297
|
-
generation = response.generations[0][0]
|
|
298
|
-
self.finish_reason = generation.generation_info.get('finish_reason').lower()
|
|
299
|
-
|
|
300
|
-
# Extract token usage
|
|
301
|
-
if hasattr(generation.message, 'usage_metadata'):
|
|
302
|
-
usage_metadata = generation.message.usage_metadata
|
|
303
|
-
self.input_tokens = usage_metadata.get('input_tokens')
|
|
304
|
-
self.output_tokens = usage_metadata.get('output_tokens')
|
|
305
|
-
# print("response:",response)
|
|
306
|
-
print("Extracted information:")
|
|
307
|
-
print(f"Finish reason: {self.finish_reason}")
|
|
308
|
-
print(f"Input tokens: {self.input_tokens}")
|
|
309
|
-
print(f"Output tokens: {self.output_tokens}")
|
|
310
|
-
|
|
311
|
-
# Set up the LLM with the custom handler
|
|
312
|
-
handler = CompletionStatusHandler()
|
|
313
|
-
# Always setup cache to save money and increase speeds
|
|
314
|
-
set_llm_cache(SQLiteCache(database_path=".langchain.db"))
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
# Create the LCEL template. Make note of the variable {topic} which will be filled in later.
|
|
318
|
-
prompt_template = PromptTemplate.from_template("Tell me a joke about {topic}")
|
|
319
|
-
|
|
320
|
-
llm = ChatGoogleGenerativeAI(model="gemini-2.5-pro-exp-03-25", temperature=0, callbacks=[handler])
|
|
321
|
-
# Combine with a model and parser to output a string
|
|
322
|
-
chain = prompt_template |llm| StrOutputParser()
|
|
323
|
-
|
|
324
|
-
# Run the template. Notice that the input is a dictionary with a single key "topic" which feeds it into the above prompt template. This is needed because the prompt template has a variable {topic} which needs to be filled in when invoked.
|
|
325
|
-
result = chain.invoke({"topic": "cats"})
|
|
326
|
-
print("********Google:", result)
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
llm = ChatVertexAI(model="gemini-2.5-pro-exp-03-25", temperature=0, callbacks=[handler])
|
|
330
|
-
# Combine with a model and parser to output a string
|
|
331
|
-
chain = prompt_template |llm| StrOutputParser()
|
|
332
|
-
|
|
333
|
-
# Run the template. Notice that the input is a dictionary with a single key "topic" which feeds it into the above prompt template. This is needed because the prompt template has a variable {topic} which needs to be filled in when invoked.
|
|
334
|
-
result = chain.invoke({"topic": "cats"})
|
|
335
|
-
print("********GoogleVertex:", result)
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
# Define your desired data structure.
|
|
339
|
-
class Joke(BaseModel):
|
|
340
|
-
setup: str = Field(description="question to set up a joke")
|
|
341
|
-
punchline: str = Field(description="answer to resolve the joke")
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
# Set up a parser
|
|
345
|
-
parser = JsonOutputParser(pydantic_object=Joke)
|
|
346
|
-
|
|
347
|
-
# Create a prompt template
|
|
348
|
-
prompt = PromptTemplate(
|
|
349
|
-
template="Answer the user query.\n{format_instructions}\n{query}\n",
|
|
350
|
-
input_variables=["query"],
|
|
351
|
-
partial_variables={"format_instructions": parser.get_format_instructions()},
|
|
352
|
-
)
|
|
353
|
-
|
|
354
|
-
llm_no_struct = ChatOpenAI(model="gpt-4o-mini", temperature=0,
|
|
355
|
-
callbacks=[handler])
|
|
356
|
-
llm = llm_no_struct.with_structured_output(Joke) # with structured output forces the output to be a specific object, in this case Joke. Only OpenAI models have structured output
|
|
357
|
-
# Chain the components.
|
|
358
|
-
# The class `LLMChain` was deprecated in LangChain 0.1.17 and will be removed in 1.0. Use RunnableSequence, e.g., `prompt | llm` instead.
|
|
359
|
-
chain = prompt | llm
|
|
360
|
-
|
|
361
|
-
# Invoke the chain with a query.
|
|
362
|
-
# IMPORTANT: chain.run is now obsolete. Use chain.invoke instead.
|
|
363
|
-
result = chain.invoke({"query": "Tell me a joke about openai."})
|
|
364
|
-
print("4o mini JSON: ",result)
|
|
365
|
-
print(result.setup) # How to access the structured output
|
|
366
|
-
|
|
367
|
-
llm = ChatOpenAI(model="o1", temperature=1,
|
|
368
|
-
callbacks=[handler],model_kwargs = {"max_completion_tokens" : 1000})
|
|
369
|
-
# Chain the components.
|
|
370
|
-
# The class `LLMChain` was deprecated in LangChain 0.1.17 and will be removed in 1.0. Use RunnableSequence, e.g., `prompt | llm` instead.
|
|
371
|
-
chain = prompt | llm | parser
|
|
372
|
-
|
|
373
|
-
# Invoke the chain with a query.
|
|
374
|
-
# IMPORTANT: chain.run is now obsolete. Use chain.invoke instead.
|
|
375
|
-
result = chain.invoke({"query": "Tell me a joke about openai."})
|
|
376
|
-
print("o1 JSON: ",result)
|
|
377
|
-
|
|
378
|
-
# Get DEEPSEEK_API_KEY environmental variable
|
|
379
|
-
|
|
380
|
-
deepseek_api_key = os.getenv('DEEPSEEK_API_KEY')
|
|
381
|
-
|
|
382
|
-
# Ensure the API key is retrieved successfully
|
|
383
|
-
if deepseek_api_key is None:
|
|
384
|
-
raise ValueError("DEEPSEEK_API_KEY environment variable is not set")
|
|
385
|
-
|
|
386
|
-
llm = ChatOpenAI(
|
|
387
|
-
model='deepseek-chat',
|
|
388
|
-
openai_api_key=deepseek_api_key,
|
|
389
|
-
openai_api_base='https://api.deepseek.com',
|
|
390
|
-
temperature=0, callbacks=[handler]
|
|
391
|
-
)
|
|
392
|
-
|
|
393
|
-
# Chain the components
|
|
394
|
-
chain = prompt | llm | parser
|
|
395
|
-
|
|
396
|
-
# Invoke the chain with a query
|
|
397
|
-
result = chain.invoke({"query": "Write joke about deepseek."})
|
|
398
|
-
print("deepseek",result)
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
# Set up a parser
|
|
402
|
-
parser = PydanticOutputParser(pydantic_object=Joke)
|
|
403
|
-
# Chain the components
|
|
404
|
-
chain = prompt | llm | parser
|
|
405
|
-
|
|
406
|
-
# Invoke the chain with a query
|
|
407
|
-
result = chain.invoke({"query": "Write joke about deepseek and pydantic."})
|
|
408
|
-
print("deepseek pydantic",result)
|
|
409
|
-
|
|
410
|
-
# Set up the Azure ChatOpenAI LLM instance
|
|
411
|
-
llm_no_struct = AzureChatOpenAI(
|
|
412
|
-
model="o4-mini",
|
|
413
|
-
temperature=1,
|
|
414
|
-
callbacks=[handler]
|
|
415
|
-
)
|
|
416
|
-
llm = llm_no_struct.with_structured_output(Joke) # with structured output forces the output to be a specific JSON format
|
|
417
|
-
# Chain the components: prompt | llm | parser
|
|
418
|
-
chain = prompt | llm # returns a Joke object
|
|
419
|
-
|
|
420
|
-
# Invoke the chain with a query
|
|
421
|
-
result = chain.invoke({"query": "What is Azure?"}) # Pass a dictionary if `invoke` expects it
|
|
422
|
-
print("Azure Result:", result)
|
|
423
|
-
|
|
424
|
-
# Set up a parser
|
|
425
|
-
parser = JsonOutputParser(pydantic_object=Joke)
|
|
426
|
-
|
|
427
|
-
llm = Fireworks(
|
|
428
|
-
model="accounts/fireworks/models/llama4-maverick-instruct-basic",
|
|
429
|
-
temperature=0, callbacks=[handler])
|
|
430
|
-
# Chain the components
|
|
431
|
-
chain = prompt | llm | parser
|
|
432
|
-
|
|
433
|
-
# Invoke the chain with a query
|
|
434
|
-
# no money in account
|
|
435
|
-
# result = chain.invoke({"query": "Tell me a joke about the president"})
|
|
436
|
-
# print("fireworks",result)
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
|
|
441
|
-
|
|
442
|
-
prompt = ChatPromptTemplate.from_template(
|
|
443
|
-
"Tell me a short joke about {topic}"
|
|
444
|
-
)
|
|
445
|
-
chat_openai = ChatOpenAI(model="gpt-3.5-turbo", callbacks=[handler])
|
|
446
|
-
openai = OpenAI(model="gpt-3.5-turbo-instruct", callbacks=[handler])
|
|
447
|
-
anthropic = ChatAnthropic(model="claude-2", callbacks=[handler])
|
|
448
|
-
model = (
|
|
449
|
-
chat_openai
|
|
450
|
-
.with_fallbacks([anthropic])
|
|
451
|
-
.configurable_alternatives(
|
|
452
|
-
ConfigurableField(id="model"),
|
|
453
|
-
default_key="chat_openai",
|
|
454
|
-
openai=openai,
|
|
455
|
-
anthropic=anthropic,
|
|
456
|
-
)
|
|
457
|
-
)
|
|
458
|
-
|
|
459
|
-
chain = (
|
|
460
|
-
{"topic": RunnablePassthrough()}
|
|
461
|
-
| prompt
|
|
462
|
-
| model
|
|
463
|
-
| StrOutputParser()
|
|
464
|
-
)
|
|
465
|
-
result = chain.invoke({"topic": "Tell me a joke about the president"})
|
|
466
|
-
print("config alt:",result)
|
|
467
|
-
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
llm = ChatAnthropic(
|
|
471
|
-
model="claude-3-7-sonnet-latest",
|
|
472
|
-
max_tokens=5000, # Total tokens for the response
|
|
473
|
-
thinking={"type": "enabled", "budget_tokens": 2000}, # Tokens for internal reasoning
|
|
474
|
-
)
|
|
475
|
-
|
|
476
|
-
response = llm.invoke("What is the cube root of 50.653?")
|
|
477
|
-
print(json.dumps(response.content, indent=2))
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
llm = ChatGroq(temperature=0, model_name="qwen-qwq-32b", callbacks=[handler])
|
|
481
|
-
system = "You are a helpful assistant."
|
|
482
|
-
human = "{text}"
|
|
483
|
-
prompt = ChatPromptTemplate.from_messages([("system", system), ("human", human)])
|
|
484
|
-
|
|
485
|
-
chain = prompt | llm | StrOutputParser()
|
|
486
|
-
print(chain.invoke({"text": "Explain the importance of low latency LLMs."}))
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
llm = Together(
|
|
490
|
-
model="meta-llama/Llama-3-70b-chat-hf",
|
|
491
|
-
max_tokens=500, callbacks=[handler]
|
|
492
|
-
)
|
|
493
|
-
chain = prompt | llm | StrOutputParser()
|
|
494
|
-
print(chain.invoke({"text": "Explain the importance of together.ai."}))
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
# Define a prompt template with placeholders for variables
|
|
498
|
-
prompt_template = PromptTemplate.from_template("Tell me a {adjective} joke about {content}.")
|
|
499
|
-
|
|
500
|
-
# Format the prompt with the variables
|
|
501
|
-
formatted_prompt = prompt_template.format(adjective="funny", content="data scientists")
|
|
502
|
-
|
|
503
|
-
# Print the formatted prompt
|
|
504
|
-
print(formatted_prompt)
|
|
505
|
-
|
|
506
|
-
|
|
507
|
-
# Set up the LLM with the custom handler
|
|
508
|
-
handler = CompletionStatusHandler()
|
|
509
|
-
|
|
510
|
-
|
|
511
|
-
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.9, callbacks=[handler])
|
|
512
|
-
|
|
513
|
-
prompt = PromptTemplate.from_template("What is a good name for a company that makes {product}?")
|
|
514
|
-
|
|
515
|
-
chain = prompt | llm
|
|
516
|
-
|
|
517
|
-
# Invoke the chain
|
|
518
|
-
response = chain.invoke({"product":"colorful socks"})
|
|
519
|
-
|
|
520
|
-
# Check completion status
|
|
521
|
-
print(f"Is complete: {handler.is_complete}")
|
|
522
|
-
print(f"Finish reason: {handler.finish_reason}")
|
|
523
|
-
print(f"Response: {response}")
|
|
524
|
-
print(f"Input tokens: {handler.input_tokens}")
|
|
525
|
-
print(f"Output tokens: {handler.output_tokens}")
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
|
|
529
|
-
template = """Question: {question}"""
|
|
530
|
-
|
|
531
|
-
prompt = ChatPromptTemplate.from_template(template)
|
|
532
|
-
|
|
533
|
-
model = OllamaLLM(model="qwen2.5-coder:32b")
|
|
534
|
-
|
|
535
|
-
chain = prompt | model
|
|
536
|
-
|
|
537
|
-
output = chain.invoke({"question": "Write a python function that calculates Pi"})
|
|
538
|
-
print(output)
|
|
539
|
-
|
|
540
|
-
|
|
541
|
-
|
|
542
|
-
llm = MLXPipeline.from_model_id(
|
|
543
|
-
"mlx-community/quantized-gemma-2b-it",
|
|
544
|
-
pipeline_kwargs={"max_tokens": 10, "temp": 0.1},
|
|
545
|
-
)
|
|
546
|
-
|
|
547
|
-
|
|
548
|
-
chat_model = ChatMLX(llm=llm)
|
|
549
|
-
messages = [HumanMessage(content="What happens when an unstoppable force meets an immovable object?")]
|
|
550
|
-
response = chat_model.invoke(messages)
|
|
551
|
-
print(response.content)
|
|
552
|
-
|
|
553
|
-
|
|
554
|
-
|
|
555
|
-
llm = ChatBedrockConverse(
|
|
556
|
-
model_id="anthropic.claude-3-5-sonnet-20240620-v1:0",
|
|
557
|
-
# Additional parameters like temperature, max_tokens can be set here
|
|
558
|
-
)
|
|
559
|
-
|
|
560
|
-
messages = [HumanMessage(content="What happens when an unstoppable force meets an immovable sonnet?")]
|
|
561
|
-
response = llm.invoke(messages)
|
|
562
|
-
print(response.content)</lcel_example>
|
|
191
|
+
% Here is an example of a LangChain Expression Language (LCEL) program:
|
|
192
|
+
<frameworks.langchain_utils><include>frameworks/langchain_utils.py</include></frameworks.langchain_utils>
|
|
563
193
|
|
|
564
194
|
% Here are examples of how to use internal modules:
|
|
565
195
|
<internal_example_modules>
|
|
566
|
-
% Here is an example how to preprocess the prompt from a file:
|
|
567
|
-
|
|
568
|
-
console = Console()
|
|
569
|
-
|
|
570
|
-
prompt = """
|
|
571
|
-
<prompt>
|
|
572
|
-
Hello World
|
|
573
|
-
|
|
574
|
-
<pdd>This is a comment</pdd>
|
|
575
|
-
[About](https://about.google/?fg=1&utm_source=google-US&utm_medium=referral&utm_campaign=hp-header) [Store](https://store.google.com/US?utm_source=hp_header&utm_medium=google_ooo&utm_campaign=GS100042&hl=en-US)
|
|
576
|
-
|
|
577
|
-
AI Mode
|
|
578
|
-
|
|
579
|
-
Choose what you’re giving feedback on
|
|
580
|
-
|
|
581
|
-
* * *
|
|
582
|
-
|
|
583
|
-
See more
|
|
584
|
-
|
|
585
|
-
Delete
|
|
586
|
-
|
|
587
|
-
Delete
|
|
588
|
-
|
|
589
|
-
Report inappropriate predictions
|
|
590
|
-
|
|
591
|
-
I'm Feeling Curious
|
|
592
|
-
|
|
593
|
-
I'm Feeling Hungry
|
|
594
|
-
|
|
595
|
-
I'm Feeling Adventurous
|
|
596
|
-
|
|
597
|
-
I'm Feeling Playful
|
|
598
|
-
|
|
599
|
-
I'm Feeling Stellar
|
|
600
|
-
|
|
601
|
-
I'm Feeling Doodley
|
|
602
|
-
|
|
603
|
-
I'm Feeling Trendy
|
|
604
|
-
|
|
605
|
-
I'm Feeling Artistic
|
|
606
|
-
|
|
607
|
-
I'm Feeling Funny
|
|
608
|
-
|
|
609
|
-
[Advertising](https://www.google.com/intl/en_us/ads/?subid=ww-ww-et-g-awa-a-g_hpafoot1_1!o2&utm_source=google.com&utm_medium=referral&utm_campaign=google_hpafooter&fg=1) [Business](https://www.google.com/services/?subid=ww-ww-et-g-awa-a-g_hpbfoot1_1!o2&utm_source=google.com&utm_medium=referral&utm_campaign=google_hpbfooter&fg=1) [How Search works](https://google.com/search/howsearchworks/?fg=1)
|
|
610
|
-
|
|
611
|
-
[Applying AI towards science and the environment](https://ai.google/societal-impact/?utm_source=googlehpfooter&utm_medium=housepromos&utm_campaign=bottom-footer)
|
|
612
|
-
|
|
613
|
-
[Privacy](https://policies.google.com/privacy?hl=en&fg=1) [Terms](https://policies.google.com/terms?hl=en&fg=1)
|
|
614
|
-
|
|
615
|
-
Settings
|
|
616
|
-
|
|
617
|
-
[Search settings](https://www.google.com/preferences?hl=en&fg=1)
|
|
618
|
-
|
|
619
|
-
[Advanced search](https://www.google.com/advanced_search?hl=en&fg=1)
|
|
620
|
-
|
|
621
|
-
[Your data in Search](https://www.google.com/history/privacyadvisor/search/unauth?utm_source=googlemenu&fg=1&cctld=com)
|
|
622
|
-
|
|
623
|
-
[Search history](https://www.google.com/history/optout?hl=en&fg=1)
|
|
624
|
-
|
|
625
|
-
[Search help](https://support.google.com/websearch/?p=ws_results_help&hl=en&fg=1)
|
|
626
|
-
|
|
627
|
-
Send feedback
|
|
628
|
-
|
|
629
|
-
Dark theme: Off
|
|
630
|
-
|
|
631
|
-
Google apps
|
|
632
|
-
|
|
633
|
-

|
|
634
|
-
|
|
635
|
-
Sign in to GoogleGet the most from your Google account
|
|
636
|
-
|
|
637
|
-
Stay signed out
|
|
638
|
-
|
|
639
|
-
Sign in
|
|
640
|
-
{test}
|
|
641
|
-
{test2}
|
|
642
|
-
```<TODO.md>```
|
|
643
|
-
|
|
644
|
-
<pdd>
|
|
645
|
-
multi-line
|
|
646
|
-
comment should not show up
|
|
647
|
-
</pdd>
|
|
648
|
-
</prompt>
|
|
649
|
-
"""
|
|
650
|
-
|
|
651
|
-
recursive = False
|
|
652
|
-
double_curly_brackets = True
|
|
653
|
-
exclude_keys = ["test2"] # exclude test2 from being doubled
|
|
654
|
-
|
|
655
|
-
# Debug info
|
|
656
|
-
console.print(f"[bold yellow]Debug: exclude_keys = {exclude_keys}[/bold yellow]")
|
|
657
|
-
|
|
658
|
-
processed = preprocess(prompt, recursive, double_curly_brackets, exclude_keys=exclude_keys)
|
|
659
|
-
console.print("[bold white]Processed Prompt:[/bold white]")
|
|
660
|
-
console.print(processed)
|
|
661
|
-
</preprocess_example>
|
|
662
|
-
|
|
663
|
-
% Example of selecting a Langchain LLM and counting tokens using llm_selector: <llm_selector_example>from pdd.llm_selector import llm_selector
|
|
664
|
-
|
|
665
|
-
def main() -> None:
|
|
666
|
-
"""
|
|
667
|
-
Main function to demonstrate the usage of the llm_selector function.
|
|
668
|
-
"""
|
|
669
|
-
# Define the strength and temperature parameters
|
|
670
|
-
strength: float = 0.5 # Example strength value for the LLM model
|
|
671
|
-
temperature: float = 1.0 # Example temperature value for the LLM model
|
|
672
|
-
|
|
673
|
-
try:
|
|
674
|
-
while strength <= 1.1:
|
|
675
|
-
# Call the llm_selector function with the specified strength and temperature
|
|
676
|
-
llm, token_counter, input_cost, output_cost, model_name = llm_selector(strength, temperature)
|
|
677
|
-
print(f"Strength: {strength}")
|
|
678
|
-
|
|
679
|
-
# Print the details of the selected LLM model
|
|
680
|
-
print(f"Selected LLM Model: {model_name}")
|
|
681
|
-
print(f"Input Cost per Million Tokens: {input_cost}")
|
|
682
|
-
print(f"Output Cost per Million Tokens: {output_cost}")
|
|
683
|
-
|
|
684
|
-
# Example usage of the token counter function
|
|
685
|
-
sample_text: str = "This is a sample text to count tokens."
|
|
686
|
-
token_count: int = token_counter(sample_text)
|
|
687
|
-
print(f"Token Count for Sample Text: {token_count}")
|
|
688
|
-
print(f"model_name: {model_name}")
|
|
689
|
-
strength += 0.05
|
|
690
|
-
except FileNotFoundError as e:
|
|
691
|
-
print(f"Error: {e}")
|
|
692
|
-
except ValueError as e:
|
|
693
|
-
print(f"Error: {e}")
|
|
196
|
+
% Here is an example how to preprocess the prompt from a file:
|
|
197
|
+
<processing.prompt_processor><include>processing/prompt_processor.py</include></processing.prompt_processor>
|
|
694
198
|
|
|
695
|
-
|
|
696
|
-
|
|
199
|
+
% Example of selecting a Langchain LLM and counting tokens using llm_selector:
|
|
200
|
+
<llm.model_selector><include>llm/model_selector.py</include></llm.model_selector>
|
|
697
201
|
|
|
698
|
-
% Example usage of the unfinished_prompt function:
|
|
699
|
-
|
|
202
|
+
% Example usage of the unfinished_prompt function:
|
|
203
|
+
<processing.completion_detector><include>processing/completion_detector.py</include></processing.completion_detector>
|
|
700
204
|
|
|
701
|
-
|
|
702
|
-
|
|
205
|
+
% Here is an example how to continue the generation of a model output:
|
|
206
|
+
<processing.completion_detector><include>processing/completion_detector.py</include></processing.completion_detector>
|
|
703
207
|
|
|
704
|
-
|
|
705
|
-
|
|
706
|
-
# - It's installed in your Python environment (e.g., via pip if it's a package), OR
|
|
707
|
-
# - The directory containing the `pdd` package is added to your PYTHONPATH.
|
|
708
|
-
# For instance, if your project structure is:
|
|
709
|
-
# my_project/
|
|
710
|
-
# ├── pdd/ # The module's package
|
|
711
|
-
# │ ├── __init__.py
|
|
712
|
-
# │ ├── unfinished_prompt.py
|
|
713
|
-
# │ ├── load_prompt_template.py
|
|
714
|
-
# │ └── llm_invoke.py
|
|
715
|
-
# └── examples/
|
|
716
|
-
# └── run_this_example.py (this file)
|
|
717
|
-
# You would typically run this script from the `my_project` directory
|
|
718
|
-
# (e.g., `python examples/run_this_example.py`) after ensuring `my_project`
|
|
719
|
-
# is in PYTHONPATH (e.g., `export PYTHONPATH=$PYTHONPATH:/path/to/my_project`).
|
|
720
|
-
#
|
|
721
|
-
# 2. The `pdd` package requires internal setup for its dependencies:
|
|
722
|
-
# - A prompt template file named "unfinished_prompt_LLM" (e.g., "unfinished_prompt_LLM.txt")
|
|
723
|
-
# must be present where `pdd.load_prompt_template` (used internally by `unfinished_prompt`)
|
|
724
|
-
# can find it. This location is usually relative to the `pdd` package structure.
|
|
725
|
-
# - The `pdd.llm_invoke` function (used internally) must be configured for access to an LLM.
|
|
726
|
-
# This typically involves setting environment variables for API keys (e.g., `OPENAI_API_KEY`).
|
|
727
|
-
#
|
|
728
|
-
# This script should be saved outside the `pdd` package, for instance, in an
|
|
729
|
-
# `examples/` directory as shown above.
|
|
730
|
-
# To run: `python name_of_this_script.py` (adjust path as needed).
|
|
731
|
-
|
|
732
|
-
# --- Example Usage ---
|
|
733
|
-
|
|
734
|
-
# 1. Define the prompt text you want to analyze.
|
|
735
|
-
# This example uses a prompt that is intentionally incomplete to demonstrate
|
|
736
|
-
# the function's ability to detect incompleteness.
|
|
737
|
-
my_prompt_text = "Write a comprehensive guide on how to bake a sourdough bread, starting from creating a starter, then the kneading process, and finally"
|
|
738
|
-
|
|
739
|
-
rprint(f"[bold cyan]Analyzing prompt:[/bold cyan] \"{my_prompt_text}\"")
|
|
740
|
-
|
|
741
|
-
# 2. Call the `unfinished_prompt` function.
|
|
742
|
-
# Review the function's docstring for detailed parameter information.
|
|
743
|
-
# - `prompt_text` (str): The text of the prompt to analyze.
|
|
744
|
-
# - `strength` (float, optional, 0.0-1.0, default=0.5): Influences the LLM's behavior or model choice.
|
|
745
|
-
# - `temperature` (float, optional, 0.0-1.0, default=0.0): Controls the randomness of the LLM's output.
|
|
746
|
-
# - `verbose` (bool, optional, default=False): If True, the function will print detailed internal logs.
|
|
747
|
-
#
|
|
748
|
-
# The function returns a tuple: (reasoning, is_finished, total_cost, model_name)
|
|
749
|
-
# - `reasoning` (str): The LLM's structured explanation for its completeness assessment.
|
|
750
|
-
# - `is_finished` (bool): True if the prompt is considered complete, False otherwise.
|
|
751
|
-
# - `total_cost` (float): The estimated cost of the LLM call. The unit (e.g., USD) depends on the LLM provider.
|
|
752
|
-
# - `model_name` (str): The name of the LLM model that was used for the analysis.
|
|
753
|
-
|
|
754
|
-
# Example call with verbose output and custom strength/temperature settings.
|
|
755
|
-
reasoning_str, is_complete_flag, call_cost, llm_model = unfinished_prompt(
|
|
756
|
-
prompt_text=my_prompt_text,
|
|
757
|
-
strength=0.6, # Example: using a specific strength value
|
|
758
|
-
temperature=0.1, # Example: using a low temperature for more deterministic reasoning
|
|
759
|
-
verbose=True # Set to True to see detailed logs from within the unfinished_prompt function
|
|
760
|
-
)
|
|
761
|
-
|
|
762
|
-
# 3. Print the results returned by the function.
|
|
763
|
-
rprint("\n[bold green]--- Analysis Results ---[/bold green]")
|
|
764
|
-
rprint(f" [bold]Prompt Analyzed:[/bold] \"{my_prompt_text}\"")
|
|
765
|
-
rprint(f" [bold]Is prompt complete?:[/bold] {'Yes, the LLM considers the prompt complete.' if is_complete_flag else 'No, the LLM suggests the prompt needs continuation.'}")
|
|
766
|
-
rprint(f" [bold]LLM's Reasoning:[/bold]\n {reasoning_str}") # Rich print will handle newlines in the reasoning string
|
|
767
|
-
rprint(f" [bold]Cost of Analysis:[/bold] ${call_cost:.6f}") # Display cost, assuming USD. Adjust currency/format as needed.
|
|
768
|
-
rprint(f" [bold]LLM Model Used:[/bold] {llm_model}")
|
|
769
|
-
|
|
770
|
-
# --- Example of calling with default parameters ---
|
|
771
|
-
# If you want to use the default strength (0.5), temperature (0.0), and verbose (False):
|
|
772
|
-
#
|
|
773
|
-
# default_prompt_text = "What is the capital of Canada?"
|
|
774
|
-
# rprint(f"\n[bold cyan]Analyzing prompt with default settings:[/bold cyan] \"{default_prompt_text}\"")
|
|
775
|
-
#
|
|
776
|
-
# reasoning_def, is_finished_def, cost_def, model_def = unfinished_prompt(
|
|
777
|
-
# prompt_text=default_prompt_text
|
|
778
|
-
# )
|
|
779
|
-
#
|
|
780
|
-
# rprint("\n[bold green]--- Default Call Analysis Results ---[/bold green]")
|
|
781
|
-
# rprint(f" [bold]Prompt Analyzed:[/bold] \"{default_prompt_text}\"")
|
|
782
|
-
# rprint(f" [bold]Is prompt complete?:[/bold] {'Yes' if is_finished_def else 'No'}")
|
|
783
|
-
# rprint(f" [bold]LLM's Reasoning:[/bold]\n {reasoning_def}")
|
|
784
|
-
# rprint(f" [bold]Cost of Analysis:[/bold] ${cost_def:.6f}")
|
|
785
|
-
# rprint(f" [bold]LLM Model Used:[/bold] {model_def}")
|
|
786
|
-
</unfinished_prompt_example>
|
|
787
|
-
|
|
788
|
-
% Here is an example how to continue the generation of a model output: <continue_generation_example>from pdd.continue_generation import continue_generation
|
|
789
|
-
|
|
790
|
-
def main() -> None:
|
|
791
|
-
"""
|
|
792
|
-
Main function to demonstrate the usage of the continue_generation function.
|
|
793
|
-
It continues the generation of text using a language model and calculates the cost.
|
|
794
|
-
"""
|
|
795
|
-
# Define the input parameters for the continue_generation function
|
|
796
|
-
# formatted_input_prompt: str = "Once upon a time in a land far away, there was a"
|
|
797
|
-
# load context/cli_python_preprocessed.prompt into formatted_input_prompt
|
|
798
|
-
with open("context/cli_python_preprocessed.prompt", "r") as file:
|
|
799
|
-
formatted_input_prompt = file.read()
|
|
800
|
-
|
|
801
|
-
# llm_output: str = "" # Initial LLM output is empty
|
|
802
|
-
# load context/unfinished_prompt.txt into llm_output
|
|
803
|
-
with open("context/llm_output_fragment.txt", "r") as file:
|
|
804
|
-
llm_output = file.read()
|
|
805
|
-
strength: float = .915 # Strength parameter for the LLM model
|
|
806
|
-
temperature: float = 0 # Temperature parameter for the LLM model
|
|
807
|
-
|
|
808
|
-
try:
|
|
809
|
-
# Call the continue_generation function
|
|
810
|
-
final_llm_output, total_cost, model_name = continue_generation(
|
|
811
|
-
formatted_input_prompt=formatted_input_prompt,
|
|
812
|
-
llm_output=llm_output,
|
|
813
|
-
strength=strength,
|
|
814
|
-
temperature=temperature,
|
|
815
|
-
verbose=True
|
|
816
|
-
)
|
|
817
|
-
|
|
818
|
-
# Output the results
|
|
819
|
-
# print(f"Final LLM Output: {final_llm_output}")
|
|
820
|
-
print(f"Total Cost: ${total_cost:.6f}")
|
|
821
|
-
print(f"Model Name: {model_name}")
|
|
822
|
-
# write final_llm_output to context/final_llm_output.txt
|
|
823
|
-
with open("context/final_llm_output.py", "w") as file:
|
|
824
|
-
file.write(final_llm_output)
|
|
825
|
-
|
|
826
|
-
except FileNotFoundError as e:
|
|
827
|
-
print(f"Error: {e}")
|
|
828
|
-
except Exception as e:
|
|
829
|
-
print(f"An error occurred: {e}")
|
|
830
|
-
|
|
831
|
-
if __name__ == "__main__":
|
|
832
|
-
main()</continue_generation_example>
|
|
833
|
-
|
|
834
|
-
% Here is an example how to postprocess the model output result: <postprocess_example>"""
|
|
835
|
-
Example demonstrating the usage of the `postprocess` function
|
|
836
|
-
from the `pdd.postprocess` module.
|
|
837
|
-
|
|
838
|
-
This example showcases two scenarios for extracting code from an LLM's text output:
|
|
839
|
-
1. Simple code extraction (strength = 0): Uses basic string manipulation to find code
|
|
840
|
-
blocks enclosed in triple backticks. This method is fast and has no cost.
|
|
841
|
-
2. Advanced code extraction (strength > 0): Leverages an LLM for more robust extraction.
|
|
842
|
-
This method is more powerful but incurs a cost and takes more time.
|
|
843
|
-
|
|
844
|
-
To run this example:
|
|
845
|
-
1. Ensure the `pdd` package (containing the `postprocess` module) is in your PYTHONPATH
|
|
846
|
-
or installed in your environment.
|
|
847
|
-
2. Ensure the `rich` library is installed (`pip install rich`).
|
|
848
|
-
3. This script uses `unittest.mock` (part of Python's standard library) to simulate
|
|
849
|
-
the behavior of internal dependencies (`load_prompt_template` and `llm_invoke`)
|
|
850
|
-
for the LLM-based extraction scenario. This allows the example to run without
|
|
851
|
-
requiring actual LLM API calls or specific prompt files.
|
|
852
|
-
"""
|
|
853
|
-
from rich import print
|
|
854
|
-
from unittest.mock import patch, MagicMock
|
|
855
|
-
|
|
856
|
-
# Assuming 'pdd' package is in PYTHONPATH or installed.
|
|
857
|
-
# The 'postprocess' module is expected to be at pdd/postprocess.py
|
|
858
|
-
from pdd.postprocess import postprocess, ExtractedCode # ExtractedCode is needed for the mock
|
|
859
|
-
|
|
860
|
-
def main():
|
|
861
|
-
"""
|
|
862
|
-
Runs the demonstration for the postprocess function.
|
|
863
|
-
"""
|
|
864
|
-
print("[bold underline blue]Demonstrating `postprocess` function from `pdd.postprocess`[/bold underline blue]\n")
|
|
865
|
-
|
|
866
|
-
# --- Common Inputs ---
|
|
867
|
-
# This is a sample string that might be output by an LLM, containing text and code.
|
|
868
|
-
llm_output_text_with_code = """
|
|
869
|
-
This is some text from an LLM.
|
|
870
|
-
It includes a Python code block:
|
|
871
|
-
```python
|
|
872
|
-
def greet(name):
|
|
873
|
-
# A simple greeting function
|
|
874
|
-
print(f"Hello, {name}!")
|
|
875
|
-
|
|
876
|
-
greet("Developer")
|
|
877
|
-
```
|
|
878
|
-
And some more text after the code block.
|
|
879
|
-
There might be other language blocks too:
|
|
880
|
-
```javascript
|
|
881
|
-
console.log("This is JavaScript");
|
|
882
|
-
```
|
|
883
|
-
But we are only interested in Python.
|
|
884
|
-
"""
|
|
885
|
-
# The target programming language for extraction.
|
|
886
|
-
target_language = "python"
|
|
887
|
-
|
|
888
|
-
# --- Scenario 1: Simple Extraction (strength = 0) ---
|
|
889
|
-
# This mode uses the `postprocess_0` internal function, which performs a basic
|
|
890
|
-
# extraction of content between triple backticks. It does not use an LLM.
|
|
891
|
-
print("[bold cyan]Scenario 1: Simple Extraction (strength = 0)[/bold cyan]")
|
|
892
|
-
print("Demonstrates extracting code using basic string processing.")
|
|
893
|
-
print(f" Input LLM Output: (see below)")
|
|
894
|
-
# print(f"[dim]{llm_output_text_with_code}[/dim]") # Printing for brevity in console
|
|
895
|
-
print(f" Target Language: '{target_language}' (Note: simple extraction is language-agnostic but extracts first block)")
|
|
896
|
-
print(f" Strength: 0 (activates simple, non-LLM extraction)")
|
|
897
|
-
print(f" Verbose: True (enables detailed console output from `postprocess`)\n")
|
|
898
|
-
|
|
899
|
-
# Call postprocess with strength = 0
|
|
900
|
-
# Input parameters:
|
|
901
|
-
# llm_output (str): The LLM's raw output string.
|
|
902
|
-
# language (str): The programming language to extract (less critical for strength=0).
|
|
903
|
-
# strength (float): 0-1, model strength. 0 means simple extraction.
|
|
904
|
-
# temperature (float): 0-1, LLM temperature (not used for strength=0).
|
|
905
|
-
# time (float): 0-1, LLM thinking effort (not used for strength=0).
|
|
906
|
-
# verbose (bool): If True, prints internal processing steps.
|
|
907
|
-
extracted_code_s0, cost_s0, model_s0 = postprocess(
|
|
908
|
-
llm_output=llm_output_text_with_code,
|
|
909
|
-
language=target_language,
|
|
910
|
-
strength=0,
|
|
911
|
-
verbose=True
|
|
912
|
-
)
|
|
913
|
-
|
|
914
|
-
print("[bold green]Output for Scenario 1:[/bold green]")
|
|
915
|
-
# Output tuple:
|
|
916
|
-
# extracted_code (str): The extracted code.
|
|
917
|
-
# total_cost (float): Cost of the operation (in dollars). Expected to be 0.0 for simple extraction.
|
|
918
|
-
# model_name (str): Identifier for the method/model used. Expected to be 'simple_extraction'.
|
|
919
|
-
print(f" Extracted Code:\n[yellow]{extracted_code_s0}[/yellow]")
|
|
920
|
-
print(f" Total Cost: ${cost_s0:.6f}")
|
|
921
|
-
print(f" Model Name: '{model_s0}'")
|
|
922
|
-
print("-" * 60)
|
|
923
|
-
|
|
924
|
-
# --- Scenario 2: LLM-based Extraction (strength > 0) ---
|
|
925
|
-
# This mode uses an LLM via `llm_invoke` to perform a more sophisticated extraction.
|
|
926
|
-
# It requires a prompt template (`extract_code_LLM.prompt`).
|
|
927
|
-
# For this example, `load_prompt_template` and `llm_invoke` are mocked.
|
|
928
|
-
print("\n[bold cyan]Scenario 2: LLM-based Extraction (strength = 0.9)[/bold cyan]")
|
|
929
|
-
print("Demonstrates extracting code using an LLM (mocked).")
|
|
930
|
-
print(f" Input LLM Output: (same as above)")
|
|
931
|
-
print(f" Target Language: '{target_language}'")
|
|
932
|
-
print(f" Strength: 0.9 (activates LLM-based extraction)")
|
|
933
|
-
print(f" Temperature: 0.0 (LLM creativity, 0-1 scale)")
|
|
934
|
-
print(f" Time: 0.5 (LLM thinking effort, 0-1 scale, influences model choice/cost)")
|
|
935
|
-
print(f" Verbose: True\n")
|
|
936
|
-
|
|
937
|
-
# Mock for `load_prompt_template`:
|
|
938
|
-
# This function is expected to load a prompt template file (e.g., 'extract_code_LLM.prompt').
|
|
939
|
-
# In a real scenario, this file would exist in a 'prompts' directory.
|
|
940
|
-
mock_load_template = MagicMock(return_value="Mocked Prompt: Extract {{language}} code from: {{llm_output}}")
|
|
941
|
-
|
|
942
|
-
# Mock for `llm_invoke`:
|
|
943
|
-
# This function handles the actual LLM API call.
|
|
944
|
-
# It's expected to return a dictionary containing the LLM's result (parsed into
|
|
945
|
-
# an `ExtractedCode` Pydantic model), the cost, and the model name.
|
|
946
|
-
# The `extracted_code` from the LLM mock should include backticks and language identifier
|
|
947
|
-
# to test the cleaning step within the `postprocess` function.
|
|
948
|
-
mock_llm_response_code_from_llm = """```python
|
|
949
|
-
def sophisticated_extraction(data):
|
|
950
|
-
# This code is supposedly extracted by an LLM
|
|
951
|
-
processed_data = data.upper() # Example processing
|
|
952
|
-
return processed_data
|
|
953
|
-
|
|
954
|
-
result = sophisticated_extraction("test data from llm")
|
|
955
|
-
print(result)
|
|
956
|
-
```"""
|
|
957
|
-
mock_extracted_code_pydantic_obj = ExtractedCode(extracted_code=mock_llm_response_code_from_llm)
|
|
958
|
-
mock_llm_invoke_return_value = {
|
|
959
|
-
'result': mock_extracted_code_pydantic_obj,
|
|
960
|
-
'cost': 0.00025, # Example cost in dollars
|
|
961
|
-
'model_name': 'mock-llm-extractor-v1'
|
|
962
|
-
}
|
|
963
|
-
mock_llm_invoke_function = MagicMock(return_value=mock_llm_invoke_return_value)
|
|
964
|
-
|
|
965
|
-
# Patch the internal dependencies within the 'pdd.postprocess' module's namespace.
|
|
966
|
-
# This ensures that when `postprocess` calls `load_prompt_template` or `llm_invoke`,
|
|
967
|
-
# our mocks are used instead of the real implementations.
|
|
968
|
-
with patch('pdd.postprocess.load_prompt_template', mock_load_template):
|
|
969
|
-
with patch('pdd.postprocess.llm_invoke', mock_llm_invoke_function):
|
|
970
|
-
extracted_code_llm, cost_llm, model_llm = postprocess(
|
|
971
|
-
llm_output=llm_output_text_with_code,
|
|
972
|
-
language=target_language,
|
|
973
|
-
strength=0.9,
|
|
974
|
-
temperature=0.0,
|
|
975
|
-
time=0.5,
|
|
976
|
-
verbose=True
|
|
977
|
-
)
|
|
978
|
-
|
|
979
|
-
print("[bold green]Output for Scenario 2:[/bold green]")
|
|
980
|
-
print(f" Extracted Code:\n[yellow]{extracted_code_llm}[/yellow]")
|
|
981
|
-
print(f" Total Cost: ${cost_llm:.6f} (cost is in dollars)")
|
|
982
|
-
print(f" Model Name: '{model_llm}'")
|
|
983
|
-
|
|
984
|
-
# --- Verification of Mock Calls (for developer understanding) ---
|
|
985
|
-
# Check that `load_prompt_template` was called correctly.
|
|
986
|
-
mock_load_template.assert_called_once_with("extract_code_LLM")
|
|
987
|
-
|
|
988
|
-
# Check that `llm_invoke` was called correctly.
|
|
989
|
-
mock_llm_invoke_function.assert_called_once()
|
|
990
|
-
# Inspect the arguments passed to the mocked llm_invoke
|
|
991
|
-
call_args_to_llm_invoke = mock_llm_invoke_function.call_args[1] # kwargs
|
|
992
|
-
assert call_args_to_llm_invoke['prompt'] == mock_load_template.return_value
|
|
993
|
-
assert call_args_to_llm_invoke['input_json'] == {
|
|
994
|
-
"llm_output": llm_output_text_with_code,
|
|
995
|
-
"language": target_language
|
|
996
|
-
}
|
|
997
|
-
assert call_args_to_llm_invoke['strength'] == 0.9
|
|
998
|
-
assert call_args_to_llm_invoke['temperature'] == 0.0
|
|
999
|
-
assert call_args_to_llm_invoke['time'] == 0.5
|
|
1000
|
-
assert call_args_to_llm_invoke['verbose'] is True
|
|
1001
|
-
assert call_args_to_llm_invoke['output_pydantic'] == ExtractedCode
|
|
1002
|
-
print("[dim] (Mocked LLM calls verified successfully)[/dim]")
|
|
1003
|
-
|
|
1004
|
-
print("\n[bold underline blue]Demonstration finished.[/bold underline blue]")
|
|
1005
|
-
print("\n[italic]Important Notes:[/italic]")
|
|
1006
|
-
print(" - For Scenario 2 (LLM-based extraction), `load_prompt_template` and `llm_invoke` were mocked.")
|
|
1007
|
-
print(" In a real-world scenario:")
|
|
1008
|
-
print(" - `load_prompt_template('extract_code_LLM')` would attempt to load a file named ")
|
|
1009
|
-
print(" `extract_code_LLM.prompt` (typically from a 'prompts' directory configured within the `pdd` package).")
|
|
1010
|
-
print(" - `llm_invoke` would make an actual API call to a Large Language Model, which requires")
|
|
1011
|
-
print(" API keys and network access.")
|
|
1012
|
-
print(" - The `time` parameter (0-1) for `postprocess` (and `llm_invoke`) generally controls the")
|
|
1013
|
-
print(" 'thinking effort' or computational resources allocated to the LLM, potentially affecting")
|
|
1014
|
-
print(" which underlying LLM model is chosen and the quality/cost of the result.")
|
|
1015
|
-
print(" - No actual files (like prompt files or output files) are created or read by this example script,")
|
|
1016
|
-
print(" particularly in the './output' directory, due to the use of mocks for file-dependent operations.")
|
|
1017
|
-
|
|
1018
|
-
if __name__ == "__main__":
|
|
1019
|
-
main()
|
|
1020
|
-
</postprocess_example>
|
|
+% Here is an example how to postprocess the model output result:
+<processing.output_postprocessor><include>processing/output_postprocessor.py</include></processing.output_postprocessor>
 </internal_example_modules>
 </example_string_of_includes>
 </example_2>
+
+<example_3>
+<example_input_prompt>
+% You are an expert Python engineer. Build an admin-only HTTP endpoint.
+% The function must be decorated with @require_admin from utils.auth_helpers.
+% Use utils.db_helpers to query Firestore and utils.error_handling for structured errors.
+</example_input_prompt>
+<example_available_includes>
+File: context/auth_helpers_example.py
+Summary: Example usage of authentication/authorization helpers such as require_admin
+File: context/db_helpers_example.py
+Summary: Example usage of Firestore database helper utilities for queries and pagination
+File: context/error_handling_example.py
+Summary: Example usage of structured error handling and custom exceptions
+</example_available_includes>
+<example_string_of_includes>
+% Here are examples of how to use internal modules:
+<internal_example_modules>
+<utils.auth_helpers><include>context/auth_helpers_example.py</include></utils.auth_helpers>
+<utils.db_helpers><include>context/db_helpers_example.py</include></utils.db_helpers>
+<utils.error_handling><include>context/error_handling_example.py</include></utils.error_handling>
+</internal_example_modules>
+</example_string_of_includes>
+</example_3>
 </examples>

 <instructions>
@@ -1028,5 +241,9 @@ if __name__ == "__main__":
 Step 1. Select possible includes from the available_includes based on the input_prompt.
 Step 2. Explain why an include might or might not be necessary for the prompt.
 Step 3. Determine the minimum set of includes required to achieve the goal of the input_prompt.
-Step 4. Generate the string_of_includes based on Step 3
+Step 4. Generate the string_of_includes based on Step 3, following <tag_naming_rules_for_step_4>.
+
+IMPORTANT for Step 4: Transform each file path from available_includes (format: "File: PATH")
+into proper include syntax: <CANONICAL_MODULE><include>PATH</include></CANONICAL_MODULE>
+Do NOT echo the "File:" format - always use <include>...</include> tags.
 </instructions>
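The rewritten instructions close by repeating the transformation requirement: never echo the `File:` input format, always emit wrapped `<include>` tags. A small check in that spirit is sketched below; it is illustrative only and not a pdd API, it simply rejects the output shapes listed in <common_mistakes>:

```python
import re

_WRONG = [re.compile(r"^\[File:"), re.compile(r"^File:"), re.compile(r"\{\{.+\}\}")]

def looks_like_valid_step4_line(line: str) -> bool:
    """Reject the output formats flagged in <common_mistakes> (sketch)."""
    s = line.strip()
    if not s or s.startswith("%"):
        return True  # blank and commentary lines are allowed
    if any(p.search(s) for p in _WRONG):
        return False  # echoed "File:" / "[File:" / "{{...}}" formats
    # Everything else must be a tag line or carry an <include> element.
    return "<include>" in s or s.startswith("<")

# looks_like_valid_step4_line("File: utils/csv_parser.py")  -> False
# looks_like_valid_step4_line("<utils.csv_parser><include>utils/csv_parser.py</include></utils.csv_parser>")  -> True
```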