pdd-cli 0.0.45__py3-none-any.whl → 0.0.90__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registry.
Files changed (114)
  1. pdd/__init__.py +4 -4
  2. pdd/agentic_common.py +863 -0
  3. pdd/agentic_crash.py +534 -0
  4. pdd/agentic_fix.py +1179 -0
  5. pdd/agentic_langtest.py +162 -0
  6. pdd/agentic_update.py +370 -0
  7. pdd/agentic_verify.py +183 -0
  8. pdd/auto_deps_main.py +15 -5
  9. pdd/auto_include.py +63 -5
  10. pdd/bug_main.py +3 -2
  11. pdd/bug_to_unit_test.py +2 -0
  12. pdd/change_main.py +11 -4
  13. pdd/cli.py +22 -1181
  14. pdd/cmd_test_main.py +73 -21
  15. pdd/code_generator.py +58 -18
  16. pdd/code_generator_main.py +672 -25
  17. pdd/commands/__init__.py +42 -0
  18. pdd/commands/analysis.py +248 -0
  19. pdd/commands/fix.py +140 -0
  20. pdd/commands/generate.py +257 -0
  21. pdd/commands/maintenance.py +174 -0
  22. pdd/commands/misc.py +79 -0
  23. pdd/commands/modify.py +230 -0
  24. pdd/commands/report.py +144 -0
  25. pdd/commands/templates.py +215 -0
  26. pdd/commands/utility.py +110 -0
  27. pdd/config_resolution.py +58 -0
  28. pdd/conflicts_main.py +8 -3
  29. pdd/construct_paths.py +258 -82
  30. pdd/context_generator.py +10 -2
  31. pdd/context_generator_main.py +113 -11
  32. pdd/continue_generation.py +47 -7
  33. pdd/core/__init__.py +0 -0
  34. pdd/core/cli.py +503 -0
  35. pdd/core/dump.py +554 -0
  36. pdd/core/errors.py +63 -0
  37. pdd/core/utils.py +90 -0
  38. pdd/crash_main.py +44 -11
  39. pdd/data/language_format.csv +71 -63
  40. pdd/data/llm_model.csv +20 -18
  41. pdd/detect_change_main.py +5 -4
  42. pdd/fix_code_loop.py +330 -76
  43. pdd/fix_error_loop.py +207 -61
  44. pdd/fix_errors_from_unit_tests.py +4 -3
  45. pdd/fix_main.py +75 -18
  46. pdd/fix_verification_errors.py +12 -100
  47. pdd/fix_verification_errors_loop.py +306 -272
  48. pdd/fix_verification_main.py +28 -9
  49. pdd/generate_output_paths.py +93 -10
  50. pdd/generate_test.py +16 -5
  51. pdd/get_jwt_token.py +9 -2
  52. pdd/get_run_command.py +73 -0
  53. pdd/get_test_command.py +68 -0
  54. pdd/git_update.py +70 -19
  55. pdd/incremental_code_generator.py +2 -2
  56. pdd/insert_includes.py +11 -3
  57. pdd/llm_invoke.py +1269 -103
  58. pdd/load_prompt_template.py +36 -10
  59. pdd/pdd_completion.fish +25 -2
  60. pdd/pdd_completion.sh +30 -4
  61. pdd/pdd_completion.zsh +79 -4
  62. pdd/postprocess.py +10 -3
  63. pdd/preprocess.py +228 -15
  64. pdd/preprocess_main.py +8 -5
  65. pdd/prompts/agentic_crash_explore_LLM.prompt +49 -0
  66. pdd/prompts/agentic_fix_explore_LLM.prompt +45 -0
  67. pdd/prompts/agentic_fix_harvest_only_LLM.prompt +48 -0
  68. pdd/prompts/agentic_fix_primary_LLM.prompt +85 -0
  69. pdd/prompts/agentic_update_LLM.prompt +1071 -0
  70. pdd/prompts/agentic_verify_explore_LLM.prompt +45 -0
  71. pdd/prompts/auto_include_LLM.prompt +100 -905
  72. pdd/prompts/detect_change_LLM.prompt +122 -20
  73. pdd/prompts/example_generator_LLM.prompt +22 -1
  74. pdd/prompts/extract_code_LLM.prompt +5 -1
  75. pdd/prompts/extract_program_code_fix_LLM.prompt +7 -1
  76. pdd/prompts/extract_prompt_update_LLM.prompt +7 -8
  77. pdd/prompts/extract_promptline_LLM.prompt +17 -11
  78. pdd/prompts/find_verification_errors_LLM.prompt +6 -0
  79. pdd/prompts/fix_code_module_errors_LLM.prompt +4 -2
  80. pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +8 -0
  81. pdd/prompts/fix_verification_errors_LLM.prompt +22 -0
  82. pdd/prompts/generate_test_LLM.prompt +21 -6
  83. pdd/prompts/increase_tests_LLM.prompt +1 -5
  84. pdd/prompts/insert_includes_LLM.prompt +228 -108
  85. pdd/prompts/trace_LLM.prompt +25 -22
  86. pdd/prompts/unfinished_prompt_LLM.prompt +85 -1
  87. pdd/prompts/update_prompt_LLM.prompt +22 -1
  88. pdd/pytest_output.py +127 -12
  89. pdd/render_mermaid.py +236 -0
  90. pdd/setup_tool.py +648 -0
  91. pdd/simple_math.py +2 -0
  92. pdd/split_main.py +3 -2
  93. pdd/summarize_directory.py +49 -6
  94. pdd/sync_determine_operation.py +543 -98
  95. pdd/sync_main.py +81 -31
  96. pdd/sync_orchestration.py +1334 -751
  97. pdd/sync_tui.py +848 -0
  98. pdd/template_registry.py +264 -0
  99. pdd/templates/architecture/architecture_json.prompt +242 -0
  100. pdd/templates/generic/generate_prompt.prompt +174 -0
  101. pdd/trace.py +168 -12
  102. pdd/trace_main.py +4 -3
  103. pdd/track_cost.py +151 -61
  104. pdd/unfinished_prompt.py +49 -3
  105. pdd/update_main.py +549 -67
  106. pdd/update_model_costs.py +2 -2
  107. pdd/update_prompt.py +19 -4
  108. {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.90.dist-info}/METADATA +19 -6
  109. pdd_cli-0.0.90.dist-info/RECORD +153 -0
  110. {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.90.dist-info}/licenses/LICENSE +1 -1
  111. pdd_cli-0.0.45.dist-info/RECORD +0 -116
  112. {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.90.dist-info}/WHEEL +0 -0
  113. {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.90.dist-info}/entry_points.txt +0 -0
  114. {pdd_cli-0.0.45.dist-info → pdd_cli-0.0.90.dist-info}/top_level.txt +0 -0
@@ -6,8 +6,19 @@
  INPUT:
  <prompt_to_update>% You are an expert Python Software Engineer. Your goal is to write a python function, "postprocess", that will extract code from a string output of an LLM. All output to the console will be pretty printed using the Python rich library.

- % The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name'). All output to the console will be pretty printed using the Python Rich library. Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.
- % The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+ % You are an expert Python engineer.
+
+ % Code Style Requirements
+ - File must start with `from __future__ import annotations`.
+ - All functions must be fully type-hinted.
+ - Use `rich.console.Console` for all printing.
+
+ % Package Structure
+ - The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name').
+ - The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+
+ % Error Handling
+ - Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.

  % Here are the inputs and outputs of the function:
  Inputs:
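
The rewritten header above imposes three concrete style rules. A minimal module skeleton satisfying them might look like the following sketch (the postprocess body is a placeholder; the real extraction logic lives in pdd/postprocess.py):

    from __future__ import annotations

    from rich.console import Console

    console = Console()

    def postprocess(llm_output: str) -> str:
        """Illustrative stub of the contract the prompt asks for."""
        if not llm_output:
            # Clear error message via rich, as the style rules require
            console.print("[red]Error:[/red] empty LLM output")
            return ""
        return llm_output  # real code extraction elided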
@@ -53,53 +64,105 @@ if __name__ == "__main__":
  For running prompts with llm_invoke:
  <llm_invoke_example>
  from pydantic import BaseModel, Field
- from pdd.llm_invoke import llm_invoke
- from collections import defaultdict
+ from pdd.llm_invoke import llm_invoke, _load_model_data, _select_model_candidates, LLM_MODEL_CSV_PATH, DEFAULT_BASE_MODEL
+ from typing import List, Dict, Any

  # Define a Pydantic model for structured output
  class Joke(BaseModel):
      setup: str = Field(description="The setup of the joke")
      punchline: str = Field(description="The punchline of the joke")

- def main():
+
+ def calculate_model_ranges(step: float = 0.001) -> List[Dict[str, Any]]:
      """
-     Main function to demonstrate the usage of `llm_invoke`.
+     Calculate the strength ranges for each model by sampling strength values.
+
+     Args:
+         step: The step size for sampling strength values (default 0.001)
+
+     Returns:
+         List of dicts with 'model', 'start', 'end', and 'midpoint' keys
      """
-     # Dictionary to track strength ranges for each model
-     model_ranges = defaultdict(list)
+     model_df = _load_model_data(LLM_MODEL_CSV_PATH)
+
+     ranges = []
      current_model = None
      range_start = 0.0
-
+
+     # Sample strength values to find model boundaries
+     strength = 0.0
+     while strength <= 1.0:
+         candidates = _select_model_candidates(strength, DEFAULT_BASE_MODEL, model_df)
+         selected_model = candidates[0]['model'] if candidates else None
+
+         if current_model != selected_model:
+             if current_model is not None:
+                 ranges.append({
+                     'model': current_model,
+                     'start': range_start,
+                     'end': round(strength - step, 3),
+                     'midpoint': round((range_start + strength - step) / 2, 3)
+                 })
+             current_model = selected_model
+             range_start = strength
+
+         strength = round(strength + step, 3)
+
+     # Add the final range
+     if current_model is not None:
+         ranges.append({
+             'model': current_model,
+             'start': range_start,
+             'end': 1.0,
+             'midpoint': round((range_start + 1.0) / 2, 3)
+         })
+
+     return ranges
+
+
+ def main():
+     """
+     Main function to demonstrate the usage of `llm_invoke`.
+
+     Automatically calculates model ranges and runs each model once
+     at its midpoint strength value.
+     """
+     # Calculate model ranges automatically
+     print("Calculating model strength ranges...")
+     model_ranges = calculate_model_ranges()
+
+     # Print the calculated ranges
+     print("\n=== Model Strength Ranges ===")
+     for range_info in model_ranges:
+         print(f"{range_info['model']}: {range_info['start']:.3f} to {range_info['end']:.3f} (midpoint: {range_info['midpoint']:.3f})")
+
      prompt = "Tell me a joke about {topic}"
      input_json = {"topic": "programmers"}
      temperature = 1
      verbose = False
-
-     strength = 0.5
-     while strength <= 0.5:
-         print(f"\nStrength: {strength}")
-
+
+     # Run each model once at its midpoint strength
+     print("\n=== Running Each Model Once ===")
+     for range_info in model_ranges:
+         model_name = range_info['model']
+         midpoint = range_info['midpoint']
+
+         print(f"\n--- Model: {model_name} (strength: {midpoint}) ---")
+
          # Example 1: Unstructured Output
-         print("\n--- Unstructured Output ---")
+         print("\n Unstructured Output:")
          response = llm_invoke(
              prompt=prompt,
              input_json=input_json,
-             strength=strength,
+             strength=midpoint,
              temperature=temperature,
              verbose=verbose
          )
-
-         # Track model changes for strength ranges
-         if current_model != response['model_name']:
-             if current_model is not None:
-                 model_ranges[current_model].append((range_start, strength - 0.005))
-             current_model = response['model_name']
-             range_start = strength
-
-         print(f"Result: {response['result']}")
-         print(f"Cost: ${response['cost']:.6f}")
-         print(f"Model Used: {response['model_name']}")
-
+
+         print(f" Result: {response['result']}")
+         print(f" Cost: ${response['cost']:.6f}")
+         print(f" Model Used: {response['model_name']}")
+
          # Example 2: Structured Output with Pydantic Model
          prompt_structured = (
              "Generate a joke about {topic}. \n"
@@ -112,41 +175,27 @@ def main():
          )
          input_json_structured = {"topic": "data scientists"}
          output_pydantic = Joke
-
-         print("\n--- Structured Output ---")
+
+         print("\n Structured Output:")
          try:
              response_structured = llm_invoke(
                  prompt=prompt_structured,
                  input_json=input_json_structured,
-                 strength=strength,
+                 strength=midpoint,
                  temperature=temperature,
-                 verbose=True,
+                 verbose=verbose,
                  output_pydantic=output_pydantic
              )
-             print(f"Result: {response_structured['result']}")
-             print(f"Cost: ${response_structured['cost']:.6f}")
-             print(f"Model Used: {response_structured['model_name']}")
+             print(f" Result: {response_structured['result']}")
+             print(f" Cost: ${response_structured['cost']:.6f}")
+             print(f" Model Used: {response_structured['model_name']}")

              # Access structured data
              joke: Joke = response_structured['result']
-             print(f"\nJoke Setup: {joke.setup}")
-             print(f"Joke Punchline: {joke.punchline}")
+             print(f"\n Joke Setup: {joke.setup}")
+             print(f" Joke Punchline: {joke.punchline}")
          except Exception as e:
-             print(f"Error encountered during structured output: {e}")
-
-         strength += 0.005
-         # round to 3 decimal places
-         strength = round(strength, 3)
-
-     # Add the final range for the last model
-     model_ranges[current_model].append((range_start, 1.0))
-
-     # Print out the strength ranges for each model
-     print("\n=== Model Strength Ranges ===")
-     for model, ranges in model_ranges.items():
-         print(f"\n{model}:")
-         for start, end in ranges:
-             print(f" Strength {start:.3f} to {end:.3f}")
+             print(f" Error encountered during structured output: {e}")

  if __name__ == "__main__":
      main()
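
Stripped of pdd internals, the range discovery in the rewritten example is a generic pattern: sweep a parameter over [0, 1], record where a selection function changes, and take midpoints. A minimal self-contained sketch, with a toy selector standing in for _select_model_candidates:

    def boundary_ranges(select, step: float = 0.001):
        """Sweep x over [0, 1]; report (choice, start, end, midpoint) spans where select(x) is constant."""
        spans, current, start = [], None, 0.0
        x = 0.0
        while x <= 1.0:
            choice = select(x)
            if choice != current:
                if current is not None:
                    spans.append((current, start, round(x - step, 3)))
                current, start = choice, x
            x = round(x + step, 3)
        spans.append((current, start, 1.0))  # close the final span
        return [(c, s, e, round((s + e) / 2, 3)) for c, s, e in spans]

    # Toy selector standing in for pdd's CSV-driven model selection:
    print(boundary_ranges(lambda x: "small-model" if x < 0.3 else "large-model"))
    # [('small-model', 0.0, 0.299, 0.15), ('large-model', 0.3, 1.0, 0.65)]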
@@ -157,8 +206,19 @@ if __name__ == "__main__":
  OUTPUT:
  <updated_prompt>% You are an expert Python Software Engineer. Your goal is to write a python function, "postprocess", that will extract code from a string output of an LLM. All output to the console will be pretty printed using the Python rich library.

- % The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name'). All output to the console will be pretty printed using the Python Rich library. Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.
- % The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+ % You are an expert Python engineer.
+
+ % Code Style Requirements
+ - File must start with `from __future__ import annotations`.
+ - All functions must be fully type-hinted.
+ - Use `rich.console.Console` for all printing.
+
+ % Package Structure
+ - The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name').
+ - The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+
+ % Error Handling
+ - Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.

  % Here are the inputs and outputs of the function:
  Inputs:
@@ -193,53 +253,105 @@ if __name__ == "__main__":
  For running prompts with llm_invoke:
  <llm_invoke_example>
  from pydantic import BaseModel, Field
- from pdd.llm_invoke import llm_invoke
- from collections import defaultdict
+ from pdd.llm_invoke import llm_invoke, _load_model_data, _select_model_candidates, LLM_MODEL_CSV_PATH, DEFAULT_BASE_MODEL
+ from typing import List, Dict, Any

  # Define a Pydantic model for structured output
  class Joke(BaseModel):
      setup: str = Field(description="The setup of the joke")
      punchline: str = Field(description="The punchline of the joke")

- def main():
+
+ def calculate_model_ranges(step: float = 0.001) -> List[Dict[str, Any]]:
      """
-     Main function to demonstrate the usage of `llm_invoke`.
+     Calculate the strength ranges for each model by sampling strength values.
+
+     Args:
+         step: The step size for sampling strength values (default 0.001)
+
+     Returns:
+         List of dicts with 'model', 'start', 'end', and 'midpoint' keys
      """
-     # Dictionary to track strength ranges for each model
-     model_ranges = defaultdict(list)
+     model_df = _load_model_data(LLM_MODEL_CSV_PATH)
+
+     ranges = []
      current_model = None
      range_start = 0.0
-
+
+     # Sample strength values to find model boundaries
+     strength = 0.0
+     while strength <= 1.0:
+         candidates = _select_model_candidates(strength, DEFAULT_BASE_MODEL, model_df)
+         selected_model = candidates[0]['model'] if candidates else None
+
+         if current_model != selected_model:
+             if current_model is not None:
+                 ranges.append({
+                     'model': current_model,
+                     'start': range_start,
+                     'end': round(strength - step, 3),
+                     'midpoint': round((range_start + strength - step) / 2, 3)
+                 })
+             current_model = selected_model
+             range_start = strength
+
+         strength = round(strength + step, 3)
+
+     # Add the final range
+     if current_model is not None:
+         ranges.append({
+             'model': current_model,
+             'start': range_start,
+             'end': 1.0,
+             'midpoint': round((range_start + 1.0) / 2, 3)
+         })
+
+     return ranges
+
+
+ def main():
+     """
+     Main function to demonstrate the usage of `llm_invoke`.
+
+     Automatically calculates model ranges and runs each model once
+     at its midpoint strength value.
+     """
+     # Calculate model ranges automatically
+     print("Calculating model strength ranges...")
+     model_ranges = calculate_model_ranges()
+
+     # Print the calculated ranges
+     print("\n=== Model Strength Ranges ===")
+     for range_info in model_ranges:
+         print(f"{range_info['model']}: {range_info['start']:.3f} to {range_info['end']:.3f} (midpoint: {range_info['midpoint']:.3f})")
+
      prompt = "Tell me a joke about {topic}"
      input_json = {"topic": "programmers"}
      temperature = 1
      verbose = False
-
-     strength = 0.5
-     while strength <= 0.5:
-         print(f"\nStrength: {strength}")
-
+
+     # Run each model once at its midpoint strength
+     print("\n=== Running Each Model Once ===")
+     for range_info in model_ranges:
+         model_name = range_info['model']
+         midpoint = range_info['midpoint']
+
+         print(f"\n--- Model: {model_name} (strength: {midpoint}) ---")
+
          # Example 1: Unstructured Output
-         print("\n--- Unstructured Output ---")
+         print("\n Unstructured Output:")
          response = llm_invoke(
              prompt=prompt,
              input_json=input_json,
-             strength=strength,
+             strength=midpoint,
              temperature=temperature,
              verbose=verbose
          )
-
-         # Track model changes for strength ranges
-         if current_model != response['model_name']:
-             if current_model is not None:
-                 model_ranges[current_model].append((range_start, strength - 0.005))
-             current_model = response['model_name']
-             range_start = strength
-
-         print(f"Result: {response['result']}")
-         print(f"Cost: ${response['cost']:.6f}")
-         print(f"Model Used: {response['model_name']}")
-
+
+         print(f" Result: {response['result']}")
+         print(f" Cost: ${response['cost']:.6f}")
+         print(f" Model Used: {response['model_name']}")
+
          # Example 2: Structured Output with Pydantic Model
          prompt_structured = (
              "Generate a joke about {topic}. \n"
@@ -252,41 +364,27 @@ def main():
          )
          input_json_structured = {"topic": "data scientists"}
          output_pydantic = Joke
-
-         print("\n--- Structured Output ---")
+
+         print("\n Structured Output:")
          try:
              response_structured = llm_invoke(
                  prompt=prompt_structured,
                  input_json=input_json_structured,
-                 strength=strength,
+                 strength=midpoint,
                  temperature=temperature,
-                 verbose=True,
+                 verbose=verbose,
                  output_pydantic=output_pydantic
              )
-             print(f"Result: {response_structured['result']}")
-             print(f"Cost: ${response_structured['cost']:.6f}")
-             print(f"Model Used: {response_structured['model_name']}")
+             print(f" Result: {response_structured['result']}")
+             print(f" Cost: ${response_structured['cost']:.6f}")
+             print(f" Model Used: {response_structured['model_name']}")

              # Access structured data
              joke: Joke = response_structured['result']
-             print(f"\nJoke Setup: {joke.setup}")
-             print(f"Joke Punchline: {joke.punchline}")
+             print(f"\n Joke Setup: {joke.setup}")
+             print(f" Joke Punchline: {joke.punchline}")
          except Exception as e:
-             print(f"Error encountered during structured output: {e}")
-
-         strength += 0.005
-         # round to 3 decimal places
-         strength = round(strength, 3)
-
-     # Add the final range for the last model
-     model_ranges[current_model].append((range_start, 1.0))
-
-     # Print out the strength ranges for each model
-     print("\n=== Model Strength Ranges ===")
-     for model, ranges in model_ranges.items():
-         print(f"\n{model}:")
-         for start, end in ranges:
-             print(f" Strength {start:.3f} to {end:.3f}")
+             print(f" Error encountered during structured output: {e}")

  if __name__ == "__main__":
      main()
@@ -310,8 +408,19 @@ if __name__ == "__main__":
  INPUT:
  <prompt_to_update>% You are an expert Python engineer. Your goal is to write a Python function, "conflicts_in_prompts", that takes two prompts as input and finds conflicts between them and suggests how to resolve those conflicts.

- % The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name'). All output to the console will be pretty printed using the Python Rich library. Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.
- % The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+ % You are an expert Python engineer.
+
+ % Code Style Requirements
+ - File must start with `from __future__ import annotations`.
+ - All functions must be fully type-hinted.
+ - Use `rich.console.Console` for all printing.
+
+ % Package Structure
+ - The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name').
+ - The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+
+ % Error Handling
+ - Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.

  % Here are the inputs and outputs of the function:
  Inputs:
@@ -693,8 +802,19 @@ if __name__ == "__main__":
  OUTPUT:
  <updated_prompt>% You are an expert Python engineer. Your goal is to write a Python function, "conflicts_in_prompts", that takes two prompts as input and finds conflicts between them and suggests how to resolve those conflicts.

- % The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name'). All output to the console will be pretty printed using the Python Rich library. Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.
- % The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+ % You are an expert Python engineer.
+
+ % Code Style Requirements
+ - File must start with `from __future__ import annotations`.
+ - All functions must be fully type-hinted.
+ - Use `rich.console.Console` for all printing.
+
+ % Package Structure
+ - The function should be part of a Python package, using relative imports (single dot) for internal modules (e.g. 'from .module_name import module_name').
+ - The ./pdd/__init__.py file will have the EXTRACTION_STRENGTH, DEFAULT_STRENGTH, DEFAULT_TIME and other global constants. Example: ```from . import DEFAULT_STRENGTH```
+
+ % Error Handling
+ - Ensure the function handles edge cases, such as missing inputs or model errors, and provide clear error messages.

  % Here are the inputs and outputs of the function:
  Inputs:
@@ -1,30 +1,33 @@
- % Imagine you're a an expert Python Software Engineer. Your goal is to find the part of the .prompt file. It will take in three arguments, the text of the .prompt file, the text of the code file, and the line that the debugger is on in the code file. Your task is to find the equivalent line in the .prompt file that matches with the line in the code file.
-
- % Here are the inputs and outputs of the prompt:
- Input:
- `code_file` (str) - A string that contains the text of the code file.
- `code_str` (str) - A substring of code_file that represents the line that the debugger is on in the code_file.
- `prompt_file` (str) - A string that contains the text of the .prompt file.
- Output:
- `prompt_line` (str) - An string that represents the equivalent line in the .prompt file that matches with the code_str line in the code file.
-
- % Here is the code_file to reference:
-
+ % You are a highly accurate Python Software Engineer. Your job is to locate the exact line (or smallest excerpt) in the prompt file that produced the current line in the generated code.
+
+ % Inputs
+ code_file (str) : full contents of the generated code file
+ code_str (str) : the single line from the code file currently under inspection
+ prompt_file (str) : full contents of the originating prompt file
+
+ % Rules
+ 1. Identify the minimal substring in prompt_file whose wording most directly corresponds to code_str. Copy it VERBATIM.
+ 2. Do not paraphrase, summarize, or reformat; the substring must appear exactly in prompt_file.
+ 3. If multiple lines apply, choose the most specific line or snippet (prefer the shortest exact match).
+ 4. Provide a short explanation of why the substring matches code_str.
+
+ % Output format (MUST follow exactly; no additional text)
+ <analysis>
+ Explain your reasoning here in plain text (no JSON). Reference the file sections you compared.
+ </analysis>
+ <verbatim_prompt_line>
+ <<PASTE THE EXACT SUBSTRING FROM prompt_file HERE>>
+ </verbatim_prompt_line>
+
+ % Reference materials
  <code_file>
- {CODE_FILE}
+ {CODE_FILE}
  </code_file>

- % Here is the code_str to reference:
-
  <code_str>
- {CODE_STR}
+ {CODE_STR}
  </code_str>

- % Here is the prompt_file to reference:
-
  <prompt_file>
- {PROMPT_FILE}
+ {PROMPT_FILE}
  </prompt_file>
-
- % To generate the prompt_line, find a substring of prompt_file that matches code_str, which is a substring of code_file.
-
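
The updated trace prompt above fixes a tagged output format, so a caller can extract the verbatim line mechanically. A sketch of such a parser (parse_trace_output is a hypothetical helper, not part of pdd's public API):

    import re

    def parse_trace_output(text: str) -> str:
        # Pull the verbatim prompt line out of the model's tagged response.
        match = re.search(
            r"<verbatim_prompt_line>\s*(.*?)\s*</verbatim_prompt_line>",
            text,
            re.DOTALL,
        )
        if match is None:
            raise ValueError("response is missing the <verbatim_prompt_line> block")
        return match.group(1)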
@@ -1,18 +1,102 @@
  % You are tasked with determining whether a given prompt has finished outputting everything or if it still needs to continue. This is crucial for ensuring that all necessary information has been provided before proceeding with further actions. You will often be provided the last few hundred characters of the prompt_text to analyze and determine if it appears to be complete or if it seems to be cut off or unfinished. You are just looking at the prompt_text and not the entire prompt file. The beginning part of the prompt_text is not always provided, so you will need to make a judgment based on the text you are given.

+ % IMPORTANT:
+ % - The prompt_text may contain code in various languages without Markdown fences.
+ % - Do NOT require triple backticks for completeness; judge the code/text itself.
+ % - Prefer concrete syntactic signals of completeness over stylistic ones.
+
  % Here is the prompt text to analyze:
  <prompt_text>
  {PROMPT_TEXT}
  </prompt_text>

+ % Optional language hint (may be empty or missing). If not provided, infer the language from the text:
+ <language>
+ {LANGUAGE}
+ </language>
+
  % Carefully examine the provided prompt text and determine if it appears to be complete or if it seems to be cut off or unfinished. Consider the following factors:
  1. Sentence structure: Are all sentences grammatically complete?
  2. Content flow: Does the text end abruptly or does it have a natural conclusion?
  3. Context: Based on the content, does it seem like all necessary information has been provided?
  4. Formatting: Are there any unclosed parentheses, quotation marks, or other formatting issues that suggest incompleteness?

+ % Multi-language code completeness heuristics (apply when text looks like code):
+ - If the text forms a syntactically complete module/snippet for the language, treat it as finished (even without Markdown fences).
+ - Generic signals across languages:
+   * Balanced delimiters: (), [], {{}}, quotes, and block comments are closed.
+   * No mid-token/mid-statement tail: it does not end on `return a +`, `a =`, `def foo(`, `function f(`, trailing `.`, `->`, `::`, trailing `,`, or a line-continuation like `\\`.
+   * Block closure: constructs that open a block are closed (e.g., Python indentation after `:`, or matching `{{}}` in C/Java/JS/TS/Go).
+ - Language specifics (use LANGUAGE if given; otherwise infer from the text):
+   * Python: colon-introduced blocks closed; indentation consistent; triple-quoted strings balanced.
+   * JS/TS: braces and parentheses balanced; no dangling `export`/`import` without a following specifier; `/* ... */` comments closed.
+   * Java/C/C++/C#: braces and parentheses balanced; string/char literals closed; block comments closed.
+   * Go: braces balanced; no dangling keyword indicating an unfinished clause.
+   * HTML/XML: tags properly nested/closed; attributes properly quoted; no unfinished `<tag` or dangling `</`.
+ - If this is only the tail of a longer file, mark finished when the tail itself is syntactically complete and does not indicate a dangling continuation.
+
  % Provide your reasoning for why you believe the prompt is complete or incomplete.

  % Output a JSON object with two keys:
  1. "reasoning": A string containing your structured reasoning
- 2. "is_finished": A boolean value (true if the prompt is complete, false if it's incomplete)
+ 2. "is_finished": A boolean value (true if the prompt is complete, false if it's incomplete)
+
+ % Examples (concise):
+ <examples>
+ <example1>
+ <input>
+ <prompt_text>
+ def add(a, b):\n return a + b\n
+ </prompt_text>
+ </input>
+ <output>
+ {{"reasoning": "Python code parses; blocks and quotes are closed; ends on a complete return statement.", "is_finished": true}}
+ </output>
+ </example1>
+ <example2>
+ <input>
+ <prompt_text>
+ def add(a, b):\n return a +
+ </prompt_text>
+ </input>
+ <output>
+ {{"reasoning": "Ends mid-expression (`return a +`), indicates unfinished statement.", "is_finished": false}}
+ </output>
+ </example2>
+ <example3>
+ <input>
+ <prompt_text>
+ function add(a, b) {{\n return a + b;\n}}\n
+ </prompt_text>
+ <language>
+ JavaScript
+ </language>
+ </input>
+ <output>
+ {{"reasoning": "JS braces and parentheses balanced; ends at a statement boundary; no dangling tokens.", "is_finished": true}}
+ </output>
+ </example3>
+ <example4>
+ <input>
+ <prompt_text>
+ <div class=\"box\">Hello
+ </prompt_text>
+ <language>
+ HTML
+ </language>
+ </input>
+ <output>
+ {{"reasoning": "HTML tag not closed (missing </div>); attribute quotes OK but element is unclosed.", "is_finished": false}}
+ </output>
+ </example4>
+ <example5>
+ <input>
+ <prompt_text>
+ class C:\n def f(self):\n x = 1\n
+ </prompt_text>
+ </input>
+ <output>
+ {{"reasoning": "All blocks properly indented and closed in the visible tail; no dangling colon blocks or open delimiters; tail is syntactically complete.", "is_finished": true}}
+ </output>
+ </example5>
+ </examples>
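
The "balanced delimiters" signal in the heuristics above is cheap to approximate in code. A minimal sketch that checks brackets only (the prompt's full signal set also covers quotes and block comments):

    def brackets_balanced(tail: str) -> bool:
        # Every closer must match the most recent opener; leftovers mean incomplete.
        pairs = {')': '(', ']': '[', '}': '{'}
        stack = []
        for ch in tail:
            if ch in '([{':
                stack.append(ch)
            elif ch in pairs:
                if not stack or stack.pop() != pairs[ch]:
                    return False
        return not stack

    print(brackets_balanced("def add(a, b):\n    return a + b\n"))  # True
    print(brackets_balanced("ranges.append({'model': m,"))          # False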
@@ -16,4 +16,25 @@
  1. Using the provided input_code and input_prompt, identify what the code does and how it was generated.
  2. Compare the input_code and modified_code to determine the changes made by the user.
  3. Identify what the modified_code does differently from the input_code.
- 4. Generate a modified_prompt that will guide the generation of the modified_code based on the identified changes.
+ 4. Generate a modified_prompt that will guide the generation of the modified_code based on the identified changes.
+ 5. Ensure that the modified_prompt adheres to the principles of Prompt-Driven Development (PDD) and includes all necessary sections: Role and Scope, Requirements, Dependencies & Context, Instructions, and Deliverables.
+ 6. Try to preserve the structure and format of the existing prompt as much as possible while incorporating the necessary changes to reflect the modifications in the code.
+
+ % When generating the modified prompt, you must follow the core principles of Prompt-Driven Development (PDD).
+ % Here are the essential guidelines for structuring a PDD prompt:
+ <pdd_prompting_guide>
+ % The prompt you generate must follow this structure:
+ 1) First paragraph: describe the role and responsibility of the module/component within the system (consider the LAYER if provided).
+ 2) A "Requirements" section with numbered points covering functionality, contracts, error handling, validation, logging, performance, and security.
+ 3) A "Dependencies" section using XML include tags for each dependency (see format below).
+ 4) An "Instructions" section with precise implementation guidance (clarify inputs/outputs, function/class responsibilities, edge cases, and testing notes).
+ 5) A clear "Deliverable" section describing the expected code artifacts and entry points.
+
+ % Dependencies format and conventions:
+ - Represent each dependency using an XML tag with the dependency name, and put the file path inside an &lt;include&gt; tag. For example:
+ &lt;orders_service&gt;
+ &lt;include&gt;context/orders_service_example.py&lt;/include&gt;
+ &lt;/orders_service&gt;
+ - Prefer real example files available in the provided context (use &lt;include-many&gt; when listing multiple). If examples are not provided, assume dependency examples live under context/ using the pattern context/[dependency_name]_example. You should always try to include example files when possible.
+ - Include all necessary dependencies for the module/component (based on the provided context and references).
+ </pdd_prompting_guide>
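
Put together, the guide's five sections plus the dependency convention imply a generated prompt shaped roughly like the skeleton below. The module name and paths are hypothetical; the include tags appear literally in a generated prompt, while the guide above shows them HTML-escaped, presumably so pdd's own preprocessor does not expand them in place:

    % This module is the orders service of the system: it validates incoming orders and persists them.

    % Requirements
    1. Validate order payloads and reject malformed ones with clear error messages.
    2. ...

    % Dependencies
    <orders_service>
    <include>context/orders_service_example.py</include>
    </orders_service>

    % Instructions
    - Expose a single entry point, e.g. process_order(order: dict), and document its edge cases.

    % Deliverable
    A single module implementing the service described above.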