pdd-cli 0.0.40__py3-none-any.whl → 0.0.41__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pdd-cli might be problematic. Click here for more details.

@@ -0,0 +1,574 @@
1
+ # pdd/sync_determine_operation.py
2
+
3
+ import os
4
+ import sys
5
+ import json
6
+ import hashlib
7
+ import subprocess
8
+ import threading
9
+ from dataclasses import dataclass, asdict, field
10
+ from datetime import datetime, timezone
11
+ from pathlib import Path
12
+ from typing import Optional, Dict, Any, List
13
+
14
+ # --- Dependencies ---
15
+ # This implementation requires the 'psutil' library for robust PID checking.
16
+ # It can be installed with: pip install psutil
17
# psutil is required for the cross-platform "is this PID still alive?" check
# used to detect stale lock files left behind by crashed processes.
try:
    import psutil
except ImportError:
    print("Error: 'psutil' library not found. Please install it using 'pip install psutil'", file=sys.stderr)
    sys.exit(1)

# File-locking primitives differ by OS: msvcrt on Windows, fcntl elsewhere.
if sys.platform == 'win32':
    import msvcrt
else:
    import fcntl
28
+
29
# --- Constants for Directory Structure ---
PDD_DIR = Path(".pdd")            # root of all PDD bookkeeping
META_DIR = PDD_DIR / "meta"       # fingerprint and run-report JSON files
LOCKS_DIR = PDD_DIR / "locks"     # per-unit lock files used by SyncLock

# Roots under which the four artifact kinds of a PDD unit live.
PROMPTS_ROOT_DIR = Path("prompts")
CODE_ROOT_DIR = Path("src")
EXAMPLES_ROOT_DIR = Path("examples")
TESTS_ROOT_DIR = Path("tests")
40
+ # --- Data Structures ---
41
+
42
@dataclass
class Fingerprint:
    """Represents the last known good state of a PDD unit.

    One SHA-256 hash per artifact type; a hash of None means the artifact
    did not exist when the fingerprint was written.
    """
    pdd_version: str   # pdd version that recorded this state
    timestamp: str # ISO 8601 format
    command: str       # pdd command that produced this state
    prompt_hash: Optional[str] = None
    code_hash: Optional[str] = None
    example_hash: Optional[str] = None
    test_hash: Optional[str] = None
52
+
53
@dataclass
class RunReport:
    """Represents the results of the last test or execution run."""
    timestamp: str     # ISO 8601 time of the run
    exit_code: int     # process exit status; non-zero is treated as a crash
    tests_passed: int
    tests_failed: int
    coverage: float    # measured coverage percentage (compared to target_coverage)
61
+
62
@dataclass
class LLMConflictResolutionOutput:
    """Represents the structured output from the LLM for conflict resolution."""
    next_operation: str  # one of: generate, update, fix, merge_manually
    reason: str          # LLM's justification for the recommendation
    confidence: float    # 0.0-1.0; values below 0.75 are rejected by the caller
68
+
69
@dataclass
class SyncDecision:
    """Represents the recommended operation to run next."""
    operation: str  # e.g. 'generate', 'fix', 'test', 'nothing', 'analyze_conflict'
    reason: str     # human-readable explanation of the decision
    details: Dict[str, Any] = field(default_factory=dict)  # supporting data (run report, changed files, ...)
75
+
76
+ # --- Mock Internal PDD Modules ---
77
+ # These are placeholders for the internal pdd library functions.
78
+
79
def load_prompt_template(prompt_name: str) -> Optional[str]:
    """
    (MOCK) Loads a prompt template from the pdd library.
    In a real scenario, this would load from a package resource.

    Args:
        prompt_name: Name of the template, e.g. "sync_analysis_LLM.prompt".

    Returns:
        The raw template text (with str.format placeholders such as
        {fingerprint} and {prompt_diff}), or None for an unknown name.
    """
    # NOTE: the template body is runtime data consumed by str.format(); the
    # doubled braces {{ }} escape the literal JSON example.
    templates = {
        "sync_analysis_LLM.prompt": """
You are an expert software development assistant. Your task is to resolve a synchronization conflict in a PDD unit.
Both the user and the PDD tool have made changes, and you must decide the best course of action.

Analyze the following information:

**Last Known Good State (Fingerprint):**
```json
{fingerprint}
```

**Files Changed Since Last Sync:**
- {changed_files_list}

**Diffs:**

--- PROMPT DIFF ---
{prompt_diff}
--- END PROMPT DIFF ---

--- CODE DIFF ---
{code_diff}
--- END CODE DIFF ---

--- TEST DIFF ---
{test_diff}
--- END TEST DIFF ---

--- EXAMPLE DIFF ---
{example_diff}
--- END EXAMPLE DIFF ---

Based on the diffs, determine the user's intent and the nature of the conflict.
Respond with a JSON object recommending the next operation. The possible operations are:
- "generate": The prompt changes are significant; regenerate the code.
- "update": The code changes are valuable; update the prompt to reflect them.
- "fix": The test changes seem to be fixing a bug; try to fix the code.
- "merge_manually": The conflict is too complex. Ask the user to merge changes.

Your JSON response must have the following format:
{{
"next_operation": "your_recommendation",
"reason": "A clear, concise explanation of why you chose this operation.",
"confidence": 0.9
}}
"""
    }
    return templates.get(prompt_name)
133
+
134
def llm_invoke(prompt: str, **kwargs) -> Dict[str, Any]:
    """
    (MOCK) Stand-in for the real LLM client.

    Prints a truncated preview of the outgoing prompt and returns a canned,
    deterministic low-confidence recommendation so the failure path of the
    conflict-resolution flow can be exercised without any network access.
    """
    print("--- (MOCK) LLM Invocation ---")
    print(f"Prompt sent to LLM:\n{prompt[:500]}...")
    # Confidence is deliberately below the 0.75 acceptance threshold used by
    # analyze_conflict_with_llm, so callers hit the manual-merge branch.
    canned_result = LLMConflictResolutionOutput(
        next_operation="update",
        reason="Mock LLM analysis determined that the manual code changes are significant but confidence is low.",
        confidence=0.70,
    )
    return {"result": canned_result, "cost": 0.001, "model_name": "mock-gpt-4"}
153
+
154
+
155
+ # --- Directory and Locking Mechanism ---
156
+
157
def _ensure_pdd_dirs_exist():
    """Create the .pdd metadata and lock directories if they are missing."""
    for directory in (META_DIR, LOCKS_DIR):
        directory.mkdir(parents=True, exist_ok=True)
161
+
162
# Per-thread re-entrancy counters for SyncLock, keyed by lock-file path.
_lock_registry = threading.local()
163
+
164
class SyncLock:
    """
    A robust, re-entrant, PID-aware file lock for synchronizing operations.
    Ensures only one process can operate on a PDD unit at a time.

    Re-entrancy is tracked per thread via the module-level ``_lock_registry``
    counter keyed by the lock-file path: only the first acquisition in a
    thread takes the OS-level file lock, and only the last release drops it.
    The lock file stores the holder's PID so other processes can detect and
    reclaim locks left behind by crashed processes.
    """
    def __init__(self, basename: str, language: str):
        _ensure_pdd_dirs_exist() # Ensure directories exist before creating lock file
        self.lock_dir = LOCKS_DIR
        # One lock file per PDD unit: .pdd/locks/<basename>_<language>.lock
        self.lock_path = self.lock_dir / f"{basename}_{language}.lock"
        self._lock_fd = None
        self._is_reentrant_acquisition = False
        self.lock_key = str(self.lock_path)
        # The file descriptor is only stored on the instance that actually acquires the lock
        self._is_lock_owner = False

    @property
    def lock_file_path(self):
        # Read-only view of the lock file location.
        return self.lock_path

    def _get_lock_count(self) -> int:
        # Current re-entrancy depth for this lock path in this thread.
        if not hasattr(_lock_registry, 'counts'):
            _lock_registry.counts = {}
        return _lock_registry.counts.get(self.lock_key, 0)

    def _increment_lock_count(self):
        # Bump this thread's re-entrancy depth for the lock path.
        if not hasattr(_lock_registry, 'counts'):
            _lock_registry.counts = {}
        count = _lock_registry.counts.get(self.lock_key, 0)
        _lock_registry.counts[self.lock_key] = count + 1

    def _decrement_lock_count(self) -> int:
        # Lower the depth (never below zero) and return the new value.
        if not hasattr(_lock_registry, 'counts'):
            _lock_registry.counts = {}
        count = _lock_registry.counts.get(self.lock_key, 0)
        if count > 0:
            _lock_registry.counts[self.lock_key] = count - 1
        return _lock_registry.counts.get(self.lock_key, 0)

    def acquire(self):
        """
        Acquires an exclusive lock, handling stale locks from crashed processes.
        Raises TimeoutError if the lock is held by another active process.
        """
        lock_count = self._get_lock_count()
        if lock_count > 0: # Re-entrancy
            self._is_reentrant_acquisition = True
            self._increment_lock_count()
            return

        # First time acquiring in this thread. Perform the actual lock.
        if self.lock_path.exists():
            try:
                pid_str = self.lock_path.read_text().strip()
                if pid_str:
                    pid = int(pid_str)
                    if psutil.pid_exists(pid):
                        raise TimeoutError(f"is locked by another process (PID: {pid})")
                    else:
                        # Recorded holder is dead: reclaim the stale lock.
                        self.lock_path.unlink()
            except (ValueError, FileNotFoundError):
                # Corrupted or unreadable lock file, treat as stale
                self.lock_path.unlink(missing_ok=True)

        # Use O_TRUNC to ensure we overwrite any previous (e.g., corrupted) content
        self._lock_fd = os.open(self.lock_path, os.O_CREAT | os.O_WRONLY | os.O_TRUNC)
        self._is_lock_owner = True

        try:
            # Non-blocking OS lock; failure means another process won the race
            # between our staleness check and this call.
            if sys.platform == 'win32':
                msvcrt.locking(self._lock_fd, msvcrt.LK_NBLCK, 1)
            else:
                fcntl.flock(self._lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except (IOError, BlockingIOError):
            os.close(self._lock_fd)
            self._lock_fd = None
            self._is_lock_owner = False
            raise TimeoutError("Failed to acquire lock; another process may have just started.")

        # Record our PID (fsync'd) so other processes can detect staleness.
        os.write(self._lock_fd, str(os.getpid()).encode())
        os.fsync(self._lock_fd)
        self._increment_lock_count()

    def release(self):
        """Releases the lock and cleans up the lock file."""
        new_count = self._decrement_lock_count()

        if new_count == 0 and self._is_lock_owner:
            # This was the last lock holder in this thread, so release the file lock.
            if self._lock_fd:
                if sys.platform != 'win32':
                    fcntl.flock(self._lock_fd, fcntl.LOCK_UN)
                os.close(self._lock_fd)
                self._lock_fd = None

            try:
                if self.lock_path.exists():
                    # Safety check: only delete if we are still the owner
                    pid_str = self.lock_path.read_text().strip()
                    if not pid_str or int(pid_str) == os.getpid():
                        self.lock_path.unlink()
            except (OSError, ValueError, FileNotFoundError):
                pass # Ignore errors on cleanup

    def __enter__(self):
        # Context-manager entry: take (or re-enter) the lock.
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Context-manager exit: release even when the body raised.
        self.release()
273
+
274
+
275
+ # --- State Analysis Functions ---
276
+
277
# Source-file extension for each supported language; consulted by
# get_language_extension(), which rejects any language not listed here.
LANGUAGE_EXTENSIONS = {
    "python": "py",
    "javascript": "js",
    "typescript": "ts",
    "rust": "rs",
    "go": "go",
}
284
+
285
def get_language_extension(language: str) -> str:
    """Return the registered file extension for *language*.

    Raises:
        ValueError: if the language has no entry in LANGUAGE_EXTENSIONS.
    """
    try:
        return LANGUAGE_EXTENSIONS[language]
    except KeyError:
        raise ValueError(f"Unsupported language: {language}") from None
290
+
291
def get_pdd_file_paths(basename: str, language: str) -> Dict[str, Path]:
    """Map each artifact type of a PDD unit to its conventional on-disk path."""
    ext = get_language_extension(language)
    prompt_path = PROMPTS_ROOT_DIR / f"{basename}_{language}.prompt"
    code_path = CODE_ROOT_DIR / f"{basename}.{ext}"
    example_path = EXAMPLES_ROOT_DIR / f"{basename}_example.{ext}"
    test_path = TESTS_ROOT_DIR / f"test_{basename}.{ext}"
    return {
        'prompt': prompt_path,
        'code': code_path,
        'example': example_path,
        'test': test_path,
    }
300
+
301
def calculate_sha256(file_path: Path) -> Optional[str]:
    """Return the hex SHA-256 digest of *file_path*, or None if it is not a regular file."""
    if not file_path.is_file():
        return None

    digest = hashlib.sha256()
    # Stream in fixed-size chunks so arbitrarily large files use constant memory.
    with file_path.open("rb") as stream:
        while True:
            chunk = stream.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
311
+
312
+ def _read_json_file(file_path: Path, data_class) -> Optional[Any]:
313
+ """Generic JSON file reader and validator."""
314
+ if not file_path.is_file():
315
+ return None
316
+ try:
317
+ with open(file_path, 'r') as f:
318
+ data = json.load(f)
319
+ return data_class(**data)
320
+ except (json.JSONDecodeError, TypeError):
321
+ # Catches corrupted file, or if data doesn't match dataclass fields
322
+ return None
323
+
324
def read_fingerprint(basename: str, language: str) -> Optional[Fingerprint]:
    """Load the unit's last-known-good Fingerprint, or None if absent or corrupt."""
    return _read_json_file(META_DIR / f"{basename}_{language}.json", Fingerprint)
328
+
329
def read_run_report(basename: str, language: str) -> Optional[RunReport]:
    """Load the unit's last RunReport, or None if absent or corrupt."""
    return _read_json_file(META_DIR / f"{basename}_{language}_run.json", RunReport)
333
+
334
def calculate_current_hashes(paths: Dict[str, Path]) -> Dict[str, Optional[str]]:
    """Hash every unit file on disk, keyed '<type>_hash' (None for missing files)."""
    hashes: Dict[str, Optional[str]] = {}
    for file_type, path in paths.items():
        hashes[f"{file_type}_hash"] = calculate_sha256(path)
    return hashes
340
+
341
+ # --- LLM-based Conflict Analysis ---
342
+
343
def get_git_diff(file_path: Path) -> str:
    """
    Gets the git diff of a file against its last committed version (HEAD).
    Returns the full content for untracked files.

    Args:
        file_path: File to diff; may be absolute or relative.

    Returns:
        The diff text; "" when the file does not exist; the raw file content
        when git is unavailable or the path is outside a repository.
    """
    if not file_path.exists():
        return ""

    # Try to use a relative path if possible, as git's output is cleaner.
    # This is safe because test fixtures chdir into the repo root.
    try:
        path_for_git = file_path.relative_to(Path.cwd())
    except ValueError:
        # Not relative to CWD, use the original absolute path.
        path_for_git = file_path

    # Use 'git status' to check if the file is tracked
    try:
        status_result = subprocess.run(
            ['git', 'status', '--porcelain', str(path_for_git)],
            capture_output=True, text=True, check=True, encoding='utf-8'
        )
        # Porcelain status "??" marks an untracked file.
        is_untracked = status_result.stdout.strip().startswith('??')
    except (subprocess.CalledProcessError, FileNotFoundError):
        # Not a git repo, git not found, or file not in repo. Fallback to content.
        return file_path.read_text(encoding='utf-8')

    command = ['git', 'diff']
    if is_untracked:
        # Diff against nothing to show the whole file as an addition
        # Use /dev/null for POSIX and NUL for Windows
        null_device = "NUL" if sys.platform == "win32" else "/dev/null"
        command.extend(['--no-index', null_device, str(path_for_git)])
    else:
        # Diff against the last commit
        command.extend(['HEAD', '--', str(path_for_git)])

    try:
        # The `git diff` command returns exit code 1 if there are differences,
        # which `check=True` would interpret as an error. We must not use it.
        diff_result = subprocess.run(
            command, capture_output=True, text=True, encoding='utf-8'
        )
        return diff_result.stdout
    except FileNotFoundError:
        # Fallback if git command is not found
        return file_path.read_text(encoding='utf-8')
390
+
391
def analyze_conflict_with_llm(
    basename: str,
    language: str,
    fingerprint: Fingerprint,
    changed_files: List[str]
) -> SyncDecision:
    """
    Uses an LLM to analyze a complex sync conflict and recommend an operation.

    Args:
        basename: Base name of the PDD unit.
        language: Programming language of the unit.
        fingerprint: Last known good state of the unit.
        changed_files: Artifact types ('prompt', 'code', 'test', 'example')
            that changed since the last sync.

    Returns:
        A SyncDecision carrying the LLM's recommendation, or the operation
        'fail_and_request_manual_merge' on any template, validation,
        low-confidence, or unexpected failure.
    """
    try:
        prompt_template = load_prompt_template("sync_analysis_LLM.prompt")
        if not prompt_template:
            return SyncDecision(
                operation="fail_and_request_manual_merge",
                reason="Failed to load LLM analysis prompt template 'sync_analysis_LLM.prompt'."
            )

        paths = get_pdd_file_paths(basename, language)
        # Start every diff as empty so unchanged artifact slots format cleanly.
        diffs = {ftype: "" for ftype in ['prompt', 'code', 'test', 'example']}

        # Only compute git diffs for artifacts that actually changed.
        for file_type in changed_files:
            if file_type in paths:
                diffs[file_type] = get_git_diff(paths[file_type])

        # Format the prompt for the LLM
        formatted_prompt = prompt_template.format(
            fingerprint=json.dumps(asdict(fingerprint), indent=2),
            changed_files_list=", ".join(changed_files),
            prompt_diff=diffs['prompt'],
            code_diff=diffs['code'],
            test_diff=diffs['test'],
            example_diff=diffs['example']
        )

        # Invoke the LLM
        llm_response = llm_invoke(prompt=formatted_prompt)
        response_obj = llm_response.get('result')

        # Validate the response object
        if not isinstance(response_obj, LLMConflictResolutionOutput):
            return SyncDecision(
                operation="fail_and_request_manual_merge",
                reason=f"LLM did not return the expected Pydantic object. Got type: {type(response_obj).__name__}",
                details={"raw_response": str(response_obj)}
            )

        next_op = response_obj.next_operation
        reason = response_obj.reason
        confidence = response_obj.confidence

        # Low-confidence recommendations are never acted on automatically.
        if confidence < 0.75:
            return SyncDecision(
                operation="fail_and_request_manual_merge",
                reason=f"LLM analysis confidence ({confidence:.2f}) is below threshold. "
                       f"LLM suggestion was: '{next_op}' - {reason}",
                details=asdict(response_obj)
            )

        return SyncDecision(
            operation=next_op,
            reason=f"LLM analysis: {reason}",
            details=asdict(response_obj)
        )

    except Exception as e:
        # Deliberate catch-all: any failure in the LLM path degrades to a
        # manual-merge request instead of crashing the sync command.
        return SyncDecision(
            operation="fail_and_request_manual_merge",
            reason=f"LLM conflict analysis failed: {e}",
            details={"raw_response": str(locals().get('llm_response', {}).get('result'))}
        )
461
+
462
+
463
+ # --- Main Decision Function ---
464
+
465
def determine_sync_operation(
    basename: str,
    language: str,
    target_coverage: float = 80.0
) -> SyncDecision:
    """
    Analyzes a PDD unit's state and determines the next operation.

    This function is the core of the `pdd sync` command, providing a deterministic,
    reliable, and safe decision based on runtime signals and file fingerprints.

    Priority order: runtime signals (crash, failing tests, low coverage) beat
    file-state analysis; within file state, no-fingerprint and no-change cases
    short-circuit before single-file and multi-file change handling.

    Args:
        basename: The base name of the PDD unit (e.g., 'calculator').
        language: The programming language of the unit (e.g., 'python').
        target_coverage: The desired test coverage percentage.

    Returns:
        A SyncDecision object with the recommended operation and reason.

    Raises:
        TimeoutError: if another live process holds the unit's SyncLock.
    """
    # Hold the unit's lock for the whole analysis so concurrent syncs cannot race.
    with SyncLock(basename, language):
        # 1. Check Runtime Signals First (highest priority)
        run_report = read_run_report(basename, language)
        if run_report:
            if run_report.exit_code != 0:
                return SyncDecision(
                    operation='crash',
                    reason=f"The last run exited with a non-zero code ({run_report.exit_code}). "
                           "This indicates a crash that must be fixed.",
                    details=asdict(run_report)
                )
            if run_report.tests_failed > 0:
                return SyncDecision(
                    operation='fix',
                    reason=f"The last test run had {run_report.tests_failed} failing tests. "
                           "These must be fixed.",
                    details=asdict(run_report)
                )
            if run_report.coverage < target_coverage:
                return SyncDecision(
                    operation='test',
                    reason=f"Current test coverage ({run_report.coverage}%) is below the "
                           f"target ({target_coverage}%). More tests are needed.",
                    details=asdict(run_report)
                )

        # 2. Analyze File State
        paths = get_pdd_file_paths(basename, language)
        fingerprint = read_fingerprint(basename, language)
        current_hashes = calculate_current_hashes(paths)

        # 3. Implement the Decision Tree

        # Case: No Fingerprint (new or untracked unit)
        if not fingerprint:
            if paths['prompt'].exists():
                return SyncDecision(
                    operation='generate',
                    reason="No fingerprint file found, but a prompt exists. This appears to be a new PDD unit."
                )
            else:
                return SyncDecision(
                    operation='nothing',
                    reason="No PDD fingerprint and no prompt file found. Nothing to do."
                )

        # Compare current hashes with fingerprint
        fingerprint_hashes = {
            'prompt_hash': fingerprint.prompt_hash,
            'code_hash': fingerprint.code_hash,
            'example_hash': fingerprint.example_hash,
            'test_hash': fingerprint.test_hash,
        }

        # A file counts as changed when its on-disk hash (None when missing)
        # differs from the fingerprinted hash, so deletions also register.
        changed_files = [
            file_type.replace('_hash', '')
            for file_type, f_hash in fingerprint_hashes.items()
            if current_hashes.get(file_type) != f_hash
        ]

        # Case: No Changes
        if not changed_files:
            return SyncDecision(
                operation='nothing',
                reason="All files are synchronized with the last known good state."
            )

        details = {"changed_files": changed_files}
        # Case: Simple Changes (Single File Modified)
        if len(changed_files) == 1:
            change = changed_files[0]
            if change == 'prompt':
                return SyncDecision('generate', "The prompt has been modified. Code should be regenerated.", details)
            if change == 'code':
                return SyncDecision('update', "The code has been modified manually. The prompt should be updated.", details)
            if change == 'test':
                return SyncDecision('test', "The test file has been modified. The new tests should be run.", details)
            if change == 'example':
                # 'verify' is a pdd command to run the example file
                return SyncDecision('verify', "The example file has been modified. It should be verified.", details)

        # Case: Complex Changes (Multiple Files Modified / Conflicts)
        if len(changed_files) > 1:
            return SyncDecision(
                operation='analyze_conflict',
                reason=f"Multiple files have been modified since the last sync: {', '.join(changed_files)}.",
                details=details
            )

        # Fallback, should not be reached
        return SyncDecision('nothing', 'Analysis complete, no operation required.')
pdd/xml_tagger.py CHANGED
@@ -1,3 +1,9 @@
1
+ """XML tagging module for improving prompt structure with XML tags.
2
+
3
+ This module provides functionality to enhance LLM prompts by adding XML tags,
4
+ making them more structured and readable for better processing.
5
+ """
6
+
1
7
  from typing import Tuple
2
8
  from rich import print as rprint
3
9
  from rich.markdown import Markdown
@@ -6,7 +12,9 @@ from .load_prompt_template import load_prompt_template
6
12
  from .llm_invoke import llm_invoke
7
13
  from . import EXTRACTION_STRENGTH
8
14
  from . import DEFAULT_TIME
15
+
9
16
  class XMLOutput(BaseModel):
17
+ """Pydantic model for XML-tagged prompt output."""
10
18
  xml_tagged: str = Field(description="The XML-tagged version of the prompt")
11
19
 
12
20
  def xml_tagger(
@@ -96,8 +104,8 @@ def xml_tagger(
96
104
  # Step 5 & 6: Return results
97
105
  return result.xml_tagged, total_cost, model_name
98
106
 
99
- except Exception as e:
100
- rprint(f"[red]Error in xml_tagger: {str(e)}[/red]")
107
+ except Exception as error:
108
+ rprint(f"[red]Error in xml_tagger: {str(error)}[/red]")
101
109
  raise
102
110
 
103
111
  def main():
@@ -109,7 +117,7 @@ def main():
109
117
  Include examples of usage and error cases.
110
118
  """
111
119
 
112
- xml_tagged, cost, model = xml_tagger(
120
+ tagged_result, cost, model = xml_tagger(
113
121
  raw_prompt=sample_prompt,
114
122
  strength=0.7,
115
123
  temperature=0.8,
@@ -120,9 +128,10 @@ def main():
120
128
  rprint("[blue]XML Tagging Complete[/blue]")
121
129
  rprint(f"Total Cost: ${cost:.6f}")
122
130
  rprint(f"Model Used: {model}")
131
+ rprint(f"Result length: {len(tagged_result)}")
123
132
 
124
- except Exception as e:
125
- rprint(f"[red]Error in main: {str(e)}[/red]")
133
+ except Exception as error:
134
+ rprint(f"[red]Error in main: {str(error)}[/red]")
126
135
 
127
136
  if __name__ == "__main__":
128
- main()
137
+ main()
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pdd-cli
3
- Version: 0.0.40
3
+ Version: 0.0.41
4
4
  Summary: PDD (Prompt-Driven Development) Command Line Interface
5
5
  Author: Greg Tanaka
6
6
  Author-email: glt@alumni.caltech.edu
@@ -46,7 +46,7 @@ Requires-Dist: pytest-asyncio; extra == "dev"
46
46
  Requires-Dist: z3-solver; extra == "dev"
47
47
  Dynamic: license-file
48
48
 
49
- .. image:: https://img.shields.io/badge/pdd--cli-v0.0.40-blue
49
+ .. image:: https://img.shields.io/badge/pdd--cli-v0.0.41-blue
50
50
  :alt: PDD-CLI Version
51
51
 
52
52
  .. image:: https://img.shields.io/badge/Discord-join%20chat-7289DA.svg?logo=discord&logoColor=white&link=https://discord.gg/Yp4RTh8bG7
@@ -134,7 +134,7 @@ After installation, verify:
134
134
 
135
135
  pdd --version
136
136
 
137
- You'll see the current PDD version (e.g., 0.0.40).
137
+ You'll see the current PDD version (e.g., 0.0.41).
138
138
 
139
139
  Advanced Installation Tips
140
140
  --------------------------