pdd-cli 0.0.42 → 0.0.44 (pdd_cli-0.0.42-py3-none-any.whl → pdd_cli-0.0.44-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pdd-cli might be problematic.

pdd/__init__.py CHANGED
@@ -1,6 +1,6 @@
 """PDD - Prompt Driven Development"""
 
-__version__ = "0.0.42"
+__version__ = "0.0.44"
 
 # Strength parameter used for LLM extraction across the codebase
 # Used in postprocessing, XML tagging, code generation, and other extraction
pdd/cli.py CHANGED
@@ -362,7 +362,7 @@ def example(
     "--target-coverage",
     type=click.FloatRange(0.0, 100.0),
     default=None,  # Use None, default handled in cmd_test_main or env var
-    help="Desired code coverage percentage (default: 90.0 or PDD_TEST_COVERAGE_TARGET).",
+    help="Desired code coverage percentage (default: 10.0 or PDD_TEST_COVERAGE_TARGET).",
 )
 @click.option(
     "--merge",
@@ -1130,7 +1130,7 @@ def verify(
 @click.option(
     "--target-coverage",
     type=click.FloatRange(0.0, 100.0),
-    default=90.0,
+    default=10.0,
     show_default=True,
     help="Target code coverage percentage for generated tests.",
 )
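
Both hunks lower the default coverage target from 90.0 to 10.0. For the first option the Click default stays None and is resolved later in cmd_test_main. A minimal sketch of how that fallback chain might look; the helper name and resolution order are assumptions for illustration, not taken from the package source:

import os

def resolve_target_coverage(cli_value):
    # Hypothetical resolution order: explicit CLI value wins, then the
    # PDD_TEST_COVERAGE_TARGET environment variable, then the new 10.0 default.
    if cli_value is not None:
        return cli_value
    env_value = os.environ.get("PDD_TEST_COVERAGE_TARGET")
    return float(env_value) if env_value else 10.0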
pdd/cmd_test_main.py CHANGED
@@ -155,6 +155,15 @@ def cmd_test_main(
         print("[bold red]Error: Output file path could not be determined.[/bold red]")
         ctx.exit(1)
         return "", 0.0, ""
+
+    # Check if unit_test content is empty
+    if not unit_test or not unit_test.strip():
+        print("[bold red]Error: Generated unit test content is empty or whitespace-only.[/bold red]")
+        print(f"[bold yellow]Debug: unit_test length: {len(unit_test) if unit_test else 0}[/bold yellow]")
+        print(f"[bold yellow]Debug: unit_test content preview: {repr(unit_test[:100]) if unit_test else 'None'}[/bold yellow]")
+        ctx.exit(1)
+        return "", 0.0, ""
+
     try:
         with open(output_file, "w", encoding="utf-8") as file_handle:
             file_handle.write(unit_test)
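
The new guard rejects empty and whitespace-only test content before anything is written to disk. A quick standalone illustration of the check:

# Only the last value passes the guard used in cmd_test_main above.
for unit_test in [None, "", "   \n\t", "def test_ok(): pass"]:
    rejected = not unit_test or not unit_test.strip()
    print(repr(unit_test), "rejected" if rejected else "written")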
pdd/construct_paths.py CHANGED
@@ -392,6 +392,7 @@ def construct_paths(
     pddrc_config = {}
     context = None
     context_config = {}
+    original_context_config = {}  # Keep track of original context config for sync discovery
 
     try:
         # Find and load .pddrc file
@@ -405,6 +406,7 @@ def construct_paths(
 
         # Get context-specific configuration
         context_config = _get_context_config(pddrc_config, context)
+        original_context_config = context_config.copy()  # Store original before modifications
 
         if not quiet and context:
             console.print(f"[info]Using .pddrc context:[/info] {context}")
@@ -451,8 +453,30 @@ def construct_paths(
         )
         # Infer base directories from a sample output path
         gen_path = Path(output_paths_str.get("generate_output_path", "src"))
-        resolved_config["prompts_dir"] = str(gen_path.parent.parent / "prompts")
-        resolved_config["code_dir"] = str(gen_path.parent)
+
+        # First, check current working directory for prompt files matching the basename pattern
+        current_dir = Path.cwd()
+        prompt_pattern = f"{basename}_*.prompt"
+        if list(current_dir.glob(prompt_pattern)):
+            # Found prompt files in current working directory
+            resolved_config["prompts_dir"] = str(current_dir)
+            resolved_config["code_dir"] = str(current_dir)
+            if not quiet:
+                console.print(f"[info]Found prompt files in current directory:[/info] {current_dir}")
+        else:
+            # Fall back to context-aware logic
+            # Use original_context_config to avoid checking augmented config with env vars
+            if original_context_config and any(key.endswith('_output_path') for key in original_context_config):
+                # For configured contexts, prompts are typically at the same level as output dirs
+                # e.g., if code goes to "pdd/", prompts should be at "prompts/" (siblings)
+                resolved_config["prompts_dir"] = "prompts"
+                resolved_config["code_dir"] = str(gen_path.parent)
+            else:
+                # For default contexts, maintain relative relationship
+                # e.g., if code goes to "pi.py", prompts should be at "prompts/" (siblings)
+                resolved_config["prompts_dir"] = str(gen_path.parent / "prompts")
+                resolved_config["code_dir"] = str(gen_path.parent)
+
         resolved_config["tests_dir"] = str(Path(output_paths_str.get("test_output_path", "tests")).parent)
         resolved_config["examples_dir"] = str(Path(output_paths_str.get("example_output_path", "examples")).parent)
 
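The discovery order introduced here is: prompt files matching {basename}_*.prompt in the current working directory win; otherwise a .pddrc context that defines *_output_path keys pins prompts_dir to "prompts"; otherwise the default layout keeps prompts_dir relative to the generated code. A small standalone sketch of the two fallback branches, with made-up paths:

from pathlib import Path

gen_path = Path("src/pi.py")   # stand-in for generate_output_path
has_context_paths = False      # pretend .pddrc defines no *_output_path keys

if has_context_paths:
    prompts_dir, code_dir = "prompts", str(gen_path.parent)
else:
    prompts_dir, code_dir = str(gen_path.parent / "prompts"), str(gen_path.parent)
print(prompts_dir, code_dir)   # src/prompts src
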
(unknown file) CHANGED
@@ -31,6 +31,7 @@ Dart,//,.dart
 F#,//,.fs
 YAML,#,.yml
 JSON,del,.json
+JSONL,del,.jsonl
 XML,"<!-- -->",.xml
 Makefile,#,
 CSV,del,.csv
pdd/data/llm_model.csv CHANGED
@@ -3,8 +3,8 @@ OpenAI,gpt-4.1-nano,0.1,0.4,1249,,OPENAI_API_KEY,0,True,none
 xai,xai/grok-3-beta,3.0,15.0,1332,https://api.x.ai/v1,XAI_API_KEY,0,False,none
 Anthropic,claude-3-5-haiku-20241022,.8,4,1261,,ANTHROPIC_API_KEY,0,True,none
 OpenAI,deepseek/deepseek-chat,.27,1.1,1353,https://api.deepseek.com/beta,DEEPSEEK_API_KEY,0,False,none
-Google,vertex_ai/gemini-2.5-flash-preview-04-17,0.15,0.6,1330,,VERTEX_CREDENTIALS,0,True,effort
-Google,gemini-2.5-pro-exp-03-25,1.25,10.0,1360,,GOOGLE_API_KEY,0,True,none
+Google,vertex_ai/gemini-2.5-flash,0.15,0.6,1330,,VERTEX_CREDENTIALS,0,True,effort
+Google,gemini-2.5-pro,1.25,10.0,1360,,GOOGLE_API_KEY,0,True,none
 Anthropic,claude-sonnet-4-20250514,3.0,15.0,1340,,ANTHROPIC_API_KEY,64000,True,budget
 Google,vertex_ai/gemini-2.5-pro,1.25,10.0,1361,,VERTEX_CREDENTIALS,0,True,none
 OpenAI,o4-mini,1.1,4.4,1333,,OPENAI_API_KEY,0,True,effort
pdd/fix_code_loop.py CHANGED
@@ -3,7 +3,7 @@ import shutil
 import subprocess
 import sys
 from pathlib import Path
-from typing import Tuple
+from typing import Tuple, Optional, Union
 from . import DEFAULT_TIME  # Added DEFAULT_TIME
 
 # Use Rich for pretty printing to the console
@@ -38,7 +38,7 @@ def fix_code_loop(
     error_log_file: str,
     verbose: bool = False,
     time: float = DEFAULT_TIME,
-) -> Tuple[bool, str, str, int, float, str | None]:
+) -> Tuple[bool, str, str, int, float, Optional[str]]:
     """
     Attempts to fix errors in a code module through multiple iterations.
 
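The annotation change swaps the PEP 604 union str | None for typing.Optional[str]. This reads as a compatibility fix: evaluating str | None in a signature raises TypeError on Python 3.9 and earlier unless from __future__ import annotations is active, while Optional[str] spells the identical type on all supported versions. For example:

from typing import Optional, Tuple

def fix_code_loop_stub() -> Tuple[bool, str, str, int, float, Optional[str]]:
    # Optional[str] is the same type as str | None but parses on Python < 3.10.
    return True, "", "", 0, 0.0, None
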
pdd/fix_error_loop.py CHANGED
@@ -12,6 +12,7 @@ from rich.console import Console
 # Relative import from an internal module.
 from .fix_errors_from_unit_tests import fix_errors_from_unit_tests
 from . import DEFAULT_TIME  # Import DEFAULT_TIME
+from .python_env_detector import detect_host_python_executable
 
 console = Console()
 
@@ -26,7 +27,9 @@ def run_pytest_on_file(test_file: str) -> tuple[int, int, int, str]:
     """
     try:
         # Include "--json-only" to ensure only valid JSON is printed.
-        cmd = [sys.executable, "-m", "pdd.pytest_output", "--json-only", test_file]
+        # Use environment-aware Python executable for pytest execution
+        python_executable = detect_host_python_executable()
+        cmd = [python_executable, "-m", "pdd.pytest_output", "--json-only", test_file]
         result = subprocess.run(cmd, capture_output=True, text=True)
 
         # Parse the JSON output from stdout
@@ -380,7 +383,7 @@ def fix_error_loop(unit_test_file: str,
 
     # Run the verification:
     try:
-        verify_cmd = [sys.executable, verification_program]
+        verify_cmd = [detect_host_python_executable(), verification_program]
         verify_result = subprocess.run(verify_cmd, capture_output=True, text=True)
         # Safely handle None for stdout or stderr:
         verify_stdout = verify_result.stdout or ""
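
Several modules in this release route subprocesses through pdd.python_env_detector.detect_host_python_executable() instead of sys.executable. The detector's implementation is not shown in this diff; a minimal sketch of what such a helper might do, with the VIRTUAL_ENV-then-sys.executable order being an assumption:

import os
import shutil
import sys

def detect_host_python_executable() -> str:
    # Hypothetical: prefer an active virtualenv's interpreter, then the
    # currently running one, then whatever "python3" resolves to on PATH.
    venv = os.environ.get("VIRTUAL_ENV")
    if venv:
        candidate = os.path.join(venv, "bin", "python")
        if os.path.exists(candidate):
            return candidate
    return sys.executable or shutil.which("python3") or "python3"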
(unknown file) CHANGED
@@ -2,6 +2,7 @@ import os
 import shutil
 import subprocess
 import datetime
+import sys
 from pathlib import Path
 from typing import Dict, Tuple, Any, Optional
 from xml.sax.saxutils import escape
@@ -25,6 +26,7 @@ except ImportError:
     )
 
 from . import DEFAULT_TIME  # Import DEFAULT_TIME
+from .python_env_detector import detect_host_python_executable
 
 # Initialize Rich Console for pretty printing
 console = Console()
@@ -49,19 +51,30 @@ def _run_program(
     if not program_path.is_file():
         return -1, f"Error: Program file not found at {program_path}"
 
-    command = ["python", str(program_path)]
+    command = [detect_host_python_executable(), str(program_path)]
     if args:
         command.extend(args)
 
     try:
+        # Run from staging root directory instead of examples/ directory
+        # This allows imports from both pdd/ and examples/ subdirectories
+        staging_root = program_path.parent.parent  # Go up from examples/ to staging root
+
         result = subprocess.run(
             command,
             capture_output=True,
             text=True,
             timeout=timeout,
             check=False,  # Don't raise exception for non-zero exit codes
+            env=os.environ.copy(),  # Pass current environment variables
+            cwd=staging_root  # Set working directory to staging root
         )
         combined_output = result.stdout + result.stderr
+
+        # Check for syntax errors
+        if result.returncode != 0 and "SyntaxError" in result.stderr:
+            return result.returncode, f"SYNTAX_ERROR: {combined_output}"
+
         return result.returncode, combined_output
     except FileNotFoundError:
         return -1, f"Error: Python interpreter not found or '{program_path}' not found."
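
The cwd change means the child process starts in the staging root (program_path.parent.parent) rather than in examples/. A toy illustration of the computation and the call shape; the directory names are illustrative only:

import subprocess
from pathlib import Path

program_path = Path("staging/examples/demo.py")
staging_root = program_path.parent.parent  # Path("staging")

# With cwd=staging_root, relative paths inside demo.py (and any sys.path
# entries the child derives from its working directory) resolve against the
# staging root, where pdd/ and examples/ live side by side.
result = subprocess.run(
    ["python", str(program_path)],
    cwd=staging_root, capture_output=True, text=True,
)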
(unknown file) CHANGED
@@ -17,6 +17,7 @@ from .fix_verification_errors import fix_verification_errors
 from .fix_verification_errors_loop import fix_verification_errors_loop
 # Import DEFAULT_STRENGTH from the main package
 from . import DEFAULT_STRENGTH, DEFAULT_TIME
+from .python_env_detector import detect_host_python_executable
 
 # Default values from the README
 DEFAULT_TEMPERATURE = 0.0
@@ -48,7 +49,7 @@ def run_program(program_path: str, args: List[str] = []) -> Tuple[bool, str, str]:
     # A more robust solution might use the 'language' from construct_paths
     interpreter = []
     if program_path.endswith(".py"):
-        interpreter = [sys.executable]  # Use the current Python interpreter
+        interpreter = [detect_host_python_executable()]  # Use environment-aware Python executable
     elif program_path.endswith(".js"):
         interpreter = ["node"]
     elif program_path.endswith(".sh"):
@@ -57,13 +58,21 @@ def run_program(program_path: str, args: List[str] = []) -> Tuple[bool, str, str]:
 
     command = interpreter + [program_path] + args
     rich_print(f"[dim]Running command:[/dim] {' '.join(command)}")
+    rich_print(f"[dim]Working directory:[/dim] {os.path.dirname(program_path) if program_path else 'None'}")
+    rich_print(f"[dim]Environment PYTHONPATH:[/dim] {os.environ.get('PYTHONPATH', 'Not set')}")
 
+    # Create a copy of the environment with PYTHONUNBUFFERED set
+    env = os.environ.copy()
+    env['PYTHONUNBUFFERED'] = '1'  # Force unbuffered output
+
     process = subprocess.run(
         command,
         capture_output=True,
         text=True,
         check=False,  # Don't raise exception on non-zero exit code
-        timeout=60  # Add a timeout to prevent hangs
+        timeout=60,  # Add a timeout to prevent hangs
+        env=env,  # Pass modified environment variables
+        cwd=os.path.dirname(program_path) if program_path else None  # Set working directory
     )
 
     success = process.returncode == 0
@@ -71,11 +80,17 @@ def run_program(program_path: str, args: List[str] = []) -> Tuple[bool, str, str]:
     stdout = process.stdout
     stderr = process.stderr
 
     if not success:
-        rich_print(f"[yellow]Warning:[/yellow] Program '{os.path.basename(program_path)}' exited with code {process.returncode}.")
-        if stderr:
-            rich_print("[yellow]Stderr:[/yellow]")
-            rich_print(Panel(stderr, border_style="yellow"))
-
+        rich_print(f"[yellow]Warning:[/yellow] Program '{os.path.basename(program_path)}' exited with code {process.returncode}.")
+
+        # Check for syntax errors specifically
+        if "SyntaxError" in stderr:
+            rich_print("[bold red]Syntax Error Detected:[/bold red]")
+            rich_print(Panel(stderr, border_style="red", title="Python Syntax Error"))
+            # Return with special indicator for syntax errors
+            return False, stdout, f"SYNTAX_ERROR: {stderr}"
+        elif stderr:
+            rich_print("[yellow]Stderr:[/yellow]")
+            rich_print(Panel(stderr, border_style="yellow"))
 
     return success, stdout, stderr
 
@@ -354,7 +369,13 @@ def fix_verification_main(
     results_log_content += f"Model Used: {model_name}\n"
     results_log_content += f"Total Cost: ${total_cost:.6f}\n"
     results_log_content += "\n--- LLM Explanation ---\n"
-    results_log_content += "\n".join(fix_results.get('explanation', ['N/A']))
+    # The original code here was:
+    #     results_log_content += "\n".join(fix_results.get('explanation', ['N/A']))
+    # This was incorrect because fix_results['explanation'] is a single string,
+    # so "\n".join() iterated through it character by character, causing the
+    # single-character-per-line output.
+    # The fix is to append the string directly, using a default value if it is None.
+    results_log_content += fix_results.get('explanation') or 'N/A'
     results_log_content += "\n\n--- Program Output Used for Verification ---\n"
     results_log_content += program_output
pdd/get_jwt_token.py CHANGED
@@ -2,7 +2,20 @@ import asyncio
 import time
 from typing import Dict, Optional, Tuple
 
-import keyring
+# Cross-platform keyring import with fallback for WSL compatibility
+try:
+    import keyring
+    KEYRING_AVAILABLE = True
+except ImportError:
+    try:
+        import keyrings.alt.file
+        keyring = keyrings.alt.file.PlaintextKeyring()
+        KEYRING_AVAILABLE = True
+        print("Warning: Using alternative keyring (PlaintextKeyring) - tokens stored in plaintext")
+    except ImportError:
+        keyring = None
+        KEYRING_AVAILABLE = False
+        print("Warning: No keyring available - token storage disabled")
 import requests
 
 # Custom exception classes for better error handling
@@ -128,20 +141,39 @@ class FirebaseAuthenticator:
 
     def _store_refresh_token(self, refresh_token: str):
         """Stores the Firebase refresh token in the system keyring."""
-        keyring.set_password(self.keyring_service_name, self.keyring_user_name, refresh_token)
+        if not KEYRING_AVAILABLE or keyring is None:
+            print("Warning: No keyring available, refresh token not stored")
+            return
+        try:
+            keyring.set_password(self.keyring_service_name, self.keyring_user_name, refresh_token)
+        except Exception as e:
+            print(f"Warning: Failed to store refresh token in keyring: {e}")
 
     def _get_stored_refresh_token(self) -> Optional[str]:
         """Retrieves the Firebase refresh token from the system keyring."""
-        return keyring.get_password(self.keyring_service_name, self.keyring_user_name)
+        if not KEYRING_AVAILABLE or keyring is None:
+            return None
+        try:
+            return keyring.get_password(self.keyring_service_name, self.keyring_user_name)
+        except Exception as e:
+            print(f"Warning: Failed to retrieve refresh token from keyring: {e}")
+            return None
 
     def _delete_stored_refresh_token(self):
         """Deletes the stored Firebase refresh token from the keyring."""
+        if not KEYRING_AVAILABLE or keyring is None:
+            print("No keyring available. Token deletion skipped.")
+            return
         try:
             keyring.delete_password(self.keyring_service_name, self.keyring_user_name)
-        except keyring.errors.NoKeyringError:
-            print("No keyring found. Token deletion skipped.")
-        except keyring.errors.PasswordDeleteError:
-            print("Failed to delete token from keyring.")
+        except Exception as e:
+            # Handle both keyring.errors and generic exceptions for cross-platform compatibility
+            if "NoKeyringError" in str(type(e)) or "no keyring" in str(e).lower():
+                print("No keyring found. Token deletion skipped.")
+            elif "PasswordDeleteError" in str(type(e)) or "delete" in str(e).lower():
+                print("Failed to delete token from keyring.")
+            else:
+                print(f"Warning: Error deleting token from keyring: {e}")
 
     async def _refresh_firebase_token(self, refresh_token: str) -> str:
         """
pdd/increase_tests.py CHANGED
@@ -78,6 +78,13 @@ def increase_tests(
         time=time,
         verbose=verbose
     )
+
+    # Debug: Check LLM response
+    console.print(f"[blue]DEBUG increase_tests: LLM response type: {type(llm_response)}[/blue]")
+    console.print(f"[blue]DEBUG increase_tests: LLM response keys: {llm_response.keys() if isinstance(llm_response, dict) else 'Not a dict'}[/blue]")
+    console.print(f"[blue]DEBUG increase_tests: LLM result type: {type(llm_response.get('result', 'No result key'))}[/blue]")
+    console.print(f"[blue]DEBUG increase_tests: LLM result length: {len(llm_response['result']) if 'result' in llm_response and llm_response['result'] else 0}[/blue]")
+    console.print(f"[blue]DEBUG increase_tests: LLM result preview: {repr(llm_response['result'][:300]) if 'result' in llm_response and llm_response['result'] else 'Empty or no result'}[/blue]")
 
     increase_test_function, total_cost, model_name = postprocess(
         llm_response['result'],
pdd/llm_invoke.py CHANGED
@@ -79,7 +79,11 @@ import time as time_module  # Alias to avoid conflict with 'time' parameter
 from pdd import DEFAULT_LLM_MODEL
 
 # Opt-in to future pandas behavior regarding downcasting
-pd.set_option('future.no_silent_downcasting', True)
+try:
+    pd.set_option('future.no_silent_downcasting', True)
+except pd._config.config.OptionError:
+    # Skip if option doesn't exist in older pandas versions
+    pass
 
 
 def _is_wsl_environment() -> bool:
@@ -152,8 +156,8 @@ if PDD_PATH_ENV:
 
 if PROJECT_ROOT is None:  # If PDD_PATH wasn't set or was invalid
     try:
-        # Start from the directory containing this script
-        current_dir = Path(__file__).resolve().parent
+        # Start from the current working directory (where user is running PDD)
+        current_dir = Path.cwd().resolve()
         # Look for project markers (e.g., .git, pyproject.toml, data/, .env)
         # Go up a maximum of 5 levels to prevent infinite loops
         for _ in range(5):
@@ -164,7 +168,7 @@ if PROJECT_ROOT is None:  # If PDD_PATH wasn't set or was invalid
 
             if has_git or has_pyproject or has_data or has_dotenv:
                 PROJECT_ROOT = current_dir
-                logger.debug(f"Determined PROJECT_ROOT by marker search: {PROJECT_ROOT}")
+                logger.debug(f"Determined PROJECT_ROOT by marker search from CWD: {PROJECT_ROOT}")
                 break
 
             parent_dir = current_dir.parent
@@ -172,10 +176,8 @@ if PROJECT_ROOT is None:  # If PDD_PATH wasn't set or was invalid
                 break
             current_dir = parent_dir
 
-    except NameError:  # __file__ might not be defined (e.g., interactive session)
-        warnings.warn("__file__ not defined. Cannot automatically detect project root from script location.")
     except Exception as e:  # Catch potential permission errors etc.
-        warnings.warn(f"Error during project root auto-detection: {e}")
+        warnings.warn(f"Error during project root auto-detection from current working directory: {e}")
 
 if PROJECT_ROOT is None:  # Fallback to CWD if no method succeeded
     PROJECT_ROOT = Path.cwd().resolve()
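
The guard above names OptionError via the private pd._config.config path. Since pandas defines OptionError as a subclass of both AttributeError and KeyError, an equivalent guard can avoid private imports entirely; a sketch of that variant:

import pandas as pd

try:
    pd.set_option("future.no_silent_downcasting", True)
except (AttributeError, KeyError):
    # pandas' OptionError subclasses both of these, so this catches the
    # "unknown option" case on older pandas without private imports.
    pass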
(unknown prompt template) CHANGED
@@ -1,4 +1,10 @@
-% You are an expert Software Engineer. Your goal is to extract a JSON from an analysis of a program and code module bug fix report. If there is a choice of updating the program or the code module, you should choose to update the code module.
+% You are an expert Software Engineer. Your goal is to extract a JSON from an analysis of a program and code module bug fix report.
+
+% IMPORTANT: The crash command is designed to fix errors in BOTH the code module AND the calling program that caused the crash. You should fix whatever needs to be fixed to make the program run successfully:
+- If the code module has bugs, fix the code module
+- If the calling program has bugs, fix the calling program
+- If both have issues that contribute to the crash, fix BOTH
+- The goal is to ensure the program runs without errors after the fix
 
 % Here is the original program: <program>{program}</program>
 
(unknown prompt template) CHANGED
@@ -1,4 +1,10 @@
-% You are an expert Software Engineer. Your goal is to fix the errors in a code_module or program that is causing that program to crash.
+% You are an expert Software Engineer. Your goal is to fix the errors in a code_module AND/OR program that is causing that program to crash.
+
+% IMPORTANT: The crash command should fix whatever needs to be fixed to make the program run successfully:
+- If the code module has bugs, fix the code module
+- If the calling program has bugs, fix the calling program
+- If both have issues that contribute to the crash, fix BOTH
+- The goal is to ensure the program runs without errors after all fixes are applied
 
 % Here is the program that is running the code_module that crashed and/or has errors: <program>{program}</program>
 
@@ -41,6 +47,10 @@
 Step 1. Compare the prompt to the code_module and explain differences, if any.
 Step 2. Compare the prompt to the program and explain differences, if any.
 Step 3. Explain in detail step by step why there might be an error and why prior attempted fixes, if any, may not have worked. Write several paragraphs explaining the root cause of each of the errors.
-Step 4. Explain in detail step by step how to solve each of the errors. For each error, there should be a several-paragraph description of the steps. Sometimes logging or print statements can help debug the code_module or program.
+Step 4. Explain in detail step by step how to solve each of the errors. For each error, there should be a several-paragraph description of the steps. Consider whether the fix requires:
+- Updating the code_module only
+- Updating the calling program only
+- Updating BOTH the code_module AND the calling program
+Sometimes logging or print statements can help debug the code_module or program.
 Step 5. Review the above steps and correct for any errors in the logic.
-Step 6. For the code that needs changes, write the corrected code_module and/or corrected program in its/their entirety.
+Step 6. For ALL code that needs changes, write the corrected code_module and/or corrected program in their entirety. If both need fixes, provide both complete fixed versions.
pdd/pytest_output.py CHANGED
@@ -3,9 +3,11 @@ import json
 import io
 import sys
 import pytest
+import subprocess
 from rich.console import Console
 from rich.pretty import pprint
 import os
+from .python_env_detector import detect_host_python_executable
 
 console = Console()
 
@@ -80,27 +82,77 @@ def run_pytest_and_capture_output(test_file: str) -> dict:
         )
         return {}
 
-    collector = TestResultCollector()
+    # Use environment-aware Python executable for pytest execution
+    python_executable = detect_host_python_executable()
+
     try:
-        collector.capture_logs()
-        result = pytest.main([test_file], plugins=[collector])
-    finally:
-        stdout, stderr = collector.get_logs()
-
-    return {
-        "test_file": test_file,
-        "test_results": [
-            {
-                "standard_output": stdout,
-                "standard_error": stderr,
-                "return_code": int(result),
-                "warnings": collector.warnings,
-                "errors": collector.errors,
-                "failures": collector.failures,
-                "passed": collector.passed,
-            }
-        ],
-    }
+        # Run pytest using subprocess with the detected Python executable
+        result = subprocess.run(
+            [python_executable, "-m", "pytest", test_file, "-v"],
+            capture_output=True,
+            text=True,
+            timeout=300
+        )
+
+        stdout = result.stdout
+        stderr = result.stderr
+        return_code = result.returncode
+
+        # Parse the output to extract test results
+        # Count passed, failed, and skipped tests from the output
+        passed = stdout.count(" PASSED")
+        failures = stdout.count(" FAILED") + stdout.count(" ERROR")
+        errors = 0  # Will be included in failures for subprocess execution
+        warnings = stdout.count("warning")
+
+        # If return code is 2, it indicates a pytest error
+        if return_code == 2:
+            errors = 1
+
+        return {
+            "test_file": test_file,
+            "test_results": [
+                {
+                    "standard_output": stdout,
+                    "standard_error": stderr,
+                    "return_code": return_code,
+                    "warnings": warnings,
+                    "errors": errors,
+                    "failures": failures,
+                    "passed": passed,
+                }
+            ],
+        }
+    except subprocess.TimeoutExpired:
+        return {
+            "test_file": test_file,
+            "test_results": [
+                {
+                    "standard_output": "",
+                    "standard_error": "Test execution timed out",
+                    "return_code": -1,
+                    "warnings": 0,
+                    "errors": 1,
+                    "failures": 0,
+                    "passed": 0,
+                }
+            ],
+        }
+    except Exception as e:
+        return {
+            "test_file": test_file,
+            "test_results": [
+                {
+                    "standard_output": "",
+                    "standard_error": f"Error running pytest: {str(e)}",
+                    "return_code": -1,
+                    "warnings": 0,
+                    "errors": 1,
+                    "failures": 0,
+                    "passed": 0,
+                }
+            ],
+        }
 
 def save_output_to_json(output: dict, output_file: str = "pytest.json"):
     """