ostruct-cli 0.8.29__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as published in their respective public registries.
Files changed (49)
  1. ostruct/cli/__init__.py +3 -15
  2. ostruct/cli/attachment_processor.py +455 -0
  3. ostruct/cli/attachment_template_bridge.py +973 -0
  4. ostruct/cli/cli.py +157 -33
  5. ostruct/cli/click_options.py +775 -692
  6. ostruct/cli/code_interpreter.py +195 -12
  7. ostruct/cli/commands/__init__.py +0 -3
  8. ostruct/cli/commands/run.py +289 -62
  9. ostruct/cli/config.py +23 -22
  10. ostruct/cli/constants.py +89 -0
  11. ostruct/cli/errors.py +175 -5
  12. ostruct/cli/explicit_file_processor.py +0 -15
  13. ostruct/cli/file_info.py +97 -15
  14. ostruct/cli/file_list.py +43 -1
  15. ostruct/cli/file_search.py +68 -2
  16. ostruct/cli/help_json.py +235 -0
  17. ostruct/cli/mcp_integration.py +13 -16
  18. ostruct/cli/params.py +217 -0
  19. ostruct/cli/plan_assembly.py +335 -0
  20. ostruct/cli/plan_printing.py +385 -0
  21. ostruct/cli/progress_reporting.py +8 -56
  22. ostruct/cli/quick_ref_help.py +128 -0
  23. ostruct/cli/rich_config.py +299 -0
  24. ostruct/cli/runner.py +397 -190
  25. ostruct/cli/security/__init__.py +2 -0
  26. ostruct/cli/security/allowed_checker.py +41 -0
  27. ostruct/cli/security/normalization.py +13 -9
  28. ostruct/cli/security/security_manager.py +558 -17
  29. ostruct/cli/security/types.py +15 -0
  30. ostruct/cli/template_debug.py +283 -261
  31. ostruct/cli/template_debug_help.py +233 -142
  32. ostruct/cli/template_env.py +46 -5
  33. ostruct/cli/template_filters.py +415 -8
  34. ostruct/cli/template_processor.py +240 -619
  35. ostruct/cli/template_rendering.py +49 -73
  36. ostruct/cli/template_validation.py +2 -1
  37. ostruct/cli/token_validation.py +35 -15
  38. ostruct/cli/types.py +15 -19
  39. ostruct/cli/unicode_compat.py +283 -0
  40. ostruct/cli/upload_manager.py +448 -0
  41. ostruct/cli/validators.py +255 -54
  42. {ostruct_cli-0.8.29.dist-info → ostruct_cli-1.0.1.dist-info}/METADATA +231 -128
  43. ostruct_cli-1.0.1.dist-info/RECORD +80 -0
  44. ostruct/cli/commands/quick_ref.py +0 -54
  45. ostruct/cli/template_optimizer.py +0 -478
  46. ostruct_cli-0.8.29.dist-info/RECORD +0 -71
  47. {ostruct_cli-0.8.29.dist-info → ostruct_cli-1.0.1.dist-info}/LICENSE +0 -0
  48. {ostruct_cli-0.8.29.dist-info → ostruct_cli-1.0.1.dist-info}/WHEEL +0 -0
  49. {ostruct_cli-0.8.29.dist-info → ostruct_cli-1.0.1.dist-info}/entry_points.txt +0 -0
ostruct/cli/code_interpreter.py
@@ -7,10 +7,20 @@ and integrating code execution capabilities with the OpenAI Responses API.
  import logging
  import os
  from pathlib import Path
- from typing import Any, Dict, List, Optional
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional

  from openai import AsyncOpenAI

+ from .errors import (
+     DownloadError,
+     DownloadFileNotFoundError,
+     DownloadNetworkError,
+     DownloadPermissionError,
+ )
+
+ if TYPE_CHECKING:
+     from .upload_manager import SharedUploadManager
+
  logger = logging.getLogger(__name__)


@@ -18,17 +28,22 @@ class CodeInterpreterManager:
      """Manager for Code Interpreter file uploads and tool integration."""

      def __init__(
-         self, client: AsyncOpenAI, config: Optional[Dict[str, Any]] = None
-     ):
+         self,
+         client: AsyncOpenAI,
+         config: Optional[Dict[str, Any]] = None,
+         upload_manager: Optional["SharedUploadManager"] = None,
+     ) -> None:
          """Initialize Code Interpreter manager.

          Args:
              client: AsyncOpenAI client instance
              config: Code interpreter configuration dict
+             upload_manager: Optional shared upload manager for deduplication
          """
          self.client = client
          self.uploaded_file_ids: List[str] = []
          self.config = config or {}
+         self.upload_manager = upload_manager

      async def upload_files_for_code_interpreter(
          self, files: List[str]
@@ -83,6 +98,32 @@ class CodeInterpreterManager:
          self.uploaded_file_ids.extend(file_ids)
          return file_ids

+     async def get_files_from_shared_manager(self) -> List[str]:
+         """Get file IDs from shared upload manager for Code Interpreter.
+
+         Returns:
+             List of OpenAI file IDs for Code Interpreter use
+         """
+         if not self.upload_manager:
+             logger.warning("No shared upload manager available")
+             return []
+
+         # Trigger upload processing for code-interpreter tool
+         await self.upload_manager.upload_for_tool("code-interpreter")
+
+         # Get the uploaded file IDs
+         file_ids = list(
+             self.upload_manager.get_files_for_tool("code-interpreter")
+         )
+
+         # Track for cleanup
+         self.uploaded_file_ids.extend(file_ids)
+
+         logger.debug(
+             f"Retrieved {len(file_ids)} file IDs from shared manager for CI"
+         )
+         return file_ids
+
      def build_tool_config(self, file_ids: List[str]) -> dict:
          """Build Code Interpreter tool configuration.

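
Taken together with the new constructor parameter above, the intended call pattern for the shared-upload path looks roughly like the sketch below. Only the `CodeInterpreterManager` calls (`get_files_from_shared_manager`, `build_tool_config`) appear in this diff; the import paths and the `SharedUploadManager(client)` construction are assumptions based on the new `ostruct/cli/upload_manager.py` module listed above.

```python
# Sketch only: SharedUploadManager construction and attachment registration
# are assumed; the manager calls below are the ones added in this diff.
import asyncio

from openai import AsyncOpenAI

from ostruct.cli.code_interpreter import CodeInterpreterManager
from ostruct.cli.upload_manager import SharedUploadManager  # new module in 1.0.1


async def main() -> None:
    client = AsyncOpenAI()
    shared = SharedUploadManager(client)  # assumed constructor signature
    # ... attachments would be registered with `shared` here (not shown in this diff) ...

    manager = CodeInterpreterManager(client, upload_manager=shared)
    file_ids = await manager.get_files_from_shared_manager()  # deduplicated uploads
    tool_config = manager.build_tool_config(file_ids)
    print(tool_config)


asyncio.run(main())
```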
@@ -187,7 +228,14 @@ class CodeInterpreterManager:

          # Ensure output directory exists
          output_path = Path(output_dir)
-         output_path.mkdir(exist_ok=True)
+         try:
+             output_path.mkdir(exist_ok=True)
+         except PermissionError as e:
+             raise DownloadPermissionError(str(output_path)) from e
+         except OSError as e:
+             raise DownloadError(
+                 f"Failed to create download directory: {e}"
+             ) from e

          # Collect file annotations using new method
          annotations = self._collect_file_annotations(response)
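
The `DownloadPermissionError`/`DownloadError` pair raised here (and the network and not-found variants used further down) come from the expanded `ostruct/cli/errors.py`, which is not shown in this excerpt. A plausible shape for that hierarchy, inferred only from how the classes are constructed and re-raised in this file, would be:

```python
# Assumed sketch of the download error hierarchy; only the class names and the
# constructor arguments used in code_interpreter.py are taken from this diff.
from typing import Optional


class DownloadError(Exception):
    """Base class for Code Interpreter download failures (assumed)."""


class DownloadPermissionError(DownloadError):
    def __init__(self, path: str) -> None:
        super().__init__(f"Permission denied while writing to: {path}")


class DownloadFileNotFoundError(DownloadError):
    def __init__(self, file_id: str) -> None:
        super().__init__(f"Generated file not found: {file_id}")


class DownloadNetworkError(DownloadError):
    def __init__(self, file_id: str, original_error: Optional[Exception] = None) -> None:
        super().__init__(f"Network error while downloading {file_id}: {original_error}")
        self.original_error = original_error
```

The bare `except DownloadError: raise` in the next hunk only makes sense if the specific errors subclass `DownloadError`, which is why they are modelled that way here.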
@@ -292,21 +340,156 @@ class CodeInterpreterManager:
                  )
                  file_content = file_content_resp.read()

-                 # Save to local file
+                 # Handle file naming conflicts
                  local_path = output_path / filename
-                 with open(local_path, "wb") as f:
-                     f.write(file_content)
+                 resolved_path = self._handle_file_conflict(local_path)

-                 downloaded_paths.append(str(local_path))
-                 logger.info(f"Downloaded generated file: {local_path}")
+                 if resolved_path is None:
+                     # Skip this file according to conflict resolution strategy
+                     logger.info(f"Skipping existing file: {local_path}")
+                     continue

+                 # Save to resolved local file path
+                 try:
+                     with open(resolved_path, "wb") as f:
+                         f.write(file_content)
+                 except PermissionError as e:
+                     raise DownloadPermissionError(
+                         str(resolved_path.parent)
+                     ) from e
+                 except OSError as e:
+                     raise DownloadError(
+                         f"Failed to write file {resolved_path}: {e}"
+                     ) from e
+
+                 # Validate the downloaded file
+                 self._validate_downloaded_file(resolved_path)
+
+                 downloaded_paths.append(str(resolved_path))
+                 logger.info(f"Downloaded generated file: {resolved_path}")
+
+             except DownloadError:
+                 # Re-raise download-specific errors without modification
+                 raise
+             except FileNotFoundError as e:
+                 raise DownloadFileNotFoundError(file_id) from e
              except Exception as e:
-                 logger.error(f"Failed to download file {file_id}: {e}")
-                 # Continue with other files instead of raising
-                 continue
+                 # Check if it's a network-related error
+                 if any(
+                     keyword in str(e).lower()
+                     for keyword in ["network", "connection", "timeout", "http"]
+                 ):
+                     raise DownloadNetworkError(
+                         file_id, original_error=e
+                     ) from e
+                 else:
+                     logger.error(f"Failed to download file {file_id}: {e}")
+                     # Continue with other files instead of raising
+                     continue

          return downloaded_paths

+     def _handle_file_conflict(self, local_path: Path) -> Optional[Path]:
+         """Handle file naming conflicts based on configuration.
+
+         Args:
+             local_path: The original path where file would be saved
+
+         Returns:
+             Resolved path where file should be saved, or None to skip
+         """
+         if not local_path.exists():
+             return local_path
+
+         strategy = self.config.get("duplicate_outputs", "overwrite")
+
+         if strategy == "overwrite":
+             logger.info(f"Overwriting existing file: {local_path}")
+             return local_path
+
+         elif strategy == "rename":
+             # Generate unique name: file.txt -> file_1.txt, file_2.txt, etc.
+             counter = 1
+             stem = local_path.stem
+             suffix = local_path.suffix
+             parent = local_path.parent
+
+             while True:
+                 new_path = parent / f"{stem}_{counter}{suffix}"
+                 if not new_path.exists():
+                     logger.info(f"File exists, using: {new_path}")
+                     return new_path
+                 counter += 1
+
+         elif strategy == "skip":
+             logger.info(f"File exists, skipping: {local_path}")
+             return None  # Signal to skip this file
+
+         # Default to overwrite for unknown strategies
+         logger.warning(
+             f"Unknown duplicate_outputs strategy '{strategy}', defaulting to overwrite"
+         )
+         return local_path
+
+     def _validate_downloaded_file(self, file_path: Path) -> None:
+         """Perform basic validation of downloaded files.
+
+         Args:
+             file_path: Path to the downloaded file
+
+         Note:
+             This only logs warnings, it does not block downloads.
+         """
+         validation_level = self.config.get("output_validation", "basic")
+
+         if validation_level == "off":
+             return
+
+         try:
+             # Check file size (prevent huge files)
+             size = file_path.stat().st_size
+             size_mb = size / (1024 * 1024)
+
+             if size_mb > 100:  # 100MB limit
+                 logger.warning(
+                     f"Large file downloaded: {file_path} ({size_mb:.1f}MB)"
+                 )
+
+             # Check for potentially dangerous file types
+             dangerous_extensions = {
+                 ".exe",
+                 ".bat",
+                 ".sh",
+                 ".cmd",
+                 ".com",
+                 ".scr",
+                 ".vbs",
+                 ".js",
+             }
+             if file_path.suffix.lower() in dangerous_extensions:
+                 logger.warning(
+                     f"Potentially dangerous file type downloaded: {file_path} "
+                     f"(extension: {file_path.suffix})"
+                 )
+
+             # In strict mode, perform additional checks
+             if validation_level == "strict":
+                 # Check for hidden files
+                 if file_path.name.startswith("."):
+                     logger.warning(f"Hidden file downloaded: {file_path}")
+
+                 # Check for files with multiple extensions (potential masquerading)
+                 parts = file_path.name.split(".")
+                 if len(parts) > 2:
+                     logger.warning(
+                         f"File with multiple extensions downloaded: {file_path} "
+                         f"(could be masquerading)"
+                     )
+
+         except Exception as e:
+             logger.debug(f"Error during file validation: {e}")
+             # Don't fail downloads due to validation errors
+
      def _extract_filename_from_message(self, msg: Any) -> str:
          """Extract filename from message content if available.

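
Both new helpers are driven purely by two keys in the `config` dict passed to the manager: `duplicate_outputs` and `output_validation`. Below is a minimal sketch of exercising the conflict-resolution strategies; the file path and the direct call to the private helper are illustrative only.

```python
# Illustrative only: exercises the config keys read by the new helpers.
from pathlib import Path

from openai import AsyncOpenAI

from ostruct.cli.code_interpreter import CodeInterpreterManager

manager = CodeInterpreterManager(
    AsyncOpenAI(api_key="sk-placeholder"),  # placeholder key; no API call is made here
    config={
        "duplicate_outputs": "rename",   # "overwrite" (default) | "rename" | "skip"
        "output_validation": "strict",   # "off" | "basic" (default) | "strict"
    },
)

target = Path("downloads/report.csv")
# With "rename", an existing downloads/report.csv resolves to downloads/report_1.csv;
# with "skip" the helper returns None and the file is not written;
# with "overwrite" (or any unknown value) the original path is returned.
print(manager._handle_file_conflict(target))
```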
ostruct/cli/commands/__init__.py
@@ -3,7 +3,6 @@
  import click

  from .list_models import list_models
- from .quick_ref import quick_reference
  from .run import run
  from .update_registry import update_registry

@@ -15,7 +14,6 @@ def create_command_group() -> click.Group:

      # Add all commands to the group
      group.add_command(run)
-     group.add_command(quick_reference)
      group.add_command(update_registry)
      group.add_command(list_models)

@@ -25,7 +23,6 @@ def create_command_group() -> click.Group:
  # Export commands for easy importing
  __all__ = [
      "run",
-     "quick_reference",
      "update_registry",
      "list_models",
      "create_command_group",
ostruct/cli/commands/run.py
@@ -4,9 +4,10 @@ import asyncio
  import json
  import logging
  import sys
- from typing import Any
+ from pathlib import Path
+ from typing import Any, Tuple

- import click
+ import rich_click as click

  from ..click_options import all_options
  from ..config import OstructConfig
@@ -24,29 +25,12 @@ from ..types import CLIParams
  logger = logging.getLogger(__name__)


- def _emit_deprecation_warnings(params: CLIParams) -> None:
-     """Emit deprecation warnings for legacy tool-specific flags."""
-     import warnings
-
-     # Web Search flags
-     if params.get("web_search"):
-         warnings.warn(
-             "The --web-search flag is deprecated and will be removed in v0.9.0. "
-             "Use --enable-tool web-search instead.",
-             DeprecationWarning,
-             stacklevel=3,
-         )
-
-     if params.get("no_web_search"):
-         warnings.warn(
-             "The --no-web-search flag is deprecated and will be removed in v0.9.0. "
-             "Use --disable-tool web-search instead.",
-             DeprecationWarning,
-             stacklevel=3,
-         )
-
-
- @click.command()
+ @click.command(
+     cls=click.RichCommand,
+     context_settings={
+         "help_option_names": ["-h", "--help"],
+     },
+ )
  @click.argument("task_template", type=click.Path(exists=True))
  @click.argument("schema_file", type=click.Path(exists=True))
  @all_options
@@ -57,50 +41,46 @@ def run(
      schema_file: str,
      **kwargs: Any,
  ) -> None:
-     """Run structured output generation with multi-tool integration.
+     """Transform unstructured inputs into structured JSON using OpenAI APIs, Jinja2 templates, and powerful tool integrations.
+
+     🚀 QUICK START

-     \b
-     📁 FILE ROUTING OPTIONS:
+       ostruct run template.j2 schema.json -V name=value

-     Template Access Only:
-       -ft, --file-for-template FILE           Files available in template only
-       -dt, --dir-for-template DIR             Directories for template access
+     📎 FILE ATTACHMENT

-     Code Interpreter (execution & analysis):
-       -fc, --file-for-code-interpreter FILE   Upload files for code execution
-       -dc, --dir-for-code-interpreter DIR     Upload directories for analysis
+       --file data file.txt          Template access (default)

-     File Search (document retrieval):
-       -fs, --file-for-file-search FILE        Upload files for vector search
-       -ds, --dir-for-search DIR               Upload directories for search
+       --file ci:data data.csv       Code Interpreter upload

-     Advanced Routing:
-       --file-for TOOL PATH                    Route files to specific tools
-       Example: --file-for code-interpreter data.json
+       --file fs:docs manual.pdf     File Search upload

-     \b
-     🔧 TOOL INTEGRATION:
+     🔧 TOOL INTEGRATION

-     MCP Servers:
-       --mcp-server [LABEL@]URL                Connect to MCP server
-       Example: --mcp-server deepwiki@https://mcp.deepwiki.com/sse
+       --enable-tool code-interpreter    Code execution & analysis

-     \b
-     ⚡ EXAMPLES:
+       --enable-tool file-search         Document search & retrieval

-     Basic usage:
-       ostruct run template.j2 schema.json -V name=value
+       --enable-tool web-search          Real-time web information

-     Multi-tool explicit routing:
-       ostruct run analysis.j2 schema.json -fc data.csv -fs docs.pdf -ft config.yaml
+     🔧 ENVIRONMENT VARIABLES

-     Legacy compatibility (still works):
-       ostruct run template.j2 schema.json -f config main.py -d src ./src
+     ```text
+     Core API Configuration:
+       OPENAI_API_KEY                  OpenAI API authentication key
+       OPENAI_API_BASE                 Custom OpenAI API base URL

-     \b
-     Arguments:
-       TASK_TEMPLATE    Path to Jinja2 template file
-       SCHEMA_FILE      Path to JSON schema file defining output structure
+     Template Processing Limits:
+       OSTRUCT_TEMPLATE_FILE_LIMIT     Max individual file size (default: 64KB)
+       OSTRUCT_TEMPLATE_TOTAL_LIMIT    Max total files size (default: 1MB)
+       OSTRUCT_TEMPLATE_PREVIEW_LIMIT  Template preview size limit (default: 4096)
+
+     System Behavior:
+       OSTRUCT_DISABLE_REGISTRY_UPDATE_CHECKS  Disable model registry updates
+       OSTRUCT_MCP_URL_<name>          Custom MCP server URLs
+     ```
+
+     See organized option groups below for complete functionality.
      """
      try:
          # Convert Click parameters to typed dict
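
Since the limits documented in the new help text are read from plain environment variables, they can be overridden per invocation. The sketch below is hedged: the variable names come from the help text above, the assumption that values are byte counts is not confirmed by this diff, and `template.j2`/`schema.json` are the placeholder files from the QUICK START example.

```python
# Sketch: overriding the documented template-processing limits for one run.
# Value format (plain byte counts) is an assumption, not confirmed by this diff.
import os
import subprocess

env = os.environ.copy()
env["OSTRUCT_TEMPLATE_FILE_LIMIT"] = str(128 * 1024)        # assumed: bytes (128 KB)
env["OSTRUCT_TEMPLATE_TOTAL_LIMIT"] = str(2 * 1024 * 1024)  # assumed: bytes (2 MB)

# Same invocation as the QUICK START example in the help text above.
subprocess.run(
    ["ostruct", "run", "template.j2", "schema.json", "-V", "name=value"],
    env=env,
    check=False,
)
```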
@@ -113,9 +93,19 @@ def run(
          for k, v in kwargs.items():
              params[k] = v  # type: ignore[literal-required]

-         # Process tool toggle flags (Step 2: Conflict guard & normalisation)
-         from typing import Tuple
+         # UNIFIED GUIDELINES: Validate JSON flag combinations
+         if kwargs.get("dry_run_json") and not kwargs.get("dry_run"):
+             raise click.BadOptionUsage(
+                 "--dry-run-json", "--dry-run-json requires --dry-run"
+             )

+         if kwargs.get("run_summary_json") and kwargs.get("dry_run"):
+             raise click.BadOptionUsage(
+                 "--run-summary-json",
+                 "--run-summary-json cannot be used with --dry-run",
+             )
+
+         # Process tool toggle flags (Step 2: Conflict guard & normalisation)
          enabled_tools_raw: Tuple[str, ...] = params.get("enabled_tools", ())  # type: ignore[assignment]
          disabled_tools_raw: Tuple[str, ...] = params.get("disabled_tools", ())  # type: ignore[assignment]
@@ -144,9 +134,6 @@ def run(
          params["_enabled_tools"] = enabled_tools  # type: ignore[typeddict-unknown-key]
          params["_disabled_tools"] = disabled_tools  # type: ignore[typeddict-unknown-key]

-         # Emit deprecation warnings for legacy tool-specific flags
-         _emit_deprecation_warnings(params)
-
          # Apply configuration defaults if values not explicitly provided
          # Check for command-level config option first, then group-level
          command_config = kwargs.get("config")
@@ -158,6 +145,246 @@ def run(
          if params.get("model") is None:
              params["model"] = config.get_model_default()

+         # UNIFIED GUIDELINES: Perform basic validation even in dry-run mode
+         if kwargs.get("dry_run"):
+             # Import validation functions
+             from ..attachment_processor import (
+                 AttachmentSpec,
+                 ProcessedAttachments,
+             )
+             from ..plan_assembly import PlanAssembler
+             from ..plan_printing import PlanPrinter
+             from ..validators import validate_inputs
+
+             # Variables to track validation state
+             validation_passed = True
+             template_warning = None
+             original_template_path = task_template
+
+             # Process attachments for the dry-run plan
+             processed_attachments = ProcessedAttachments()
+
+             # Process --file attachments
+             files = kwargs.get("attaches", [])
+             for file_spec in files:
+                 spec = AttachmentSpec(
+                     alias=file_spec["alias"],
+                     path=file_spec["path"],
+                     targets=file_spec["targets"],
+                     recursive=file_spec.get("recursive", False),
+                     pattern=file_spec.get("pattern"),
+                 )
+                 processed_attachments.alias_map[spec.alias] = spec
+
+                 # Route to appropriate lists based on targets
+                 if "prompt" in spec.targets:
+                     processed_attachments.template_files.append(spec)
+                 if "code-interpreter" in spec.targets or "ci" in spec.targets:
+                     processed_attachments.ci_files.append(spec)
+                 if "file-search" in spec.targets or "fs" in spec.targets:
+                     processed_attachments.fs_files.append(spec)
+
+             # Process --dir attachments
+             dirs = kwargs.get("dirs", [])
+             for dir_spec in dirs:
+                 spec = AttachmentSpec(
+                     alias=dir_spec["alias"],
+                     path=dir_spec["path"],
+                     targets=dir_spec["targets"],
+                     recursive=dir_spec.get("recursive", True),
+                     pattern=dir_spec.get("pattern"),
+                 )
+                 processed_attachments.alias_map[spec.alias] = spec
+
+                 # Route to appropriate lists based on targets
+                 if "prompt" in spec.targets:
+                     processed_attachments.template_dirs.append(spec)
+                 if "code-interpreter" in spec.targets or "ci" in spec.targets:
+                     processed_attachments.ci_dirs.append(spec)
+                 if "file-search" in spec.targets or "fs" in spec.targets:
+                     processed_attachments.fs_dirs.append(spec)
+
+             try:
+                 # Perform the same input validation as live runs (async)
+                 logger.debug("Performing dry-run validation")
+
+                 # Run async validation
+                 loop = asyncio.new_event_loop()
+                 asyncio.set_event_loop(loop)
+                 try:
+                     # Use the same params structure as the live run
+                     validation_result = loop.run_until_complete(
+                         validate_inputs(params)
+                     )
+                     # Extract components from the tuple
+                     (
+                         security_manager,
+                         validated_template,
+                         schema,
+                         template_context,
+                         env,
+                         template_path,
+                     ) = validation_result
+
+                     # Update task_template with validated content
+                     task_template = validated_template
+
+                     # Perform template rendering validation to catch binary file access errors
+                     logger.debug("Performing template rendering validation")
+                     from ..template_processor import process_templates
+
+                     system_prompt, user_prompt = loop.run_until_complete(
+                         process_templates(
+                             params,
+                             task_template,
+                             template_context,
+                             env,
+                             template_path,
+                         )
+                     )
+                     logger.debug("Template rendering validation passed")
+
+                     # Check for template warnings by processing system prompt
+                     from typing import cast
+
+                     from ..template_processor import process_system_prompt
+
+                     result = cast(
+                         Tuple[str, bool],
+                         process_system_prompt(
+                             task_template,
+                             params.get("system_prompt"),
+                             params.get("system_prompt_file"),
+                             template_context,
+                             env,
+                             params.get("ignore_task_sysprompt", False),
+                             template_path,
+                         ),
+                     )
+                     _system_prompt_check: str
+                     template_has_conflict: bool
+                     _system_prompt_check, template_has_conflict = result
+
+                     if template_has_conflict:
+                         template_warning = (
+                             "Template has YAML frontmatter with 'system_prompt' field, but --sys-file was also provided. "
+                             "Using --sys-file and ignoring YAML frontmatter system_prompt."
+                         )
+
+                 finally:
+                     loop.close()
+
+             except Exception as e:
+                 validation_passed = False
+                 template_warning = str(e)
+                 logger.error(f"Dry-run validation failed: {e}")
+
+                 # For critical errors, exit immediately with proper error handling
+                 if not isinstance(e, (ValueError, FileNotFoundError)):
+                     handle_error(e)
+                     if hasattr(e, "exit_code"):
+                         ctx.exit(int(e.exit_code))
+                     else:
+                         ctx.exit(1)
+
+             # Build plan with warning information
+             plan_kwargs = {
+                 "allowed_paths": params.get("allowed_paths", []),
+                 "cost_estimate": None,  # We don't have cost estimate in dry run
+                 "template_warning": template_warning,
+                 "original_template_path": original_template_path,
+                 "validation_passed": validation_passed,
+             }
+
+             # Add enabled tools from routing result and explicit tool toggles
+             plan_enabled_tools: set[str] = set()
+
+             # Get tools from routing result (auto-enabled by file attachments)
+             routing_result = params.get("_routing_result")
+             if routing_result and hasattr(routing_result, "enabled_tools"):
+                 plan_enabled_tools.update(routing_result.enabled_tools)
+
+             # Add explicitly enabled tools
+             explicit_enabled = params.get("_enabled_tools", set())
+             if isinstance(explicit_enabled, set):
+                 plan_enabled_tools.update(explicit_enabled)
+
+             # Remove explicitly disabled tools
+             explicit_disabled = params.get("_disabled_tools", set())
+             if isinstance(explicit_disabled, set):
+                 plan_enabled_tools -= explicit_disabled
+
+             if plan_enabled_tools:
+                 plan_kwargs["enabled_tools"] = plan_enabled_tools
+
+             # Add CI configuration for download validation
+             if "code-interpreter" in plan_enabled_tools:
+                 config_path = params.get("config")
+                 config = OstructConfig.load(
+                     config_path
+                     if isinstance(config_path, (str, Path))
+                     else None
+                 )
+                 ci_config = config.get_code_interpreter_config()
+                 plan_kwargs["ci_config"] = ci_config
+
+             plan = PlanAssembler.build_execution_plan(
+                 processed_attachments=processed_attachments,
+                 template_path=original_template_path,  # Use original path, not template content
+                 schema_path=schema_file,
+                 variables=ctx.obj.get("vars", {}) if ctx.obj else {},
+                 security_mode=kwargs.get("path_security", "permissive"),
+                 model=params.get("model", "gpt-4o"),
+                 **plan_kwargs,
+             )
+
+             if kwargs.get("dry_run_json"):
+                 # Output JSON to stdout
+                 click.echo(json.dumps(plan, indent=2))
+             else:
+                 # Output human-readable to stdout
+                 PlanPrinter.human(plan)
+
+             # Add debug output for tool states if debug mode is enabled
+             if kwargs.get("debug"):
+                 click.echo("\n--- DEBUG TOOL STATES ---")
+                 debug_enabled_tools = cast(
+                     set[str], params.get("_enabled_tools", set())
+                 )
+                 debug_disabled_tools = cast(
+                     set[str], params.get("_disabled_tools", set())
+                 )
+
+                 # Check web search state
+                 web_search_enabled = "web-search" in debug_enabled_tools
+                 if "web-search" in debug_disabled_tools:
+                     web_search_enabled = False
+                 click.echo(
+                     f"web_search_enabled: bool ({web_search_enabled})"
+                 )
+
+                 # Check code interpreter state
+                 ci_enabled = "code-interpreter" in debug_enabled_tools
+                 if "code-interpreter" in debug_disabled_tools:
+                     ci_enabled = False
+                 # Also enable if CI attachments present
+                 if plan.get("tools", {}).get("code_interpreter", False):
+                     ci_enabled = True
+                 click.echo(
+                     f"code_interpreter_enabled: bool ({ci_enabled})"
+                 )
+
+             # Add completion message based on validation result
+             if validation_passed:
+                 click.echo("\nDry run completed successfully")
+             else:
+                 click.echo(
+                     "\nDry run completed with warnings - see template status above"
+                 )
+
+             # Exit with appropriate code
+             ctx.exit(0 if validation_passed else 1)
+
          # Run the async function synchronously
          loop = asyncio.new_event_loop()
          asyncio.set_event_loop(loop)
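
The new dry-run branch pairs with the flag validation added earlier (`--dry-run-json` requires `--dry-run`) to expose a machine-readable execution plan. Below is a sketch of consuming it; the plan's exact JSON shape is not visible in this diff, so it is treated as an opaque dict, and the template, schema, and attachment names are placeholders.

```python
# Sketch: capture the JSON execution plan emitted by the new dry-run branch.
import json
import subprocess

result = subprocess.run(
    [
        "ostruct", "run", "template.j2", "schema.json",
        "--file", "ci:data", "data.csv",   # attachment syntax from the new help text
        "--dry-run", "--dry-run-json",     # pairing enforced by the new validation
    ],
    capture_output=True,
    text=True,
)

plan = json.loads(result.stdout)           # structure not specified in this diff
print(sorted(plan.keys()))
print("exit code:", result.returncode)     # 0 on success, 1 if validation warned
```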