ostruct-cli 0.7.1__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. ostruct/cli/__init__.py +21 -3
  2. ostruct/cli/base_errors.py +1 -1
  3. ostruct/cli/cli.py +66 -1983
  4. ostruct/cli/click_options.py +460 -28
  5. ostruct/cli/code_interpreter.py +238 -0
  6. ostruct/cli/commands/__init__.py +32 -0
  7. ostruct/cli/commands/list_models.py +128 -0
  8. ostruct/cli/commands/quick_ref.py +50 -0
  9. ostruct/cli/commands/run.py +137 -0
  10. ostruct/cli/commands/update_registry.py +71 -0
  11. ostruct/cli/config.py +277 -0
  12. ostruct/cli/cost_estimation.py +134 -0
  13. ostruct/cli/errors.py +310 -6
  14. ostruct/cli/exit_codes.py +1 -0
  15. ostruct/cli/explicit_file_processor.py +548 -0
  16. ostruct/cli/field_utils.py +69 -0
  17. ostruct/cli/file_info.py +42 -9
  18. ostruct/cli/file_list.py +301 -102
  19. ostruct/cli/file_search.py +455 -0
  20. ostruct/cli/file_utils.py +47 -13
  21. ostruct/cli/mcp_integration.py +541 -0
  22. ostruct/cli/model_creation.py +150 -1
  23. ostruct/cli/model_validation.py +204 -0
  24. ostruct/cli/progress_reporting.py +398 -0
  25. ostruct/cli/registry_updates.py +14 -9
  26. ostruct/cli/runner.py +1418 -0
  27. ostruct/cli/schema_utils.py +113 -0
  28. ostruct/cli/services.py +626 -0
  29. ostruct/cli/template_debug.py +748 -0
  30. ostruct/cli/template_debug_help.py +162 -0
  31. ostruct/cli/template_env.py +15 -6
  32. ostruct/cli/template_filters.py +55 -3
  33. ostruct/cli/template_optimizer.py +474 -0
  34. ostruct/cli/template_processor.py +1080 -0
  35. ostruct/cli/template_rendering.py +69 -34
  36. ostruct/cli/token_validation.py +286 -0
  37. ostruct/cli/types.py +78 -0
  38. ostruct/cli/unattended_operation.py +269 -0
  39. ostruct/cli/validators.py +386 -3
  40. {ostruct_cli-0.7.1.dist-info → ostruct_cli-0.8.0.dist-info}/LICENSE +2 -0
  41. ostruct_cli-0.8.0.dist-info/METADATA +633 -0
  42. ostruct_cli-0.8.0.dist-info/RECORD +69 -0
  43. {ostruct_cli-0.7.1.dist-info → ostruct_cli-0.8.0.dist-info}/WHEEL +1 -1
  44. ostruct_cli-0.7.1.dist-info/METADATA +0 -369
  45. ostruct_cli-0.7.1.dist-info/RECORD +0 -45
  46. {ostruct_cli-0.7.1.dist-info → ostruct_cli-0.8.0.dist-info}/entry_points.txt +0 -0
ostruct/cli/config.py ADDED
@@ -0,0 +1,277 @@
1
+ """Configuration management for ostruct CLI."""
2
+
3
+ import logging
4
+ import os
5
+ from pathlib import Path
6
+ from typing import Any, Dict, Optional, Union
7
+
8
+ import yaml
9
+ from pydantic import BaseModel, Field, field_validator
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
+
14
class WebSearchUserLocationConfig(BaseModel):
    """Optional geographic hints attached to web search requests.

    Every field defaults to ``None``; unset fields simply contribute no
    location signal.
    """

    country: Optional[str] = None  # e.g. "US"
    city: Optional[str] = None  # e.g. "San Francisco"
    region: Optional[str] = None  # e.g. "California"
20
+
21
+
22
class WebSearchToolConfig(BaseModel):
    """Settings that control the web search tool."""

    # Opt-in: web search stays disabled unless explicitly turned on.
    enable_by_default: bool = False
    # Optional geographic hints for more relevant results.
    user_location: Optional[WebSearchUserLocationConfig] = None
    # Amount of search context the model gathers; validated below.
    search_context_size: Optional[str] = Field(default=None)

    @field_validator("search_context_size")
    @classmethod
    def validate_search_context_size(cls, v: Optional[str]) -> Optional[str]:
        """Accept only the supported context sizes (or ``None``)."""
        if v is None or v in ("low", "medium", "high"):
            return v
        raise ValueError(
            "search_context_size must be one of: low, medium, high"
        )
37
+
38
+
39
class ToolsConfig(BaseModel):
    """Per-tool default settings."""

    # Code interpreter: auto-download generated files into ./output.
    code_interpreter: Dict[str, Any] = Field(
        default_factory=lambda: dict(
            auto_download=True,
            output_directory="./output",
        )
    )
    # File search: cap on the number of returned results.
    file_search: Dict[str, Any] = Field(
        default_factory=lambda: dict(max_results=10)
    )
    # Web search: structured sub-config with its own validation.
    web_search: WebSearchToolConfig = Field(
        default_factory=WebSearchToolConfig
    )
54
+
55
+
56
class ModelsConfig(BaseModel):
    """Model-selection settings."""

    # Model used whenever the caller does not name one explicitly.
    default: str = "gpt-4o"
60
+
61
+
62
class OperationConfig(BaseModel):
    """Runtime behavior settings for a single operation."""

    timeout_minutes: int = 60  # wall-clock limit per run
    retry_attempts: int = 3  # retries on failure
    require_approval: str = "never"  # never | always | expensive

    @field_validator("require_approval")
    @classmethod
    def validate_approval_setting(cls, v: str) -> str:
        """Ensure the approval policy is one of the supported values."""
        valid_values = ["never", "always", "expensive"]
        if v in valid_values:
            return v
        raise ValueError(f"require_approval must be one of {valid_values}")
76
+
77
+
78
class LimitsConfig(BaseModel):
    """Cost guardrails applied to each run."""

    max_cost_per_run: float = 10.00  # ceiling in USD for one run
    warn_expensive_operations: bool = True  # warn when approaching the ceiling
83
+
84
+
85
class OstructConfig(BaseModel):
    """Main configuration class for ostruct.

    Aggregates the per-section models and knows how to load itself from
    a YAML file (``ostruct.yaml`` in the CWD, else
    ``~/.ostruct/config.yaml``) with environment-variable overrides
    applied on top.
    """

    models: ModelsConfig = Field(default_factory=ModelsConfig)
    tools: ToolsConfig = Field(default_factory=ToolsConfig)
    mcp: Dict[str, str] = Field(default_factory=dict)
    operation: OperationConfig = Field(default_factory=OperationConfig)
    limits: LimitsConfig = Field(default_factory=LimitsConfig)

    @classmethod
    def load(
        cls, config_path: Optional[Union[str, Path]] = None
    ) -> "OstructConfig":
        """Load configuration from YAML file with smart defaults.

        Args:
            config_path: Path to configuration file. If None, looks for
                ostruct.yaml in the current directory, then
                ``~/.ostruct/config.yaml``.

        Returns:
            OstructConfig instance with loaded settings and defaults.
        """
        config_data: Dict[str, Any] = {}

        # Resolve which file, if any, to read.
        if config_path is None:
            current_config = Path("ostruct.yaml")
            home_config = Path.home() / ".ostruct" / "config.yaml"

            if current_config.exists():
                config_path = current_config
            elif home_config.exists():
                config_path = home_config
            else:
                logger.info("No configuration file found, using defaults")
                config_path = None
        else:
            config_path = Path(config_path)

        # Read the file if present; on any read/parse error fall back to
        # defaults rather than aborting the CLI.
        if config_path is not None and config_path.exists():
            try:
                with open(config_path, "r") as f:
                    config_data = yaml.safe_load(f) or {}
                logger.info(f"Loaded configuration from {config_path}")
            except Exception as e:
                logger.warning(
                    f"Failed to load configuration from {config_path}: {e}"
                )
                logger.info("Using default configuration")
                config_data = {}

        # Fix: env overrides now apply even when no config file exists.
        # The previous early ``return cls()`` skipped this step, so
        # MCP_*_URL environment variables were silently ignored whenever
        # the user had no config file.
        config_data = cls._apply_env_overrides(config_data)

        return cls(**config_data)

    @staticmethod
    def _apply_env_overrides(config_data: Dict[str, Any]) -> Dict[str, Any]:
        """Apply environment variable overrides for sensitive settings.

        Mutates and returns ``config_data``; only the ``mcp`` section is
        touched.  The OpenAI API key is deliberately left in
        ``OPENAI_API_KEY`` and never copied into the config data.
        """
        mcp_config = config_data.setdefault("mcp", {})

        # Any MCP_<NAME>_URL variable registers (or overrides) a server.
        for key, value in os.environ.items():
            if key.startswith("MCP_") and key.endswith("_URL"):
                server_name = key[4:-4].lower()  # strip MCP_ / _URL
                mcp_config[server_name] = value

        # Built-in server shortcuts, honored only when not already set.
        # The walrus binding avoids the duplicated os.getenv() call of
        # the original implementation (which also carried unused
        # hard-coded URLs that were never read).
        for name in ("stripe", "shopify"):
            if name not in mcp_config and (
                url := os.getenv(f"MCP_{name.upper()}_URL")
            ):
                mcp_config[name] = url

        return config_data

    def get_model_default(self) -> str:
        """Get the default model to use."""
        return self.models.default

    def get_mcp_servers(self) -> Dict[str, str]:
        """Get configured MCP servers."""
        return self.mcp

    def get_code_interpreter_config(self) -> Dict[str, Any]:
        """Get code interpreter configuration."""
        return self.tools.code_interpreter

    def get_file_search_config(self) -> Dict[str, Any]:
        """Get file search configuration."""
        return self.tools.file_search

    def get_web_search_config(self) -> WebSearchToolConfig:
        """Get web search configuration."""
        return self.tools.web_search

    def should_require_approval(self, cost_estimate: float = 0.0) -> bool:
        """Determine if approval should be required for an operation.

        ``expensive`` mode requires approval once the estimate exceeds
        half of the configured per-run cost ceiling.
        """
        policy = self.operation.require_approval
        if policy == "always":
            return True
        if policy == "expensive":
            return cost_estimate > self.limits.max_cost_per_run * 0.5
        return False  # "never" (validator guarantees no other values)

    def is_within_cost_limits(self, cost_estimate: float) -> bool:
        """Check if operation is within configured cost limits."""
        return cost_estimate <= self.limits.max_cost_per_run

    def should_warn_expensive(self, cost_estimate: float) -> bool:
        """Check if expensive operation warning should be shown.

        Warns once the estimate passes 30% of the per-run ceiling,
        provided warnings are enabled.
        """
        return (
            self.limits.warn_expensive_operations
            and cost_estimate > self.limits.max_cost_per_run * 0.3
        )
219
+
220
+
221
def create_example_config() -> str:
    """Create example configuration YAML content.

    Returns a ready-to-write ``ostruct.yaml`` template whose values
    mirror the built-in defaults, so the generated file is safe to use
    unedited.  NOTE(review): the YAML nesting below was reconstructed
    from a formatting-mangled source — confirm indentation against the
    section models (ModelsConfig, ToolsConfig, etc.) before shipping.
    """
    return """# ostruct Configuration File
# This file configures default behavior for the ostruct CLI tool.
# All settings are optional - ostruct works with smart defaults.

# Model configuration
models:
  default: gpt-4o  # Default model to use

# Tool-specific settings
tools:
  code_interpreter:
    auto_download: true
    output_directory: "./output"

  file_search:
    max_results: 10

  web_search:
    enable_by_default: false  # Whether to enable web search by default
    search_context_size: medium  # Options: low, medium, high
    user_location:
      country: US  # Optional: country for geographically relevant results
      city: San Francisco  # Optional: city for local context
      region: California  # Optional: region/state for regional relevance

# MCP (Model Context Protocol) server configurations
# You can define shortcuts to commonly used MCP servers
mcp:
  # Built-in server shortcuts (uncomment to use)
  # stripe: "https://mcp.stripe.com"
  # shopify: "https://mcp.shopify.com"

  # Custom servers
  # my_server: "https://my-mcp-server.com"

# Operation settings
operation:
  timeout_minutes: 60
  retry_attempts: 3
  require_approval: never  # Options: never, always, expensive

# Cost and safety limits
limits:
  max_cost_per_run: 10.00
  warn_expensive_operations: true

# Environment Variables for Secrets:
# OPENAI_API_KEY - Your OpenAI API key
# MCP_<NAME>_URL - URL for custom MCP servers (e.g., MCP_STRIPE_URL)
"""
273
+
274
+
275
def get_config() -> OstructConfig:
    """Return the configuration, freshly loaded via :meth:`OstructConfig.load`."""
    return OstructConfig.load()
@@ -0,0 +1,134 @@
1
+ """Cost estimation functionality for ostruct CLI.
2
+
3
+ This module provides functionality to estimate the cost of API calls
4
+ before making them, helping users plan their usage and budget.
5
+ """
6
+
7
+ from typing import Optional
8
+
9
+ from openai_model_registry import ModelRegistry
10
+
11
# Static pricing mapping for major models (per 1K tokens, USD).
# These should be updated periodically or fetched from an external source.
MODEL_PRICING = {
    "gpt-4o": {"input": 0.0025, "output": 0.01},
    "gpt-4o-mini": {"input": 0.00015, "output": 0.0006},
    "gpt-4o-2024-05-13": {"input": 0.0025, "output": 0.01},
    "gpt-4o-mini-2024-07-18": {"input": 0.00015, "output": 0.0006},
    "o1": {"input": 0.015, "output": 0.06},
    "o1-mini": {"input": 0.003, "output": 0.012},
    "o1-2024-12-17": {"input": 0.015, "output": 0.06},
    "o1-mini-2024-09-12": {"input": 0.003, "output": 0.012},
    "o3": {"input": 0.015, "output": 0.06},  # Estimated
    "o3-mini": {"input": 0.003, "output": 0.012},  # Estimated
    "o4-mini": {"input": 0.003, "output": 0.012},  # Estimated
    "gpt-4.1": {"input": 0.0025, "output": 0.01},  # Estimated
    "gpt-4.1-mini": {"input": 0.00015, "output": 0.0006},  # Estimated
    "gpt-4.1-nano": {"input": 0.00015, "output": 0.0006},  # Estimated
    "gpt-4.5-preview": {"input": 0.0025, "output": 0.01},  # Estimated
}

# Fallback rates used when a model (and every prefix of it) is unknown.
_DEFAULT_PRICING = {"input": 0.0025, "output": 0.01}


def _lookup_pricing(model: str) -> dict:
    """Return per-1K-token pricing for *model*, with prefix fallback.

    Fix: the original fallback used ``model.split("-")[0]``, which turns
    "gpt-4o-2024-05-13" into "gpt" — a key that never exists — so dated
    variants of known models silently got default pricing.  We instead
    drop trailing "-" segments one at a time ("gpt-4o-2024-05-13" →
    "gpt-4o-2024" → "gpt-4o") until a known model is found.
    """
    pricing = MODEL_PRICING.get(model)
    if pricing is not None:
        return pricing
    parts = model.split("-")
    while len(parts) > 1:
        parts.pop()
        pricing = MODEL_PRICING.get("-".join(parts))
        if pricing is not None:
            return pricing
    return _DEFAULT_PRICING


def calculate_cost_estimate(
    model: str,
    input_tokens: int,
    output_tokens: Optional[int] = None,
    registry: Optional["ModelRegistry"] = None,
) -> float:
    """Calculate estimated cost for API call.

    Args:
        model: Model name
        input_tokens: Number of input tokens
        output_tokens: Number of output tokens (if None, uses max for model)
        registry: ModelRegistry instance (if None, creates new one)

    Returns:
        Estimated cost in USD
    """
    if output_tokens is None:
        # Only touch the registry when we actually need it; the original
        # instantiated ModelRegistry unconditionally, even when the
        # caller supplied output_tokens and the registry was never used.
        if registry is None:
            registry = ModelRegistry.get_instance()
        try:
            capabilities = registry.get_capabilities(model)
            output_tokens = capabilities.max_output_tokens
        except Exception:
            # Fallback if model capabilities not available.
            output_tokens = 4096

    pricing = _lookup_pricing(model)

    # Pricing table is expressed per 1K tokens.
    input_cost = (input_tokens / 1000) * pricing["input"]
    output_cost = (output_tokens / 1000) * pricing["output"]

    return input_cost + output_cost
79
+
80
+
81
def format_cost_breakdown(
    model: str,
    input_tokens: int,
    output_tokens: int,
    total_cost: float,
    context_window: int,
) -> str:
    """Render a human-readable token and cost summary.

    Args:
        model: Model name shown alongside the rate used.
        input_tokens: Number of input (prompt) tokens.
        output_tokens: Number of output (completion) tokens.
        total_cost: Total estimated cost in USD.
        context_window: Model's context window size in tokens.

    Returns:
        Multi-line string suitable for terminal display.
    """
    # Percentage of the context window the whole request would occupy.
    utilization = (input_tokens + output_tokens) / context_window * 100

    return (
        "📊 Token Analysis:\n"
        f" • Input tokens: {input_tokens:,}\n"
        f" • Max output tokens: {output_tokens:,}\n"
        f" • Context window: {context_window:,}\n"
        f" • Estimated cost: ${total_cost:.4f} (using {model} rates)\n"
        f" • Context utilization: {utilization:.1f}%"
    )
114
+
115
+
116
def check_cost_limits(
    estimated_cost: float, max_cost_per_run: Optional[float] = None
) -> Optional[str]:
    """Compare an estimated cost against the configured per-run limit.

    Args:
        estimated_cost: Estimated cost in USD.
        max_cost_per_run: Maximum allowed cost per run, or None for no limit.

    Returns:
        Warning message if cost exceeds limits, None otherwise.
    """
    # No limit configured means nothing to check.
    if max_cost_per_run is None:
        return None
    # At or under the ceiling: no warning.
    if estimated_cost <= max_cost_per_run:
        return None
    return (
        f"⚠️ Estimated cost (${estimated_cost:.4f}) exceeds configured "
        f"limit of ${max_cost_per_run:.4f}. Use --force to proceed anyway."
    )