ostruct-cli 0.3.0__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ostruct/cli/cli.py CHANGED
@@ -43,6 +43,8 @@ from openai import (
 )
 from openai_structured.client import (
     async_openai_structured_stream,
+    get_context_window_limit,
+    get_default_token_limit,
     supports_structured_output,
 )
 from openai_structured.errors import (
@@ -441,48 +443,6 @@ def estimate_tokens_for_chat(
     return num_tokens


-def get_default_token_limit(model: str) -> int:
-    """Get the default token limit for a given model.
-
-    Note: These limits are based on current OpenAI model specifications as of 2024 and may
-    need to be updated if OpenAI changes the models' capabilities.
-
-    Args:
-        model: The model name (e.g., 'gpt-4o', 'o1-mini', 'o3-mini')
-
-    Returns:
-        The default token limit for the model
-    """
-    if "o1-" in model:
-        return 100_000  # o1-mini supports up to 100K output tokens
-    elif "gpt-4o" in model:
-        return 16_384  # gpt-4o supports up to 16K output tokens
-    elif "o3-" in model:
-        return 16_384  # o3-mini supports up to 16K output tokens
-    else:
-        return 4_096  # default fallback
-
-
-def get_context_window_limit(model: str) -> int:
-    """Get the total context window limit for a given model.
-
-    Note: These limits are based on current OpenAI model specifications as of 2024 and may
-    need to be updated if OpenAI changes the models' capabilities.
-
-    Args:
-        model: The model name (e.g., 'gpt-4o', 'o1-mini', 'o3-mini')
-
-    Returns:
-        The context window limit for the model
-    """
-    if "o1-" in model:
-        return 200_000  # o1-mini supports 200K total context window
-    elif "gpt-4o" in model or "o3-" in model:
-        return 128_000  # gpt-4o and o3-mini support 128K context window
-    else:
-        return 8_192  # default fallback
-
-
 def validate_token_limits(
     model: str, total_tokens: int, max_token_limit: Optional[int] = None
 ) -> None:
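In 0.4.0 these two helpers are imported from `openai_structured.client` (see the first hunk) instead of being defined locally. A minimal usage sketch, assuming the library versions keep the behavior of the local copies removed above:

```python
from openai_structured.client import (
    get_context_window_limit,
    get_default_token_limit,
)

# Values per the removed local implementations; the library versions
# are assumed to return the same numbers.
get_context_window_limit("gpt-4o")   # 128_000 total context window
get_default_token_limit("gpt-4o")    # 16_384 max output tokens
get_context_window_limit("o1-mini")  # 200_000
get_default_token_limit("o1-mini")   # 100_000
```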
@@ -771,7 +731,9 @@ def _validate_path_mapping_internal(
         raise

     if security_manager:
-        if not security_manager.is_allowed_file(str(resolved_path)):
+        try:
+            security_manager.validate_path(str(resolved_path))
+        except PathSecurityError:
             raise PathSecurityError.from_expanded_paths(
                 original_path=str(path),
                 expanded_path=str(resolved_path),
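The check here flips from a boolean query (`is_allowed_file`) to a raising validator (`validate_path`), so the caller converts the exception into a richer one via `from_expanded_paths`. A sketch of a boolean adapter for call sites that still want a yes/no answer; `is_path_allowed` is a hypothetical helper, not part of the package:

```python
from ostruct.cli.errors import PathSecurityError  # assumed import location

def is_path_allowed(security_manager, path: str) -> bool:
    """Hypothetical adapter from the raising validate_path() API to a bool."""
    try:
        security_manager.validate_path(path)
        return True
    except PathSecurityError:
        return False
```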
@@ -1192,40 +1154,13 @@ def validate_security_manager(
     if base_dir is None:
         base_dir = os.getcwd()

-    # Default to empty list if allowed_dirs is None
-    if allowed_dirs is None:
-        allowed_dirs = []
-
-    # Add base directory if it exists
-    try:
-        base_dir_path = Path(base_dir).resolve()
-        if not base_dir_path.exists():
-            raise DirectoryNotFoundError(
-                f"Base directory not found: {base_dir}"
-            )
-        if not base_dir_path.is_dir():
-            raise DirectoryNotFoundError(
-                f"Base directory is not a directory: {base_dir}"
-            )
-        all_allowed_dirs = [str(base_dir_path)]
-    except OSError as e:
-        raise DirectoryNotFoundError(f"Invalid base directory: {e}")
+    # Create security manager with base directory
+    security_manager = SecurityManager(base_dir)

     # Add explicitly allowed directories
-    for dir_path in allowed_dirs:
-        try:
-            resolved_path = Path(dir_path).resolve()
-            if not resolved_path.exists():
-                raise DirectoryNotFoundError(
-                    f"Directory not found: {dir_path}"
-                )
-            if not resolved_path.is_dir():
-                raise DirectoryNotFoundError(
-                    f"Path is not a directory: {dir_path}"
-                )
-            all_allowed_dirs.append(str(resolved_path))
-        except OSError as e:
-            raise DirectoryNotFoundError(f"Invalid directory path: {e}")
+    if allowed_dirs:
+        for dir_path in allowed_dirs:
+            security_manager.add_allowed_directory(dir_path)

     # Add directories from file if specified
     if allowed_dir_file:
@@ -1234,28 +1169,13 @@ def validate_security_manager(
             for line in f:
                 line = line.strip()
                 if line and not line.startswith("#"):
-                    try:
-                        resolved_path = Path(line).resolve()
-                        if not resolved_path.exists():
-                            raise DirectoryNotFoundError(
-                                f"Directory not found: {line}"
-                            )
-                        if not resolved_path.is_dir():
-                            raise DirectoryNotFoundError(
-                                f"Path is not a directory: {line}"
-                            )
-                        all_allowed_dirs.append(str(resolved_path))
-                    except OSError as e:
-                        raise DirectoryNotFoundError(
-                            f"Invalid directory path in {allowed_dir_file}: {e}"
-                        )
+                    security_manager.add_allowed_directory(line)
         except OSError as e:
             raise DirectoryNotFoundError(
                 f"Failed to read allowed directories file: {e}"
             )

-    # Create security manager with all allowed directories
-    return SecurityManager(base_dir=base_dir, allowed_dirs=all_allowed_dirs)
+    return security_manager


 def parse_var(var_str: str) -> Tuple[str, str]:
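Net effect of the two hunks above: `validate_security_manager` no longer resolves and checks each directory itself; it builds a `SecurityManager` and delegates, so the resolve/exists/is-dir validation lives in one place. A condensed sketch of the new flow (file-reading error handling elided; `add_allowed_directory` is assumed to raise on bad paths as the inlined code did):

```python
security_manager = SecurityManager(base_dir)

for dir_path in allowed_dirs or []:
    security_manager.add_allowed_directory(dir_path)

if allowed_dir_file:
    with open(allowed_dir_file) as f:
        for line in f:
            line = line.strip()
            if line and not line.startswith("#"):  # skip blanks and comments
                security_manager.add_allowed_directory(line)

return security_manager
```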
@@ -1416,29 +1336,78 @@ async def stream_structured_output(
     It handles the core streaming logic and resource cleanup.
     """
     try:
-        async for chunk in async_openai_structured_stream(
-            client=client,
-            model=model,
-            output_schema=output_schema,
-            system_prompt=system_prompt,
-            user_prompt=user_prompt,
-            **kwargs,
-        ):
-            if not chunk:
-                continue
-
-            # Process and output the chunk
-            dumped = chunk.model_dump(mode="json")
-            json_str = json.dumps(dumped, indent=2)
-
-            if output_file:
-                with open(output_file, "a", encoding="utf-8") as f:
-                    f.write(json_str)
-                    f.write("\n")
-                    f.flush()  # Ensure immediate flush to file
+        # Base models that don't support streaming
+        non_streaming_models = {"o1", "o3"}
+
+        # Check if model supports streaming
+        # o3-mini and o3-mini-high support streaming, base o3 does not
+        use_streaming = model not in non_streaming_models and (
+            not model.startswith("o3") or model.startswith("o3-mini")
+        )
+
+        # All o1 and o3 models (base and variants) have fixed settings
+        stream_kwargs = {}
+        if not (model.startswith("o1") or model.startswith("o3")):
+            stream_kwargs = kwargs
+
+        if use_streaming:
+            async for chunk in async_openai_structured_stream(
+                client=client,
+                model=model,
+                output_schema=output_schema,
+                system_prompt=system_prompt,
+                user_prompt=user_prompt,
+                **stream_kwargs,
+            ):
+                if not chunk:
+                    continue
+
+                # Process and output the chunk
+                dumped = chunk.model_dump(mode="json")
+                json_str = json.dumps(dumped, indent=2)
+
+                if output_file:
+                    with open(output_file, "a", encoding="utf-8") as f:
+                        f.write(json_str)
+                        f.write("\n")
+                        f.flush()  # Ensure immediate flush to file
+                else:
+                    # Print directly to stdout with immediate flush
+                    print(json_str, flush=True)
+        else:
+            # For non-streaming models, use regular completion
+            response = await client.chat.completions.create(
+                model=model,
+                messages=[
+                    {"role": "system", "content": system_prompt},
+                    {"role": "user", "content": user_prompt},
+                ],
+                stream=False,
+                **stream_kwargs,
+            )
+
+            # Process the single response
+            content = response.choices[0].message.content
+            if content:
+                try:
+                    # Parse and validate against schema
+                    result = output_schema.model_validate_json(content)
+                    json_str = json.dumps(
+                        result.model_dump(mode="json"), indent=2
+                    )
+
+                    if output_file:
+                        with open(output_file, "w", encoding="utf-8") as f:
+                            f.write(json_str)
+                            f.write("\n")
+                    else:
+                        print(json_str, flush=True)
+                except ValidationError as e:
+                    raise InvalidResponseFormatError(
+                        f"Response validation failed: {e}"
+                    )
             else:
-                # Print directly to stdout with immediate flush
-                print(json_str, flush=True)
+                raise EmptyResponseError("Model returned empty response")

     except (
         StreamInterruptedError,
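The gating added in this hunk is subtle enough to spell out: base `o1` and `o3` take the non-streaming `chat.completions.create` path, `o3-mini` variants keep streaming, and every model whose name starts with `o1` or `o3` has its extra kwargs dropped (fixed settings). A standalone sketch of the predicate with example inputs:

```python
def use_streaming(model: str) -> bool:
    # Mirrors the hunk above: base "o1"/"o3" never stream;
    # other o3 models stream only if they are o3-mini variants.
    non_streaming_models = {"o1", "o3"}
    return model not in non_streaming_models and (
        not model.startswith("o3") or model.startswith("o3-mini")
    )

use_streaming("gpt-4o")         # True
use_streaming("o1")             # False (in the non-streaming set)
use_streaming("o1-mini")        # True (streams, but with fixed settings)
use_streaming("o3")             # False
use_streaming("o3-mini")        # True
use_streaming("o3-2025-01-31")  # False (hypothetical dated name: "o3" prefix, not "o3-mini")
```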
@@ -1657,8 +1626,7 @@ def create_cli() -> click.Command:
             logger.exception("Unexpected error")
             raise CLIError(str(e), context={"error_type": type(e).__name__})

-    # The decorated function is a Command, but mypy can't detect this
-    return cast(click.Command, cli)
+    return cli


 def main() -> None:
@@ -1671,8 +1639,6 @@ def main() -> None:
 __all__ = [
     "ExitCode",
     "estimate_tokens_for_chat",
-    "get_context_window_limit",
-    "get_default_token_limit",
     "parse_json_var",
     "create_dynamic_model",
     "validate_path_mapping",
@@ -1,17 +1,14 @@
 """Click command and options for the CLI.

 This module contains all Click-related code separated from the main CLI logic.
-We isolate this code here and disable mypy type checking for the entire module
-because Click's decorator-based API is not easily type-checkable, leading to
-many type: ignore comments in the main code.
+We isolate this code here and provide proper type annotations for Click's
+decorator-based API.
 """

-# mypy: ignore-errors
-# ^ This tells mypy to ignore type checking for this entire file
-
-from typing import Any, Callable
+from typing import Any, Callable, TypeVar, Union, cast

 import click
+from click import Command

 from ostruct import __version__
 from ostruct.cli.errors import (  # noqa: F401 - Used in error handling
@@ -19,6 +16,9 @@ from ostruct.cli.errors import (  # noqa: F401 - Used in error handling
     TaskTemplateVariableError,
 )

+F = TypeVar("F", bound=Callable[..., Any])
+DecoratedCommand = Union[Command, Callable[..., Any]]
+

 def validate_task_params(
     ctx: click.Context, param: click.Parameter, value: Any
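These two aliases are what replace the module-wide `# mypy: ignore-errors`: `F` preserves the wrapped function's type, and `DecoratedCommand` names the intermediate value that may be either a plain callable or an already-built `Command` while options are being stacked. A self-contained sketch of the pattern, using a hypothetical single-option command for brevity:

```python
from typing import Any, Callable, TypeVar, Union, cast

import click
from click import Command

F = TypeVar("F", bound=Callable[..., Any])
DecoratedCommand = Union[Command, Callable[..., Any]]

def with_common_options(f: F) -> Command:
    # click.command() turns the function into a Command; subsequent
    # click.option() calls accept and return either form, hence the union.
    cmd: DecoratedCommand = click.command()(f)
    cmd = click.option("--verbose", is_flag=True)(cmd)
    return cast(Command, cmd)
```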
@@ -162,87 +162,96 @@ def model_options(f: Callable) -> Callable:
     return f


-def create_click_command() -> Callable:
-    """Create the Click command with all options."""
+def create_click_command() -> Callable[[F], Command]:
+    """Create the Click command with all options.
+
+    Returns:
+        A decorator function that adds all CLI options to the command.
+    """
+
+    def decorator(f: F) -> Command:
+        # Start with the base command
+        cmd: DecoratedCommand = click.command(
+            help="Make structured OpenAI API calls."
+        )(f)

-    def decorator(f: Callable) -> Callable:
-        f = click.command(help="Make structured OpenAI API calls.")(f)
-        f = click.option(
+        # Add all options
+        cmd = click.option(
             "--task",
             help="Task template string",
             type=str,
             callback=validate_task_params,
-        )(f)
-        f = click.option(
+        )(cmd)
+        cmd = click.option(
             "--task-file",
             help="Task template file path",
             type=str,
             callback=validate_task_params,
-        )(f)
-        f = click.option(
+        )(cmd)
+        cmd = click.option(
             "--system-prompt",
             help="System prompt string",
             type=str,
             callback=validate_system_prompt_params,
-        )(f)
-        f = click.option(
+        )(cmd)
+        cmd = click.option(
             "--system-prompt-file",
             help="System prompt file path",
             type=str,
             callback=validate_system_prompt_params,
-        )(f)
-        f = click.option(
+        )(cmd)
+        cmd = click.option(
             "--schema-file",
             required=True,
             help="JSON schema file for response validation",
             type=str,
-        )(f)
-        f = click.option(
+        )(cmd)
+        cmd = click.option(
             "--ignore-task-sysprompt",
             is_flag=True,
             help="Ignore system prompt from task template YAML frontmatter",
-        )(f)
-        f = click.option(
+        )(cmd)
+        cmd = click.option(
             "--timeout",
             type=float,
             default=60.0,
             help="API timeout in seconds",
-        )(f)
-        f = click.option(
+        )(cmd)
+        cmd = click.option(
             "--output-file", help="Write JSON output to file", type=str
-        )(f)
-        f = click.option(
+        )(cmd)
+        cmd = click.option(
             "--dry-run",
             is_flag=True,
             help="Simulate API call without making request",
-        )(f)
-        f = click.option(
+        )(cmd)
+        cmd = click.option(
             "--no-progress", is_flag=True, help="Disable progress indicators"
-        )(f)
-        f = click.option(
+        )(cmd)
+        cmd = click.option(
             "--progress-level",
             type=click.Choice(["none", "basic", "detailed"]),
             default="basic",
             help="Progress reporting level",
-        )(f)
-        f = click.option(
+        )(cmd)
+        cmd = click.option(
             "--api-key", help="OpenAI API key (overrides env var)", type=str
-        )(f)
-        f = click.option(
+        )(cmd)
+        cmd = click.option(
             "--verbose",
             is_flag=True,
             help="Enable verbose output and detailed logging",
-        )(f)
-        f = click.option(
+        )(cmd)
+        cmd = click.option(
             "--debug-openai-stream",
             is_flag=True,
             help="Enable low-level debug output for OpenAI streaming",
-        )(f)
-        f = debug_options(f)
-        f = file_options(f)
-        f = variable_options(f)
-        f = model_options(f)
-        f = click.version_option(version=__version__)(f)
-        return f
+        )(cmd)
+        cmd = debug_options(cmd)
+        cmd = file_options(cmd)
+        cmd = variable_options(cmd)
+        cmd = model_options(cmd)
+        cmd = click.version_option(version=__version__)(cmd)
+        return cast(Command, cmd)

     return decorator
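This is also why the cli.py hunk above can `return cli` without a cast: the decorator returned by `create_click_command()` now hands back a `Command`. A usage sketch; the real `create_cli` wires up the full option set and error handling shown elsewhere in this diff, so the body here is illustrative only:

```python
from typing import Any

from click import Command

def create_cli() -> Command:
    @create_click_command()
    def cli(**kwargs: Any) -> None:
        # Dispatch to the actual CLI logic (illustrative placeholder).
        ...

    return cli  # already a click.Command, so no cast is needed
```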