ostruct-cli 0.7.1__py3-none-any.whl → 0.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ostruct/cli/__init__.py +21 -3
- ostruct/cli/base_errors.py +1 -1
- ostruct/cli/cli.py +66 -1983
- ostruct/cli/click_options.py +460 -28
- ostruct/cli/code_interpreter.py +238 -0
- ostruct/cli/commands/__init__.py +32 -0
- ostruct/cli/commands/list_models.py +128 -0
- ostruct/cli/commands/quick_ref.py +50 -0
- ostruct/cli/commands/run.py +137 -0
- ostruct/cli/commands/update_registry.py +71 -0
- ostruct/cli/config.py +277 -0
- ostruct/cli/cost_estimation.py +134 -0
- ostruct/cli/errors.py +310 -6
- ostruct/cli/exit_codes.py +1 -0
- ostruct/cli/explicit_file_processor.py +548 -0
- ostruct/cli/field_utils.py +69 -0
- ostruct/cli/file_info.py +42 -9
- ostruct/cli/file_list.py +301 -102
- ostruct/cli/file_search.py +455 -0
- ostruct/cli/file_utils.py +47 -13
- ostruct/cli/mcp_integration.py +541 -0
- ostruct/cli/model_creation.py +150 -1
- ostruct/cli/model_validation.py +204 -0
- ostruct/cli/progress_reporting.py +398 -0
- ostruct/cli/registry_updates.py +14 -9
- ostruct/cli/runner.py +1418 -0
- ostruct/cli/schema_utils.py +113 -0
- ostruct/cli/services.py +626 -0
- ostruct/cli/template_debug.py +748 -0
- ostruct/cli/template_debug_help.py +162 -0
- ostruct/cli/template_env.py +15 -6
- ostruct/cli/template_filters.py +55 -3
- ostruct/cli/template_optimizer.py +474 -0
- ostruct/cli/template_processor.py +1080 -0
- ostruct/cli/template_rendering.py +69 -34
- ostruct/cli/token_validation.py +286 -0
- ostruct/cli/types.py +78 -0
- ostruct/cli/unattended_operation.py +269 -0
- ostruct/cli/validators.py +386 -3
- {ostruct_cli-0.7.1.dist-info → ostruct_cli-0.8.0.dist-info}/LICENSE +2 -0
- ostruct_cli-0.8.0.dist-info/METADATA +633 -0
- ostruct_cli-0.8.0.dist-info/RECORD +69 -0
- {ostruct_cli-0.7.1.dist-info → ostruct_cli-0.8.0.dist-info}/WHEEL +1 -1
- ostruct_cli-0.7.1.dist-info/METADATA +0 -369
- ostruct_cli-0.7.1.dist-info/RECORD +0 -45
- {ostruct_cli-0.7.1.dist-info → ostruct_cli-0.8.0.dist-info}/entry_points.txt +0 -0
@@ -64,7 +64,12 @@ from jinja2 import Environment
|
|
64
64
|
from .errors import TaskTemplateVariableError, TemplateValidationError
|
65
65
|
from .file_utils import FileInfo
|
66
66
|
from .progress import ProgressContext
|
67
|
+
from .progress_reporting import get_progress_reporter
|
67
68
|
from .template_env import create_jinja_env
|
69
|
+
from .template_optimizer import (
|
70
|
+
is_optimization_beneficial,
|
71
|
+
optimize_template_for_llm,
|
72
|
+
)
|
68
73
|
from .template_schema import DotDict, StdinProxy
|
69
74
|
|
70
75
|
__all__ = [
|
@@ -162,20 +167,6 @@ def render_template(
|
|
162
167
|
if "stdin" not in wrapped_context:
|
163
168
|
wrapped_context["stdin"] = StdinProxy()
|
164
169
|
|
165
|
-
# Load file content for FileInfo objects
|
166
|
-
for key, value in context.items():
|
167
|
-
if isinstance(value, FileInfo):
|
168
|
-
# Access content property to trigger loading
|
169
|
-
_ = value.content
|
170
|
-
elif (
|
171
|
-
isinstance(value, list)
|
172
|
-
and value
|
173
|
-
and isinstance(value[0], FileInfo)
|
174
|
-
):
|
175
|
-
for file_info in value:
|
176
|
-
# Access content property to trigger loading
|
177
|
-
_ = file_info.content
|
178
|
-
|
179
170
|
if progress:
|
180
171
|
progress.update(1) # Update progress for template creation
|
181
172
|
|
@@ -235,24 +226,14 @@ def render_template(
|
|
235
226
|
)
|
236
227
|
if value and isinstance(value[0], FileInfo):
|
237
228
|
logger.info(
|
238
|
-
" First file: %s
|
229
|
+
" First file: %s",
|
239
230
|
value[0].path,
|
240
|
-
(
|
241
|
-
len(value[0].content)
|
242
|
-
if hasattr(value[0], "content")
|
243
|
-
else -1
|
244
|
-
),
|
245
231
|
)
|
246
232
|
elif isinstance(value, FileInfo):
|
247
233
|
logger.info(
|
248
|
-
" %s: FileInfo(%s)
|
234
|
+
" %s: FileInfo(%s)",
|
249
235
|
key,
|
250
236
|
value.path,
|
251
|
-
(
|
252
|
-
len(value.content)
|
253
|
-
if hasattr(value, "content")
|
254
|
-
else -1
|
255
|
-
),
|
256
237
|
)
|
257
238
|
else:
|
258
239
|
logger.info(" %s: %s", key, type(value).__name__)
|
@@ -278,18 +259,72 @@ def render_template(
|
|
278
259
|
" exists: %r",
|
279
260
|
os.path.exists(item.path),
|
280
261
|
)
|
281
|
-
logger.debug(
|
282
|
-
" content length: %d",
|
283
|
-
(
|
284
|
-
len(item.content)
|
285
|
-
if hasattr(item, "content")
|
286
|
-
else -1
|
287
|
-
),
|
288
|
-
)
|
289
262
|
else:
|
290
263
|
logger.debug(
|
291
264
|
" %s: %s (%r)", key, type(value).__name__, value
|
292
265
|
)
|
266
|
+
|
267
|
+
# Apply template optimization for better LLM performance
|
268
|
+
try:
|
269
|
+
# Get template source - use template_str for string templates or template.source for file templates
|
270
|
+
if hasattr(template, "source") and template.source:
|
271
|
+
original_template_source = template.source
|
272
|
+
else:
|
273
|
+
original_template_source = template_str
|
274
|
+
|
275
|
+
if (
|
276
|
+
original_template_source
|
277
|
+
and is_optimization_beneficial(
|
278
|
+
original_template_source
|
279
|
+
)
|
280
|
+
):
|
281
|
+
logger.debug("=== Template Optimization ===")
|
282
|
+
optimization_result = optimize_template_for_llm(
|
283
|
+
original_template_source
|
284
|
+
)
|
285
|
+
|
286
|
+
if optimization_result.has_optimizations:
|
287
|
+
# Report optimization to user
|
288
|
+
progress_reporter = get_progress_reporter()
|
289
|
+
progress_reporter.report_optimization(
|
290
|
+
optimization_result.transformations
|
291
|
+
)
|
292
|
+
|
293
|
+
logger.info(
|
294
|
+
"Template optimized for LLM performance:"
|
295
|
+
)
|
296
|
+
for (
|
297
|
+
transformation
|
298
|
+
) in optimization_result.transformations:
|
299
|
+
logger.info(f" • {transformation}")
|
300
|
+
logger.info(
|
301
|
+
f" • Optimization time: {optimization_result.optimization_time_ms:.1f}ms"
|
302
|
+
)
|
303
|
+
|
304
|
+
# Create new template from optimized content
|
305
|
+
template = env.from_string(
|
306
|
+
optimization_result.optimized_template
|
307
|
+
)
|
308
|
+
# Re-add globals to new template
|
309
|
+
template.globals["template_name"] = getattr(
|
310
|
+
template, "name", "<string>"
|
311
|
+
)
|
312
|
+
template.globals["template_path"] = getattr(
|
313
|
+
template, "filename", None
|
314
|
+
)
|
315
|
+
else:
|
316
|
+
logger.debug("No beneficial optimizations found")
|
317
|
+
else:
|
318
|
+
logger.debug(
|
319
|
+
"Template optimization not beneficial - skipping"
|
320
|
+
)
|
321
|
+
except Exception as e:
|
322
|
+
# If optimization fails, continue with original template
|
323
|
+
logger.warning(
|
324
|
+
f"Template optimization failed, using original: {e}"
|
325
|
+
)
|
326
|
+
# template remains unchanged
|
327
|
+
|
293
328
|
result = template.render(**wrapped_context)
|
294
329
|
if not isinstance(result, str):
|
295
330
|
raise TemplateValidationError(
|
@@ -0,0 +1,286 @@
|
|
1
|
+
"""Token limit validation with actionable error guidance."""
|
2
|
+
|
3
|
+
import logging
|
4
|
+
import os
|
5
|
+
from pathlib import Path
|
6
|
+
from typing import List, Optional, Tuple
|
7
|
+
|
8
|
+
import tiktoken
|
9
|
+
|
10
|
+
from .errors import PromptTooLargeError
|
11
|
+
|
12
|
+
|
13
|
+
class TokenLimitValidator:
    """Validate prompt size and provide corrective guidance for explicit file routing."""

    # Default context-window ceiling; callers may override per call via
    # ``context_limit`` in :meth:`validate_prompt_size`.
    MAX_TOKENS = 128000  # Model context window limit

    def __init__(self, model: str = "gpt-4o"):
        """Initialize validator with model-specific encoding.

        Args:
            model: Model name for token encoding selection
        """
        self.model = model
        self.encoder = self._get_encoder(model)

    def _get_encoder(self, model: str) -> tiktoken.Encoding:
        """Get appropriate tiktoken encoder for model.

        Newer model families (gpt-4o, o1, o3) use the o200k vocabulary;
        everything else falls back to cl100k_base.
        """
        if model.startswith(("gpt-4o", "o1", "o3")):
            return tiktoken.get_encoding("o200k_base")
        else:
            return tiktoken.get_encoding("cl100k_base")

    def validate_prompt_size(
        self,
        template_content: str,
        template_files: List[str],
        context_limit: Optional[int] = None,
    ) -> None:
        """Check if prompt will exceed context window and provide actionable guidance.

        Args:
            template_content: Rendered template content
            template_files: List of file paths included in template
            context_limit: Optional custom context limit (defaults to MAX_TOKENS)

        Raises:
            PromptTooLargeError: If prompt exceeds context window with actionable guidance
        """
        logger = logging.getLogger(__name__)

        limit = context_limit or self.MAX_TOKENS
        total_tokens = self._count_template_tokens(template_content)

        oversized_files = []
        for file_path in template_files:
            try:
                file_tokens = self._count_file_tokens(file_path)
                total_tokens += file_tokens

                # Flag files over 5K tokens for routing guidance
                if file_tokens > 5000:
                    oversized_files.append((file_path, file_tokens))
            except (OSError, IOError):
                # Skip files that can't be read for token counting
                continue

        # Warn once the prompt crosses 90% of the window, before hard failure.
        if total_tokens > limit * 0.9:
            logger.warning(
                "Prompt is %.1f%% of the %d-token window (%d tokens)",
                total_tokens / limit * 100,
                limit,
                total_tokens,
            )

        if total_tokens > limit:
            self._raise_actionable_error(total_tokens, limit, oversized_files)

    def _count_template_tokens(self, content: str) -> int:
        """Count tokens in template content."""
        return len(self.encoder.encode(content))

    def _count_file_tokens(self, file_path: str) -> int:
        """Count tokens in a file.

        Falls back to a size-based estimate for files that are not valid
        UTF-8 text (binary files).
        """
        try:
            with open(file_path, "r", encoding="utf-8") as f:
                content = f.read()
            return len(self.encoder.encode(content))
        except UnicodeDecodeError:
            # For binary files, estimate based on file size
            # Rough estimate: 1 token per 4 bytes
            file_size = os.path.getsize(file_path)
            return file_size // 4

    def _is_data_file(self, file_path: str) -> bool:
        """Detect if file is likely a data file suitable for Code Interpreter."""
        data_extensions = {
            ".csv",
            ".json",
            ".xlsx",
            ".xls",
            ".tsv",
            ".parquet",
            ".sql",
            ".db",
            ".sqlite",
            ".sqlite3",
            ".pkl",
            ".pickle",
            ".npy",
            ".npz",
            ".h5",
            ".hdf5",
            ".xml",
            ".yaml",
            ".yml",
        }
        return Path(file_path).suffix.lower() in data_extensions

    def _is_document_file(self, file_path: str) -> bool:
        """Detect if file is likely a document suitable for File Search."""
        doc_extensions = {
            ".pdf",
            ".doc",
            ".docx",
            ".txt",
            ".md",
            ".rst",
            ".tex",
            ".html",
            ".htm",
            ".rtf",
            ".odt",
            ".epub",
            ".mobi",
        }
        return Path(file_path).suffix.lower() in doc_extensions

    def _is_code_file(self, file_path: str) -> bool:
        """Detect if file is likely source code."""
        code_extensions = {
            ".py",
            ".js",
            ".ts",
            ".java",
            ".cpp",
            ".c",
            ".h",
            ".hpp",
            ".cs",
            ".go",
            ".rs",
            ".rb",
            ".php",
            ".swift",
            ".kt",
            ".scala",
            ".r",
            ".m",
            ".sh",
            ".bash",
            ".ps1",
            ".pl",
            ".lua",
            ".dart",
        }
        return Path(file_path).suffix.lower() in code_extensions

    def _raise_actionable_error(
        self,
        total_tokens: int,
        limit: int,
        oversized_files: List[Tuple[str, int]],
    ) -> None:
        """Raise PromptTooLargeError with specific guidance for explicit file routing."""
        error_msg = (
            f"❌ Error: Prompt exceeds model context window "
            f"({total_tokens:,} tokens > {limit:,} limit)\n\n"
        )

        if oversized_files:
            error_msg += "💡 Suggestion: Re-run with explicit file routing to move large files out of template context:\n\n"

            for file_path, tokens in oversized_files:
                file_name = Path(file_path).name

                # Per-file routing hint chosen by extension classification.
                if self._is_data_file(file_path):
                    error_msg += f" 📊 Data file: ostruct -fc {file_name} <template> <schema>\n"
                    error_msg += f" (Moves {file_name} to Code Interpreter for data processing)\n\n"
                elif self._is_document_file(file_path):
                    error_msg += f" 📄 Document: ostruct -fs {file_name} <template> <schema>\n"
                    error_msg += f" (Moves {file_name} to File Search for semantic retrieval)\n\n"
                elif self._is_code_file(file_path):
                    error_msg += f" 💻 Code file: ostruct -fc {file_name} <template> <schema>\n"
                    error_msg += f" (Moves {file_name} to Code Interpreter for analysis)\n\n"
                else:
                    error_msg += f" 📁 Large file: ostruct -fc {file_name} OR -fs {file_name} <template> <schema>\n"
                    error_msg += " (Choose based on usage: -fc for processing, -fs for retrieval)\n\n"

                error_msg += (
                    f" Size: {tokens:,} tokens ({file_path})\n\n"
                )

            error_msg += (
                "🔧 Alternative: Use --file-for for specific tool routing:\n"
            )
            error_msg += f" ostruct --file-for code-interpreter {oversized_files[0][0]} <template> <schema>\n\n"

        else:
            error_msg += "💡 Suggestion: Consider breaking down your template or using fewer input files\n\n"

        error_msg += "🔍 Check file sizes: tiktoken_cli count <filename>\n"
        error_msg += "📖 Learn more: ostruct --help (see File Routing section)"

        raise PromptTooLargeError(
            error_msg,
            context={
                "total_tokens": total_tokens,
                "context_limit": limit,
                # Defensive copy; the previous identity comprehension
                # ([(p, t) for p, t in ...]) was redundant (ruff C416).
                "oversized_files": list(oversized_files),
                "suggested_routing": self._generate_routing_suggestions(
                    oversized_files
                ),
            },
        )

    def _generate_routing_suggestions(
        self, oversized_files: List[Tuple[str, int]]
    ) -> List[dict]:
        """Generate structured routing suggestions for programmatic use."""
        suggestions = []
        for file_path, tokens in oversized_files:
            suggestion = {
                "file_path": file_path,
                "tokens": tokens,
                "file_type": self._classify_file(file_path),
                "recommended_flags": self._get_recommended_flags(file_path),
            }
            suggestions.append(suggestion)
        return suggestions

    def _classify_file(self, file_path: str) -> str:
        """Classify file type for routing suggestions."""
        if self._is_data_file(file_path):
            return "data"
        elif self._is_document_file(file_path):
            return "document"
        elif self._is_code_file(file_path):
            return "code"
        else:
            return "unknown"

    def _get_recommended_flags(self, file_path: str) -> List[str]:
        """Get recommended CLI flags for file routing."""
        if self._is_data_file(file_path) or self._is_code_file(file_path):
            return ["-fc", "--file-for code-interpreter"]
        elif self._is_document_file(file_path):
            return ["-fs", "--file-for file-search"]
        else:
            return ["-fc", "-fs"]  # Both options for unknown files
|
264
|
+
|
265
|
+
|
266
|
+
def validate_token_limits(
    template_content: str,
    template_files: List[str],
    model: str,
    context_limit: Optional[int] = None,
) -> None:
    """Convenience function for token limit validation.

    Builds a ``TokenLimitValidator`` for *model* and delegates to its
    ``validate_prompt_size`` method.

    Args:
        template_content: Rendered template content
        template_files: List of file paths included in template
        model: Model name for encoding selection
        context_limit: Optional custom context limit

    Raises:
        PromptTooLargeError: If prompt exceeds context window
    """
    TokenLimitValidator(model).validate_prompt_size(
        template_content, template_files, context_limit
    )
|
ostruct/cli/types.py
ADDED
@@ -0,0 +1,78 @@
|
|
1
|
+
"""Type definitions for ostruct CLI."""
|
2
|
+
|
3
|
+
from pathlib import Path
|
4
|
+
from typing import List, Optional, Tuple, TypedDict, Union
|
5
|
+
|
6
|
+
# Routing result shape shared with validators: (optional alias, resolved path) pairs.
FileRoutingResult = List[Tuple[Optional[str], Union[str, Path]]]


class CLIParams(TypedDict, total=False):
    """Type-safe CLI parameters.

    ``total=False``: every key is optional, since only the options the
    user actually supplied are populated.
    """

    files: List[Tuple[str, str]]  # (name, path) pairs from Click's nargs=2
    dir: List[Tuple[str, str]]  # (name, dir) pairs from Click's nargs=2
    patterns: List[Tuple[str, str]]  # (name, pattern) pairs from Click's nargs=2
    allowed_dirs: List[str]
    base_dir: str
    allowed_dir_file: Optional[str]
    recursive: bool
    var: List[str]
    json_var: List[str]
    system_prompt: Optional[str]
    system_prompt_file: Optional[str]
    ignore_task_sysprompt: bool
    model: str
    timeout: float
    output_file: Optional[str]
    dry_run: bool
    no_progress: bool
    api_key: Optional[str]
    verbose: bool
    debug_openai_stream: bool
    show_model_schema: bool
    debug_validation: bool
    temperature: Optional[float]
    max_output_tokens: Optional[int]
    top_p: Optional[float]
    frequency_penalty: Optional[float]
    presence_penalty: Optional[float]
    reasoning_effort: Optional[str]
    progress_level: str
    task_file: Optional[str]
    task: Optional[str]
    schema_file: str
    mcp_servers: List[str]
    mcp_allowed_tools: List[str]
    mcp_require_approval: str
    mcp_headers: Optional[str]
    code_interpreter_files: FileRoutingResult  # routed files, not plain List[str]
    code_interpreter_dirs: List[str]
    code_interpreter_download_dir: str
    code_interpreter_cleanup: bool
    file_search_files: FileRoutingResult  # routed files, not plain List[str]
    file_search_dirs: List[str]
    file_search_vector_store_name: str
    file_search_cleanup: bool
    file_search_retry_count: int
    file_search_timeout: float
    template_files: FileRoutingResult  # routed files, not plain List[str]
    template_dirs: List[str]
    template_file_aliases: List[Tuple[str, Union[str, Path]]]  # path may be Path
    code_interpreter_file_aliases: List[Tuple[str, Union[str, Path]]]  # path may be Path
    file_search_file_aliases: List[Tuple[str, Union[str, Path]]]  # path may be Path
    tool_files: List[Tuple[str, str]]  # (tool, path) pairs from --file-for
|