hamtaa-texttools 1.1.16-py3-none-any.whl → 1.1.18-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {hamtaa_texttools-1.1.16.dist-info → hamtaa_texttools-1.1.18.dist-info}/METADATA +3 -2
- {hamtaa_texttools-1.1.16.dist-info → hamtaa_texttools-1.1.18.dist-info}/RECORD +17 -15
- texttools/__init__.py +1 -1
- texttools/batch/batch_runner.py +75 -64
- texttools/{tools/internals → internals}/async_operator.py +96 -48
- texttools/internals/exceptions.py +28 -0
- texttools/{tools/internals → internals}/models.py +63 -56
- texttools/internals/prompt_loader.py +80 -0
- texttools/{tools/internals → internals}/sync_operator.py +92 -47
- texttools/prompts/propositionize.yaml +15 -0
- texttools/tools/async_tools.py +627 -321
- texttools/tools/sync_tools.py +625 -319
- texttools/tools/internals/prompt_loader.py +0 -56
- {hamtaa_texttools-1.1.16.dist-info → hamtaa_texttools-1.1.18.dist-info}/WHEEL +0 -0
- {hamtaa_texttools-1.1.16.dist-info → hamtaa_texttools-1.1.18.dist-info}/licenses/LICENSE +0 -0
- {hamtaa_texttools-1.1.16.dist-info → hamtaa_texttools-1.1.18.dist-info}/top_level.txt +0 -0
- /texttools/{tools/internals → internals}/formatters.py +0 -0
- /texttools/{tools/internals → internals}/operator_utils.py +0 -0
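The brace renames above move the shared helpers out of `texttools/tools/internals` into a top-level `texttools/internals` package, and 1.1.18 adds `texttools/internals/exceptions.py`, `texttools/internals/prompt_loader.py` and a `propositionize.yaml` prompt. A minimal sketch of what the move means for import paths, using only module names visible in this file list and in the sync_tools diff below (illustrative, not the package's documented API):

```python
# 1.1.16 (old layout, inferred from the renamed paths above; hypothetical)
# import texttools.tools.internals.models as Models

# 1.1.18 (new layout, as imported by texttools/tools/sync_tools.py below)
import texttools.internals.models as Models
from texttools.internals.sync_operator import Operator
from texttools.internals.exceptions import (
    TextToolsError, PromptError, LLMError, ValidationError,
)
```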
texttools/tools/sync_tools.py
CHANGED
@@ -4,19 +4,20 @@ from collections.abc import Callable
 
 from openai import OpenAI
 
-from texttools.
-import texttools.
+from texttools.internals.sync_operator import Operator
+import texttools.internals.models as Models
+from texttools.internals.exceptions import (
+    TextToolsError,
+    PromptError,
+    LLMError,
+    ValidationError,
+)
 
 
 class TheTool:
     """
     Each method configures the operator with a specific YAML prompt,
     output schema, and flags, then delegates execution to `operator.run()`.
-
-    Usage:
-        client = OpenAI(...)
-        tool = TheTool(client, model="model-name")
-        result = tool.categorize("text ...", with_analysis=True)
     """
 
     def __init__(
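Alongside the new exception imports, 1.1.18 drops the class-level Usage example from the docstring. Error handling now happens inside each tool method (see the try/except blocks added below), so a caller inspects the returned ToolOutput instead of catching exceptions itself. A usage sketch, reusing the constructor call from the removed docstring (`TheTool(client, model=...)`) and the ToolOutput fields documented in this file; anything beyond that is an assumption:

```python
from openai import OpenAI
from texttools.tools.sync_tools import TheTool  # module shown in this diff

client = OpenAI()  # assumes credentials / base_url are configured for your endpoint
tool = TheTool(client, model="model-name")

out = tool.summarize("Some long passage of text ...")
if out.errors:
    # populated from PromptError / LLMError / ValidationError / unexpected exceptions
    print("summarize failed:", out.errors)
else:
    print(out.result)
    print(f"took {out.execution_time:.2f}s, processed at {out.processed_at}")
```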
@@ -60,43 +61,103 @@ class TheTool:
                 - result (str): The assigned category
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
+
         """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+
+            if mode == "category_tree":
+                # Initializations
+                output = Models.ToolOutput()
+                levels = categories.get_level_count()
+                parent_id = 0
+                final_output = []
+
+                for _ in range(levels):
+                    # Get child nodes for current parent
+                    parent_node = categories.get_node(parent_id)
+                    children = categories.get_children(parent_node)
+
+                    # Check if child nodes exist
+                    if not children:
+                        output.errors.append(
+                            f"No categories found for parent_id {parent_id} in the tree"
+                        )
+                        end = datetime.now()
+                        output.execution_time = (end - start).total_seconds()
+                        return output
+
+                    # Extract category names and descriptions
+                    category_list = [
+                        f"Category Name: {node.name}, Description: {node.description}"
+                        for node in children
+                    ]
+                    category_names = [node.name for node in children]
+
+                    # Run categorization for this level
+                    level_output = self._operator.run(
+                        # User parameters
+                        text=text,
+                        category_list=category_list,
+                        with_analysis=with_analysis,
+                        user_prompt=user_prompt,
+                        temperature=temperature,
+                        logprobs=logprobs,
+                        top_logprobs=top_logprobs,
+                        mode=mode,
+                        validator=validator,
+                        max_validation_retries=max_validation_retries,
+                        priority=priority,
+                        # Internal parameters
+                        prompt_file="categorize.yaml",
+                        output_model=Models.create_dynamic_model(category_names),
+                        output_lang=None,
                     )
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+                    # Check for errors from operator
+                    if level_output.errors:
+                        output.errors.extend(level_output.errors)
+                        end = datetime.now()
+                        output.execution_time = (end - start).total_seconds()
+                        return output
+
+                    # Get the chosen category
+                    chosen_category = level_output.result
+
+                    # Find the corresponding node
+                    parent_node = categories.get_node(chosen_category)
+                    if parent_node is None:
+                        output.errors.append(
+                            f"Category '{chosen_category}' not found in tree after selection"
+                        )
+                        end = datetime.now()
+                        output.execution_time = (end - start).total_seconds()
+                        return output
+
+                    parent_id = parent_node.node_id
+                    final_output.append(parent_node.name)
+
+                # Copy analysis/logprobs/process from the last level's output
+                output.analysis = level_output.analysis
+                output.logprobs = level_output.logprobs
+                output.process = level_output.process
+
+                output.result = final_output
+                end = datetime.now()
+                output.execution_time = (end - start).total_seconds()
+                return output
+
+            else:
+                output = self._operator.run(
                     # User parameters
                     text=text,
-                    category_list=
+                    category_list=categories,
                     with_analysis=with_analysis,
                     user_prompt=user_prompt,
                     temperature=temperature,
@@ -108,65 +169,25 @@ class TheTool:
                     priority=priority,
                     # Internal parameters
                     prompt_file="categorize.yaml",
-                    output_model=Models.create_dynamic_model(
+                    output_model=Models.create_dynamic_model(categories),
                     output_lang=None,
                 )
+                end = datetime.now()
+                output.execution_time = (end - start).total_seconds()
+                return output
 
-
-
-
-
-
-
-
-
-
-
-            # Find the corresponding node
-            parent_node = categories.find_node(chosen_category)
-            if parent_node is None:
-                output.errors.append(
-                    f"Category '{chosen_category}' not found in tree after selection"
-                )
-                end = datetime.now()
-                output.execution_time = (end - start).total_seconds()
-                return output
-
-            parent_id = parent_node.node_id
-            final_output.append(parent_node.name)
-
-            # Copy analysis/logprobs/process from the last level's output
-            output.analysis = level_output.analysis
-            output.logprobs = level_output.logprobs
-            output.process = level_output.process
-
-            output.result = final_output
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
 
-
-        output = self._operator.run(
-            # User parameters
-            text=text,
-            category_list=categories,
-            with_analysis=with_analysis,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            mode=mode,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="categorize.yaml",
-            output_model=Models.create_dynamic_model(categories),
-            output_lang=None,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
-        return output
+        return output
 
     def extract_keywords(
         self,
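The two hunks above rewrite categorize() around a new category_tree mode: the tree is walked one level per operator call, the chosen child becomes the next parent, and the selected names are accumulated into result. For that loop to work, the categories object must provide get_level_count(), get_node() (looked up first by numeric id, later by the chosen category name) and get_children(), with nodes carrying node_id, name and description. The library's own tree type isn't shown in this diff, so the sketch below is only a stand-in built from those calls:

```python
from dataclasses import dataclass, field

@dataclass
class CategoryNode:
    node_id: int
    name: str
    description: str = ""
    children: list["CategoryNode"] = field(default_factory=list)

class CategoryTree:
    """Toy container exposing only what categorize(mode="category_tree") calls above."""

    def __init__(self, root: CategoryNode):
        self.root = root
        self._by_id: dict[int, CategoryNode] = {}
        self._by_name: dict[str, CategoryNode] = {}
        self._index(root)

    def _index(self, node: CategoryNode) -> None:
        self._by_id[node.node_id] = node
        self._by_name[node.name] = node
        for child in node.children:
            self._index(child)

    def get_node(self, key) -> CategoryNode | None:
        # the loop first asks for parent_id (an int), later for the chosen category (a str)
        return self._by_id.get(key) if isinstance(key, int) else self._by_name.get(key)

    def get_children(self, node: CategoryNode) -> list[CategoryNode]:
        return node.children

    def get_level_count(self) -> int:
        # number of selection rounds: depth of the tree below the root node (id 0)
        def depth(node: CategoryNode) -> int:
            return 1 + max((depth(child) for child in node.children), default=0)
        return depth(self.root) - 1
```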
@@ -203,29 +224,48 @@ class TheTool:
                 - result (list[str]): List of extracted keywords
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                mode=mode,
+                number_of_keywords=number_of_keywords,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="extract_keywords.yaml",
+                output_model=Models.ListStrOutput,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def extract_entities(
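extract_keywords (like every other method in this file) also threads validator and max_validation_retries through to the operator. The docstrings only say the callback validates "the output" and that failed validation is retried, so the exact argument it receives is an assumption here; a small sketch continuing the earlier tool instance:

```python
def exactly_five_keywords(result) -> bool:
    # assumption: the callback sees the parsed result (a list[str] for this tool);
    # the operator's precise validator contract lives in texttools/internals, not in this diff
    return isinstance(result, list) and len(result) == 5

out = tool.extract_keywords(
    "Retrieval-augmented generation combines search with language models ...",
    number_of_keywords=5,
    validator=exactly_five_keywords,
    max_validation_retries=2,
)
print(out.result if not out.errors else out.errors)
```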
@@ -261,28 +301,47 @@ class TheTool:
                 - result (list[dict]): List of entities with 'text' and 'type' keys
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="extract_entities.yaml",
+                output_model=Models.ListDictStrStrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def is_question(
@@ -316,28 +375,47 @@ class TheTool:
                 - result (bool): True if text is a question, False otherwise
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="is_question.yaml",
+                output_model=Models.BoolOutput,
+                mode=None,
+                output_lang=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def text_to_question(
@@ -373,28 +451,47 @@ class TheTool:
                 - result (str): The generated question
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="text_to_question.yaml",
+                output_model=Models.StrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def merge_questions(
@@ -432,29 +529,48 @@ class TheTool:
                 - result (str): The merged question
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-
-
-
-
-            text=text
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            text = ", ".join(text)
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="merge_questions.yaml",
+                output_model=Models.StrOutput,
+                mode=mode,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def rewrite(
@@ -492,28 +608,47 @@ class TheTool:
                 - result (str): The rewritten text
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="rewrite.yaml",
+                output_model=Models.StrOutput,
+                mode=mode,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def subject_to_question(
@@ -551,29 +686,48 @@ class TheTool:
                 - result (list[str]): List of generated questions
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                number_of_questions=number_of_questions,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="subject_to_question.yaml",
+                output_model=Models.ReasonListStrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def summarize(
@@ -609,28 +763,47 @@ class TheTool:
                 - result (str): The summary text
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="summarize.yaml",
+                output_model=Models.StrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def translate(
@@ -666,29 +839,48 @@ class TheTool:
                 - result (str): The translated text
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                target_language=target_language,
+                with_analysis=with_analysis,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="translate.yaml",
+                output_model=Models.StrOutput,
+                mode=None,
+                output_lang=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def detect_entity(
@@ -724,28 +916,123 @@ class TheTool:
                 - result (list[Entity]): The entities
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
+                - errors (list(str) | None): Errors occured during tool call
+        """
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="detect_entity.yaml",
+                output_model=Models.EntityDetectorOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
+        return output
+
+    def propositionize(
+        self,
+        text: str,
+        with_analysis: bool = False,
+        output_lang: str | None = None,
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
+        top_logprobs: int | None = None,
+        validator: Callable[[Any], bool] | None = None,
+        max_validation_retries: int | None = None,
+        priority: int | None = 0,
+    ) -> Models.ToolOutput:
+        """
+        Proposition input text to meaningful sentences.
+
+        Arguments:
+            text: The input text
+            with_analysis: Whether to include detailed reasoning analysis
+            output_lang: Language for the output summary
+            user_prompt: Additional instructions for summarization
+            temperature: Controls randomness (0.0 = deterministic, 1.0 = creative)
+            logprobs: Whether to return token probability information
+            top_logprobs: Number of top token alternatives to return if logprobs enabled
+            validator: Custom validation function to validate the output
+            max_validation_retries: Maximum number of retry attempts if validation fails
+            priority: Task execution priority (if enabled by vLLM and model)
+
+        Returns:
+            ToolOutput: Object containing:
+                - result (list[str]): The propositions
+                - logprobs (list | None): Probability data if logprobs enabled
+                - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="propositionize.yaml",
+                output_model=Models.ListStrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def run_custom(
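propositionize() is new in 1.1.18, backed by the added texttools/prompts/propositionize.yaml prompt and the existing ListStrOutput model, and it returns the extracted statements in result as a list[str]. A usage sketch in the same style as the other tools (the input sentence is just an example):

```python
out = tool.propositionize(
    "Marie Curie won two Nobel Prizes and founded the Radium Institute in Paris.",
    output_lang="en",
)
if not out.errors:
    for proposition in out.result:  # list[str] per the docstring above
        print("-", proposition)
```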
@@ -778,27 +1065,46 @@ class TheTool:
                 - result (str): The translated text
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User paramaeters
+                text=prompt,
+                output_model=output_model,
+                output_model_str=output_model.model_json_schema(),
+                output_lang=output_lang,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="run_custom.yaml",
+                user_prompt=None,
+                with_analysis=False,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
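run_custom() now follows the same ToolOutput/try-except pattern and passes output_model.model_json_schema() to the operator, which points at a Pydantic v2 model as the expected output_model. The method's full signature sits above this hunk and isn't shown, so the keyword names below (prompt, output_model) are inferred from the call body and should be treated as assumptions:

```python
from pydantic import BaseModel

class Sentiment(BaseModel):
    label: str
    confidence: float

out = tool.run_custom(
    prompt="Classify the sentiment of: 'The upgrade went smoothly.'",
    output_model=Sentiment,  # anything exposing model_json_schema(), i.e. a Pydantic v2 model
)
print(out.errors or out.result)
```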