hamtaa-texttools 1.1.17__py3-none-any.whl → 1.1.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {hamtaa_texttools-1.1.17.dist-info → hamtaa_texttools-1.1.18.dist-info}/METADATA +1 -1
- {hamtaa_texttools-1.1.17.dist-info → hamtaa_texttools-1.1.18.dist-info}/RECORD +16 -15
- texttools/__init__.py +1 -1
- texttools/batch/batch_runner.py +75 -64
- texttools/{tools/internals → internals}/async_operator.py +96 -48
- texttools/internals/exceptions.py +28 -0
- texttools/{tools/internals → internals}/models.py +2 -2
- texttools/internals/prompt_loader.py +80 -0
- texttools/{tools/internals → internals}/sync_operator.py +92 -47
- texttools/tools/async_tools.py +551 -341
- texttools/tools/sync_tools.py +548 -339
- texttools/tools/internals/prompt_loader.py +0 -56
- {hamtaa_texttools-1.1.17.dist-info → hamtaa_texttools-1.1.18.dist-info}/WHEEL +0 -0
- {hamtaa_texttools-1.1.17.dist-info → hamtaa_texttools-1.1.18.dist-info}/licenses/LICENSE +0 -0
- {hamtaa_texttools-1.1.17.dist-info → hamtaa_texttools-1.1.18.dist-info}/top_level.txt +0 -0
- /texttools/{tools/internals → internals}/formatters.py +0 -0
- /texttools/{tools/internals → internals}/operator_utils.py +0 -0
texttools/tools/sync_tools.py
CHANGED
@@ -4,19 +4,20 @@ from collections.abc import Callable
 
 from openai import OpenAI
 
-from texttools.
-import texttools.
+from texttools.internals.sync_operator import Operator
+import texttools.internals.models as Models
+from texttools.internals.exceptions import (
+    TextToolsError,
+    PromptError,
+    LLMError,
+    ValidationError,
+)
 
 
 class TheTool:
     """
     Each method configures the operator with a specific YAML prompt,
     output schema, and flags, then delegates execution to `operator.run()`.
-
-    Usage:
-        client = OpenAI(...)
-        tool = TheTool(client, model="model-name")
-        result = tool.categorize("text ...", with_analysis=True)
     """
 
     def __init__(
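The Usage example is dropped from the docstring without a replacement. A minimal sketch of the calling pattern implied by this release; the import path and constructor signature are assumed from the removed example, and the field names follow Models.ToolOutput as used throughout the new code:

from openai import OpenAI
from texttools.tools.sync_tools import TheTool  # assumed public path for this module

client = OpenAI()                            # any OpenAI-compatible client
tool = TheTool(client, model="model-name")   # constructor as in the removed Usage example

# In 1.1.18 the tool methods catch PromptError / LLMError / ValidationError /
# TextToolsError internally and report failures on the returned object instead
# of raising, so callers inspect output.errors.
output = tool.summarize("text ...")
if output.errors:
    print("summarize failed:", output.errors)
else:
    print(output.result, output.execution_time)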
@@ -66,41 +67,97 @@ class TheTool:
             - errors (list(str) | None): Errors occured during tool call
 
         """
-        … (18 removed lines not rendered in this view)
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+
+            if mode == "category_tree":
+                # Initializations
+                output = Models.ToolOutput()
+                levels = categories.get_level_count()
+                parent_id = 0
+                final_output = []
+
+                for _ in range(levels):
+                    # Get child nodes for current parent
+                    parent_node = categories.get_node(parent_id)
+                    children = categories.get_children(parent_node)
+
+                    # Check if child nodes exist
+                    if not children:
+                        output.errors.append(
+                            f"No categories found for parent_id {parent_id} in the tree"
+                        )
+                        end = datetime.now()
+                        output.execution_time = (end - start).total_seconds()
+                        return output
+
+                    # Extract category names and descriptions
+                    category_list = [
+                        f"Category Name: {node.name}, Description: {node.description}"
+                        for node in children
+                    ]
+                    category_names = [node.name for node in children]
+
+                    # Run categorization for this level
+                    level_output = self._operator.run(
+                        # User parameters
+                        text=text,
+                        category_list=category_list,
+                        with_analysis=with_analysis,
+                        user_prompt=user_prompt,
+                        temperature=temperature,
+                        logprobs=logprobs,
+                        top_logprobs=top_logprobs,
+                        mode=mode,
+                        validator=validator,
+                        max_validation_retries=max_validation_retries,
+                        priority=priority,
+                        # Internal parameters
+                        prompt_file="categorize.yaml",
+                        output_model=Models.create_dynamic_model(category_names),
+                        output_lang=None,
                     )
-        … (13 removed lines not rendered in this view)
+
+                    # Check for errors from operator
+                    if level_output.errors:
+                        output.errors.extend(level_output.errors)
+                        end = datetime.now()
+                        output.execution_time = (end - start).total_seconds()
+                        return output
+
+                    # Get the chosen category
+                    chosen_category = level_output.result
+
+                    # Find the corresponding node
+                    parent_node = categories.get_node(chosen_category)
+                    if parent_node is None:
+                        output.errors.append(
+                            f"Category '{chosen_category}' not found in tree after selection"
+                        )
+                        end = datetime.now()
+                        output.execution_time = (end - start).total_seconds()
+                        return output
+
+                    parent_id = parent_node.node_id
+                    final_output.append(parent_node.name)
+
+                # Copy analysis/logprobs/process from the last level's output
+                output.analysis = level_output.analysis
+                output.logprobs = level_output.logprobs
+                output.process = level_output.process
+
+                output.result = final_output
+                end = datetime.now()
+                output.execution_time = (end - start).total_seconds()
+                return output
+
+            else:
+                output = self._operator.run(
                     # User parameters
                     text=text,
-                    category_list=
+                    category_list=categories,
                     with_analysis=with_analysis,
                     user_prompt=user_prompt,
                     temperature=temperature,
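The new category_tree branch only touches the tree through get_level_count(), get_node() (looked up first by id, then by the chosen name), get_children(), and the node attributes name, description and node_id. The tree class itself is not part of this diff; a hypothetical minimal stand-in with just that surface, useful for reading the loop above:

from dataclasses import dataclass, field


@dataclass
class Node:
    node_id: int
    name: str
    description: str = ""
    children: list["Node"] = field(default_factory=list)


class ToyCategoryTree:
    # Hypothetical stand-in; the real tree type ships elsewhere in the package.
    def __init__(self, root: Node):
        self.root = root
        self._by_id: dict[int, Node] = {}
        self._by_name: dict[str, Node] = {}
        self._index(root)

    def _index(self, node: Node) -> None:
        self._by_id[node.node_id] = node
        self._by_name[node.name] = node
        for child in node.children:
            self._index(child)

    def get_level_count(self) -> int:
        # Number of selection rounds = depth of the tree below the root node.
        def depth(n: Node) -> int:
            return 1 + max((depth(c) for c in n.children), default=0)
        return depth(self.root) - 1

    def get_node(self, key):
        # categorize() passes an id first (parent_id = 0), then the chosen category name.
        return self._by_id.get(key) if isinstance(key, int) else self._by_name.get(key)

    def get_children(self, node: Node) -> list[Node]:
        return node.children


# Tiny usage example for the stand-in:
root = Node(0, "root", children=[
    Node(1, "Sports", "Sport news"),
    Node(2, "Tech", "Technology news", children=[Node(3, "AI", "AI news")]),
])
tree = ToyCategoryTree(root)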
@@ -112,65 +169,25 @@ class TheTool:
                     priority=priority,
                     # Internal parameters
                     prompt_file="categorize.yaml",
-                    output_model=Models.create_dynamic_model(
+                    output_model=Models.create_dynamic_model(categories),
                     output_lang=None,
                 )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
 
-            … (10 removed lines not rendered in this view)
-                # Find the corresponding node
-                parent_node = categories.get_node(chosen_category)
-                if parent_node is None:
-                    output.errors.append(
-                        f"Category '{chosen_category}' not found in tree after selection"
-                    )
-                    end = datetime.now()
-                    output.execution_time = (end - start).total_seconds()
-                    return output
-
-                parent_id = parent_node.node_id
-                final_output.append(parent_node.name)
-
-            # Copy analysis/logprobs/process from the last level's output
-            output.analysis = level_output.analysis
-            output.logprobs = level_output.logprobs
-            output.process = level_output.process
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
 
-
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
-
-        else:
-            output = self._operator.run(
-                # User parameters
-                text=text,
-                category_list=categories,
-                with_analysis=with_analysis,
-                user_prompt=user_prompt,
-                temperature=temperature,
-                logprobs=logprobs,
-                top_logprobs=top_logprobs,
-                mode=mode,
-                validator=validator,
-                max_validation_retries=max_validation_retries,
-                priority=priority,
-                # Internal parameters
-                prompt_file="categorize.yaml",
-                output_model=Models.create_dynamic_model(categories),
-                output_lang=None,
-            )
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
+        return output
 
     def extract_keywords(
         self,
@@ -212,27 +229,43 @@ class TheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occured during tool call
         """
-        … (21 removed lines not rendered in this view)
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                mode=mode,
+                number_of_keywords=number_of_keywords,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="extract_keywords.yaml",
+                output_model=Models.ListStrOutput,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def extract_entities(
@@ -273,26 +306,42 @@ class TheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occured during tool call
         """
-        … (20 removed lines not rendered in this view)
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="extract_entities.yaml",
+                output_model=Models.ListDictStrStrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def is_question(
@@ -331,26 +380,42 @@ class TheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occured during tool call
         """
-        … (20 removed lines not rendered in this view)
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="is_question.yaml",
+                output_model=Models.BoolOutput,
+                mode=None,
+                output_lang=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def text_to_question(
@@ -391,26 +456,42 @@ class TheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occured during tool call
         """
-        … (20 removed lines not rendered in this view)
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="text_to_question.yaml",
+                output_model=Models.StrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def merge_questions(
@@ -453,27 +534,43 @@ class TheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occured during tool call
         """
-        … (4 removed lines not rendered in this view)
-                text=text
-        … (16 removed lines not rendered in this view)
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            text = ", ".join(text)
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="merge_questions.yaml",
+                output_model=Models.StrOutput,
+                mode=mode,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def rewrite(
@@ -516,26 +613,42 @@ class TheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occured during tool call
         """
-        … (20 removed lines not rendered in this view)
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="rewrite.yaml",
+                output_model=Models.StrOutput,
+                mode=mode,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def subject_to_question(
@@ -578,27 +691,43 @@ class TheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occured during tool call
         """
-        … (21 removed lines not rendered in this view)
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                number_of_questions=number_of_questions,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="subject_to_question.yaml",
+                output_model=Models.ReasonListStrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def summarize(
@@ -639,26 +768,42 @@ class TheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occured during tool call
         """
-        … (20 removed lines not rendered in this view)
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="summarize.yaml",
+                output_model=Models.StrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def translate(
@@ -699,27 +844,43 @@ class TheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occured during tool call
         """
-        … (21 removed lines not rendered in this view)
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                target_language=target_language,
+                with_analysis=with_analysis,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="translate.yaml",
+                output_model=Models.StrOutput,
+                mode=None,
+                output_lang=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def detect_entity(
@@ -760,26 +921,42 @@ class TheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occured during tool call
         """
-        … (20 removed lines not rendered in this view)
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="detect_entity.yaml",
+                output_model=Models.EntityDetectorOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def propositionize(
@@ -820,26 +997,42 @@ class TheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occured during tool call
         """
-        … (20 removed lines not rendered in this view)
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="propositionize.yaml",
+                output_model=Models.ListStrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     def run_custom(
@@ -877,25 +1070,41 @@ class TheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occured during tool call
         """
-        … (21 removed lines not rendered in this view)
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = self._operator.run(
+                # User paramaeters
+                text=prompt,
+                output_model=output_model,
+                output_model_str=output_model.model_json_schema(),
+                output_lang=output_lang,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="run_custom.yaml",
+                user_prompt=None,
+                with_analysis=False,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
|