hamtaa-texttools 1.1.19__py3-none-any.whl → 1.1.21__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {hamtaa_texttools-1.1.19.dist-info → hamtaa_texttools-1.1.21.dist-info}/METADATA +15 -34
- hamtaa_texttools-1.1.21.dist-info/RECORD +32 -0
- texttools/batch/batch_config.py +14 -1
- texttools/batch/{internals/batch_manager.py → batch_manager.py} +6 -6
- texttools/batch/batch_runner.py +7 -7
- texttools/internals/async_operator.py +48 -84
- texttools/internals/models.py +73 -113
- texttools/internals/operator_utils.py +2 -2
- texttools/internals/prompt_loader.py +3 -20
- texttools/internals/sync_operator.py +47 -83
- texttools/internals/text_to_chunks.py +97 -0
- texttools/prompts/README.md +2 -2
- texttools/prompts/categorize.yaml +35 -77
- texttools/prompts/check_fact.yaml +2 -2
- texttools/prompts/extract_entities.yaml +3 -3
- texttools/prompts/extract_keywords.yaml +6 -6
- texttools/prompts/is_question.yaml +2 -2
- texttools/prompts/merge_questions.yaml +4 -4
- texttools/prompts/propositionize.yaml +2 -2
- texttools/prompts/rewrite.yaml +6 -6
- texttools/prompts/run_custom.yaml +1 -1
- texttools/prompts/subject_to_question.yaml +2 -2
- texttools/prompts/summarize.yaml +2 -2
- texttools/prompts/text_to_question.yaml +8 -6
- texttools/prompts/translate.yaml +2 -2
- texttools/tools/async_tools.py +497 -519
- texttools/tools/sync_tools.py +498 -520
- hamtaa_texttools-1.1.19.dist-info/RECORD +0 -33
- texttools/batch/internals/utils.py +0 -16
- texttools/internals/formatters.py +0 -24
- {hamtaa_texttools-1.1.19.dist-info → hamtaa_texttools-1.1.21.dist-info}/WHEEL +0 -0
- {hamtaa_texttools-1.1.19.dist-info → hamtaa_texttools-1.1.21.dist-info}/licenses/LICENSE +0 -0
- {hamtaa_texttools-1.1.19.dist-info → hamtaa_texttools-1.1.21.dist-info}/top_level.txt +0 -0
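A reading aid with a minimal sketch (not from the package's docs): across the diff below, every tool in AsyncTheTool now returns a Models.ToolOutput whose tool name and timing live in a metadata object, and exceptions are folded into an errors list of "ExceptionType: message" strings instead of propagating. The constructor arguments and import path for AsyncTheTool in this sketch are assumptions; only the ToolOutput fields (result, analysis, logprobs, errors, metadata.tool_name, metadata.execution_time) are read directly off the diff.

import asyncio

from openai import AsyncOpenAI
from texttools import AsyncTheTool  # import path assumed


async def main() -> None:
    # Constructor signature is an assumption; the diff only shows the methods.
    tool = AsyncTheTool(client=AsyncOpenAI(), model="some-model")

    out = await tool.is_question("Is this a question?")
    if out.errors:
        # Errors are collected, not raised (see the except blocks in the diff).
        print(out.errors)
    else:
        print(out.result, out.metadata.tool_name, out.metadata.execution_time)


asyncio.run(main())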
texttools/tools/async_tools.py CHANGED

@@ -1,5 +1,6 @@
-
-from
+import sys
+from time import perf_counter
+from typing import Literal
 from collections.abc import Callable
 
 from openai import AsyncOpenAI
@@ -12,6 +13,7 @@ from texttools.internals.exceptions import (
     LLMError,
     ValidationError,
 )
+from texttools.internals.text_to_chunks import text_to_chunks
 
 
 class AsyncTheTool:
@@ -35,11 +37,10 @@ class AsyncTheTool:
         user_prompt: str | None = None,
         temperature: float | None = 0.0,
         logprobs: bool = False,
-        top_logprobs: int
-
-        validator: Callable[[Any], bool] | None = None,
+        top_logprobs: int = 3,
+        validator: Callable[[object], bool] | None = None,
         max_validation_retries: int | None = None,
-        priority: int
+        priority: int = 0,
     ) -> Models.ToolOutput:
         """
         Categorize a text into a category / category tree.
@@ -48,62 +49,73 @@
 
         Arguments:
             text: The input text to categorize
-            categories: The category /
+            categories: The category list / category tree
             with_analysis: Whether to include detailed reasoning analysis
             user_prompt: Additional instructions for the categorization
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (str): The assigned category
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
 
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
+            if isinstance(categories, list):
+                operator_output = await self._operator.run(
+                    # User parameters
+                    text=text,
+                    category_list=categories,
+                    with_analysis=with_analysis,
+                    user_prompt=user_prompt,
+                    temperature=temperature,
+                    logprobs=logprobs,
+                    top_logprobs=top_logprobs,
+                    validator=validator,
+                    max_validation_retries=max_validation_retries,
+                    priority=priority,
+                    # Internal parameters
+                    prompt_file=prompt_file,
+                    output_model=Models.create_dynamic_model(categories),
+                    mode=None,
+                    output_lang=None,
+                )
 
-
-
-
+                metadata = Models.ToolOutputMetadata(
+                    tool_name=tool_name, execution_time=perf_counter() - start
+                )
+                tool_output = Models.ToolOutput(
+                    result=operator_output.result,
+                    analysis=operator_output.analysis,
+                    logprobs=operator_output.logprobs,
+                    metadata=metadata,
+                )
+
+            else:
                 levels = categories.get_level_count()
-
-
+                parent_node = categories.get_node("root")
+                final_categories = []
+                analysis = ""
+                logprobs_list = []
 
                 for _ in range(levels):
-
-
-
-
-                    # Check if child nodes exist
-                    if not children:
-                        output.errors.append(
-                            f"No categories found for parent_id {parent_id} in the tree"
-                        )
-                        end = datetime.now()
-                        output.execution_time = (end - start).total_seconds()
-                        return output
-
-                    # Extract category names and descriptions
+                    if not parent_node.children:
+                        break
+
                     category_list = [
-                        f"Category Name: {
-                        for node in children
+                        f"Category Name: {name}, Description: {node.description}"
+                        for name, node in parent_node.children.items()
                    ]
-                    category_names =
+                    category_names = list(parent_node.children.keys())
 
-
-                    level_output = await self._operator.run(
+                    level_operator_output = await self._operator.run(
                        # User parameters
                        text=text,
                        category_list=category_list,
@@ -112,84 +124,44 @@ class AsyncTheTool:
                        temperature=temperature,
                        logprobs=logprobs,
                        top_logprobs=top_logprobs,
-                        mode=mode,
                        validator=validator,
                        max_validation_retries=max_validation_retries,
                        priority=priority,
                        # Internal parameters
-                        prompt_file=
+                        prompt_file=prompt_file,
                        output_model=Models.create_dynamic_model(category_names),
+                        mode=None,
                        output_lang=None,
                    )
 
-
-                    if level_output.errors:
-                        output.errors.extend(level_output.errors)
-                        end = datetime.now()
-                        output.execution_time = (end - start).total_seconds()
-                        return output
-
-                    # Get the chosen category
-                    chosen_category = level_output.result
-
-                    # Find the corresponding node
+                    chosen_category = level_operator_output.result
                    parent_node = categories.get_node(chosen_category)
-                    if parent_node
-
-
-                        )
-                        end = datetime.now()
-                        output.execution_time = (end - start).total_seconds()
-                        return output
-
-                    parent_id = parent_node.node_id
-                    final_output.append(parent_node.name)
-
-                    # Copy analysis/logprobs/process from the last level's output
-                    output.analysis = level_output.analysis
-                    output.logprobs = level_output.logprobs
-                    output.process = level_output.process
-
-                    output.result = final_output
-                    end = datetime.now()
-                    output.execution_time = (end - start).total_seconds()
-                    return output
+                    if not parent_node:
+                        break
+                    final_categories.append(chosen_category)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    priority=priority,
-                    # Internal parameters
-                    prompt_file="categorize.yaml",
-                    output_model=Models.create_dynamic_model(categories),
-                    output_lang=None,
+                    if with_analysis:
+                        analysis += level_operator_output.analysis
+                    if logprobs:
+                        logprobs_list.extend(level_operator_output.logprobs)
+
+                metadata = Models.ToolOutputMetadata(
+                    tool_name=tool_name, execution_time=(perf_counter() - start)
+                )
+                tool_output = Models.ToolOutput(
+                    result=final_categories,
+                    analysis=analysis,
+                    logprobs=logprobs_list,
+                    metadata=metadata,
                 )
-
-
-
-
-
-
-
-
-        except ValidationError as e:
-            output.errors.append(f"Validation error: {e}")
-        except TextToolsError as e:
-            output.errors.append(f"TextTools error: {e}")
-        except Exception as e:
-            output.errors.append(f"Unexpected error: {e}")
-
-        return output
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     async def extract_keywords(
         self,
@@ -199,10 +171,10 @@ class AsyncTheTool:
         user_prompt: str | None = None,
         temperature: float | None = 0.0,
         logprobs: bool = False,
-        top_logprobs: int
+        top_logprobs: int = 3,
         mode: Literal["auto", "threshold", "count"] = "auto",
         number_of_keywords: int | None = None,
-        validator: Callable[[
+        validator: Callable[[object], bool] | None = None,
         max_validation_retries: int | None = None,
         priority: int | None = 0,
     ) -> Models.ToolOutput:
@@ -214,28 +186,22 @@ class AsyncTheTool:
             with_analysis: Whether to include detailed reasoning analysis
             output_lang: Language for the output response
             user_prompt: Additional instructions for keyword extraction
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (list[str]): List of extracted keywords
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-            output = await self._operator.run(
+            operator_output = await self._operator.run(
                # User parameters
                text=text,
                with_analysis=with_analysis,
@@ -250,36 +216,39 @@ class AsyncTheTool:
                max_validation_retries=max_validation_retries,
                priority=priority,
                # Internal parameters
-                prompt_file=
-                output_model=Models.
+                prompt_file=prompt_file,
+                output_model=Models.ListStr,
+            )
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
            )
-
-
-
-
-
-
-
-
-        except ValidationError as e:
-            output.errors.append(f"Validation error: {e}")
-        except TextToolsError as e:
-            output.errors.append(f"TextTools error: {e}")
-        except Exception as e:
-            output.errors.append(f"Unexpected error: {e}")
-
-        return output
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     async def extract_entities(
         self,
         text: str,
+        entities: list[str] | None = None,
         with_analysis: bool = False,
         output_lang: str | None = None,
         user_prompt: str | None = None,
         temperature: float | None = 0.0,
         logprobs: bool = False,
-        top_logprobs: int
-        validator: Callable[[
+        top_logprobs: int = 3,
+        validator: Callable[[object], bool] | None = None,
         max_validation_retries: int | None = None,
         priority: int | None = 0,
     ) -> Models.ToolOutput:
@@ -288,33 +257,30 @@ class AsyncTheTool:
 
         Arguments:
             text: The input text to extract entities from
+            entities: List of entities provided by user (Optional)
             with_analysis: Whether to include detailed reasoning analysis
             output_lang: Language for the output response
             user_prompt: Additional instructions for entity extraction
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (list[dict]): List of entities with 'text' and 'type' keys
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-            output = await self._operator.run(
+            operator_output = await self._operator.run(
                # User parameters
                text=text,
+                entities=entities
+                or "all named entities (e.g., PER, ORG, LOC, DAT, etc.)",
                with_analysis=with_analysis,
                output_lang=output_lang,
                user_prompt=user_prompt,
@@ -325,26 +291,28 @@ class AsyncTheTool:
                max_validation_retries=max_validation_retries,
                priority=priority,
                # Internal parameters
-                prompt_file=
-                output_model=Models.
+                prompt_file=prompt_file,
+                output_model=Models.ListDictStrStr,
                mode=None,
            )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     async def is_question(
         self,
@@ -353,8 +321,8 @@ class AsyncTheTool:
         user_prompt: str | None = None,
         temperature: float | None = 0.0,
         logprobs: bool = False,
-        top_logprobs: int
-        validator: Callable[[
+        top_logprobs: int = 3,
+        validator: Callable[[object], bool] | None = None,
         max_validation_retries: int | None = None,
         priority: int | None = 0,
     ) -> Models.ToolOutput:
@@ -365,28 +333,22 @@ class AsyncTheTool:
             text: The input text to analyze
             with_analysis: Whether to include detailed reasoning analysis
             user_prompt: Additional instructions for question detection
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (bool): True if text is a question, False otherwise
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-            output = await self._operator.run(
+            operator_output = await self._operator.run(
                # User parameters
                text=text,
                with_analysis=with_analysis,
@@ -398,38 +360,41 @@ class AsyncTheTool:
                max_validation_retries=max_validation_retries,
                priority=priority,
                # Internal parameters
-                prompt_file=
-                output_model=Models.
+                prompt_file=prompt_file,
+                output_model=Models.Bool,
                mode=None,
                output_lang=None,
            )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     async def text_to_question(
         self,
         text: str,
+        number_of_questions: int,
         with_analysis: bool = False,
         output_lang: str | None = None,
         user_prompt: str | None = None,
         temperature: float | None = 0.0,
         logprobs: bool = False,
-        top_logprobs: int
-        validator: Callable[[
+        top_logprobs: int = 3,
+        validator: Callable[[object], bool] | None = None,
         max_validation_retries: int | None = None,
         priority: int | None = 0,
     ) -> Models.ToolOutput:
@@ -438,33 +403,29 @@ class AsyncTheTool:
 
         Arguments:
             text: The input text to generate a question from
+            number_of_questions: Number of questions to generate
             with_analysis: Whether to include detailed reasoning analysis
             output_lang: Language for the output question
             user_prompt: Additional instructions for question generation
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (str): The generated question
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-            output = await self._operator.run(
+            operator_output = await self._operator.run(
                # User parameters
                text=text,
+                number_of_questions=number_of_questions,
                with_analysis=with_analysis,
                output_lang=output_lang,
                user_prompt=user_prompt,
@@ -475,26 +436,28 @@ class AsyncTheTool:
                max_validation_retries=max_validation_retries,
                priority=priority,
                # Internal parameters
-                prompt_file=
-                output_model=Models.
+                prompt_file=prompt_file,
+                output_model=Models.ReasonListStr,
                mode=None,
            )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     async def merge_questions(
         self,
@@ -504,9 +467,9 @@ class AsyncTheTool:
         user_prompt: str | None = None,
         temperature: float | None = 0.0,
         logprobs: bool = False,
-        top_logprobs: int
+        top_logprobs: int = 3,
         mode: Literal["default", "reason"] = "default",
-        validator: Callable[[
+        validator: Callable[[object], bool] | None = None,
         max_validation_retries: int | None = None,
         priority: int | None = 0,
     ) -> Models.ToolOutput:
@@ -518,30 +481,24 @@ class AsyncTheTool:
             with_analysis: Whether to include detailed reasoning analysis
             output_lang: Language for the output merged question
             user_prompt: Additional instructions for question merging
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             mode: Merging strategy - 'default' for direct merge, 'reason' for reasoned merge
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (str): The merged question
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-            start = datetime.now()
            text = ", ".join(text)
-
+            operator_output = await self._operator.run(
                # User parameters
                text=text,
                with_analysis=with_analysis,
@@ -554,26 +511,28 @@ class AsyncTheTool:
                max_validation_retries=max_validation_retries,
                priority=priority,
                # Internal parameters
-                prompt_file=
-                output_model=Models.
+                prompt_file=prompt_file,
+                output_model=Models.Str,
                mode=mode,
            )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     async def rewrite(
         self,
@@ -583,9 +542,9 @@ class AsyncTheTool:
         user_prompt: str | None = None,
         temperature: float | None = 0.0,
         logprobs: bool = False,
-        top_logprobs: int
+        top_logprobs: int = 3,
         mode: Literal["positive", "negative", "hard_negative"] = "positive",
-        validator: Callable[[
+        validator: Callable[[object], bool] | None = None,
         max_validation_retries: int | None = None,
         priority: int | None = 0,
     ) -> Models.ToolOutput:
@@ -597,29 +556,23 @@ class AsyncTheTool:
             with_analysis: Whether to include detailed reasoning analysis
             output_lang: Language for the output rewritten text
             user_prompt: Additional instructions for rewriting
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             mode: Rewriting mode - 'positive', 'negative', or 'hard_negative'
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (str): The rewritten text
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-            output = await self._operator.run(
+            operator_output = await self._operator.run(
                # User parameters
                text=text,
                with_analysis=with_analysis,
@@ -632,26 +585,28 @@ class AsyncTheTool:
                max_validation_retries=max_validation_retries,
                priority=priority,
                # Internal parameters
-                prompt_file=
-                output_model=Models.
+                prompt_file=prompt_file,
+                output_model=Models.Str,
                mode=mode,
            )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     async def subject_to_question(
         self,
@@ -662,8 +617,8 @@ class AsyncTheTool:
         user_prompt: str | None = None,
         temperature: float | None = 0.0,
         logprobs: bool = False,
-        top_logprobs: int
-        validator: Callable[[
+        top_logprobs: int = 3,
+        validator: Callable[[object], bool] | None = None,
         max_validation_retries: int | None = None,
         priority: int | None = 0,
     ) -> Models.ToolOutput:
@@ -676,28 +631,22 @@ class AsyncTheTool:
             with_analysis: Whether to include detailed reasoning analysis
             output_lang: Language for the output questions
             user_prompt: Additional instructions for question generation
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (list[str]): List of generated questions
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-            output = await self._operator.run(
+            operator_output = await self._operator.run(
                # User parameters
                text=text,
                number_of_questions=number_of_questions,
@@ -711,26 +660,28 @@ class AsyncTheTool:
                max_validation_retries=max_validation_retries,
                priority=priority,
                # Internal parameters
-                prompt_file=
-                output_model=Models.
+                prompt_file=prompt_file,
+                output_model=Models.ReasonListStr,
                mode=None,
            )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     async def summarize(
         self,
@@ -740,8 +691,8 @@ class AsyncTheTool:
         user_prompt: str | None = None,
         temperature: float | None = 0.0,
         logprobs: bool = False,
-        top_logprobs: int
-        validator: Callable[[
+        top_logprobs: int = 3,
+        validator: Callable[[object], bool] | None = None,
         max_validation_retries: int | None = None,
         priority: int | None = 0,
     ) -> Models.ToolOutput:
@@ -753,28 +704,22 @@ class AsyncTheTool:
             with_analysis: Whether to include detailed reasoning analysis
             output_lang: Language for the output summary
             user_prompt: Additional instructions for summarization
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (str): The summary text
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-            output = await self._operator.run(
+            operator_output = await self._operator.run(
                # User parameters
                text=text,
                with_analysis=with_analysis,
@@ -787,37 +732,40 @@ class AsyncTheTool:
                max_validation_retries=max_validation_retries,
                priority=priority,
                # Internal parameters
-                prompt_file=
-                output_model=Models.
+                prompt_file=prompt_file,
+                output_model=Models.Str,
                mode=None,
            )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     async def translate(
         self,
         text: str,
         target_language: str,
+        use_chunker: bool = True,
         with_analysis: bool = False,
         user_prompt: str | None = None,
         temperature: float | None = 0.0,
         logprobs: bool = False,
-        top_logprobs: int
-        validator: Callable[[
+        top_logprobs: int = 3,
+        validator: Callable[[object], bool] | None = None,
         max_validation_retries: int | None = None,
         priority: int | None = 0,
     ) -> Models.ToolOutput:
@@ -829,63 +777,104 @@ class AsyncTheTool:
         Arguments:
             text: The input text to translate
             target_language: The target language for translation
+            use_chunker: Whether to use text chunker for text length bigger than 1500
             with_analysis: Whether to include detailed reasoning analysis
             user_prompt: Additional instructions for translation
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (str): The translated text
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            if len(text.split(" ")) > 1500 and use_chunker:
+                chunks = text_to_chunks(text, 1200, 0)
+                translation = ""
+                analysis = ""
+                logprobs_list = []
+
+                for chunk in chunks:
+                    chunk_operator_output = await self._operator.run(
+                        # User parameters
+                        text=chunk,
+                        target_language=target_language,
+                        with_analysis=with_analysis,
+                        user_prompt=user_prompt,
+                        temperature=temperature,
+                        logprobs=logprobs,
+                        top_logprobs=top_logprobs,
+                        validator=validator,
+                        max_validation_retries=max_validation_retries,
+                        priority=priority,
+                        # Internal parameters
+                        prompt_file=prompt_file,
+                        output_model=Models.Str,
+                        mode=None,
+                        output_lang=None,
+                    )
+
+                    translation += chunk_operator_output.result + "\n"
+
+                    if with_analysis:
+                        analysis += chunk_operator_output.analysis
+                    if logprobs:
+                        logprobs_list.extend(chunk_operator_output.logprobs)
+
+                metadata = Models.ToolOutputMetadata(
+                    tool_name=tool_name, execution_time=perf_counter() - start
+                )
+                tool_output = Models.ToolOutput(
+                    result=translation,
+                    logprobs=logprobs_list,
+                    analysis=analysis,
+                    metadata=metadata,
+                )
+
+            else:
+                operator_output = await self._operator.run(
+                    # User parameters
+                    text=text,
+                    target_language=target_language,
+                    with_analysis=with_analysis,
+                    user_prompt=user_prompt,
+                    temperature=temperature,
+                    logprobs=logprobs,
+                    top_logprobs=top_logprobs,
+                    validator=validator,
+                    max_validation_retries=max_validation_retries,
+                    priority=priority,
+                    # Internal parameters
+                    prompt_file=prompt_file,
+                    output_model=Models.Str,
+                    mode=None,
+                    output_lang=None,
+                )
+
+                metadata = Models.ToolOutputMetadata(
+                    tool_name=tool_name, execution_time=perf_counter() - start
+                )
+                tool_output = Models.ToolOutput(
+                    result=operator_output.result,
+                    logprobs=operator_output.logprobs,
+                    analysis=operator_output.analysis,
+                    metadata=metadata,
+                )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
            )
-
-
-        return output
-
-        except PromptError as e:
-            output.errors.append(f"Prompt error: {e}")
-        except LLMError as e:
-            output.errors.append(f"LLM error: {e}")
-        except ValidationError as e:
-            output.errors.append(f"Validation error: {e}")
-        except TextToolsError as e:
-            output.errors.append(f"TextTools error: {e}")
-        except Exception as e:
-            output.errors.append(f"Unexpected error: {e}")
-
-        return output
+
+        return tool_output
 
     async def propositionize(
         self,
@@ -895,8 +884,8 @@ class AsyncTheTool:
         user_prompt: str | None = None,
         temperature: float | None = 0.0,
         logprobs: bool = False,
-        top_logprobs: int
-        validator: Callable[[
+        top_logprobs: int = 3,
+        validator: Callable[[object], bool] | None = None,
         max_validation_retries: int | None = None,
         priority: int | None = 0,
     ) -> Models.ToolOutput:
@@ -910,28 +899,22 @@ class AsyncTheTool:
             with_analysis: Whether to include detailed reasoning analysis
             output_lang: Language for the output summary
             user_prompt: Additional instructions for summarization
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (list[str]): The propositions
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-            output = await self._operator.run(
+            operator_output = await self._operator.run(
                # User parameters
                text=text,
                with_analysis=with_analysis,
@@ -944,26 +927,28 @@ class AsyncTheTool:
                max_validation_retries=max_validation_retries,
                priority=priority,
                # Internal parameters
-                prompt_file=
-                output_model=Models.
+                prompt_file=prompt_file,
+                output_model=Models.ListStr,
                mode=None,
            )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     async def check_fact(
         self,
@@ -974,8 +959,8 @@ class AsyncTheTool:
         user_prompt: str | None = None,
         temperature: float | None = 0.0,
         logprobs: bool = False,
-        top_logprobs: int
-        validator: Callable[[
+        top_logprobs: int = 3,
+        validator: Callable[[object], bool] | None = None,
         max_validation_retries: int | None = None,
         priority: int | None = 0,
     ) -> Models.ToolOutput:
@@ -990,28 +975,22 @@ class AsyncTheTool:
             with_analysis: Whether to include detailed reasoning analysis
             output_lang: Language for the output summary
             user_prompt: Additional instructions for summarization
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (bool): statement is relevant to source text or not
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-            output = await self._operator.run(
+            operator_output = await self._operator.run(
                # User parameters
                text=text,
                with_analysis=with_analysis,
@@ -1024,38 +1003,41 @@ class AsyncTheTool:
                max_validation_retries=max_validation_retries,
                priority=priority,
                # Internal parameters
-                prompt_file=
-                output_model=Models.
+                prompt_file=prompt_file,
+                output_model=Models.Bool,
                mode=None,
                source_text=source_text,
            )
-
-
-
-
-
-
-
-
-
-
-
-            except Exception as e:
-
-
-
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     async def run_custom(
         self,
         prompt: str,
-        output_model:
+        output_model: object,
         with_analysis: bool = False,
         analyze_template: str | None = None,
         output_lang: str | None = None,
         temperature: float | None = None,
         logprobs: bool | None = None,
-        top_logprobs: int
-        validator: Callable[[
+        top_logprobs: int = 3,
+        validator: Callable[[object], bool] | None = None,
         max_validation_retries: int | None = None,
         priority: int | None = 0,
     ) -> Models.ToolOutput:
@@ -1070,28 +1052,22 @@ class AsyncTheTool:
             with_analysis: Whether to include detailed reasoning analysis
             analyze_template: The analyze template used for reasoning analysis
             output_lang: Language for the output summary
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (str): The translated text
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-            output = await self._operator.run(
+            operator_output = await self._operator.run(
                # User paramaeters
                text=prompt,
                output_model=output_model,
@@ -1106,23 +1082,25 @@ class AsyncTheTool:
                max_validation_retries=max_validation_retries,
                priority=priority,
                # Internal parameters
-                prompt_file=
+                prompt_file=prompt_file,
                user_prompt=None,
                mode=None,
            )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output