hamtaa-texttools 1.1.20__py3-none-any.whl → 1.1.21__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {hamtaa_texttools-1.1.20.dist-info → hamtaa_texttools-1.1.21.dist-info}/METADATA +8 -27
- hamtaa_texttools-1.1.21.dist-info/RECORD +32 -0
- texttools/batch/batch_config.py +14 -1
- texttools/batch/batch_runner.py +1 -1
- texttools/internals/async_operator.py +45 -79
- texttools/internals/models.py +74 -105
- texttools/internals/operator_utils.py +2 -26
- texttools/internals/prompt_loader.py +3 -20
- texttools/internals/sync_operator.py +44 -78
- texttools/prompts/README.md +2 -2
- texttools/prompts/categorize.yaml +35 -77
- texttools/prompts/check_fact.yaml +2 -2
- texttools/prompts/extract_entities.yaml +2 -2
- texttools/prompts/extract_keywords.yaml +6 -6
- texttools/prompts/is_question.yaml +2 -2
- texttools/prompts/merge_questions.yaml +4 -4
- texttools/prompts/propositionize.yaml +2 -2
- texttools/prompts/rewrite.yaml +6 -6
- texttools/prompts/run_custom.yaml +1 -1
- texttools/prompts/subject_to_question.yaml +2 -2
- texttools/prompts/summarize.yaml +2 -2
- texttools/prompts/text_to_question.yaml +2 -2
- texttools/prompts/translate.yaml +2 -2
- texttools/tools/async_tools.py +393 -485
- texttools/tools/sync_tools.py +394 -486
- hamtaa_texttools-1.1.20.dist-info/RECORD +0 -33
- texttools/batch/internals/utils.py +0 -13
- {hamtaa_texttools-1.1.20.dist-info → hamtaa_texttools-1.1.21.dist-info}/WHEEL +0 -0
- {hamtaa_texttools-1.1.20.dist-info → hamtaa_texttools-1.1.21.dist-info}/licenses/LICENSE +0 -0
- {hamtaa_texttools-1.1.20.dist-info → hamtaa_texttools-1.1.21.dist-info}/top_level.txt +0 -0
- /texttools/batch/{internals/batch_manager.py → batch_manager.py} +0 -0
texttools/tools/sync_tools.py
CHANGED
@@ -1,4 +1,5 @@
-from datetime import datetime
+import sys
+from time import perf_counter
 from typing import Literal
 from collections.abc import Callable
 
@@ -37,10 +38,9 @@ class TheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int = 3,
-        mode: Literal["category_list", "category_tree"] = "category_list",
         validator: Callable[[object], bool] | None = None,
         max_validation_retries: int | None = None,
-        priority: int
+        priority: int = 0,
     ) -> Models.ToolOutput:
         """
         Categorize a text into a category / category tree.
@@ -49,62 +49,73 @@ class TheTool:
 
         Arguments:
             text: The input text to categorize
-            categories: The category /
+            categories: The category list / category tree
             with_analysis: Whether to include detailed reasoning analysis
             user_prompt: Additional instructions for the categorization
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (str): The assigned category
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
 
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
+            if isinstance(categories, list):
+                operator_output = self._operator.run(
+                    # User parameters
+                    text=text,
+                    category_list=categories,
+                    with_analysis=with_analysis,
+                    user_prompt=user_prompt,
+                    temperature=temperature,
+                    logprobs=logprobs,
+                    top_logprobs=top_logprobs,
+                    validator=validator,
+                    max_validation_retries=max_validation_retries,
+                    priority=priority,
+                    # Internal parameters
+                    prompt_file=prompt_file,
+                    output_model=Models.create_dynamic_model(categories),
+                    mode=None,
+                    output_lang=None,
+                )
+
+                metadata = Models.ToolOutputMetadata(
+                    tool_name=tool_name, execution_time=perf_counter() - start
+                )
+                tool_output = Models.ToolOutput(
+                    result=operator_output.result,
+                    analysis=operator_output.analysis,
+                    logprobs=operator_output.logprobs,
+                    metadata=metadata,
+                )
 
-
+            else:
                 levels = categories.get_level_count()
-
+                parent_node = categories.get_node("root")
                 final_categories = []
                 analysis = ""
-
+                logprobs_list = []
 
                 for _ in range(levels):
-
-
-
-
-                    # Check if child nodes exist
-                    if not children:
-                        output.errors.append(
-                            f"No categories found for parent_id {parent_id} in the tree"
-                        )
-                        end = datetime.now()
-                        output.execution_time = (end - start).total_seconds()
-                        return output
-
-                    # Extract category names and descriptions
+                    if not parent_node.children:
+                        break
+
                     category_list = [
-                        f"Category Name: {
-                        for node in children
+                        f"Category Name: {name}, Description: {node.description}"
+                        for name, node in parent_node.children.items()
                     ]
-                    category_names =
+                    category_names = list(parent_node.children.keys())
 
-
-                    level_output = self._operator.run(
+                    level_operator_output = self._operator.run(
                         # User parameters
                         text=text,
                         category_list=category_list,
@@ -113,90 +124,44 @@ class TheTool:
                         temperature=temperature,
                         logprobs=logprobs,
                         top_logprobs=top_logprobs,
-                        mode=mode,
                         validator=validator,
                         max_validation_retries=max_validation_retries,
                         priority=priority,
                         # Internal parameters
-                        prompt_file=
+                        prompt_file=prompt_file,
                         output_model=Models.create_dynamic_model(category_names),
+                        mode=None,
                         output_lang=None,
                     )
 
-
-                    if level_output.errors:
-                        output.errors.extend(level_output.errors)
-                        end = datetime.now()
-                        output.execution_time = (end - start).total_seconds()
-                        return output
-
-                    # Get the chosen category
-                    chosen_category = level_output.result
-
-                    # Find the corresponding node
+                    chosen_category = level_operator_output.result
                     parent_node = categories.get_node(chosen_category)
-                    if parent_node
-
-
-                        )
-                        end = datetime.now()
-                        output.execution_time = (end - start).total_seconds()
-                        return output
-
-                    parent_id = parent_node.node_id
-                    final_categories.append(parent_node.name)
+                    if not parent_node:
+                        break
+                    final_categories.append(chosen_category)
 
                     if with_analysis:
-                        analysis +=
+                        analysis += level_operator_output.analysis
                     if logprobs:
-
+                        logprobs_list.extend(level_operator_output.logprobs)
 
-
-
+                metadata = Models.ToolOutputMetadata(
+                    tool_name=tool_name, execution_time=(perf_counter() - start)
+                )
+                tool_output = Models.ToolOutput(
                     result=final_categories,
-                    logprobs=logprobs,
                     analysis=analysis,
-
-
+                    logprobs=logprobs_list,
+                    metadata=metadata,
                 )
 
-
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
 
-
-            output = self._operator.run(
-                # User parameters
-                text=text,
-                category_list=categories,
-                with_analysis=with_analysis,
-                user_prompt=user_prompt,
-                temperature=temperature,
-                logprobs=logprobs,
-                top_logprobs=top_logprobs,
-                mode=mode,
-                validator=validator,
-                max_validation_retries=max_validation_retries,
-                priority=priority,
-                # Internal parameters
-                prompt_file="categorize.yaml",
-                output_model=Models.create_dynamic_model(categories),
-                output_lang=None,
-            )
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
-
-        except PromptError as e:
-            output.errors.append(f"Prompt error: {e}")
-        except LLMError as e:
-            output.errors.append(f"LLM error: {e}")
-        except ValidationError as e:
-            output.errors.append(f"Validation error: {e}")
-        except TextToolsError as e:
-            output.errors.append(f"TextTools error: {e}")
-        except Exception as e:
-            output.errors.append(f"Unexpected error: {e}")
-
-        return output
+        return tool_output
 
     def extract_keywords(
         self,
@@ -221,28 +186,22 @@ class TheTool:
             with_analysis: Whether to include detailed reasoning analysis
             output_lang: Language for the output response
             user_prompt: Additional instructions for keyword extraction
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (list[str]): List of extracted keywords
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-            output = self._operator.run(
+            operator_output = self._operator.run(
                 # User parameters
                 text=text,
                 with_analysis=with_analysis,
@@ -257,25 +216,27 @@ class TheTool:
                 max_validation_retries=max_validation_retries,
                 priority=priority,
                 # Internal parameters
-                prompt_file=
+                prompt_file=prompt_file,
                 output_model=Models.ListStr,
             )
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
-
-        except PromptError as e:
-            output.errors.append(f"Prompt error: {e}")
-        except LLMError as e:
-            output.errors.append(f"LLM error: {e}")
-        except ValidationError as e:
-            output.errors.append(f"Validation error: {e}")
-        except TextToolsError as e:
-            output.errors.append(f"TextTools error: {e}")
-        except Exception as e:
-            output.errors.append(f"Unexpected error: {e}")
-
-        return output
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     def extract_entities(
         self,
@@ -300,28 +261,22 @@ class TheTool:
             with_analysis: Whether to include detailed reasoning analysis
             output_lang: Language for the output response
             user_prompt: Additional instructions for entity extraction
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (list[dict]): List of entities with 'text' and 'type' keys
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-            output = self._operator.run(
+            operator_output = self._operator.run(
                 # User parameters
                 text=text,
                 entities=entities
@@ -336,26 +291,28 @@ class TheTool:
                 max_validation_retries=max_validation_retries,
                 priority=priority,
                 # Internal parameters
-                prompt_file=
+                prompt_file=prompt_file,
                 output_model=Models.ListDictStrStr,
                 mode=None,
             )
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
-
-        except PromptError as e:
-            output.errors.append(f"Prompt error: {e}")
-        except LLMError as e:
-            output.errors.append(f"LLM error: {e}")
-        except ValidationError as e:
-            output.errors.append(f"Validation error: {e}")
-        except TextToolsError as e:
-            output.errors.append(f"TextTools error: {e}")
-        except Exception as e:
-            output.errors.append(f"Unexpected error: {e}")
-
-        return output
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     def is_question(
         self,
@@ -376,28 +333,22 @@ class TheTool:
             text: The input text to analyze
             with_analysis: Whether to include detailed reasoning analysis
             user_prompt: Additional instructions for question detection
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (bool): True if text is a question, False otherwise
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-            output = self._operator.run(
+            operator_output = self._operator.run(
                 # User parameters
                 text=text,
                 with_analysis=with_analysis,
@@ -409,27 +360,29 @@ class TheTool:
                 max_validation_retries=max_validation_retries,
                 priority=priority,
                 # Internal parameters
-                prompt_file=
+                prompt_file=prompt_file,
                 output_model=Models.Bool,
                 mode=None,
                 output_lang=None,
             )
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
-
-        except PromptError as e:
-            output.errors.append(f"Prompt error: {e}")
-        except LLMError as e:
-            output.errors.append(f"LLM error: {e}")
-        except ValidationError as e:
-            output.errors.append(f"Validation error: {e}")
-        except TextToolsError as e:
-            output.errors.append(f"TextTools error: {e}")
-        except Exception as e:
-            output.errors.append(f"Unexpected error: {e}")
-
-        return output
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     def text_to_question(
         self,
@@ -454,28 +407,22 @@ class TheTool:
             with_analysis: Whether to include detailed reasoning analysis
             output_lang: Language for the output question
             user_prompt: Additional instructions for question generation
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (str): The generated question
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-            output = self._operator.run(
+            operator_output = self._operator.run(
                 # User parameters
                 text=text,
                 number_of_questions=number_of_questions,
@@ -489,26 +436,28 @@ class TheTool:
                 max_validation_retries=max_validation_retries,
                 priority=priority,
                 # Internal parameters
-                prompt_file=
+                prompt_file=prompt_file,
                 output_model=Models.ReasonListStr,
                 mode=None,
             )
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
-
-        except PromptError as e:
-            output.errors.append(f"Prompt error: {e}")
-        except LLMError as e:
-            output.errors.append(f"LLM error: {e}")
-        except ValidationError as e:
-            output.errors.append(f"Validation error: {e}")
-        except TextToolsError as e:
-            output.errors.append(f"TextTools error: {e}")
-        except Exception as e:
-            output.errors.append(f"Unexpected error: {e}")
-
-        return output
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     def merge_questions(
         self,
@@ -532,30 +481,24 @@ class TheTool:
             with_analysis: Whether to include detailed reasoning analysis
             output_lang: Language for the output merged question
             user_prompt: Additional instructions for question merging
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             mode: Merging strategy - 'default' for direct merge, 'reason' for reasoned merge
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (str): The merged question
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-            start = datetime.now()
             text = ", ".join(text)
-
+            operator_output = self._operator.run(
                 # User parameters
                 text=text,
                 with_analysis=with_analysis,
@@ -568,26 +511,28 @@ class TheTool:
                 max_validation_retries=max_validation_retries,
                 priority=priority,
                 # Internal parameters
-                prompt_file=
+                prompt_file=prompt_file,
                 output_model=Models.Str,
                 mode=mode,
             )
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
-
-        except PromptError as e:
-            output.errors.append(f"Prompt error: {e}")
-        except LLMError as e:
-            output.errors.append(f"LLM error: {e}")
-        except ValidationError as e:
-            output.errors.append(f"Validation error: {e}")
-        except TextToolsError as e:
-            output.errors.append(f"TextTools error: {e}")
-        except Exception as e:
-            output.errors.append(f"Unexpected error: {e}")
-
-        return output
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     def rewrite(
         self,
@@ -611,29 +556,23 @@ class TheTool:
             with_analysis: Whether to include detailed reasoning analysis
             output_lang: Language for the output rewritten text
             user_prompt: Additional instructions for rewriting
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             mode: Rewriting mode - 'positive', 'negative', or 'hard_negative'
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (str): The rewritten text
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-            output = self._operator.run(
+            operator_output = self._operator.run(
                 # User parameters
                 text=text,
                 with_analysis=with_analysis,
@@ -646,26 +585,28 @@ class TheTool:
                 max_validation_retries=max_validation_retries,
                 priority=priority,
                 # Internal parameters
-                prompt_file=
+                prompt_file=prompt_file,
                 output_model=Models.Str,
                 mode=mode,
             )
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
-
-        except PromptError as e:
-            output.errors.append(f"Prompt error: {e}")
-        except LLMError as e:
-            output.errors.append(f"LLM error: {e}")
-        except ValidationError as e:
-            output.errors.append(f"Validation error: {e}")
-        except TextToolsError as e:
-            output.errors.append(f"TextTools error: {e}")
-        except Exception as e:
-            output.errors.append(f"Unexpected error: {e}")
-
-        return output
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     def subject_to_question(
         self,
@@ -690,28 +631,22 @@ class TheTool:
             with_analysis: Whether to include detailed reasoning analysis
             output_lang: Language for the output questions
             user_prompt: Additional instructions for question generation
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (list[str]): List of generated questions
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-            output = self._operator.run(
+            operator_output = self._operator.run(
                 # User parameters
                 text=text,
                 number_of_questions=number_of_questions,
@@ -725,26 +660,28 @@ class TheTool:
                 max_validation_retries=max_validation_retries,
                 priority=priority,
                 # Internal parameters
-                prompt_file=
+                prompt_file=prompt_file,
                 output_model=Models.ReasonListStr,
                 mode=None,
             )
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
-
-        except PromptError as e:
-            output.errors.append(f"Prompt error: {e}")
-        except LLMError as e:
-            output.errors.append(f"LLM error: {e}")
-        except ValidationError as e:
-            output.errors.append(f"Validation error: {e}")
-        except TextToolsError as e:
-            output.errors.append(f"TextTools error: {e}")
-        except Exception as e:
-            output.errors.append(f"Unexpected error: {e}")
-
-        return output
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     def summarize(
         self,
@@ -767,28 +704,22 @@ class TheTool:
             with_analysis: Whether to include detailed reasoning analysis
             output_lang: Language for the output summary
             user_prompt: Additional instructions for summarization
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (str): The summary text
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-            output = self._operator.run(
+            operator_output = self._operator.run(
                 # User parameters
                 text=text,
                 with_analysis=with_analysis,
@@ -801,26 +732,28 @@ class TheTool:
                 max_validation_retries=max_validation_retries,
                 priority=priority,
                 # Internal parameters
-                prompt_file=
+                prompt_file=prompt_file,
                 output_model=Models.Str,
                 mode=None,
             )
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
-
-        except PromptError as e:
-            output.errors.append(f"Prompt error: {e}")
-        except LLMError as e:
-            output.errors.append(f"LLM error: {e}")
-        except ValidationError as e:
-            output.errors.append(f"Validation error: {e}")
-        except TextToolsError as e:
-            output.errors.append(f"TextTools error: {e}")
-        except Exception as e:
-            output.errors.append(f"Unexpected error: {e}")
-
-        return output
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     def translate(
         self,
@@ -847,38 +780,29 @@ class TheTool:
             use_chunker: Whether to use text chunker for text length bigger than 1500
             with_analysis: Whether to include detailed reasoning analysis
             user_prompt: Additional instructions for translation
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (str): The translated text
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-            start = datetime.now()
-
             if len(text.split(" ")) > 1500 and use_chunker:
                 chunks = text_to_chunks(text, 1200, 0)
-
                 translation = ""
                 analysis = ""
-
+                logprobs_list = []
 
-                # Run translation for each chunk
                 for chunk in chunks:
-                    chunk_output = self._operator.run(
+                    chunk_operator_output = self._operator.run(
                         # User parameters
                         text=chunk,
                         target_language=target_language,
@@ -891,38 +815,31 @@ class TheTool:
                         max_validation_retries=max_validation_retries,
                         priority=priority,
                         # Internal parameters
-                        prompt_file=
+                        prompt_file=prompt_file,
                         output_model=Models.Str,
                         mode=None,
                         output_lang=None,
                     )
 
-
-                    if chunk_output.errors:
-                        output.errors.extend(chunk_output.errors)
-                        end = datetime.now()
-                        output.execution_time = (end - start).total_seconds()
-                        return output
+                    translation += chunk_operator_output.result + "\n"
 
-                    # Concatenate the outputs
-                    translation += chunk_output.result + "\n"
                     if with_analysis:
-                        analysis +=
+                        analysis += chunk_operator_output.analysis
                     if logprobs:
-
+                        logprobs_list.extend(chunk_operator_output.logprobs)
 
-
-
+                metadata = Models.ToolOutputMetadata(
+                    tool_name=tool_name, execution_time=perf_counter() - start
+                )
+                tool_output = Models.ToolOutput(
                     result=translation,
-                    logprobs=
+                    logprobs=logprobs_list,
                     analysis=analysis,
-
-                    execution_time=(end - start).total_seconds(),
+                    metadata=metadata,
                 )
-                return output
 
             else:
-
+                operator_output = self._operator.run(
                     # User parameters
                     text=text,
                     target_language=target_language,
@@ -935,27 +852,29 @@ class TheTool:
                     max_validation_retries=max_validation_retries,
                     priority=priority,
                     # Internal parameters
-                    prompt_file=
+                    prompt_file=prompt_file,
                     output_model=Models.Str,
                     mode=None,
                     output_lang=None,
                 )
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
-
-        except PromptError as e:
-            output.errors.append(f"Prompt error: {e}")
-        except LLMError as e:
-            output.errors.append(f"LLM error: {e}")
-        except ValidationError as e:
-            output.errors.append(f"Validation error: {e}")
-        except TextToolsError as e:
-            output.errors.append(f"TextTools error: {e}")
-        except Exception as e:
-            output.errors.append(f"Unexpected error: {e}")
-
-        return output
+
+                metadata = Models.ToolOutputMetadata(
+                    tool_name=tool_name, execution_time=perf_counter() - start
+                )
+                tool_output = Models.ToolOutput(
+                    result=operator_output.result,
+                    logprobs=operator_output.logprobs,
+                    analysis=operator_output.analysis,
+                    metadata=metadata,
+                )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     def propositionize(
         self,
@@ -980,28 +899,22 @@ class TheTool:
             with_analysis: Whether to include detailed reasoning analysis
             output_lang: Language for the output summary
             user_prompt: Additional instructions for summarization
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (list[str]): The propositions
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-            output = self._operator.run(
+            operator_output = self._operator.run(
                 # User parameters
                 text=text,
                 with_analysis=with_analysis,
@@ -1014,26 +927,28 @@ class TheTool:
                 max_validation_retries=max_validation_retries,
                 priority=priority,
                 # Internal parameters
-                prompt_file=
+                prompt_file=prompt_file,
                 output_model=Models.ListStr,
                 mode=None,
             )
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
-
-        except PromptError as e:
-            output.errors.append(f"Prompt error: {e}")
-        except LLMError as e:
-            output.errors.append(f"LLM error: {e}")
-        except ValidationError as e:
-            output.errors.append(f"Validation error: {e}")
-        except TextToolsError as e:
-            output.errors.append(f"TextTools error: {e}")
-        except Exception as e:
-            output.errors.append(f"Unexpected error: {e}")
-
-        return output
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     def check_fact(
         self,
@@ -1060,27 +975,22 @@ class TheTool:
             with_analysis: Whether to include detailed reasoning analysis
             output_lang: Language for the output summary
             user_prompt: Additional instructions for summarization
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (bool): statement is relevant to source text or not
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
+
         try:
-
-            output = self._operator.run(
+            operator_output = self._operator.run(
                 # User parameters
                 text=text,
                 with_analysis=with_analysis,
@@ -1093,27 +1003,29 @@ class TheTool:
                 max_validation_retries=max_validation_retries,
                 priority=priority,
                 # Internal parameters
-                prompt_file=
+                prompt_file=prompt_file,
                 output_model=Models.Bool,
                 mode=None,
                 source_text=source_text,
             )
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
-
-        except PromptError as e:
-            output.errors.append(f"Prompt error: {e}")
-        except LLMError as e:
-            output.errors.append(f"LLM error: {e}")
-        except ValidationError as e:
-            output.errors.append(f"Validation error: {e}")
-        except TextToolsError as e:
-            output.errors.append(f"TextTools error: {e}")
-        except Exception as e:
-            output.errors.append(f"Unexpected error: {e}")
-
-        return output
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output
 
     def run_custom(
         self,
@@ -1140,28 +1052,22 @@ class TheTool:
             with_analysis: Whether to include detailed reasoning analysis
             analyze_template: The analyze template used for reasoning analysis
             output_lang: Language for the output summary
-            temperature: Controls randomness
+            temperature: Controls randomness
             logprobs: Whether to return token probability information
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
-            priority: Task execution priority (if enabled by vLLM and model)
+            priority: Task execution priority (if enabled by vLLM and the model)
 
         Returns:
-            ToolOutput
-                - result (str): The translated text
-                - logprobs (list | None): Probability data if logprobs enabled
-                - analysis (str | None): Detailed reasoning if with_analysis enabled
-                - process (str | None): Description of the process used
-                - processed_at (datetime): Timestamp when the processing occurred
-                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
-                - errors (list(str) | None): Errors occured during tool call
+            ToolOutput
         """
-
+        tool_name = sys._getframe().f_code.co_name
+        prompt_file = tool_name + ".yaml"
+        start = perf_counter()
 
         try:
-
-            output = self._operator.run(
+            operator_output = self._operator.run(
                 # User paramaeters
                 text=prompt,
                 output_model=output_model,
@@ -1176,23 +1082,25 @@ class TheTool:
                 max_validation_retries=max_validation_retries,
                 priority=priority,
                 # Internal parameters
-                prompt_file=
+                prompt_file=prompt_file,
                 user_prompt=None,
                 mode=None,
             )
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
-
-        except PromptError as e:
-            output.errors.append(f"Prompt error: {e}")
-        except LLMError as e:
-            output.errors.append(f"LLM error: {e}")
-        except ValidationError as e:
-            output.errors.append(f"Validation error: {e}")
-        except TextToolsError as e:
-            output.errors.append(f"TextTools error: {e}")
-        except Exception as e:
-            output.errors.append(f"Unexpected error: {e}")
-
-        return output
+
+            metadata = Models.ToolOutputMetadata(
+                tool_name=tool_name, execution_time=perf_counter() - start
+            )
+            tool_output = Models.ToolOutput(
+                result=operator_output.result,
+                logprobs=operator_output.logprobs,
+                analysis=operator_output.analysis,
+                metadata=metadata,
+            )
+
+        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+            metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+            tool_output = Models.ToolOutput(
+                errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+            )
+
+        return tool_output