hamtaa-texttools 1.1.20__py3-none-any.whl → 1.1.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. {hamtaa_texttools-1.1.20.dist-info → hamtaa_texttools-1.1.22.dist-info}/METADATA +49 -109
  2. hamtaa_texttools-1.1.22.dist-info/RECORD +32 -0
  3. texttools/__init__.py +3 -3
  4. texttools/batch/batch_config.py +14 -1
  5. texttools/batch/batch_runner.py +2 -2
  6. texttools/internals/async_operator.py +49 -92
  7. texttools/internals/models.py +74 -105
  8. texttools/internals/operator_utils.py +25 -27
  9. texttools/internals/prompt_loader.py +3 -20
  10. texttools/internals/sync_operator.py +49 -92
  11. texttools/prompts/README.md +2 -2
  12. texttools/prompts/categorize.yaml +35 -77
  13. texttools/prompts/check_fact.yaml +2 -2
  14. texttools/prompts/extract_entities.yaml +2 -2
  15. texttools/prompts/extract_keywords.yaml +6 -6
  16. texttools/prompts/is_question.yaml +2 -2
  17. texttools/prompts/merge_questions.yaml +4 -4
  18. texttools/prompts/propositionize.yaml +2 -2
  19. texttools/prompts/rewrite.yaml +6 -6
  20. texttools/prompts/run_custom.yaml +1 -1
  21. texttools/prompts/subject_to_question.yaml +2 -2
  22. texttools/prompts/summarize.yaml +2 -2
  23. texttools/prompts/text_to_question.yaml +2 -2
  24. texttools/prompts/translate.yaml +2 -2
  25. texttools/tools/async_tools.py +393 -487
  26. texttools/tools/sync_tools.py +394 -488
  27. hamtaa_texttools-1.1.20.dist-info/RECORD +0 -33
  28. texttools/batch/internals/utils.py +0 -13
  29. {hamtaa_texttools-1.1.20.dist-info → hamtaa_texttools-1.1.22.dist-info}/WHEEL +0 -0
  30. {hamtaa_texttools-1.1.20.dist-info → hamtaa_texttools-1.1.22.dist-info}/licenses/LICENSE +0 -0
  31. {hamtaa_texttools-1.1.20.dist-info → hamtaa_texttools-1.1.22.dist-info}/top_level.txt +0 -0
  32. /texttools/batch/{internals/batch_manager.py → batch_manager.py} +0 -0
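Editorial note: every public method of TheTool in 1.1.22 follows the same reworked skeleton, visible in each hunk below. The tool name is read from the current frame, the prompt file is derived from that name, timing moves from datetime arithmetic to time.perf_counter(), the results are wrapped in a ToolOutput carrying a ToolOutputMetadata object instead of loose process / processed_at / execution_time fields, and the per-exception-type handlers collapse into a single except clause. A condensed sketch of that shared pattern, with field names taken from the hunks and the operator call abbreviated (some_tool is a placeholder method name, so this is illustrative rather than runnable on its own):

    import sys
    from time import perf_counter

    def some_tool(self, text: str) -> Models.ToolOutput:
        tool_name = sys._getframe().f_code.co_name   # e.g. "summarize"
        prompt_file = tool_name + ".yaml"            # prompt file now derived from the method name
        start = perf_counter()
        try:
            operator_output = self._operator.run(text=text, prompt_file=prompt_file)  # plus the usual parameters
            metadata = Models.ToolOutputMetadata(
                tool_name=tool_name, execution_time=perf_counter() - start
            )
            tool_output = Models.ToolOutput(
                result=operator_output.result,
                analysis=operator_output.analysis,
                logprobs=operator_output.logprobs,
                metadata=metadata,
            )
        except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
            tool_output = Models.ToolOutput(
                errors=[f"{type(e).__name__}: {e}"],
                metadata=Models.ToolOutputMetadata(tool_name=tool_name),
            )
        return tool_output

For callers this likely means reading timing and tool identity from output.metadata rather than output.execution_time / output.process, and matching error strings such as "LLMError: ..." instead of "LLM error: ..." (an assumption drawn from these hunks; texttools/internals/models.py is diffed separately in this release).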
@@ -1,4 +1,5 @@
- from datetime import datetime
+ import sys
+ from time import perf_counter
  from typing import Literal
  from collections.abc import Callable

@@ -37,10 +38,9 @@ class TheTool:
  temperature: float | None = 0.0,
  logprobs: bool = False,
  top_logprobs: int = 3,
- mode: Literal["category_list", "category_tree"] = "category_list",
  validator: Callable[[object], bool] | None = None,
  max_validation_retries: int | None = None,
- priority: int | None = 0,
+ priority: int = 0,
  ) -> Models.ToolOutput:
  """
  Categorize a text into a category / category tree.
@@ -49,62 +49,73 @@ class TheTool:

  Arguments:
  text: The input text to categorize
- categories: The category / category_tree to give to LLM
+ categories: The category list / category tree
  with_analysis: Whether to include detailed reasoning analysis
  user_prompt: Additional instructions for the categorization
- temperature: Controls randomness (0.0 = deterministic, 1.0 = creative)
+ temperature: Controls randomness
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
  max_validation_retries: Maximum number of retry attempts if validation fails
- priority: Task execution priority (if enabled by vLLM and model)
+ priority: Task execution priority (if enabled by vLLM and the model)

  Returns:
- ToolOutput: Object containing:
- - result (str): The assigned category
- - logprobs (list | None): Probability data if logprobs enabled
- - analysis (str | None): Detailed reasoning if with_analysis enabled
- - process (str | None): Description of the process used
- - processed_at (datetime): Timestamp when the processing occurred
- - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
- - errors (list(str) | None): Errors occured during tool call
+ ToolOutput

  """
- output = Models.ToolOutput()
+ tool_name = sys._getframe().f_code.co_name
+ prompt_file = tool_name + ".yaml"
+ start = perf_counter()

  try:
- start = datetime.now()
+ if isinstance(categories, list):
+ operator_output = self._operator.run(
+ # User parameters
+ text=text,
+ category_list=categories,
+ with_analysis=with_analysis,
+ user_prompt=user_prompt,
+ temperature=temperature,
+ logprobs=logprobs,
+ top_logprobs=top_logprobs,
+ validator=validator,
+ max_validation_retries=max_validation_retries,
+ priority=priority,
+ # Internal parameters
+ prompt_file=prompt_file,
+ output_model=Models.create_dynamic_model(categories),
+ mode=None,
+ output_lang=None,
+ )
+
+ metadata = Models.ToolOutputMetadata(
+ tool_name=tool_name, execution_time=perf_counter() - start
+ )
+ tool_output = Models.ToolOutput(
+ result=operator_output.result,
+ analysis=operator_output.analysis,
+ logprobs=operator_output.logprobs,
+ metadata=metadata,
+ )

- if mode == "category_tree":
+ else:
  levels = categories.get_level_count()
- parent_id = 0
+ parent_node = categories.get_node("root")
  final_categories = []
  analysis = ""
- logprobs = []
+ logprobs_list = []

  for _ in range(levels):
- # Get child nodes for current parent
- parent_node = categories.get_node(parent_id)
- children = categories.get_children(parent_node)
-
- # Check if child nodes exist
- if not children:
- output.errors.append(
- f"No categories found for parent_id {parent_id} in the tree"
- )
- end = datetime.now()
- output.execution_time = (end - start).total_seconds()
- return output
-
- # Extract category names and descriptions
+ if not parent_node.children:
+ break
+
  category_list = [
- f"Category Name: {node.name}, Description: {node.description}"
- for node in children
+ f"Category Name: {name}, Description: {node.description}"
+ for name, node in parent_node.children.items()
  ]
- category_names = [node.name for node in children]
+ category_names = list(parent_node.children.keys())

- # Run categorization for current level
- level_output = self._operator.run(
+ level_operator_output = self._operator.run(
  # User parameters
  text=text,
  category_list=category_list,
@@ -113,90 +124,44 @@ class TheTool:
  temperature=temperature,
  logprobs=logprobs,
  top_logprobs=top_logprobs,
- mode=mode,
  validator=validator,
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file="categorize.yaml",
+ prompt_file=prompt_file,
  output_model=Models.create_dynamic_model(category_names),
+ mode=None,
  output_lang=None,
  )

- # Check for errors from operator
- if level_output.errors:
- output.errors.extend(level_output.errors)
- end = datetime.now()
- output.execution_time = (end - start).total_seconds()
- return output
-
- # Get the chosen category
- chosen_category = level_output.result
-
- # Find the corresponding node
+ chosen_category = level_operator_output.result
  parent_node = categories.get_node(chosen_category)
- if parent_node is None:
- output.errors.append(
- f"Category '{chosen_category}' not found in tree after selection"
- )
- end = datetime.now()
- output.execution_time = (end - start).total_seconds()
- return output
-
- parent_id = parent_node.node_id
- final_categories.append(parent_node.name)
+ if not parent_node:
+ break
+ final_categories.append(chosen_category)

  if with_analysis:
- analysis += level_output.analysis
+ analysis += level_operator_output.analysis
  if logprobs:
- logprobs += level_output.logprobs
+ logprobs_list.extend(level_operator_output.logprobs)

- end = datetime.now()
- output = Models.ToolOutput(
+ metadata = Models.ToolOutputMetadata(
+ tool_name=tool_name, execution_time=(perf_counter() - start)
+ )
+ tool_output = Models.ToolOutput(
  result=final_categories,
- logprobs=logprobs,
  analysis=analysis,
- process="categorize",
- execution_time=(end - start).total_seconds(),
+ logprobs=logprobs_list,
+ metadata=metadata,
  )

- return output
+ except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+ metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+ tool_output = Models.ToolOutput(
+ errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+ )

- else:
- output = self._operator.run(
- # User parameters
- text=text,
- category_list=categories,
- with_analysis=with_analysis,
- user_prompt=user_prompt,
- temperature=temperature,
- logprobs=logprobs,
- top_logprobs=top_logprobs,
- mode=mode,
- validator=validator,
- max_validation_retries=max_validation_retries,
- priority=priority,
- # Internal parameters
- prompt_file="categorize.yaml",
- output_model=Models.create_dynamic_model(categories),
- output_lang=None,
- )
- end = datetime.now()
- output.execution_time = (end - start).total_seconds()
- return output
-
- except PromptError as e:
- output.errors.append(f"Prompt error: {e}")
- except LLMError as e:
- output.errors.append(f"LLM error: {e}")
- except ValidationError as e:
- output.errors.append(f"Validation error: {e}")
- except TextToolsError as e:
- output.errors.append(f"TextTools error: {e}")
- except Exception as e:
- output.errors.append(f"Unexpected error: {e}")
-
- return output
+ return tool_output

  def extract_keywords(
  self,
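Editorial note on the categorize() hunks above: the id-based tree walk (get_node(parent_id) / get_children) is replaced by a walk over a name-keyed children mapping starting at get_node("root"), and a missing or childless node now ends the loop instead of returning early with an error. A minimal sketch of the new traversal, with the per-level operator/LLM call stubbed out as a hypothetical pick() callable (walk_category_tree and pick are illustrative names; the tree/node methods are the ones the new lines use):

    def walk_category_tree(categories, pick):
        # pick(names) stands in for the operator call that selects one category per level
        parent_node = categories.get_node("root")
        final_categories = []
        for _ in range(categories.get_level_count()):
            if not parent_node.children:
                break
            chosen = pick(list(parent_node.children.keys()))
            parent_node = categories.get_node(chosen)
            if not parent_node:
                break
            final_categories.append(chosen)
        return final_categories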
@@ -221,28 +186,22 @@ class TheTool:
  with_analysis: Whether to include detailed reasoning analysis
  output_lang: Language for the output response
  user_prompt: Additional instructions for keyword extraction
- temperature: Controls randomness (0.0 = deterministic, 1.0 = creative)
+ temperature: Controls randomness
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
  max_validation_retries: Maximum number of retry attempts if validation fails
- priority: Task execution priority (if enabled by vLLM and model)
+ priority: Task execution priority (if enabled by vLLM and the model)

  Returns:
- ToolOutput: Object containing:
- - result (list[str]): List of extracted keywords
- - logprobs (list | None): Probability data if logprobs enabled
- - analysis (str | None): Detailed reasoning if with_analysis enabled
- - process (str | None): Description of the process used
- - processed_at (datetime): Timestamp when the processing occurred
- - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
- - errors (list(str) | None): Errors occured during tool call
+ ToolOutput
  """
- output = Models.ToolOutput()
+ tool_name = sys._getframe().f_code.co_name
+ prompt_file = tool_name + ".yaml"
+ start = perf_counter()

  try:
- start = datetime.now()
- output = self._operator.run(
+ operator_output = self._operator.run(
  # User parameters
  text=text,
  with_analysis=with_analysis,
@@ -257,25 +216,27 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file="extract_keywords.yaml",
+ prompt_file=prompt_file,
  output_model=Models.ListStr,
  )
- end = datetime.now()
- output.execution_time = (end - start).total_seconds()
- return output
-
- except PromptError as e:
- output.errors.append(f"Prompt error: {e}")
- except LLMError as e:
- output.errors.append(f"LLM error: {e}")
- except ValidationError as e:
- output.errors.append(f"Validation error: {e}")
- except TextToolsError as e:
- output.errors.append(f"TextTools error: {e}")
- except Exception as e:
- output.errors.append(f"Unexpected error: {e}")
-
- return output
+
+ metadata = Models.ToolOutputMetadata(
+ tool_name=tool_name, execution_time=perf_counter() - start
+ )
+ tool_output = Models.ToolOutput(
+ result=operator_output.result,
+ logprobs=operator_output.logprobs,
+ analysis=operator_output.analysis,
+ metadata=metadata,
+ )
+
+ except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+ metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+ tool_output = Models.ToolOutput(
+ errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+ )
+
+ return tool_output

  def extract_entities(
  self,
@@ -300,28 +261,22 @@ class TheTool:
  with_analysis: Whether to include detailed reasoning analysis
  output_lang: Language for the output response
  user_prompt: Additional instructions for entity extraction
- temperature: Controls randomness (0.0 = deterministic, 1.0 = creative)
+ temperature: Controls randomness
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
  max_validation_retries: Maximum number of retry attempts if validation fails
- priority: Task execution priority (if enabled by vLLM and model)
+ priority: Task execution priority (if enabled by vLLM and the model)

  Returns:
- ToolOutput: Object containing:
- - result (list[dict]): List of entities with 'text' and 'type' keys
- - logprobs (list | None): Probability data if logprobs enabled
- - analysis (str | None): Detailed reasoning if with_analysis enabled
- - process (str | None): Description of the process used
- - processed_at (datetime): Timestamp when the processing occurred
- - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
- - errors (list(str) | None): Errors occured during tool call
+ ToolOutput
  """
- output = Models.ToolOutput()
+ tool_name = sys._getframe().f_code.co_name
+ prompt_file = tool_name + ".yaml"
+ start = perf_counter()

  try:
- start = datetime.now()
- output = self._operator.run(
+ operator_output = self._operator.run(
  # User parameters
  text=text,
  entities=entities
@@ -336,26 +291,28 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file="extract_entities.yaml",
+ prompt_file=prompt_file,
  output_model=Models.ListDictStrStr,
  mode=None,
  )
- end = datetime.now()
- output.execution_time = (end - start).total_seconds()
- return output
-
- except PromptError as e:
- output.errors.append(f"Prompt error: {e}")
- except LLMError as e:
- output.errors.append(f"LLM error: {e}")
- except ValidationError as e:
- output.errors.append(f"Validation error: {e}")
- except TextToolsError as e:
- output.errors.append(f"TextTools error: {e}")
- except Exception as e:
- output.errors.append(f"Unexpected error: {e}")
-
- return output
+
+ metadata = Models.ToolOutputMetadata(
+ tool_name=tool_name, execution_time=perf_counter() - start
+ )
+ tool_output = Models.ToolOutput(
+ result=operator_output.result,
+ logprobs=operator_output.logprobs,
+ analysis=operator_output.analysis,
+ metadata=metadata,
+ )
+
+ except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+ metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+ tool_output = Models.ToolOutput(
+ errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+ )
+
+ return tool_output

  def is_question(
  self,
@@ -376,28 +333,22 @@ class TheTool:
  text: The input text to analyze
  with_analysis: Whether to include detailed reasoning analysis
  user_prompt: Additional instructions for question detection
- temperature: Controls randomness (0.0 = deterministic, 1.0 = creative)
+ temperature: Controls randomness
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
  max_validation_retries: Maximum number of retry attempts if validation fails
- priority: Task execution priority (if enabled by vLLM and model)
+ priority: Task execution priority (if enabled by vLLM and the model)

  Returns:
- ToolOutput: Object containing:
- - result (bool): True if text is a question, False otherwise
- - logprobs (list | None): Probability data if logprobs enabled
- - analysis (str | None): Detailed reasoning if with_analysis enabled
- - process (str | None): Description of the process used
- - processed_at (datetime): Timestamp when the processing occurred
- - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
- - errors (list(str) | None): Errors occured during tool call
+ ToolOutput
  """
- output = Models.ToolOutput()
+ tool_name = sys._getframe().f_code.co_name
+ prompt_file = tool_name + ".yaml"
+ start = perf_counter()

  try:
- start = datetime.now()
- output = self._operator.run(
+ operator_output = self._operator.run(
  # User parameters
  text=text,
  with_analysis=with_analysis,
@@ -409,27 +360,29 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file="is_question.yaml",
+ prompt_file=prompt_file,
  output_model=Models.Bool,
  mode=None,
  output_lang=None,
  )
- end = datetime.now()
- output.execution_time = (end - start).total_seconds()
- return output
-
- except PromptError as e:
- output.errors.append(f"Prompt error: {e}")
- except LLMError as e:
- output.errors.append(f"LLM error: {e}")
- except ValidationError as e:
- output.errors.append(f"Validation error: {e}")
- except TextToolsError as e:
- output.errors.append(f"TextTools error: {e}")
- except Exception as e:
- output.errors.append(f"Unexpected error: {e}")
-
- return output
+
+ metadata = Models.ToolOutputMetadata(
+ tool_name=tool_name, execution_time=perf_counter() - start
+ )
+ tool_output = Models.ToolOutput(
+ result=operator_output.result,
+ logprobs=operator_output.logprobs,
+ analysis=operator_output.analysis,
+ metadata=metadata,
+ )
+
+ except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+ metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+ tool_output = Models.ToolOutput(
+ errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+ )
+
+ return tool_output

  def text_to_question(
  self,
@@ -454,28 +407,22 @@ class TheTool:
  with_analysis: Whether to include detailed reasoning analysis
  output_lang: Language for the output question
  user_prompt: Additional instructions for question generation
- temperature: Controls randomness (0.0 = deterministic, 1.0 = creative)
+ temperature: Controls randomness
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
  max_validation_retries: Maximum number of retry attempts if validation fails
- priority: Task execution priority (if enabled by vLLM and model)
+ priority: Task execution priority (if enabled by vLLM and the model)

  Returns:
- ToolOutput: Object containing:
- - result (str): The generated question
- - logprobs (list | None): Probability data if logprobs enabled
- - analysis (str | None): Detailed reasoning if with_analysis enabled
- - process (str | None): Description of the process used
- - processed_at (datetime): Timestamp when the processing occurred
- - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
- - errors (list(str) | None): Errors occured during tool call
+ ToolOutput
  """
- output = Models.ToolOutput()
+ tool_name = sys._getframe().f_code.co_name
+ prompt_file = tool_name + ".yaml"
+ start = perf_counter()

  try:
- start = datetime.now()
- output = self._operator.run(
+ operator_output = self._operator.run(
  # User parameters
  text=text,
  number_of_questions=number_of_questions,
@@ -489,26 +436,28 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file="text_to_question.yaml",
+ prompt_file=prompt_file,
  output_model=Models.ReasonListStr,
  mode=None,
  )
- end = datetime.now()
- output.execution_time = (end - start).total_seconds()
- return output
-
- except PromptError as e:
- output.errors.append(f"Prompt error: {e}")
- except LLMError as e:
- output.errors.append(f"LLM error: {e}")
- except ValidationError as e:
- output.errors.append(f"Validation error: {e}")
- except TextToolsError as e:
- output.errors.append(f"TextTools error: {e}")
- except Exception as e:
- output.errors.append(f"Unexpected error: {e}")
-
- return output
+
+ metadata = Models.ToolOutputMetadata(
+ tool_name=tool_name, execution_time=perf_counter() - start
+ )
+ tool_output = Models.ToolOutput(
+ result=operator_output.result,
+ logprobs=operator_output.logprobs,
+ analysis=operator_output.analysis,
+ metadata=metadata,
+ )
+
+ except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+ metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+ tool_output = Models.ToolOutput(
+ errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+ )
+
+ return tool_output

  def merge_questions(
  self,
@@ -532,30 +481,24 @@ class TheTool:
  with_analysis: Whether to include detailed reasoning analysis
  output_lang: Language for the output merged question
  user_prompt: Additional instructions for question merging
- temperature: Controls randomness (0.0 = deterministic, 1.0 = creative)
+ temperature: Controls randomness
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  mode: Merging strategy - 'default' for direct merge, 'reason' for reasoned merge
  validator: Custom validation function to validate the output
  max_validation_retries: Maximum number of retry attempts if validation fails
- priority: Task execution priority (if enabled by vLLM and model)
+ priority: Task execution priority (if enabled by vLLM and the model)

  Returns:
- ToolOutput: Object containing:
- - result (str): The merged question
- - logprobs (list | None): Probability data if logprobs enabled
- - analysis (str | None): Detailed reasoning if with_analysis enabled
- - process (str | None): Description of the process used
- - processed_at (datetime): Timestamp when the processing occurred
- - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
- - errors (list(str) | None): Errors occured during tool call
+ ToolOutput
  """
- output = Models.ToolOutput()
+ tool_name = sys._getframe().f_code.co_name
+ prompt_file = tool_name + ".yaml"
+ start = perf_counter()

  try:
- start = datetime.now()
  text = ", ".join(text)
- output = self._operator.run(
+ operator_output = self._operator.run(
  # User parameters
  text=text,
  with_analysis=with_analysis,
@@ -568,26 +511,28 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file="merge_questions.yaml",
+ prompt_file=prompt_file,
  output_model=Models.Str,
  mode=mode,
  )
- end = datetime.now()
- output.execution_time = (end - start).total_seconds()
- return output
-
- except PromptError as e:
- output.errors.append(f"Prompt error: {e}")
- except LLMError as e:
- output.errors.append(f"LLM error: {e}")
- except ValidationError as e:
- output.errors.append(f"Validation error: {e}")
- except TextToolsError as e:
- output.errors.append(f"TextTools error: {e}")
- except Exception as e:
- output.errors.append(f"Unexpected error: {e}")
-
- return output
+
+ metadata = Models.ToolOutputMetadata(
+ tool_name=tool_name, execution_time=perf_counter() - start
+ )
+ tool_output = Models.ToolOutput(
+ result=operator_output.result,
+ logprobs=operator_output.logprobs,
+ analysis=operator_output.analysis,
+ metadata=metadata,
+ )
+
+ except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+ metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+ tool_output = Models.ToolOutput(
+ errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+ )
+
+ return tool_output

  def rewrite(
  self,
@@ -611,29 +556,23 @@ class TheTool:
  with_analysis: Whether to include detailed reasoning analysis
  output_lang: Language for the output rewritten text
  user_prompt: Additional instructions for rewriting
- temperature: Controls randomness (0.0 = deterministic, 1.0 = creative)
+ temperature: Controls randomness
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  mode: Rewriting mode - 'positive', 'negative', or 'hard_negative'
  validator: Custom validation function to validate the output
  max_validation_retries: Maximum number of retry attempts if validation fails
- priority: Task execution priority (if enabled by vLLM and model)
+ priority: Task execution priority (if enabled by vLLM and the model)

  Returns:
- ToolOutput: Object containing:
- - result (str): The rewritten text
- - logprobs (list | None): Probability data if logprobs enabled
- - analysis (str | None): Detailed reasoning if with_analysis enabled
- - process (str | None): Description of the process used
- - processed_at (datetime): Timestamp when the processing occurred
- - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
- - errors (list(str) | None): Errors occured during tool call
+ ToolOutput
  """
- output = Models.ToolOutput()
+ tool_name = sys._getframe().f_code.co_name
+ prompt_file = tool_name + ".yaml"
+ start = perf_counter()

  try:
- start = datetime.now()
- output = self._operator.run(
+ operator_output = self._operator.run(
  # User parameters
  text=text,
  with_analysis=with_analysis,
@@ -646,26 +585,28 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file="rewrite.yaml",
+ prompt_file=prompt_file,
  output_model=Models.Str,
  mode=mode,
  )
- end = datetime.now()
- output.execution_time = (end - start).total_seconds()
- return output
-
- except PromptError as e:
- output.errors.append(f"Prompt error: {e}")
- except LLMError as e:
- output.errors.append(f"LLM error: {e}")
- except ValidationError as e:
- output.errors.append(f"Validation error: {e}")
- except TextToolsError as e:
- output.errors.append(f"TextTools error: {e}")
- except Exception as e:
- output.errors.append(f"Unexpected error: {e}")
-
- return output
+
+ metadata = Models.ToolOutputMetadata(
+ tool_name=tool_name, execution_time=perf_counter() - start
+ )
+ tool_output = Models.ToolOutput(
+ result=operator_output.result,
+ logprobs=operator_output.logprobs,
+ analysis=operator_output.analysis,
+ metadata=metadata,
+ )
+
+ except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+ metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+ tool_output = Models.ToolOutput(
+ errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+ )
+
+ return tool_output

  def subject_to_question(
  self,
@@ -690,28 +631,22 @@ class TheTool:
  with_analysis: Whether to include detailed reasoning analysis
  output_lang: Language for the output questions
  user_prompt: Additional instructions for question generation
- temperature: Controls randomness (0.0 = deterministic, 1.0 = creative)
+ temperature: Controls randomness
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
  max_validation_retries: Maximum number of retry attempts if validation fails
- priority: Task execution priority (if enabled by vLLM and model)
+ priority: Task execution priority (if enabled by vLLM and the model)

  Returns:
- ToolOutput: Object containing:
- - result (list[str]): List of generated questions
- - logprobs (list | None): Probability data if logprobs enabled
- - analysis (str | None): Detailed reasoning if with_analysis enabled
- - process (str | None): Description of the process used
- - processed_at (datetime): Timestamp when the processing occurred
- - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
- - errors (list(str) | None): Errors occured during tool call
+ ToolOutput
  """
- output = Models.ToolOutput()
+ tool_name = sys._getframe().f_code.co_name
+ prompt_file = tool_name + ".yaml"
+ start = perf_counter()

  try:
- start = datetime.now()
- output = self._operator.run(
+ operator_output = self._operator.run(
  # User parameters
  text=text,
  number_of_questions=number_of_questions,
@@ -725,26 +660,28 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file="subject_to_question.yaml",
+ prompt_file=prompt_file,
  output_model=Models.ReasonListStr,
  mode=None,
  )
- end = datetime.now()
- output.execution_time = (end - start).total_seconds()
- return output
-
- except PromptError as e:
- output.errors.append(f"Prompt error: {e}")
- except LLMError as e:
- output.errors.append(f"LLM error: {e}")
- except ValidationError as e:
- output.errors.append(f"Validation error: {e}")
- except TextToolsError as e:
- output.errors.append(f"TextTools error: {e}")
- except Exception as e:
- output.errors.append(f"Unexpected error: {e}")
-
- return output
+
+ metadata = Models.ToolOutputMetadata(
+ tool_name=tool_name, execution_time=perf_counter() - start
+ )
+ tool_output = Models.ToolOutput(
+ result=operator_output.result,
+ logprobs=operator_output.logprobs,
+ analysis=operator_output.analysis,
+ metadata=metadata,
+ )
+
+ except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+ metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+ tool_output = Models.ToolOutput(
+ errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+ )
+
+ return tool_output

  def summarize(
  self,
@@ -767,28 +704,22 @@ class TheTool:
  with_analysis: Whether to include detailed reasoning analysis
  output_lang: Language for the output summary
  user_prompt: Additional instructions for summarization
- temperature: Controls randomness (0.0 = deterministic, 1.0 = creative)
+ temperature: Controls randomness
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
  max_validation_retries: Maximum number of retry attempts if validation fails
- priority: Task execution priority (if enabled by vLLM and model)
+ priority: Task execution priority (if enabled by vLLM and the model)

  Returns:
- ToolOutput: Object containing:
- - result (str): The summary text
- - logprobs (list | None): Probability data if logprobs enabled
- - analysis (str | None): Detailed reasoning if with_analysis enabled
- - process (str | None): Description of the process used
- - processed_at (datetime): Timestamp when the processing occurred
- - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
- - errors (list(str) | None): Errors occured during tool call
+ ToolOutput
  """
- output = Models.ToolOutput()
+ tool_name = sys._getframe().f_code.co_name
+ prompt_file = tool_name + ".yaml"
+ start = perf_counter()

  try:
- start = datetime.now()
- output = self._operator.run(
+ operator_output = self._operator.run(
  # User parameters
  text=text,
  with_analysis=with_analysis,
@@ -801,26 +732,28 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file="summarize.yaml",
+ prompt_file=prompt_file,
  output_model=Models.Str,
  mode=None,
  )
- end = datetime.now()
- output.execution_time = (end - start).total_seconds()
- return output
-
- except PromptError as e:
- output.errors.append(f"Prompt error: {e}")
- except LLMError as e:
- output.errors.append(f"LLM error: {e}")
- except ValidationError as e:
- output.errors.append(f"Validation error: {e}")
- except TextToolsError as e:
- output.errors.append(f"TextTools error: {e}")
- except Exception as e:
- output.errors.append(f"Unexpected error: {e}")
-
- return output
+
+ metadata = Models.ToolOutputMetadata(
+ tool_name=tool_name, execution_time=perf_counter() - start
+ )
+ tool_output = Models.ToolOutput(
+ result=operator_output.result,
+ logprobs=operator_output.logprobs,
+ analysis=operator_output.analysis,
+ metadata=metadata,
+ )
+
+ except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+ metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+ tool_output = Models.ToolOutput(
+ errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+ )
+
+ return tool_output

  def translate(
  self,
@@ -847,38 +780,29 @@ class TheTool:
  use_chunker: Whether to use text chunker for text length bigger than 1500
  with_analysis: Whether to include detailed reasoning analysis
  user_prompt: Additional instructions for translation
- temperature: Controls randomness (0.0 = deterministic, 1.0 = creative)
+ temperature: Controls randomness
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
  max_validation_retries: Maximum number of retry attempts if validation fails
- priority: Task execution priority (if enabled by vLLM and model)
+ priority: Task execution priority (if enabled by vLLM and the model)

  Returns:
- ToolOutput: Object containing:
- - result (str): The translated text
- - logprobs (list | None): Probability data if logprobs enabled
- - analysis (str | None): Detailed reasoning if with_analysis enabled
- - process (str | None): Description of the process used
- - processed_at (datetime): Timestamp when the processing occurred
- - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
- - errors (list(str) | None): Errors occured during tool call
+ ToolOutput
  """
- output = Models.ToolOutput()
+ tool_name = sys._getframe().f_code.co_name
+ prompt_file = tool_name + ".yaml"
+ start = perf_counter()

  try:
- start = datetime.now()
-
  if len(text.split(" ")) > 1500 and use_chunker:
  chunks = text_to_chunks(text, 1200, 0)
-
  translation = ""
  analysis = ""
- logprobs = []
+ logprobs_list = []

- # Run translation for each chunk
  for chunk in chunks:
- chunk_output = self._operator.run(
+ chunk_operator_output = self._operator.run(
  # User parameters
  text=chunk,
  target_language=target_language,
@@ -891,38 +815,31 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file="translate.yaml",
+ prompt_file=prompt_file,
  output_model=Models.Str,
  mode=None,
  output_lang=None,
  )

- # Check for errors from operator
- if chunk_output.errors:
- output.errors.extend(chunk_output.errors)
- end = datetime.now()
- output.execution_time = (end - start).total_seconds()
- return output
+ translation += chunk_operator_output.result + "\n"

- # Concatenate the outputs
- translation += chunk_output.result + "\n"
  if with_analysis:
- analysis += chunk_output.analysis
+ analysis += chunk_operator_output.analysis
  if logprobs:
- logprobs += chunk_output.logprobs
+ logprobs_list.extend(chunk_operator_output.logprobs)

- end = datetime.now()
- output = Models.ToolOutput(
+ metadata = Models.ToolOutputMetadata(
+ tool_name=tool_name, execution_time=perf_counter() - start
+ )
+ tool_output = Models.ToolOutput(
  result=translation,
- logprobs=logprobs,
+ logprobs=logprobs_list,
  analysis=analysis,
- process="translate",
- execution_time=(end - start).total_seconds(),
+ metadata=metadata,
  )
- return output

  else:
- output = self._operator.run(
+ operator_output = self._operator.run(
  # User parameters
  text=text,
  target_language=target_language,
@@ -935,27 +852,29 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file="translate.yaml",
+ prompt_file=prompt_file,
  output_model=Models.Str,
  mode=None,
  output_lang=None,
  )
- end = datetime.now()
- output.execution_time = (end - start).total_seconds()
- return output
-
- except PromptError as e:
- output.errors.append(f"Prompt error: {e}")
- except LLMError as e:
- output.errors.append(f"LLM error: {e}")
- except ValidationError as e:
- output.errors.append(f"Validation error: {e}")
- except TextToolsError as e:
- output.errors.append(f"TextTools error: {e}")
- except Exception as e:
- output.errors.append(f"Unexpected error: {e}")
-
- return output
+
+ metadata = Models.ToolOutputMetadata(
+ tool_name=tool_name, execution_time=perf_counter() - start
+ )
+ tool_output = Models.ToolOutput(
+ result=operator_output.result,
+ logprobs=operator_output.logprobs,
+ analysis=operator_output.analysis,
+ metadata=metadata,
+ )
+
+ except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+ metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+ tool_output = Models.ToolOutput(
+ errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+ )
+
+ return tool_output

  def propositionize(
  self,
@@ -980,28 +899,22 @@ class TheTool:
  with_analysis: Whether to include detailed reasoning analysis
  output_lang: Language for the output summary
  user_prompt: Additional instructions for summarization
- temperature: Controls randomness (0.0 = deterministic, 1.0 = creative)
+ temperature: Controls randomness
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
  max_validation_retries: Maximum number of retry attempts if validation fails
- priority: Task execution priority (if enabled by vLLM and model)
+ priority: Task execution priority (if enabled by vLLM and the model)

  Returns:
- ToolOutput: Object containing:
- - result (list[str]): The propositions
- - logprobs (list | None): Probability data if logprobs enabled
- - analysis (str | None): Detailed reasoning if with_analysis enabled
- - process (str | None): Description of the process used
- - processed_at (datetime): Timestamp when the processing occurred
- - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
- - errors (list(str) | None): Errors occured during tool call
+ ToolOutput
  """
- output = Models.ToolOutput()
+ tool_name = sys._getframe().f_code.co_name
+ prompt_file = tool_name + ".yaml"
+ start = perf_counter()

  try:
- start = datetime.now()
- output = self._operator.run(
+ operator_output = self._operator.run(
  # User parameters
  text=text,
  with_analysis=with_analysis,
@@ -1014,26 +927,28 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file="propositionize.yaml",
+ prompt_file=prompt_file,
  output_model=Models.ListStr,
  mode=None,
  )
- end = datetime.now()
- output.execution_time = (end - start).total_seconds()
- return output
-
- except PromptError as e:
- output.errors.append(f"Prompt error: {e}")
- except LLMError as e:
- output.errors.append(f"LLM error: {e}")
- except ValidationError as e:
- output.errors.append(f"Validation error: {e}")
- except TextToolsError as e:
- output.errors.append(f"TextTools error: {e}")
- except Exception as e:
- output.errors.append(f"Unexpected error: {e}")
-
- return output
+
+ metadata = Models.ToolOutputMetadata(
+ tool_name=tool_name, execution_time=perf_counter() - start
+ )
+ tool_output = Models.ToolOutput(
+ result=operator_output.result,
+ logprobs=operator_output.logprobs,
+ analysis=operator_output.analysis,
+ metadata=metadata,
+ )
+
+ except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+ metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+ tool_output = Models.ToolOutput(
+ errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+ )
+
+ return tool_output

  def check_fact(
  self,
@@ -1060,27 +975,22 @@ class TheTool:
  with_analysis: Whether to include detailed reasoning analysis
  output_lang: Language for the output summary
  user_prompt: Additional instructions for summarization
- temperature: Controls randomness (0.0 = deterministic, 1.0 = creative)
+ temperature: Controls randomness
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
  max_validation_retries: Maximum number of retry attempts if validation fails
- priority: Task execution priority (if enabled by vLLM and model)
+ priority: Task execution priority (if enabled by vLLM and the model)

  Returns:
- ToolOutput: Object containing:
- - result (bool): statement is relevant to source text or not
- - logprobs (list | None): Probability data if logprobs enabled
- - analysis (str | None): Detailed reasoning if with_analysis enabled
- - process (str | None): Description of the process used
- - processed_at (datetime): Timestamp when the processing occurred
- - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
- - errors (list(str) | None): Errors occured during tool call
+ ToolOutput
  """
- output = Models.ToolOutput()
+ tool_name = sys._getframe().f_code.co_name
+ prompt_file = tool_name + ".yaml"
+ start = perf_counter()
+
  try:
- start = datetime.now()
- output = self._operator.run(
+ operator_output = self._operator.run(
  # User parameters
  text=text,
  with_analysis=with_analysis,
@@ -1093,27 +1003,29 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file="check_fact.yaml",
+ prompt_file=prompt_file,
  output_model=Models.Bool,
  mode=None,
  source_text=source_text,
  )
- end = datetime.now()
- output.execution_time = (end - start).total_seconds()
- return output
-
- except PromptError as e:
- output.errors.append(f"Prompt error: {e}")
- except LLMError as e:
- output.errors.append(f"LLM error: {e}")
- except ValidationError as e:
- output.errors.append(f"Validation error: {e}")
- except TextToolsError as e:
- output.errors.append(f"TextTools error: {e}")
- except Exception as e:
- output.errors.append(f"Unexpected error: {e}")
-
- return output
+
+ metadata = Models.ToolOutputMetadata(
+ tool_name=tool_name, execution_time=perf_counter() - start
+ )
+ tool_output = Models.ToolOutput(
+ result=operator_output.result,
+ logprobs=operator_output.logprobs,
+ analysis=operator_output.analysis,
+ metadata=metadata,
+ )
+
+ except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+ metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+ tool_output = Models.ToolOutput(
+ errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+ )
+
+ return tool_output

  def run_custom(
  self,
@@ -1132,36 +1044,28 @@ class TheTool:
  """
  Custom tool that can do almost anything!

- Important Note: This tool is EXPERIMENTAL, you can use it but it isn't reliable.
-
  Arguments:
  prompt: The user prompt
  output_model: Pydantic BaseModel used for structured output
  with_analysis: Whether to include detailed reasoning analysis
  analyze_template: The analyze template used for reasoning analysis
  output_lang: Language for the output summary
- temperature: Controls randomness (0.0 = deterministic, 1.0 = creative)
+ temperature: Controls randomness
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
  max_validation_retries: Maximum number of retry attempts if validation fails
- priority: Task execution priority (if enabled by vLLM and model)
+ priority: Task execution priority (if enabled by vLLM and the model)

  Returns:
- ToolOutput: Object containing:
- - result (str): The translated text
- - logprobs (list | None): Probability data if logprobs enabled
- - analysis (str | None): Detailed reasoning if with_analysis enabled
- - process (str | None): Description of the process used
- - processed_at (datetime): Timestamp when the processing occurred
- - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
- - errors (list(str) | None): Errors occured during tool call
+ ToolOutput
  """
- output = Models.ToolOutput()
+ tool_name = sys._getframe().f_code.co_name
+ prompt_file = tool_name + ".yaml"
+ start = perf_counter()

  try:
- start = datetime.now()
- output = self._operator.run(
+ operator_output = self._operator.run(
  # User paramaeters
  text=prompt,
  output_model=output_model,
@@ -1176,23 +1080,25 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file="run_custom.yaml",
+ prompt_file=prompt_file,
  user_prompt=None,
  mode=None,
  )
- end = datetime.now()
- output.execution_time = (end - start).total_seconds()
- return output
-
- except PromptError as e:
- output.errors.append(f"Prompt error: {e}")
- except LLMError as e:
- output.errors.append(f"LLM error: {e}")
- except ValidationError as e:
- output.errors.append(f"Validation error: {e}")
- except TextToolsError as e:
- output.errors.append(f"TextTools error: {e}")
- except Exception as e:
- output.errors.append(f"Unexpected error: {e}")
-
- return output
+
+ metadata = Models.ToolOutputMetadata(
+ tool_name=tool_name, execution_time=perf_counter() - start
+ )
+ tool_output = Models.ToolOutput(
+ result=operator_output.result,
+ logprobs=operator_output.logprobs,
+ analysis=operator_output.analysis,
+ metadata=metadata,
+ )
+
+ except (PromptError, LLMError, ValidationError, TextToolsError, Exception) as e:
+ metadata = Models.ToolOutputMetadata(tool_name=tool_name)
+ tool_output = Models.ToolOutput(
+ errors=[f"{type(e).__name__}: {e}"], metadata=metadata
+ )
+
+ return tool_output
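Editorial note: translate() keeps its chunking behaviour across this change. Inputs longer than 1500 whitespace-separated words are split with text_to_chunks(text, 1200, 0), each chunk is translated on its own, and the pieces are joined with newlines while analysis text and logprobs are accumulated alongside; what changed is that per-chunk operator errors no longer short-circuit the loop, since failures now surface through the single except clause. A self-contained sketch of that aggregation, with the operator call replaced by a hypothetical translate_chunk callable and text_to_chunks approximated by plain word slices:

    from collections.abc import Callable

    def translate_long(text: str, translate_chunk: Callable[[str], str]) -> str:
        words = text.split(" ")
        if len(words) <= 1500:
            # Short inputs skip the chunker, as in the package code.
            return translate_chunk(text)
        # The package uses text_to_chunks(text, 1200, 0); word slices stand in for it here.
        chunks = [" ".join(words[i:i + 1200]) for i in range(0, len(words), 1200)]
        return "".join(translate_chunk(chunk) + "\n" for chunk in chunks)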