hamtaa-texttools 1.1.16__py3-none-any.whl → 1.1.18__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
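At a glance, 1.1.18 moves the internal modules from `texttools.tools.internals` to `texttools.internals`, wraps every `AsyncTheTool` method in a try/except over the new exception hierarchy (`PromptError`, `LLMError`, `ValidationError`, `TextToolsError`) so failures are reported through `ToolOutput.errors` rather than raised, renames the category-tree lookups `find_node`/`find_children` to `get_node`/`get_children`, and adds a new `propositionize` method. A minimal sketch of the new calling convention follows; the client settings, model name, and top-level import path are illustrative assumptions, not taken from this diff:

```python
# Hedged usage sketch for 1.1.18 -- the base_url, api_key, model name, and
# `from texttools import AsyncTheTool` import path are assumptions.
import asyncio

from openai import AsyncOpenAI
from texttools import AsyncTheTool  # assumed public import path


async def main() -> None:
    client = AsyncOpenAI(base_url="http://localhost:8000/v1", api_key="unused")
    tool = AsyncTheTool(client, model="model-name")

    # As of 1.1.18, tool methods no longer raise texttools exceptions;
    # failures are appended to output.errors and a ToolOutput is returned.
    output = await tool.extract_keywords("Release notes for hamtaa-texttools ...")
    if output.errors:
        print("tool call failed:", output.errors)
    else:
        print(output.result, f"({output.execution_time:.2f}s)")


asyncio.run(main())
```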
@@ -4,21 +4,20 @@ from collections.abc import Callable
 
 from openai import AsyncOpenAI
 
-from texttools.tools.internals.async_operator import AsyncOperator
-import texttools.tools.internals.models as Models
+from texttools.internals.async_operator import AsyncOperator
+import texttools.internals.models as Models
+from texttools.internals.exceptions import (
+    TextToolsError,
+    PromptError,
+    LLMError,
+    ValidationError,
+)
 
 
 class AsyncTheTool:
     """
-    Async counterpart to TheTool.
-
-    Each method configures the async operator with a specific YAML prompt,
+    Each method configures the operator with a specific YAML prompt,
     output schema, and flags, then delegates execution to `operator.run()`.
-
-    Usage:
-        async_client = AsyncOpenAI(...)
-        tool = TheToolAsync(async_client, model="model-name")
-        result = await tool.categorize("text ...", with_analysis=True)
     """
 
     def __init__(
@@ -62,43 +61,103 @@ class AsyncTheTool:
                 - result (str): The assigned category
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
+
         """
-        start = datetime.now()
-
-        if mode == "category_tree":
-            # Initializations
-            output = Models.ToolOutput()
-            levels = categories.get_level_count()
-            parent_id = 0
-            final_output = []
-
-            for _ in range(levels):
-                # Get child nodes for current parent
-                parent_node = categories.find_node(parent_id)
-                children = categories.find_children(parent_node)
-
-                # Check if child nodes exist
-                if not children:
-                    output.errors.append(
-                        f"No categories found for parent_id {parent_id} in the tree"
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+
+            if mode == "category_tree":
+                # Initializations
+                output = Models.ToolOutput()
+                levels = categories.get_level_count()
+                parent_id = 0
+                final_output = []
+
+                for _ in range(levels):
+                    # Get child nodes for current parent
+                    parent_node = categories.get_node(parent_id)
+                    children = categories.get_children(parent_node)
+
+                    # Check if child nodes exist
+                    if not children:
+                        output.errors.append(
+                            f"No categories found for parent_id {parent_id} in the tree"
+                        )
+                        end = datetime.now()
+                        output.execution_time = (end - start).total_seconds()
+                        return output
+
+                    # Extract category names and descriptions
+                    category_list = [
+                        f"Category Name: {node.name}, Description: {node.description}"
+                        for node in children
+                    ]
+                    category_names = [node.name for node in children]
+
+                    # Run categorization for this level
+                    level_output = await self._operator.run(
+                        # User parameters
+                        text=text,
+                        category_list=category_list,
+                        with_analysis=with_analysis,
+                        user_prompt=user_prompt,
+                        temperature=temperature,
+                        logprobs=logprobs,
+                        top_logprobs=top_logprobs,
+                        mode=mode,
+                        validator=validator,
+                        max_validation_retries=max_validation_retries,
+                        priority=priority,
+                        # Internal parameters
+                        prompt_file="categorize.yaml",
+                        output_model=Models.create_dynamic_model(category_names),
+                        output_lang=None,
                     )
-                    end = datetime.now()
-                    output.execution_time = (end - start).total_seconds()
-                    return output
-
-                # Extract category names and descriptions
-                category_list = [
-                    f"Category Name: {node.name}, Description: {node.description}"
-                    for node in children
-                ]
-                category_names = [node.name for node in children]
-
-                # Run categorization for this level
-                level_output = await self._operator.run(
+
+                    # Check for errors from operator
+                    if level_output.errors:
+                        output.errors.extend(level_output.errors)
+                        end = datetime.now()
+                        output.execution_time = (end - start).total_seconds()
+                        return output
+
+                    # Get the chosen category
+                    chosen_category = level_output.result
+
+                    # Find the corresponding node
+                    parent_node = categories.get_node(chosen_category)
+                    if parent_node is None:
+                        output.errors.append(
+                            f"Category '{chosen_category}' not found in tree after selection"
+                        )
+                        end = datetime.now()
+                        output.execution_time = (end - start).total_seconds()
+                        return output
+
+                    parent_id = parent_node.node_id
+                    final_output.append(parent_node.name)
+
+                    # Copy analysis/logprobs/process from the last level's output
+                    output.analysis = level_output.analysis
+                    output.logprobs = level_output.logprobs
+                    output.process = level_output.process
+
+                output.result = final_output
+                end = datetime.now()
+                output.execution_time = (end - start).total_seconds()
+                return output
+
+            else:
+                output = await self._operator.run(
                     # User parameters
                     text=text,
-                    category_list=category_list,
+                    category_list=categories,
                     with_analysis=with_analysis,
                     user_prompt=user_prompt,
                     temperature=temperature,
@@ -107,66 +166,28 @@ class AsyncTheTool:
                     mode=mode,
                     validator=validator,
                     max_validation_retries=max_validation_retries,
+                    priority=priority,
                     # Internal parameters
                     prompt_file="categorize.yaml",
-                    output_model=Models.create_dynamic_model(category_names),
+                    output_model=Models.create_dynamic_model(categories),
                     output_lang=None,
                 )
+                end = datetime.now()
+                output.execution_time = (end - start).total_seconds()
+                return output
 
-                # Check for errors from operator
-                if level_output.errors:
-                    output.errors.extend(level_output.errors)
-                    end = datetime.now()
-                    output.execution_time = (end - start).total_seconds()
-                    return output
-
-                # Get the chosen category
-                chosen_category = level_output.result
-
-                # Find the corresponding node
-                parent_node = categories.find_node(chosen_category)
-                if parent_node is None:
-                    output.errors.append(
-                        f"Category '{chosen_category}' not found in tree after selection"
-                    )
-                    end = datetime.now()
-                    output.execution_time = (end - start).total_seconds()
-                    return output
-
-                parent_id = parent_node.node_id
-                final_output.append(parent_node.name)
-
-                # Copy analysis/logprobs/process from the last level's output
-                output.analysis = level_output.analysis
-                output.logprobs = level_output.logprobs
-                output.process = level_output.process
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
 
-            output.result = final_output
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
-
-        else:
-            output = await self._operator.run(
-                # User parameters
-                text=text,
-                category_list=categories,
-                with_analysis=with_analysis,
-                user_prompt=user_prompt,
-                temperature=temperature,
-                logprobs=logprobs,
-                top_logprobs=top_logprobs,
-                mode=mode,
-                validator=validator,
-                max_validation_retries=max_validation_retries,
-                # Internal parameters
-                prompt_file="categorize.yaml",
-                output_model=Models.create_dynamic_model(categories),
-                output_lang=None,
-            )
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
+        return output
 
     async def extract_keywords(
         self,
@@ -203,29 +224,48 @@ class AsyncTheTool:
                 - result (list[str]): List of extracted keywords
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User parameters
-            text=text,
-            with_analysis=with_analysis,
-            output_lang=output_lang,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            mode=mode,
-            number_of_keywords=number_of_keywords,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="extract_keywords.yaml",
-            output_model=Models.ListStrOutput,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                mode=mode,
+                number_of_keywords=number_of_keywords,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="extract_keywords.yaml",
+                output_model=Models.ListStrOutput,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     async def extract_entities(
@@ -261,28 +301,47 @@ class AsyncTheTool:
                 - result (list[dict]): List of entities with 'text' and 'type' keys
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User parameters
-            text=text,
-            with_analysis=with_analysis,
-            output_lang=output_lang,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="extract_entities.yaml",
-            output_model=Models.ListDictStrStrOutput,
-            mode=None,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="extract_entities.yaml",
+                output_model=Models.ListDictStrStrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     async def is_question(
@@ -316,28 +375,47 @@ class AsyncTheTool:
                 - result (bool): True if text is a question, False otherwise
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User parameters
-            text=text,
-            with_analysis=with_analysis,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="is_question.yaml",
-            output_model=Models.BoolOutput,
-            mode=None,
-            output_lang=None,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="is_question.yaml",
+                output_model=Models.BoolOutput,
+                mode=None,
+                output_lang=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     async def text_to_question(
@@ -373,28 +451,47 @@ class AsyncTheTool:
                 - result (str): The generated question
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User parameters
-            text=text,
-            with_analysis=with_analysis,
-            output_lang=output_lang,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="text_to_question.yaml",
-            output_model=Models.StrOutput,
-            mode=None,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="text_to_question.yaml",
+                output_model=Models.StrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     async def merge_questions(
@@ -432,29 +529,48 @@ class AsyncTheTool:
                 - result (str): The merged question
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-        start = datetime.now()
-        text_combined = ", ".join(text)
-        output = await self._operator.run(
-            # User parameters
-            text=text_combined,
-            with_analysis=with_analysis,
-            output_lang=output_lang,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="merge_questions.yaml",
-            output_model=Models.StrOutput,
-            mode=mode,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            text = ", ".join(text)
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="merge_questions.yaml",
+                output_model=Models.StrOutput,
+                mode=mode,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     async def rewrite(
@@ -492,28 +608,47 @@ class AsyncTheTool:
                 - result (str): The rewritten text
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User parameters
-            text=text,
-            with_analysis=with_analysis,
-            output_lang=output_lang,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="rewrite.yaml",
-            output_model=Models.StrOutput,
-            mode=mode,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="rewrite.yaml",
+                output_model=Models.StrOutput,
+                mode=mode,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     async def subject_to_question(
@@ -551,29 +686,48 @@ class AsyncTheTool:
                 - result (list[str]): List of generated questions
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User parameters
-            text=text,
-            number_of_questions=number_of_questions,
-            with_analysis=with_analysis,
-            output_lang=output_lang,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="subject_to_question.yaml",
-            output_model=Models.ReasonListStrOutput,
-            mode=None,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                number_of_questions=number_of_questions,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="subject_to_question.yaml",
+                output_model=Models.ReasonListStrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     async def summarize(
@@ -609,28 +763,47 @@ class AsyncTheTool:
                 - result (str): The summary text
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User parameters
-            text=text,
-            with_analysis=with_analysis,
-            output_lang=output_lang,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="summarize.yaml",
-            output_model=Models.StrOutput,
-            mode=None,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="summarize.yaml",
+                output_model=Models.StrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     async def translate(
@@ -666,29 +839,48 @@ class AsyncTheTool:
                 - result (str): The translated text
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User parameters
-            text=text,
-            target_language=target_language,
-            with_analysis=with_analysis,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="translate.yaml",
-            output_model=Models.StrOutput,
-            mode=None,
-            output_lang=None,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                target_language=target_language,
+                with_analysis=with_analysis,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="translate.yaml",
+                output_model=Models.StrOutput,
+                mode=None,
+                output_lang=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     async def detect_entity(
@@ -724,28 +916,123 @@ class AsyncTheTool:
                 - result (list[Entity]): The entities
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
+                - errors (list(str) | None): Errors occured during tool call
+        """
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="detect_entity.yaml",
+                output_model=Models.EntityDetectorOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
+        return output
+
+    async def propositionize(
+        self,
+        text: str,
+        with_analysis: bool = False,
+        output_lang: str | None = None,
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
+        top_logprobs: int | None = None,
+        validator: Callable[[Any], bool] | None = None,
+        max_validation_retries: int | None = None,
+        priority: int | None = 0,
+    ) -> Models.ToolOutput:
+        """
+        Proposition input text to meaningful sentences.
+
+        Arguments:
+            text: The input text
+            with_analysis: Whether to include detailed reasoning analysis
+            output_lang: Language for the output summary
+            user_prompt: Additional instructions for summarization
+            temperature: Controls randomness (0.0 = deterministic, 1.0 = creative)
+            logprobs: Whether to return token probability information
+            top_logprobs: Number of top token alternatives to return if logprobs enabled
+            validator: Custom validation function to validate the output
+            max_validation_retries: Maximum number of retry attempts if validation fails
+            priority: Task execution priority (if enabled by vLLM and model)
+
+        Returns:
+            ToolOutput: Object containing:
+                - result (list[str]): The propositions
+                - logprobs (list | None): Probability data if logprobs enabled
+                - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User parameters
-            text=text,
-            with_analysis=with_analysis,
-            output_lang=output_lang,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="detect_entity.yaml",
-            output_model=Models.EntityDetectorOutput,
-            mode=None,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="propositionize.yaml",
+                output_model=Models.ListStrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     async def run_custom(
@@ -778,27 +1065,46 @@ class AsyncTheTool:
                 - result (str): The translated text
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - process (str | None): Description of the process used
+                - processed_at (datetime): Timestamp when the processing occurred
+                - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
                 - errors (list(str) | None): Errors occured during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User paramaeters
-            text=prompt,
-            output_model=output_model,
-            output_model_str=output_model.model_json_schema(),
-            output_lang=output_lang,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="run_custom.yaml",
-            user_prompt=None,
-            with_analysis=False,
-            mode=None,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User paramaeters
+                text=prompt,
+                output_model=output_model,
+                output_model_str=output_model.model_json_schema(),
+                output_lang=output_lang,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="run_custom.yaml",
+                user_prompt=None,
+                with_analysis=False,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
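The new `propositionize` method added in this release follows the same convention; a minimal call might look like this, reusing the `tool` instance from the sketch above (the sample text is illustrative):

```python
# Sketch: split a passage into standalone propositions (added in 1.1.18).
output = await tool.propositionize(
    "Marie Curie won two Nobel Prizes. She discovered polonium and radium.",
    output_lang="en",  # optional output language, per the docstring
)
# result is documented as list[str]; failures land in output.errors
# instead of raising.
print(output.errors if output.errors else output.result)
```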